Bug Summary

File: dev/pci/qle.c
Warning: line 2001, column 2
Value stored to 'rv' is never read
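
For reference, a minimal self-contained sketch of the kind of dead store the deadcode.DeadStores checker reports (illustrative only: do_mailbox_cmd is a hypothetical stand-in, and this is not the statement at qle.c line 2001, which falls outside the excerpt below):

#include <stdio.h>

/* hypothetical stand-in for a hardware mailbox call */
static int
do_mailbox_cmd(void)
{
	return 0;
}

int
main(void)
{
	int rv;

	rv = do_mailbox_cmd();	/* "Value stored to 'rv' is never read" */
	rv = 0;			/* the first store is dead: it is overwritten before any read */
	printf("%d\n", rv);
	return 0;
}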

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name qle.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/qle.c
1/* $OpenBSD: qle.c,v 1.61 2020/09/22 19:32:53 krw Exp $ */
2
3/*
4 * Copyright (c) 2013, 2014 Jonathan Matthew <jmatthew@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "bio.h"
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/atomic.h>
24#include <sys/malloc.h>
25#include <sys/device.h>
26#include <sys/sensors.h>
27#include <sys/rwlock.h>
28#include <sys/task.h>
29#include <sys/timeout.h>
30
31#include <machine/bus.h>
32
33#include <dev/pci/pcireg.h>
34#include <dev/pci/pcivar.h>
35#include <dev/pci/pcidevs.h>
36
37#ifdef __sparc64__
38#include <dev/ofw/openfirm.h>
39#endif
40
41#include <scsi/scsi_all.h>
42#include <scsi/scsiconf.h>
43
44#include <dev/pci/qlereg.h>
45
46#ifdef QLE_DEBUG
47#define DPRINTF(m, f...) do { if ((qledebug & (m)) == (m)) printf(f); } \
48 while (0)
49#define QLE_D_MBOX 0x01
50#define QLE_D_INTR 0x02
51#define QLE_D_PORT 0x04
52#define QLE_D_IO 0x08
53#define QLE_D_IOCB 0x10
54int qledebug = QLE_D_PORT;
55#else
56#define DPRINTF(m, f...)
57#endif
58
59#ifndef QLE_NOFIRMWARE
60#include <dev/microcode/isp/asm_2400.h>
61#include <dev/microcode/isp/asm_2500.h>
62#endif
63
64#define QLE_PCI_MEM_BAR		0x14
65#define QLE_PCI_IO_BAR		0x10
66
67
68#define QLE_DEFAULT_PORT_NAME	0x400000007F000003ULL /* from isp(4) */
69
70#define QLE_WAIT_FOR_LOOP	10	/* seconds */
71#define QLE_LOOP_SETTLE		200	/* ms */
72
73/* rounded up range of assignable handles */
74#define QLE_MAX_TARGETS		2048
75
76/* maximum number of segments allowed for in a single io */
77#define QLE_MAX_SEGS		32
78
79enum qle_isp_gen {
80 QLE_GEN_ISP24XX = 1,
81 QLE_GEN_ISP25XX
82};
83
84enum qle_isp_type {
85 QLE_ISP2422 = 1,
86 QLE_ISP2432,
87 QLE_ISP2512,
88 QLE_ISP2522,
89 QLE_ISP2532
90};
91
92/* port database things */
93#define QLE_SCRATCH_SIZE	0x1000
94
95enum qle_port_disp {
96 QLE_PORT_DISP_NEW,
97 QLE_PORT_DISP_GONE,
98 QLE_PORT_DISP_SAME,
99 QLE_PORT_DISP_CHANGED,
100 QLE_PORT_DISP_MOVED,
101 QLE_PORT_DISP_DUP
102};
103
104#define QLE_LOCATION_LOOP	(1 << 24)
105#define QLE_LOCATION_FABRIC	(2 << 24)
106#define QLE_LOCATION_LOOP_ID(l)	(l | QLE_LOCATION_LOOP)
107#define QLE_LOCATION_PORT_ID(p)	(p | QLE_LOCATION_FABRIC)
108
109struct qle_fc_port {
110	TAILQ_ENTRY(qle_fc_port) ports;
111	TAILQ_ENTRY(qle_fc_port) update;
112
113 u_int64_t node_name;
114 u_int64_t port_name;
115 u_int32_t location; /* port id or loop id */
116
117 int flags;
118#define QLE_PORT_FLAG_IS_TARGET		1
119#define QLE_PORT_FLAG_NEEDS_LOGIN	2
120
121 u_int32_t portid;
122 u_int16_t loopid;
123};
124
125
126/* request/response queue stuff */
127#define QLE_QUEUE_ENTRY_SIZE	64
128
129struct qle_ccb {
130 struct qle_softc *ccb_sc;
131 int ccb_id;
132 struct scsi_xfer *ccb_xs;
133
134 bus_dmamap_t ccb_dmamap;
135
136 struct qle_iocb_seg *ccb_segs;
137 u_int64_t ccb_seg_offset;
138
139	SIMPLEQ_ENTRY(qle_ccb) ccb_link;
140};
141
142SIMPLEQ_HEAD(qle_ccb_list, qle_ccb);
143
144struct qle_dmamem {
145 bus_dmamap_t qdm_map;
146 bus_dma_segment_t qdm_seg;
147 size_t qdm_size;
148 caddr_t qdm_kva;
149};
150#define QLE_DMA_MAP(_qdm)	((_qdm)->qdm_map)
151#define QLE_DMA_LEN(_qdm)	((_qdm)->qdm_size)
152#define QLE_DMA_DVA(_qdm)	((u_int64_t)(_qdm)->qdm_map->dm_segs[0].ds_addr)
153#define QLE_DMA_KVA(_qdm)	((void *)(_qdm)->qdm_kva)
154
155struct qle_softc {
156 struct device sc_dev;
157
158 pci_chipset_tag_t sc_pc;
159 pcitag_t sc_tag;
160
161 void *sc_ih;
162 bus_space_tag_t sc_iot;
163 bus_space_handle_t sc_ioh;
164 bus_size_t sc_ios;
165 bus_dma_tag_t sc_dmat;
166
167 struct scsibus_softc *sc_scsibus;
168
169 enum qle_isp_type sc_isp_type;
170 enum qle_isp_gen sc_isp_gen;
171 int sc_port;
172
173 bus_space_handle_t sc_mbox_ioh;
174	u_int16_t sc_mbox[QLE_MBOX_COUNT];
175 int sc_mbox_pending;
176 struct mutex sc_mbox_mtx;
177
178 int sc_loop_up;
179 int sc_topology;
180 int sc_loop_id;
181 int sc_port_id;
182 int sc_loop_max_id;
183 u_int64_t sc_sns_port_name;
184
185 struct mutex sc_port_mtx;
186	TAILQ_HEAD(, qle_fc_port) sc_ports;
187	TAILQ_HEAD(, qle_fc_port) sc_ports_new;
188	TAILQ_HEAD(, qle_fc_port) sc_ports_gone;
189	TAILQ_HEAD(, qle_fc_port) sc_ports_found;
190	struct qle_fc_port *sc_targets[QLE_MAX_TARGETS];
191
192 struct taskq *sc_update_taskq;
193 struct task sc_update_task;
194 struct timeout sc_update_timeout;
195 int sc_update;
196 int sc_update_tasks;
197#define QLE_UPDATE_TASK_CLEAR_ALL	0x00000001
198#define QLE_UPDATE_TASK_SOFTRESET	0x00000002
199#define QLE_UPDATE_TASK_UPDATE_TOPO	0x00000004
200#define QLE_UPDATE_TASK_GET_PORT_LIST	0x00000008
201#define QLE_UPDATE_TASK_PORT_LIST	0x00000010
202#define QLE_UPDATE_TASK_SCAN_FABRIC	0x00000020
203#define QLE_UPDATE_TASK_SCANNING_FABRIC	0x00000040
204#define QLE_UPDATE_TASK_FABRIC_LOGIN	0x00000080
205#define QLE_UPDATE_TASK_FABRIC_RELOGIN	0x00000100
206#define QLE_UPDATE_TASK_DETACH_TARGET	0x00000200
207#define QLE_UPDATE_TASK_ATTACH_TARGET	0x00000400
208
209 int sc_maxcmds;
210 struct qle_dmamem *sc_requests;
211 struct qle_dmamem *sc_responses;
212 struct qle_dmamem *sc_segments;
213 struct qle_dmamem *sc_pri_requests;
214 struct qle_dmamem *sc_scratch;
215 struct qle_dmamem *sc_fcp_cmnds;
216 struct qle_ccb *sc_ccbs;
217 struct qle_ccb_list sc_ccb_free;
218 struct mutex sc_ccb_mtx;
219 struct mutex sc_queue_mtx;
220 struct scsi_iopool sc_iopool;
221 u_int32_t sc_next_req_id;
222 u_int32_t sc_last_resp_id;
223 int sc_marker_required;
224 int sc_fabric_pending;
225	u_int8_t sc_fabric_response[QLE_QUEUE_ENTRY_SIZE];
226
227 struct qle_nvram sc_nvram;
228 int sc_nvram_valid;
229};
230#define DEVNAME(_sc)	((_sc)->sc_dev.dv_xname)
231
232int qle_intr(void *);
233
234int qle_match(struct device *, void *, void *);
235void qle_attach(struct device *, struct device *, void *);
236int qle_detach(struct device *, int);
237
238struct cfattach qle_ca = {
239 sizeof(struct qle_softc),
240 qle_match,
241 qle_attach,
242 qle_detach
243};
244
245struct cfdriver qle_cd = {
246 NULL((void *)0),
247 "qle",
248 DV_DULL
249};
250
251void qle_scsi_cmd(struct scsi_xfer *);
252int qle_scsi_probe(struct scsi_link *);
253
254
255struct scsi_adapter qle_switch = {
256 qle_scsi_cmd, NULL((void *)0), qle_scsi_probe, NULL((void *)0), NULL((void *)0)
257};
258
259u_int32_t qle_read(struct qle_softc *, int);
260void qle_write(struct qle_softc *, int, u_int32_t);
261void qle_host_cmd(struct qle_softc *sc, u_int32_t);
262
263int qle_mbox(struct qle_softc *, int);
264int qle_ct_pass_through(struct qle_softc *sc,
265 u_int32_t port_handle, struct qle_dmamem *mem,
266 size_t req_size, size_t resp_size);
267void qle_mbox_putaddr(u_int16_t *, struct qle_dmamem *);
268u_int16_t qle_read_mbox(struct qle_softc *, int);
269void qle_write_mbox(struct qle_softc *, int, u_int16_t);
270
271void qle_handle_intr(struct qle_softc *, u_int16_t, u_int16_t);
272void qle_set_ints(struct qle_softc *, int);
273int qle_read_isr(struct qle_softc *, u_int16_t *, u_int16_t *);
274void qle_clear_isr(struct qle_softc *, u_int16_t);
275
276void qle_put_marker(struct qle_softc *, void *);
277void qle_put_cmd(struct qle_softc *, void *, struct scsi_xfer *,
278 struct qle_ccb *, u_int32_t);
279struct qle_ccb *qle_handle_resp(struct qle_softc *, u_int32_t);
280void qle_sge(struct qle_iocb_seg *, u_int64_t, u_int32_t);
281
282struct qle_fc_port *qle_next_fabric_port(struct qle_softc *, u_int32_t *,
283 u_int32_t *);
284int qle_get_port_db(struct qle_softc *, u_int16_t,
285 struct qle_dmamem *);
286int qle_get_port_name_list(struct qle_softc *sc, u_int32_t);
287int qle_add_loop_port(struct qle_softc *, struct qle_fc_port *);
288int qle_add_fabric_port(struct qle_softc *, struct qle_fc_port *);
289int qle_add_logged_in_port(struct qle_softc *, u_int16_t,
290 u_int32_t);
291int qle_classify_port(struct qle_softc *, u_int32_t, u_int64_t,
292 u_int64_t, struct qle_fc_port **);
293int qle_get_loop_id(struct qle_softc *sc, int);
294void qle_clear_port_lists(struct qle_softc *);
295int qle_softreset(struct qle_softc *);
296void qle_update_topology(struct qle_softc *);
297int qle_update_fabric(struct qle_softc *);
298int qle_fabric_plogx(struct qle_softc *, struct qle_fc_port *, int,
299 u_int32_t *);
300int qle_fabric_plogi(struct qle_softc *, struct qle_fc_port *);
301void qle_fabric_plogo(struct qle_softc *, struct qle_fc_port *);
302
303void qle_update_start(struct qle_softc *, int);
304void qle_update_defer(struct qle_softc *, int);
305void qle_update_cancel(struct qle_softc *);
306void qle_update_done(struct qle_softc *, int);
307void qle_do_update(void *);
308void qle_deferred_update(void *);
309int qle_async(struct qle_softc *, u_int16_t);
310
311int qle_load_fwchunk(struct qle_softc *,
312 struct qle_dmamem *, const u_int32_t *);
313u_int32_t qle_read_ram_word(struct qle_softc *, u_int32_t);
314int qle_verify_firmware(struct qle_softc *, u_int32_t);
315int qle_load_firmware_chunks(struct qle_softc *, const u_int32_t *);
316int qle_read_nvram(struct qle_softc *);
317
318struct qle_dmamem *qle_dmamem_alloc(struct qle_softc *, size_t);
319void qle_dmamem_free(struct qle_softc *, struct qle_dmamem *);
320
321int qle_alloc_ccbs(struct qle_softc *);
322void qle_free_ccbs(struct qle_softc *);
323void *qle_get_ccb(void *);
324void qle_put_ccb(void *, void *);
325
326void qle_dump_stuff(struct qle_softc *, void *, int);
327void qle_dump_iocb(struct qle_softc *, void *);
328void qle_dump_iocb_segs(struct qle_softc *, void *, int);
329
330static const struct pci_matchid qle_devices[] = {
331	{ PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2422 },
332	{ PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2432 },
333	{ PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2512 },
334	{ PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2522 },
335	{ PCI_VENDOR_QLOGIC, PCI_PRODUCT_QLOGIC_ISP2532 },
336};
337
338int
339qle_match(struct device *parent, void *match, void *aux)
340{
341	return (pci_matchbyid(aux, qle_devices, nitems(qle_devices)));
342}
343
344void
345qle_attach(struct device *parent, struct device *self, void *aux)
346{
347 struct qle_softc *sc = (void *)self;
348 struct pci_attach_args *pa = aux;
349 pci_intr_handle_t ih;
350 const char *intrstr;
351 u_int32_t pcictl;
352 struct scsibus_attach_args saa;
353 struct qle_init_cb *icb;
354 bus_size_t mbox_base;
355 u_int32_t firmware_addr;
356#ifndef QLE_NOFIRMWARE
357 const u_int32_t *firmware = NULL((void *)0);
358#endif
359
360	pcireg_t bars[] = { QLE_PCI_MEM_BAR, QLE_PCI_IO_BAR };
361 pcireg_t memtype;
362 int r, i, rv, loop_up;
363
364 sc->sc_pc = pa->pa_pc;
365 sc->sc_tag = pa->pa_tag;
366 sc->sc_ih = NULL((void *)0);
367 sc->sc_dmat = pa->pa_dmat;
368 sc->sc_ios = 0;
369
370	for (r = 0; r < nitems(bars); r++) {
371 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, bars[r]);
372 if (pci_mapreg_map(pa, bars[r], memtype, 0,
373 &sc->sc_iot, &sc->sc_ioh, NULL((void *)0), &sc->sc_ios, 0) == 0)
374 break;
375
376 sc->sc_ios = 0;
377 }
378 if (sc->sc_ios == 0) {
379 printf(": unable to map registers\n");
380 return;
381 }
382
383 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
384 printf(": unable to map interrupt\n");
385 goto unmap;
386 }
387 intrstr = pci_intr_string(sc->sc_pc, ih);
388	sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO,
389	    qle_intr, sc, DEVNAME(sc));
390 if (sc->sc_ih == NULL((void *)0)) {
391 printf(": unable to establish interrupt");
392 if (intrstr != NULL((void *)0))
393 printf(" at %s", intrstr);
394 printf("\n");
395 goto deintr;
396 }
397
398 printf(": %s\n", intrstr);
399
400	pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
401	pcictl |= PCI_COMMAND_INVALIDATE_ENABLE |
402	    PCI_COMMAND_PARITY_ENABLE | PCI_COMMAND_SERR_ENABLE;
403	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, pcictl);
404
405	pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
406	pcictl &= ~(PCI_LATTIMER_MASK << PCI_LATTIMER_SHIFT);
407	pcictl &= ~(PCI_CACHELINE_MASK << PCI_CACHELINE_SHIFT);
408	pcictl |= (0x80 << PCI_LATTIMER_SHIFT);
409	pcictl |= (0x10 << PCI_CACHELINE_SHIFT);
410	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG, pcictl);
411
412	pcictl = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ROM_REG);
413	pcictl &= ~1;
414	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_ROM_REG, pcictl);
415
416	switch (PCI_PRODUCT(pa->pa_id)) {
417	case PCI_PRODUCT_QLOGIC_ISP2422:
418		sc->sc_isp_type = QLE_ISP2422;
419		sc->sc_isp_gen = QLE_GEN_ISP24XX;
420		break;
421	case PCI_PRODUCT_QLOGIC_ISP2432:
422		sc->sc_isp_type = QLE_ISP2432;
423		sc->sc_isp_gen = QLE_GEN_ISP24XX;
424		break;
425	case PCI_PRODUCT_QLOGIC_ISP2512:
426		sc->sc_isp_type = QLE_ISP2512;
427		sc->sc_isp_gen = QLE_GEN_ISP25XX;
428		break;
429	case PCI_PRODUCT_QLOGIC_ISP2522:
430		sc->sc_isp_type = QLE_ISP2522;
431		sc->sc_isp_gen = QLE_GEN_ISP25XX;
432		break;
433	case PCI_PRODUCT_QLOGIC_ISP2532:
434		sc->sc_isp_type = QLE_ISP2532;
435		sc->sc_isp_gen = QLE_GEN_ISP25XX;
436		break;
437
438	default:
439		printf("unknown pci id %x", pa->pa_id);
440		goto deintr;
441	}
442
443 /* these are the same for 24xx and 25xx but may vary later */
444	mbox_base = QLE_MBOX_BASE_24XX;
445	firmware_addr = QLE_2400_CODE_ORG;
446
447 if (bus_space_subregion(sc->sc_iot, sc->sc_ioh, mbox_base,
448 sizeof(sc->sc_mbox), &sc->sc_mbox_ioh) != 0) {
449 printf("%s: unable to map mbox registers\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
450 goto deintr;
451 }
452
453 sc->sc_port = pa->pa_function;
454
455	TAILQ_INIT(&sc->sc_ports);
456	TAILQ_INIT(&sc->sc_ports_new);
457	TAILQ_INIT(&sc->sc_ports_gone);
458	TAILQ_INIT(&sc->sc_ports_found);
459
460 /* after reset, mbox regs 1 and 2 contain the string "ISP " */
461 if (qle_read_mbox(sc, 1) != 0x4953 ||
462 qle_read_mbox(sc, 2) != 0x5020) {
463 /* try releasing the risc processor */
464 qle_host_cmd(sc, QLE_HOST_CMD_RELEASE0x4);
465 }
466
467 qle_host_cmd(sc, QLE_HOST_CMD_PAUSE0x3);
468 if (qle_softreset(sc) != 0) {
469 printf("softreset failed\n");
470 goto deintr;
471 }
472
473 if (qle_read_nvram(sc) == 0)
474 sc->sc_nvram_valid = 1;
475
476#ifdef QLE_NOFIRMWARE
477 if (qle_verify_firmware(sc, firmware_addr)) {
478 printf("%s: no firmware loaded\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
479 goto deintr;
480 }
481#else
482 switch (sc->sc_isp_gen) {
483 case QLE_GEN_ISP24XX:
484 firmware = isp_2400_risc_code;
485 break;
486 case QLE_GEN_ISP25XX:
487 firmware = isp_2500_risc_code;
488 break;
489 default:
490 printf("%s: no firmware to load?\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
491 goto deintr;
492 }
493 if (qle_load_firmware_chunks(sc, firmware)) {
494 printf("%s: firmware load failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
495 goto deintr;
496 }
497#endif
498
499 /* execute firmware */
500 sc->sc_mbox[0] = QLE_MBOX_EXEC_FIRMWARE0x0002;
501 sc->sc_mbox[1] = firmware_addr >> 16;
502 sc->sc_mbox[2] = firmware_addr & 0xffff;
503#ifdef QLE_NOFIRMWARE
504 sc->sc_mbox[3] = 1;
505#else
506 sc->sc_mbox[3] = 0;
507#endif
508 sc->sc_mbox[4] = 0;
509 if (qle_mbox(sc, 0x001f)) {
510 printf("ISP couldn't exec firmware: %x\n", sc->sc_mbox[0]);
511 goto deintr;
512 }
513
514 delay(250000)(*delay_func)(250000); /* from isp(4) */
515
516 sc->sc_mbox[0] = QLE_MBOX_ABOUT_FIRMWARE0x0008;
517 if (qle_mbox(sc, 0x0001)) {
518 printf("ISP not talking after firmware exec: %x\n",
519 sc->sc_mbox[0]);
520 goto deintr;
521 }
522 printf("%s: firmware rev %d.%d.%d, attrs 0x%x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
523 sc->sc_mbox[1], sc->sc_mbox[2], sc->sc_mbox[3], sc->sc_mbox[6]);
524
525 sc->sc_maxcmds = 4096;
526
527 /* reserve queue slots for markers and fabric ops */
528 sc->sc_maxcmds -= 2;
529
530 if (qle_alloc_ccbs(sc)) {
531 /* error already printed */
532 goto deintr;
533 }
534	sc->sc_scratch = qle_dmamem_alloc(sc, QLE_SCRATCH_SIZE);
535	if (sc->sc_scratch == NULL) {
536		printf("%s: unable to allocate scratch\n", DEVNAME(sc));
537 goto free_ccbs;
538 }
539
540 /* build init buffer thing */
541	icb = (struct qle_init_cb *)QLE_DMA_KVA(sc->sc_scratch);
542	memset(icb, 0, sizeof(*icb));
543	icb->icb_version = QLE_ICB_VERSION;
544 if (sc->sc_nvram_valid) {
545 icb->icb_max_frame_len = sc->sc_nvram.frame_payload_size;
546 icb->icb_exec_throttle = sc->sc_nvram.execution_throttle;
547 icb->icb_hardaddr = sc->sc_nvram.hard_address;
548 icb->icb_portname = sc->sc_nvram.port_name;
549 icb->icb_nodename = sc->sc_nvram.node_name;
550 icb->icb_login_retry = sc->sc_nvram.login_retry;
551 icb->icb_login_timeout = sc->sc_nvram.login_timeout;
552 icb->icb_fwoptions1 = sc->sc_nvram.fwoptions1;
553 icb->icb_fwoptions2 = sc->sc_nvram.fwoptions2;
554 icb->icb_fwoptions3 = sc->sc_nvram.fwoptions3;
555 } else {
556 /* defaults copied from isp(4) */
557		htolem16(&icb->icb_max_frame_len, 1024);
558		htolem16(&icb->icb_exec_throttle, 16);
559		icb->icb_portname = htobe64(QLE_DEFAULT_PORT_NAME);
560		icb->icb_nodename = 0;
561		icb->icb_login_retry = 3;
562
563		htolem32(&icb->icb_fwoptions1, QLE_ICB_FW1_FAIRNESS |
564		    QLE_ICB_FW1_HARD_ADDR | QLE_ICB_FW1_FULL_DUPLEX);
565		htolem32(&icb->icb_fwoptions2, QLE_ICB_FW2_LOOP_PTP);
566		htolem32(&icb->icb_fwoptions3, QLE_ICB_FW3_FCP_RSP_24_0 |
567		    QLE_ICB_FW3_AUTONEG);
568 }
569
570 icb->icb_exchange_count = 0;
571
572 icb->icb_req_out = 0;
573 icb->icb_resp_in = 0;
574 icb->icb_pri_req_out = 0;
575	htolem16(&icb->icb_req_queue_len, sc->sc_maxcmds);
576	htolem16(&icb->icb_resp_queue_len, sc->sc_maxcmds);
577	htolem16(&icb->icb_pri_req_queue_len, 8); /* apparently the minimum */
578	htolem32(&icb->icb_req_queue_addr_lo,
579	    QLE_DMA_DVA(sc->sc_requests));
580	htolem32(&icb->icb_req_queue_addr_hi,
581	    QLE_DMA_DVA(sc->sc_requests) >> 32);
582	htolem32(&icb->icb_resp_queue_addr_lo,
583	    QLE_DMA_DVA(sc->sc_responses));
584	htolem32(&icb->icb_resp_queue_addr_hi,
585	    QLE_DMA_DVA(sc->sc_responses) >> 32);
586	htolem32(&icb->icb_pri_req_queue_addr_lo,
587	    QLE_DMA_DVA(sc->sc_pri_requests));
588	htolem32(&icb->icb_pri_req_queue_addr_hi,
589	    QLE_DMA_DVA(sc->sc_pri_requests) >> 32);
590
591	htolem16(&icb->icb_link_down_nos, 200);
592 icb->icb_int_delay = 0;
593 icb->icb_login_timeout = 0;
594
595 sc->sc_mbox[0] = QLE_MBOX_INIT_FIRMWARE0x0060;
596 sc->sc_mbox[4] = 0;
597 sc->sc_mbox[5] = 0;
598 qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
599	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
600	    sizeof(*icb), BUS_DMASYNC_PREWRITE);
601	rv = qle_mbox(sc, 0x00fd);
602	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
603	    sizeof(*icb), BUS_DMASYNC_POSTWRITE);
604
605 if (rv != 0) {
606 printf("%s: ISP firmware init failed: %x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
607 sc->sc_mbox[0]);
608 goto free_scratch;
609 }
610
611 /* enable some more notifications */
612	sc->sc_mbox[0] = QLE_MBOX_SET_FIRMWARE_OPTIONS;
613	sc->sc_mbox[1] = QLE_FW_OPTION1_ASYNC_LIP_F8 |
614	    QLE_FW_OPTION1_ASYNC_LIP_RESET |
615	    QLE_FW_OPTION1_ASYNC_LIP_ERROR |
616	    QLE_FW_OPTION1_ASYNC_LOGIN_RJT;
617 sc->sc_mbox[2] = 0;
618 sc->sc_mbox[3] = 0;
619 if (qle_mbox(sc, 0x000f)) {
620 printf("%s: setting firmware options failed: %x\n",
621 DEVNAME(sc)((sc)->sc_dev.dv_xname), sc->sc_mbox[0]);
622 goto free_scratch;
623 }
624
625	sc->sc_update_taskq = taskq_create(DEVNAME(sc), 1, IPL_BIO, 0);
626 task_set(&sc->sc_update_task, qle_do_update, sc);
627 timeout_set(&sc->sc_update_timeout, qle_deferred_update, sc);
628
629 /* wait a bit for link to come up so we can scan and attach devices */
630	for (i = 0; i < QLE_WAIT_FOR_LOOP * 1000; i++) {
631 u_int16_t isr, info;
632
633 if (sc->sc_loop_up) {
634			if (++loop_up == QLE_LOOP_SETTLE)
635 break;
636 } else
637 loop_up = 0;
638
639 delay(1000)(*delay_func)(1000);
640
641 if (qle_read_isr(sc, &isr, &info) == 0)
642 continue;
643
644 qle_handle_intr(sc, isr, info);
645
646 }
647
648 if (sc->sc_loop_up) {
649 qle_do_update(sc);
650 } else {
651 DPRINTF(QLE_D_PORT, "%s: loop still down, giving up\n",
652 DEVNAME(sc));
653 }
654
655 saa.saa_adapter = &qle_switch;
656 saa.saa_adapter_softc = sc;
657	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
658	saa.saa_adapter_buswidth = QLE_MAX_TARGETS;
659 saa.saa_luns = 8;
660 saa.saa_openings = sc->sc_maxcmds;
661 saa.saa_pool = &sc->sc_iopool;
662 if (sc->sc_nvram_valid) {
663		saa.saa_wwpn = betoh64(sc->sc_nvram.port_name);
664		saa.saa_wwnn = betoh64(sc->sc_nvram.node_name);
665	} else {
666		saa.saa_wwpn = QLE_DEFAULT_PORT_NAME;
667		saa.saa_wwnn = 0;
668	}
669 if (saa.saa_wwnn == 0) {
670 /*
671 * mask out the port number from the port name to get
672 * the node name.
673 */
674 saa.saa_wwnn = saa.saa_wwpn;
675 saa.saa_wwnn &= ~(0xfULL << 56);
676 }
677 saa.saa_quirks = saa.saa_flags = 0;
678
679	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
680	    &saa, scsiprint);
681
682 return;
683
684free_scratch:
685 qle_dmamem_free(sc, sc->sc_scratch);
686free_ccbs:
687 qle_free_ccbs(sc);
688deintr:
689 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
690 sc->sc_ih = NULL((void *)0);
691unmap:
692 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
693 sc->sc_ios = 0;
694}
695
696int
697qle_detach(struct device *self, int flags)
698{
699 struct qle_softc *sc = (struct qle_softc *)self;
700
701 if (sc->sc_ih == NULL((void *)0)) {
702 /* we didnt attach properly, so nothing to detach */
703 return (0);
704 }
705
706 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
707 sc->sc_ih = NULL((void *)0);
708
709 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
710 sc->sc_ios = 0;
711
712 return (0);
713}
714
715int
716qle_classify_port(struct qle_softc *sc, u_int32_t location,
717 u_int64_t port_name, u_int64_t node_name, struct qle_fc_port **prev)
718{
719 struct qle_fc_port *port, *locmatch, *wwnmatch;
720 locmatch = NULL((void *)0);
721 wwnmatch = NULL((void *)0);
722
723 /* make sure we don't try to add a port or location twice */
724	TAILQ_FOREACH(port, &sc->sc_ports_new, update) {
725 if ((port->port_name == port_name &&
726 port->node_name == node_name) ||
727 port->location == location) {
728 *prev = port;
729 return (QLE_PORT_DISP_DUP);
730 }
731 }
732
733 /* if we're attaching, everything is new */
734 if (sc->sc_scsibus == NULL((void *)0)) {
735 *prev = NULL((void *)0);
736 return (QLE_PORT_DISP_NEW);
737 }
738
739	TAILQ_FOREACH(port, &sc->sc_ports, ports) {
740 if (port->location == location)
741 locmatch = port;
742
743 if (port->port_name == port_name &&
744 port->node_name == node_name)
745 wwnmatch = port;
746 }
747
748 if (locmatch == NULL((void *)0) && wwnmatch == NULL((void *)0)) {
749 *prev = NULL((void *)0);
750 return (QLE_PORT_DISP_NEW);
751 } else if (locmatch == wwnmatch) {
752 *prev = locmatch;
753 return (QLE_PORT_DISP_SAME);
754 } else if (wwnmatch != NULL((void *)0)) {
755 *prev = wwnmatch;
756 return (QLE_PORT_DISP_MOVED);
757 } else {
758 *prev = locmatch;
759 return (QLE_PORT_DISP_CHANGED);
760 }
761}
762
763int
764qle_get_loop_id(struct qle_softc *sc, int start)
765{
766 int i, last;
767
768	i = QLE_MIN_HANDLE;
769	last = QLE_MAX_HANDLE;
770 if (i < start)
771 i = start;
772
773 for (; i <= last; i++) {
774 if (sc->sc_targets[i] == NULL((void *)0))
775 return (i);
776 }
777
778 return (-1);
779}
780
781int
782qle_get_port_db(struct qle_softc *sc, u_int16_t loopid, struct qle_dmamem *mem)
783{
784	sc->sc_mbox[0] = QLE_MBOX_GET_PORT_DB;
785	sc->sc_mbox[1] = loopid;
786	qle_mbox_putaddr(sc->sc_mbox, mem);
787	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0,
788	    sizeof(struct qle_get_port_db), BUS_DMASYNC_PREREAD);
789 if (qle_mbox(sc, 0x00cf)) {
790 DPRINTF(QLE_D_PORT, "%s: get port db for %d failed: %x\n",
791 DEVNAME(sc), loopid, sc->sc_mbox[0]);
792 return (1);
793 }
794
795	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0,
796	    sizeof(struct qle_get_port_db), BUS_DMASYNC_POSTREAD);
797 return (0);
798}
799
800int
801qle_get_port_name_list(struct qle_softc *sc, u_int32_t match)
802{
803 struct qle_port_name_list *l;
804 struct qle_fc_port *port;
805 int i;
806
807	sc->sc_mbox[0] = QLE_MBOX_GET_PORT_NAME_LIST;
808	sc->sc_mbox[1] = 0;
809	sc->sc_mbox[8] = QLE_DMA_LEN(sc->sc_scratch);
810	sc->sc_mbox[9] = 0;
811	qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
812	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
813	    QLE_DMA_LEN(sc->sc_scratch), BUS_DMASYNC_PREREAD);
814 if (qle_mbox(sc, 0x03cf)) {
815 DPRINTF(QLE_D_PORT, "%s: get port name list failed: %x\n",
816 DEVNAME(sc), sc->sc_mbox[0]);
817 return (1);
818 }
819	bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
820	    sc->sc_mbox[1], BUS_DMASYNC_POSTREAD);
821
822 i = 0;
823 l = QLE_DMA_KVA(sc->sc_scratch)((void *)(sc->sc_scratch)->qdm_kva);
824 mtx_enter(&sc->sc_port_mtx);
825 while (i * sizeof(*l) < sc->sc_mbox[1]) {
826 u_int16_t loopid;
827 u_int32_t loc;
828
829		loopid = lemtoh16(&l[i].loopid) & 0xfff;
830 /* skip special ports */
831 switch (loopid) {
832		case QLE_F_PORT_HANDLE:
833		case QLE_SNS_HANDLE:
834		case QLE_FABRIC_CTRL_HANDLE:
835		case QLE_IP_BCAST_HANDLE:
836 loc = 0;
837 break;
838 default:
839 if (loopid <= sc->sc_loop_max_id) {
840 loc = QLE_LOCATION_LOOP_ID(loopid)(loopid | (1 << 24));
841 } else {
842 /*
843 * we don't have the port id here, so just
844 * indicate it's a fabric port.
845 */
846 loc = QLE_LOCATION_FABRIC(2 << 24);
847 }
848 break;
849 }
850
851 if (match & loc) {
852			port = malloc(sizeof(*port), M_DEVBUF, M_ZERO |
853			    M_NOWAIT);
854			if (port == NULL) {
855				printf("%s: failed to allocate port struct\n",
856				    DEVNAME(sc));
857 break;
858 }
859 port->location = loc;
860 port->loopid = loopid;
861 port->port_name = letoh64(l[i].port_name)((__uint64_t)(l[i].port_name));
862 DPRINTF(QLE_D_PORT, "%s: loop id %d, port name %llx\n",
863 DEVNAME(sc), port->loopid, port->port_name);
864			TAILQ_INSERT_TAIL(&sc->sc_ports_found, port, update);
865 }
866 i++;
867 }
868 mtx_leave(&sc->sc_port_mtx);
869
870 return (0);
871}
872
873int
874qle_add_loop_port(struct qle_softc *sc, struct qle_fc_port *port)
875{
876 struct qle_get_port_db *pdb;
877 struct qle_fc_port *pport;
878 int disp;
879
880 if (qle_get_port_db(sc, port->loopid, sc->sc_scratch) != 0) {
881 return (1);
882 }
883 pdb = QLE_DMA_KVA(sc->sc_scratch)((void *)(sc->sc_scratch)->qdm_kva);
884
885	if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE)
886		port->flags |= QLE_PORT_FLAG_IS_TARGET;
887
888	port->port_name = betoh64(pdb->port_name);
889	port->node_name = betoh64(pdb->node_name);
890 port->portid = (pdb->port_id[0] << 16) | (pdb->port_id[1] << 8) |
891 pdb->port_id[2];
892
893 mtx_enter(&sc->sc_port_mtx);
894 disp = qle_classify_port(sc, port->location, port->port_name,
895 port->node_name, &pport);
896 switch (disp) {
897 case QLE_PORT_DISP_CHANGED:
898 case QLE_PORT_DISP_MOVED:
899 case QLE_PORT_DISP_NEW:
900		TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update);
901 sc->sc_targets[port->loopid] = port;
902 break;
903	case QLE_PORT_DISP_DUP:
904		free(port, M_DEVBUF, sizeof *port);
905		break;
906	case QLE_PORT_DISP_SAME:
907		TAILQ_REMOVE(&sc->sc_ports_gone, pport, update);
908		free(port, M_DEVBUF, sizeof *port);
909		break;
910 }
911 mtx_leave(&sc->sc_port_mtx);
912
913 switch (disp) {
914 case QLE_PORT_DISP_CHANGED:
915 case QLE_PORT_DISP_MOVED:
916 case QLE_PORT_DISP_NEW:
917 DPRINTF(QLE_D_PORT, "%s: %s %d; name %llx\n",
918 DEVNAME(sc), ISSET(port->flags, QLE_PORT_FLAG_IS_TARGET) ?
919 "target" : "non-target", port->loopid,
920 betoh64(pdb->port_name));
921 break;
922 default:
923 break;
924 }
925 return (0);
926}
927
928int
929qle_add_fabric_port(struct qle_softc *sc, struct qle_fc_port *port)
930{
931 struct qle_get_port_db *pdb;
932
933 if (qle_get_port_db(sc, port->loopid, sc->sc_scratch) != 0) {
934 free(port, M_DEVBUF2, sizeof *port);
935 return (1);
936 }
937 pdb = QLE_DMA_KVA(sc->sc_scratch)((void *)(sc->sc_scratch)->qdm_kva);
938
939	if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE)
940		port->flags |= QLE_PORT_FLAG_IS_TARGET;
941
942 /*
943 * if we only know about this port because qle_get_port_name_list
944 * returned it, we don't have its port id or node name, so fill
945 * those in and update its location.
946 */
947	if (port->location == QLE_LOCATION_FABRIC) {
948		port->node_name = betoh64(pdb->node_name);
949		port->port_name = betoh64(pdb->port_name);
950		port->portid = (pdb->port_id[0] << 16) |
951		    (pdb->port_id[1] << 8) | pdb->port_id[2];
952		port->location = QLE_LOCATION_PORT_ID(port->portid);
953 }
954
955 mtx_enter(&sc->sc_port_mtx);
956	TAILQ_INSERT_TAIL(&sc->sc_ports_new, port, update);
957 sc->sc_targets[port->loopid] = port;
958 mtx_leave(&sc->sc_port_mtx);
959
960 DPRINTF(QLE_D_PORT, "%s: %s %d; name %llx\n",
961 DEVNAME(sc), ISSET(port->flags, QLE_PORT_FLAG_IS_TARGET) ?
962 "target" : "non-target", port->loopid, port->port_name);
963 return (0);
964}
965
966int
967qle_add_logged_in_port(struct qle_softc *sc, u_int16_t loopid,
968 u_int32_t portid)
969{
970 struct qle_fc_port *port;
971 struct qle_get_port_db *pdb;
972 u_int64_t node_name, port_name;
973 int flags, ret;
974
975 ret = qle_get_port_db(sc, loopid, sc->sc_scratch);
976 mtx_enter(&sc->sc_port_mtx);
977 if (ret != 0) {
978 /* put in a fake port to prevent use of this loop id */
979 printf("%s: loop id %d used, but can't see what's using it\n",
980 DEVNAME(sc)((sc)->sc_dev.dv_xname), loopid);
981 node_name = 0;
982 port_name = 0;
983 flags = 0;
984 } else {
985		pdb = QLE_DMA_KVA(sc->sc_scratch);
986		node_name = betoh64(pdb->node_name);
987		port_name = betoh64(pdb->port_name);
988		flags = 0;
989		if (lemtoh16(&pdb->prli_svc_word3) & QLE_SVC3_TARGET_ROLE)
990			flags |= QLE_PORT_FLAG_IS_TARGET;
991
992 /* see if we've already found this port */
993		TAILQ_FOREACH(port, &sc->sc_ports_found, update) {
994 if ((port->node_name == node_name) &&
995 (port->port_name == port_name) &&
996 (port->portid == portid)) {
997 mtx_leave(&sc->sc_port_mtx);
998 DPRINTF(QLE_D_PORT, "%s: already found port "
999 "%06x\n", DEVNAME(sc), portid);
1000 return (0);
1001 }
1002 }
1003 }
1004
1005	port = malloc(sizeof(*port), M_DEVBUF, M_ZERO | M_NOWAIT);
1006	if (port == NULL) {
1007		mtx_leave(&sc->sc_port_mtx);
1008		printf("%s: failed to allocate a port structure\n",
1009		    DEVNAME(sc));
1010 return (1);
1011 }
1012 port->location = QLE_LOCATION_PORT_ID(portid)(portid | (2 << 24));
1013 port->port_name = port_name;
1014 port->node_name = node_name;
1015 port->loopid = loopid;
1016 port->portid = portid;
1017 port->flags = flags;
1018
1019	TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports);
1020 sc->sc_targets[port->loopid] = port;
1021 mtx_leave(&sc->sc_port_mtx);
1022
1023 DPRINTF(QLE_D_PORT, "%s: added logged in port %06x at %d\n",
1024 DEVNAME(sc), portid, loopid);
1025 return (0);
1026}
1027
1028struct qle_ccb *
1029qle_handle_resp(struct qle_softc *sc, u_int32_t id)
1030{
1031 struct qle_ccb *ccb;
1032 struct qle_iocb_status *status;
1033 struct qle_iocb_req6 *req;
1034 struct scsi_xfer *xs;
1035 u_int32_t handle;
1036 u_int16_t completion;
1037 u_int8_t *entry;
1038 u_int8_t *data;
1039
1040 ccb = NULL((void *)0);
1041	entry = QLE_DMA_KVA(sc->sc_responses) + (id * QLE_QUEUE_ENTRY_SIZE);
1042
1043	bus_dmamap_sync(sc->sc_dmat,
1044	    QLE_DMA_MAP(sc->sc_responses), id * QLE_QUEUE_ENTRY_SIZE,
1045	    QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTREAD);
1046
1047 qle_dump_iocb(sc, entry);
1048 switch(entry[0]) {
1049 case QLE_IOCB_STATUS0x03:
1050 status = (struct qle_iocb_status *)entry;
1051 handle = status->handle;
1052 if (handle > sc->sc_maxcmds) {
1053 panic("bad completed command handle: %d (> %d)",
1054 handle, sc->sc_maxcmds);
1055 }
1056
1057 ccb = &sc->sc_ccbs[handle];
1058 xs = ccb->ccb_xs;
1059 if (xs == NULL((void *)0)) {
1060 DPRINTF(QLE_D_IO, "%s: got status for inactive ccb %d\n",
1061 DEVNAME(sc), handle);
1062 ccb = NULL((void *)0);
1063 break;
1064 }
1065 if (xs->io != ccb) {
1066 panic("completed command handle doesn't match xs "
1067 "(handle %d, ccb %p, xs->io %p)", handle, ccb,
1068 xs->io);
1069 }
1070
1071 if (xs->datalen > 0) {
1072 if (ccb->ccb_dmamap->dm_nsegs >
1073 QLE_IOCB_SEGS_PER_CMD2) {
1074				bus_dmamap_sync(sc->sc_dmat,
1075				    QLE_DMA_MAP(sc->sc_segments),
1076				    ccb->ccb_seg_offset,
1077				    sizeof(*ccb->ccb_segs) *
1078				    ccb->ccb_dmamap->dm_nsegs + 1,
1079				    BUS_DMASYNC_POSTWRITE);
1080 }
1081
1082			bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap, 0,
1083			    ccb->ccb_dmamap->dm_mapsize,
1084			    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1085			    BUS_DMASYNC_POSTWRITE);
1086			bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap);
1087 }
1088
1089		xs->status = lemtoh16(&status->scsi_status) & 0xff;
1090		xs->resid = 0;
1091		completion = lemtoh16(&status->completion);
1092		switch (completion) {
1093		case QLE_IOCB_STATUS_DATA_UNDERRUN:
1094			xs->resid = lemtoh32(&status->resid);
1095			/* FALLTHROUGH */
1096		case QLE_IOCB_STATUS_DATA_OVERRUN:
1097		case QLE_IOCB_STATUS_COMPLETE:
1098			if (lemtoh16(&status->scsi_status) &
1099			    QLE_SCSI_STATUS_SENSE_VALID) {
1100 u_int32_t *pp;
1101 int sr;
1102				data = status->data +
1103				    lemtoh32(&status->fcp_rsp_len);
1104				sr = MIN(lemtoh32(&status->fcp_sense_len),
1105				    sizeof(xs->sense));
1106				memcpy(&xs->sense, data, sr);
1107				xs->error = XS_SENSE;
1108				pp = (u_int32_t *)&xs->sense;
1109				for (sr = 0; sr < sizeof(xs->sense)/4; sr++) {
1110					pp[sr] = swap32(pp[sr]);
1111				}
1112 } else {
1113 xs->error = XS_NOERROR0;
1114 }
1115 break;
1116
1117 case QLE_IOCB_STATUS_DMA_ERROR0x0002:
1118 DPRINTF(QLE_D_IO, "%s: dma error\n", DEVNAME(sc));
1119 /* set resid apparently? */
1120 break;
1121
1122 case QLE_IOCB_STATUS_RESET0x0004:
1123 DPRINTF(QLE_D_IO, "%s: reset destroyed command\n",
1124 DEVNAME(sc));
1125 sc->sc_marker_required = 1;
1126 xs->error = XS_RESET8;
1127 break;
1128
1129 case QLE_IOCB_STATUS_ABORTED0x0005:
1130 DPRINTF(QLE_D_IO, "%s: aborted\n", DEVNAME(sc));
1131 sc->sc_marker_required = 1;
1132 xs->error = XS_DRIVER_STUFFUP2;
1133 break;
1134
1135 case QLE_IOCB_STATUS_TIMEOUT0x0006:
1136 DPRINTF(QLE_D_IO, "%s: command timed out\n",
1137 DEVNAME(sc));
1138 xs->error = XS_TIMEOUT4;
1139 break;
1140
1141 case QLE_IOCB_STATUS_QUEUE_FULL0x001C:
1142 DPRINTF(QLE_D_IO, "%s: queue full\n", DEVNAME(sc));
1143 xs->error = XS_BUSY5;
1144 break;
1145
1146 case QLE_IOCB_STATUS_PORT_UNAVAIL0x0028:
1147 case QLE_IOCB_STATUS_PORT_LOGGED_OUT0x0029:
1148 case QLE_IOCB_STATUS_PORT_CHANGED0x002A:
1149 DPRINTF(QLE_D_IO, "%s: dev gone\n", DEVNAME(sc));
1150 xs->error = XS_SELTIMEOUT3;
1151 /* mark port as needing relogin? */
1152 break;
1153
1154 default:
1155 DPRINTF(QLE_D_IO, "%s: unexpected completion status "
1156 "%x\n", DEVNAME(sc), status->completion);
1157 xs->error = XS_DRIVER_STUFFUP2;
1158 break;
1159 }
1160 break;
1161
1162 case QLE_IOCB_STATUS_CONT0x10:
1163 DPRINTF(QLE_D_IO, "%s: ignoring status continuation iocb\n",
1164 DEVNAME(sc));
1165 break;
1166
1167 case QLE_IOCB_PLOGX0x52:
1168 case QLE_IOCB_CT_PASSTHROUGH0x29:
1169 if (sc->sc_fabric_pending) {
1170 qle_dump_iocb(sc, entry);
1171			memcpy(sc->sc_fabric_response, entry,
1172			    QLE_QUEUE_ENTRY_SIZE);
1173 sc->sc_fabric_pending = 2;
1174 wakeup(sc->sc_scratch);
1175 } else {
1176 DPRINTF(QLE_D_IO, "%s: unexpected fabric response %x\n",
1177 DEVNAME(sc), entry[0]);
1178 }
1179 break;
1180
1181 case QLE_IOCB_MARKER0x04:
1182 break;
1183
1184 case QLE_IOCB_CMD_TYPE_60x48:
1185 case QLE_IOCB_CMD_TYPE_70x18:
1186 DPRINTF(QLE_D_IO, "%s: request bounced back\n", DEVNAME(sc));
1187 req = (struct qle_iocb_req6 *)entry;
1188 handle = req->req_handle;
1189 if (handle > sc->sc_maxcmds) {
1190 panic("bad bounced command handle: %d (> %d)",
1191 handle, sc->sc_maxcmds);
1192 }
1193
1194 ccb = &sc->sc_ccbs[handle];
1195 xs = ccb->ccb_xs;
1196 xs->error = XS_DRIVER_STUFFUP2;
1197 break;
1198 default:
1199 DPRINTF(QLE_D_IO, "%s: unexpected response entry type %x\n",
1200 DEVNAME(sc), entry[0]);
1201 break;
1202 }
1203
1204 return (ccb);
1205}
1206
1207void
1208qle_handle_intr(struct qle_softc *sc, u_int16_t isr, u_int16_t info)
1209{
1210 int i;
1211 u_int32_t rspin;
1212 struct qle_ccb *ccb;
1213
1214 switch (isr) {
1215 case QLE_INT_TYPE_ASYNC2:
1216 qle_async(sc, info);
1217 break;
1218
1219 case QLE_INT_TYPE_IO3:
1220 rspin = qle_read(sc, QLE_RESP_IN0x024);
1221 if (rspin == sc->sc_last_resp_id)
1222 break;
1223
1224 do {
1225 ccb = qle_handle_resp(sc, sc->sc_last_resp_id);
1226 if (ccb)
1227 scsi_done(ccb->ccb_xs);
1228
1229 sc->sc_last_resp_id++;
1230 sc->sc_last_resp_id %= sc->sc_maxcmds;
1231 } while (sc->sc_last_resp_id != rspin);
1232
1233 qle_write(sc, QLE_RESP_OUT0x028, sc->sc_last_resp_id);
1234 break;
1235
1236 case QLE_INT_TYPE_MBOX1:
1237 mtx_enter(&sc->sc_mbox_mtx);
1238 if (sc->sc_mbox_pending) {
1239 for (i = 0; i < nitems(sc->sc_mbox)(sizeof((sc->sc_mbox)) / sizeof((sc->sc_mbox)[0])); i++) {
1240 sc->sc_mbox[i] = qle_read_mbox(sc, i);
1241 }
1242 sc->sc_mbox_pending = 2;
1243 wakeup(sc->sc_mbox);
1244 mtx_leave(&sc->sc_mbox_mtx);
1245 } else {
1246 mtx_leave(&sc->sc_mbox_mtx);
1247 DPRINTF(QLE_D_INTR, "%s: unexpected mbox interrupt: "
1248 "%x\n", DEVNAME(sc), info);
1249 }
1250 break;
1251
1252 default:
1253 break;
1254 }
1255
1256 qle_clear_isr(sc, isr);
1257}
1258
1259int
1260qle_intr(void *xsc)
1261{
1262 struct qle_softc *sc = xsc;
1263 u_int16_t isr;
1264 u_int16_t info;
1265
1266 if (qle_read_isr(sc, &isr, &info) == 0)
1267 return (0);
1268
1269 qle_handle_intr(sc, isr, info);
1270 return (1);
1271}
1272
1273int
1274qle_scsi_probe(struct scsi_link *link)
1275{
1276 struct qle_softc *sc = link->bus->sb_adapter_softc;
1277 int rv = 0;
1278
1279 mtx_enter(&sc->sc_port_mtx);
1280 if (sc->sc_targets[link->target] == NULL)
1281 rv = ENXIO;
1282 else if (!ISSET(sc->sc_targets[link->target]->flags,
1283 QLE_PORT_FLAG_IS_TARGET))
1284 rv = ENXIO;
1285 mtx_leave(&sc->sc_port_mtx);
1286
1287 return (rv);
1288}
1289
1290void
1291qle_scsi_cmd(struct scsi_xfer *xs)
1292{
1293 struct scsi_link *link = xs->sc_link;
1294 struct qle_softc *sc = link->bus->sb_adapter_softc;
1295 struct qle_ccb *ccb;
1296 void *iocb;
1297 struct qle_ccb_list list;
1298 u_int16_t req;
1299 u_int32_t portid;
1300 int offset, error, done;
1301 bus_dmamap_t dmap;
1302
1303 if (xs->cmdlen > 16) {
1304 DPRINTF(QLE_D_IO, "%s: cmd too big (%d)\n", DEVNAME(sc),
1305 xs->cmdlen);
1306 memset(&xs->sense, 0, sizeof(xs->sense));
1307 xs->sense.error_code = SSD_ERRCODE_VALID0x80 | SSD_ERRCODE_CURRENT0x70;
1308 xs->sense.flags = SKEY_ILLEGAL_REQUEST0x05;
1309 xs->sense.add_sense_code = 0x20;
1310 xs->error = XS_SENSE1;
1311 scsi_done(xs);
1312 return;
1313 }
1314
1315 portid = 0xffffffff;
1316 mtx_enter(&sc->sc_port_mtx);
1317 if (sc->sc_targets[xs->sc_link->target] != NULL((void *)0)) {
1318 portid = sc->sc_targets[xs->sc_link->target]->portid;
1319 }
1320 mtx_leave(&sc->sc_port_mtx);
1321 if (portid == 0xffffffff) {
1322 xs->error = XS_DRIVER_STUFFUP2;
1323 scsi_done(xs);
1324 return;
1325 }
1326
1327 ccb = xs->io;
1328 dmap = ccb->ccb_dmamap;
1329 if (xs->datalen > 0) {
1330 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
1331 xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
1332 BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
1333 if (error) {
1334 xs->error = XS_DRIVER_STUFFUP2;
1335 scsi_done(xs);
1336 return;
1337 }
1338
1339 bus_dmamap_sync(sc->sc_dmat, dmap, 0,
1340 dmap->dm_mapsize,
1341 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1342 BUS_DMASYNC_PREWRITE);
1343 }
1344
1345 mtx_enter(&sc->sc_queue_mtx);
1346
1347 /* put in a sync marker if required */
1348 if (sc->sc_marker_required) {
1349 req = sc->sc_next_req_id++;
1350 if (sc->sc_next_req_id == sc->sc_maxcmds)
1351 sc->sc_next_req_id = 0;
1352
1353 DPRINTF(QLE_D_IO, "%s: writing marker at request %d\n",
1354 DEVNAME(sc), req);
1355 offset = (req * QLE_QUEUE_ENTRY_SIZE);
1356 iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
1357 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests),
1358 offset, QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
1359 qle_put_marker(sc, iocb);
1360 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
1361 sc->sc_marker_required = 0;
1362 }
1363
1364 req = sc->sc_next_req_id++;
1365 if (sc->sc_next_req_id == sc->sc_maxcmds)
1366 sc->sc_next_req_id = 0;
1367
1368 offset = (req * QLE_QUEUE_ENTRY_SIZE);
1369 iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
1370 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
1371 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
1372
1373 ccb->ccb_xs = xs;
1374
1375 qle_put_cmd(sc, iocb, xs, ccb, portid);
1376
1377 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
1378 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREREAD);
1379 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
1380
1381 if (!ISSET(xs->flags, SCSI_POLL)) {
1382 mtx_leave(&sc->sc_queue_mtx);
1383 return;
1384 }
1385
1386 done = 0;
1387 SIMPLEQ_INIT(&list);
1388 do {
1389 u_int16_t isr, info;
1390 u_int32_t rspin;
1391 delay(100)(*delay_func)(100);
1392
1393 if (qle_read_isr(sc, &isr, &info) == 0) {
1394 continue;
1395 }
1396
1397 if (isr != QLE_INT_TYPE_IO3) {
1398 qle_handle_intr(sc, isr, info);
1399 continue;
1400 }
1401
1402 rspin = qle_read(sc, QLE_RESP_IN0x024);
1403 while (rspin != sc->sc_last_resp_id) {
1404 ccb = qle_handle_resp(sc, sc->sc_last_resp_id);
1405
1406 sc->sc_last_resp_id++;
1407 if (sc->sc_last_resp_id == sc->sc_maxcmds)
1408 sc->sc_last_resp_id = 0;
1409
1410 if (ccb != NULL)
1411 SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
1412 if (ccb == xs->io)
1413 done = 1;
1414 }
1415 qle_write(sc, QLE_RESP_OUT0x028, sc->sc_last_resp_id);
1416 qle_clear_isr(sc, isr);
1417 } while (done == 0);
1418
1419 mtx_leave(&sc->sc_queue_mtx);
1420
1421 while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
1422 SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
1423 scsi_done(ccb->ccb_xs);
1424 }
1425}
1426
1427u_int32_t
1428qle_read(struct qle_softc *sc, int offset)
1429{
1430 u_int32_t v;
1431 v = bus_space_read_4(sc->sc_iot, sc->sc_ioh, offset)((sc->sc_iot)->read_4((sc->sc_ioh), (offset)));
1432 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 4,
1433 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
1434 return (v);
1435}
1436
1437void
1438qle_write(struct qle_softc *sc, int offset, u_int32_t value)
1439{
1440 bus_space_write_4(sc->sc_iot, sc->sc_ioh, offset, value);
1441 bus_space_barrier(sc->sc_iot, sc->sc_ioh, offset, 4,
1442 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
1443}
1444
1445u_int16_t
1446qle_read_mbox(struct qle_softc *sc, int mbox)
1447{
1448 u_int16_t v;
1449 bus_size_t offset = mbox * 2;
1450 v = bus_space_read_2(sc->sc_iot, sc->sc_mbox_ioh, offset)((sc->sc_iot)->read_2((sc->sc_mbox_ioh), (offset)));
1451 bus_space_barrier(sc->sc_iot, sc->sc_mbox_ioh, offset, 2,
1452 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
1453 return (v);
1454}
1455
1456void
1457qle_write_mbox(struct qle_softc *sc, int mbox, u_int16_t value)
1458{
1459 bus_size_t offset = (mbox * 2);
1460 bus_space_write_2(sc->sc_iot, sc->sc_mbox_ioh, offset, value);
1461 bus_space_barrier(sc->sc_iot, sc->sc_mbox_ioh, offset, 2,
1462 BUS_SPACE_BARRIER_READ0x01 | BUS_SPACE_BARRIER_WRITE0x02);
1463}
1464
1465void
1466qle_host_cmd(struct qle_softc *sc, u_int32_t cmd)
1467{
1468 qle_write(sc, QLE_HOST_CMD_CTRL0x048, cmd << QLE_HOST_CMD_SHIFT28);
1469}
1470
1471#define MBOX_COMMAND_TIMEOUT400000 400000
1472
1473int
1474qle_mbox(struct qle_softc *sc, int maskin)
1475{
1476 int i;
1477 int result = 0;
1478 int rv;
1479
1480 for (i = 0; i < nitems(sc->sc_mbox)(sizeof((sc->sc_mbox)) / sizeof((sc->sc_mbox)[0])); i++) {
1481 if (maskin & (1 << i)) {
1482 qle_write_mbox(sc, i, sc->sc_mbox[i]);
1483 }
1484 }
1485 qle_host_cmd(sc, QLE_HOST_CMD_SET_HOST_INT0x5);
1486
1487 if (sc->sc_scsibus != NULL((void *)0)) {
1488 mtx_enter(&sc->sc_mbox_mtx);
1489 sc->sc_mbox_pending = 1;
1490 while (sc->sc_mbox_pending == 1) {
1491 msleep_nsec(sc->sc_mbox, &sc->sc_mbox_mtx, PRIBIO16,
1492 "qlembox", INFSLP0xffffffffffffffffULL);
1493 }
1494 result = sc->sc_mbox[0];
1495 sc->sc_mbox_pending = 0;
1496 mtx_leave(&sc->sc_mbox_mtx);
1497 return (result == QLE_MBOX_COMPLETE0x4000 ? 0 : result);
1498 }
1499
1500 for (i = 0; i < MBOX_COMMAND_TIMEOUT400000 && result == 0; i++) {
1501 u_int16_t isr, info;
1502
1503 delay(100)(*delay_func)(100);
1504
1505 if (qle_read_isr(sc, &isr, &info) == 0)
1506 continue;
1507
1508 switch (isr) {
1509 case QLE_INT_TYPE_MBOX1:
1510 result = info;
1511 break;
1512
1513 default:
1514 qle_handle_intr(sc, isr, info);
1515 break;
1516 }
1517 }
1518
1519 if (result == 0) {
1520 /* timed out; do something? */
1521 DPRINTF(QLE_D_MBOX, "%s: mbox timed out\n", DEVNAME(sc));
1522 rv = 1;
1523 } else {
1524 for (i = 0; i < nitems(sc->sc_mbox)(sizeof((sc->sc_mbox)) / sizeof((sc->sc_mbox)[0])); i++) {
1525 sc->sc_mbox[i] = qle_read_mbox(sc, i);
1526 }
1527 rv = (result == QLE_MBOX_COMPLETE0x4000 ? 0 : result);
1528 }
1529
1530 qle_clear_isr(sc, QLE_INT_TYPE_MBOX1);
1531 return (rv);
1532}
1533
1534void
1535qle_mbox_putaddr(u_int16_t *mbox, struct qle_dmamem *mem)
1536{
1537 mbox[2] = (QLE_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 16) & 0xffff;
1538 mbox[3] = (QLE_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 0) & 0xffff;
1539 mbox[6] = (QLE_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 48) & 0xffff;
1540 mbox[7] = (QLE_DMA_DVA(mem)((u_int64_t)(mem)->qdm_map->dm_segs[0].ds_addr) >> 32) & 0xffff;
1541}
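The split above scatters a 64-bit DMA address across mailboxes 2/3 (low 32 bits) and 6/7 (high 32 bits), high half-word first in each pair. A standalone sanity check of that layout, assuming nothing beyond what the four assignments show (put_addr() here is an illustrative stand-in for qle_mbox_putaddr(), not part of qle.c):

#include <assert.h>
#include <stdint.h>

/* illustrative copy of the mailbox/address layout shown above */
static void
put_addr(uint16_t *mbox, uint64_t dva)
{
	mbox[2] = (dva >> 16) & 0xffff;
	mbox[3] = (dva >> 0) & 0xffff;
	mbox[6] = (dva >> 48) & 0xffff;
	mbox[7] = (dva >> 32) & 0xffff;
}

int
main(void)
{
	uint16_t mbox[8] = { 0 };

	put_addr(mbox, 0x1122334455667788ULL);
	assert(mbox[2] == 0x5566 && mbox[3] == 0x7788);
	assert(mbox[6] == 0x1122 && mbox[7] == 0x3344);
	return 0;
}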
1542
1543void
1544qle_set_ints(struct qle_softc *sc, int enabled)
1545{
1546 u_int32_t v = enabled ? QLE_INT_CTRL_ENABLE0x00000008 : 0;
1547 qle_write(sc, QLE_INT_CTRL0x00C, v);
1548}
1549
1550int
1551qle_read_isr(struct qle_softc *sc, u_int16_t *isr, u_int16_t *info)
1552{
1553 u_int32_t v;
1554
1555 switch (sc->sc_isp_gen) {
1556 case QLE_GEN_ISP24XX:
1557 case QLE_GEN_ISP25XX:
1558 if ((qle_read(sc, QLE_INT_STATUS0x010) & QLE_RISC_INT_REQ0x00000008) == 0)
1559 return (0);
1560
1561 v = qle_read(sc, QLE_RISC_STATUS0x044);
1562
1563 switch (v & QLE_INT_STATUS_MASK0x000000FF) {
1564 case QLE_24XX_INT_ROM_MBOX0x01:
1565 case QLE_24XX_INT_ROM_MBOX_FAIL0x02:
1566 case QLE_24XX_INT_MBOX0x10:
1567 case QLE_24XX_INT_MBOX_FAIL0x11:
1568 *isr = QLE_INT_TYPE_MBOX1;
1569 break;
1570
1571 case QLE_24XX_INT_ASYNC0x12:
1572 *isr = QLE_INT_TYPE_ASYNC2;
1573 break;
1574
1575 case QLE_24XX_INT_RSPQ0x13:
1576 *isr = QLE_INT_TYPE_IO3;
1577 break;
1578
1579 default:
1580 *isr = QLE_INT_TYPE_OTHER4;
1581 break;
1582 }
1583
1584 *info = (v >> QLE_INT_INFO_SHIFT16);
1585 return (1);
1586
1587 default:
1588 return (0);
1589 }
1590}
1591
1592void
1593qle_clear_isr(struct qle_softc *sc, u_int16_t isr)
1594{
1595 qle_host_cmd(sc, QLE_HOST_CMD_CLR_RISC_INT0xA);
1596}
1597
1598void
1599qle_update_done(struct qle_softc *sc, int task)
1600{
1601 atomic_clearbits_intx86_atomic_clearbits_u32(&sc->sc_update_tasks, task);
1602}
1603
1604void
1605qle_update_cancel(struct qle_softc *sc)
1606{
1607 atomic_swap_uint(&sc->sc_update_tasks, 0)_atomic_swap_uint((&sc->sc_update_tasks), (0));
1608 timeout_del(&sc->sc_update_timeout);
1609 task_del(sc->sc_update_taskq, &sc->sc_update_task);
1610}
1611
1612void
1613qle_update_start(struct qle_softc *sc, int task)
1614{
1615 atomic_setbits_intx86_atomic_setbits_u32(&sc->sc_update_tasks, task);
1616 if (!timeout_pending(&sc->sc_update_timeout)((&sc->sc_update_timeout)->to_flags & 0x02))
1617 task_add(sc->sc_update_taskq, &sc->sc_update_task);
1618}
1619
1620void
1621qle_update_defer(struct qle_softc *sc, int task)
1622{
1623 atomic_setbits_intx86_atomic_setbits_u32(&sc->sc_update_tasks, task);
1624 timeout_del(&sc->sc_update_timeout);
1625 task_del(sc->sc_update_taskq, &sc->sc_update_task);
1626 timeout_add_msec(&sc->sc_update_timeout, QLE_LOOP_SETTLE200);
1627}
1628
1629void
1630qle_clear_port_lists(struct qle_softc *sc)
1631{
1632 struct qle_fc_port *port;
1633 while (!TAILQ_EMPTY(&sc->sc_ports_found)) {
1634 port = TAILQ_FIRST(&sc->sc_ports_found);
1635 TAILQ_REMOVE(&sc->sc_ports_found, port, update);
1636 free(port, M_DEVBUF, sizeof *port);
1637 }
1638
1639 while (!TAILQ_EMPTY(&sc->sc_ports_new)) {
1640 port = TAILQ_FIRST(&sc->sc_ports_new);
1641 TAILQ_REMOVE(&sc->sc_ports_new, port, update);
1642 free(port, M_DEVBUF, sizeof *port);
1643 }
1644
1645 while (!TAILQ_EMPTY(&sc->sc_ports_gone)) {
1646 port = TAILQ_FIRST(&sc->sc_ports_gone);
1647 TAILQ_REMOVE(&sc->sc_ports_gone, port, update);
1648 }
1649}
1650
1651int
1652qle_softreset(struct qle_softc *sc)
1653{
1654 int i;
1655 qle_set_ints(sc, 0);
1656
1657 /* set led control bits, stop dma */
1658 qle_write(sc, QLE_GPIO_DATA0x04C, 0);
1659 qle_write(sc, QLE_CTRL_STATUS0x008, QLE_CTRL_DMA_SHUTDOWN0x00010000);
1660 while (qle_read(sc, QLE_CTRL_STATUS0x008) & QLE_CTRL_DMA_ACTIVE0x00020000) {
1661 DPRINTF(QLE_D_IO, "%s: dma still active\n", DEVNAME(sc));
1662 delay(100)(*delay_func)(100);
1663 }
1664
1665 /* reset */
1666 qle_write(sc, QLE_CTRL_STATUS0x008, QLE_CTRL_RESET0x00000001 | QLE_CTRL_DMA_SHUTDOWN0x00010000);
1667 delay(100)(*delay_func)(100);
1668 /* clear data and control dma engines? */
1669
1670 /* wait for soft reset to clear */
1671 for (i = 0; i < 1000; i++) {
1672 if (qle_read_mbox(sc, 0) == 0x0000)
1673 break;
1674
1675 delay(100)(*delay_func)(100);
1676 }
1677
1678 if (i == 1000) {
1679 printf("%s: reset mbox didn't clear\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1680 qle_set_ints(sc, 0);
1681 return (ENXIO6);
1682 }
1683
1684 for (i = 0; i < 500000; i++) {
1685 if ((qle_read(sc, QLE_CTRL_STATUS0x008) & QLE_CTRL_RESET0x00000001) == 0)
1686 break;
1687 delay(5)(*delay_func)(5);
1688 }
1689 if (i == 500000) {
1690 printf("%s: reset status didn't clear\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1691 return (ENXIO6);
1692 }
1693
1694 /* reset risc processor */
1695 qle_host_cmd(sc, QLE_HOST_CMD_RESET0x1);
1696 qle_host_cmd(sc, QLE_HOST_CMD_RELEASE0x4);
1697 qle_host_cmd(sc, QLE_HOST_CMD_CLEAR_RESET0x2);
1698
1699 /* wait for reset to clear */
1700 for (i = 0; i < 1000; i++) {
1701 if (qle_read_mbox(sc, 0) == 0x0000)
1702 break;
1703 delay(100)(*delay_func)(100);
1704 }
1705 if (i == 1000) {
1706 printf("%s: risc not ready after reset\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1707 return (ENXIO6);
1708 }
1709
1710 /* reset queue pointers */
1711 qle_write(sc, QLE_REQ_IN0x01C, 0);
1712 qle_write(sc, QLE_REQ_OUT0x020, 0);
1713 qle_write(sc, QLE_RESP_IN0x024, 0);
1714 qle_write(sc, QLE_RESP_OUT0x028, 0);
1715
1716 qle_set_ints(sc, 1);
1717
1718 /* do a basic mailbox operation to check we're alive */
1719 sc->sc_mbox[0] = QLE_MBOX_NOP0x0000;
1720 if (qle_mbox(sc, 0x0001)) {
1721 printf("ISP not responding after reset\n");
1722 return (ENXIO6);
1723 }
1724
1725 return (0);
1726}
1727
1728void
1729qle_update_topology(struct qle_softc *sc)
1730{
1731 sc->sc_mbox[0] = QLE_MBOX_GET_ID0x0020;
1732 if (qle_mbox(sc, 0x0001)) {
1733 DPRINTF(QLE_D_PORT, "%s: unable to get loop id\n", DEVNAME(sc));
1734 sc->sc_topology = QLE_TOPO_N_PORT_NO_TARGET4;
1735 } else {
1736 sc->sc_topology = sc->sc_mbox[6];
1737 sc->sc_loop_id = sc->sc_mbox[1];
1738
1739 switch (sc->sc_topology) {
1740 case QLE_TOPO_NL_PORT0:
1741 case QLE_TOPO_N_PORT2:
1742 DPRINTF(QLE_D_PORT, "%s: loop id %d\n", DEVNAME(sc),
1743 sc->sc_loop_id);
1744 break;
1745
1746 case QLE_TOPO_FL_PORT1:
1747 case QLE_TOPO_F_PORT3:
1748 sc->sc_port_id = sc->sc_mbox[2] |
1749 (sc->sc_mbox[3] << 16);
1750 DPRINTF(QLE_D_PORT, "%s: fabric port id %06x\n",
1751 DEVNAME(sc), sc->sc_port_id);
1752 break;
1753
1754 case QLE_TOPO_N_PORT_NO_TARGET4:
1755 default:
1756 DPRINTF(QLE_D_PORT, "%s: not useful\n", DEVNAME(sc));
1757 break;
1758 }
1759
1760 switch (sc->sc_topology) {
1761 case QLE_TOPO_NL_PORT0:
1762 case QLE_TOPO_FL_PORT1:
1763 sc->sc_loop_max_id = 126;
1764 break;
1765
1766 case QLE_TOPO_N_PORT2:
1767 sc->sc_loop_max_id = 2;
1768 break;
1769
1770 default:
1771 sc->sc_loop_max_id = 0;
1772 break;
1773 }
1774 }
1775}
1776
1777int
1778qle_update_fabric(struct qle_softc *sc)
1779{
1780 /*struct qle_sns_rft_id *rft;*/
1781
1782 switch (sc->sc_topology) {
1783 case QLE_TOPO_F_PORT3:
1784 case QLE_TOPO_FL_PORT1:
1785 break;
1786
1787 default:
1788 return (0);
1789 }
1790
1791 /* get the name server's port db entry */
1792 sc->sc_mbox[0] = QLE_MBOX_GET_PORT_DB;
1793 sc->sc_mbox[1] = QLE_F_PORT_HANDLE;
1794 qle_mbox_putaddr(sc->sc_mbox, sc->sc_scratch);
1795 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
1796 sizeof(struct qle_get_port_db), BUS_DMASYNC_PREREAD);
1797 if (qle_mbox(sc, 0x00cf)) {
1798 DPRINTF(QLE_D_PORT, "%s: get port db for SNS failed: %x\n",
1799 DEVNAME(sc), sc->sc_mbox[0]);
1800 sc->sc_sns_port_name = 0;
1801 } else {
1802 struct qle_get_port_db *pdb;
1803 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_scratch), 0,
1804 sizeof(struct qle_get_port_db), BUS_DMASYNC_POSTREAD);
1805 pdb = QLE_DMA_KVA(sc->sc_scratch);
1806 DPRINTF(QLE_D_PORT, "%s: SNS port name %llx\n", DEVNAME(sc),
1807 betoh64(pdb->port_name));
1808 sc->sc_sns_port_name = betoh64(pdb->port_name);
1809 }
1810
1811 /*
1812 * register fc4 types with the fabric
1813 * some switches do this automatically, but apparently
1814 * some don't.
1815 */
1816 /*
1817 rft = QLE_DMA_KVA(sc->sc_scratch);
1818 memset(rft, 0, sizeof(*rft) + sizeof(struct qle_sns_req_hdr));
1819 htolem16(&rft->subcmd, QLE_SNS_RFT_ID);
1820 htolem16(&rft->max_word, sizeof(struct qle_sns_req_hdr) / 4);
1821 htolem32(&rft->port_id, sc->sc_port_id);
1822 rft->fc4_types[0] = (1 << QLE_FC4_SCSI);
1823 if (qle_sns_req(sc, sc->sc_scratch, sizeof(*rft))) {
1824 printf("%s: RFT_ID failed\n", DEVNAME(sc));
1825 / * we might be able to continue after this fails * /
1826 }
1827 */
1828
1829 return (1);
1830}
1831
1832int
1833qle_ct_pass_through(struct qle_softc *sc, u_int32_t port_handle,
1834 struct qle_dmamem *mem, size_t req_size, size_t resp_size)
1835{
1836 struct qle_iocb_ct_passthrough *iocb;
1837 u_int16_t req;
1838 u_int64_t offset;
1839 int rv;
1840
1841 mtx_enter(&sc->sc_queue_mtx);
1842
1843 req = sc->sc_next_req_id++;
1844 if (sc->sc_next_req_id == sc->sc_maxcmds)
1845 sc->sc_next_req_id = 0;
1846
1847 offset = (req * QLE_QUEUE_ENTRY_SIZE);
1848 iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
1849 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
1850 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
1851
1852 memset(iocb, 0, QLE_QUEUE_ENTRY_SIZE);
1853 iocb->entry_type = QLE_IOCB_CT_PASSTHROUGH;
1854 iocb->entry_count = 1;
1855
1856 iocb->req_handle = 9;
1857 htolem16(&iocb->req_nport_handle, port_handle);
1858 htolem16(&iocb->req_dsd_count, 1);
1859 htolem16(&iocb->req_resp_dsd_count, 1);
1860 htolem32(&iocb->req_cmd_byte_count, req_size);
1861 htolem32(&iocb->req_resp_byte_count, resp_size);
1862 qle_sge(&iocb->req_cmd_seg, QLE_DMA_DVA(mem), req_size);
1863 qle_sge(&iocb->req_resp_seg, QLE_DMA_DVA(mem) + req_size, resp_size);
1864
1865 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, QLE_DMA_LEN(mem),
1866 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
1867 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
1868 sc->sc_fabric_pending = 1;
1869 mtx_leave(&sc->sc_queue_mtx);
1870
1871 /* maybe put a proper timeout on this */
1872 rv = 0;
1873 while (sc->sc_fabric_pending == 1) {
1874 if (sc->sc_scsibus == NULL((void *)0)) {
1875 u_int16_t isr, info;
1876
1877 delay(100)(*delay_func)(100);
1878 if (qle_read_isr(sc, &isr, &info) != 0)
1879 qle_handle_intr(sc, isr, info);
1880 } else {
1881 tsleep_nsec(sc->sc_scratch, PRIBIO16, "qle_fabric",
1882 SEC_TO_NSEC(1));
1883 }
1884 }
1885 if (rv == 0)
1886 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0,
1887 QLE_DMA_LEN(mem), BUS_DMASYNC_POSTREAD |
1888 BUS_DMASYNC_POSTWRITE);
1889
1890 sc->sc_fabric_pending = 0;
1891
1892 return (rv);
1893}
1894
1895struct qle_fc_port *
1896qle_next_fabric_port(struct qle_softc *sc, u_int32_t *firstport,
1897 u_int32_t *lastport)
1898{
1899 struct qle_ct_ga_nxt_req *ga;
1900 struct qle_ct_ga_nxt_resp *gar;
1901 struct qle_fc_port *fport;
1902 int result;
1903
1904 /* get the next port from the fabric nameserver */
1905 ga = QLE_DMA_KVA(sc->sc_scratch)((void *)(sc->sc_scratch)->qdm_kva);
1906 memset(ga, 0, sizeof(*ga) + sizeof(*gar));
1907 ga->header.ct_revision = 0x01;
1908 ga->header.ct_gs_type = 0xfc;
1909 ga->header.ct_gs_subtype = 0x02;
1910 ga->subcmd = htobe16(QLE_SNS_GA_NXT);
1911 ga->max_word = htobe16((sizeof(*gar) - 16) / 4);
1912 ga->port_id = htobe32(*lastport);
1913 result = qle_ct_pass_through(sc, QLE_SNS_HANDLE0x7FC, sc->sc_scratch,
1914 sizeof(*ga), sizeof(*gar));
1915 if (result) {
1916 DPRINTF(QLE_D_PORT, "%s: GA_NXT %06x failed: %x\n", DEVNAME(sc),
1917 *lastport, result);
1918 *lastport = 0xffffffff;
1919 return (NULL((void *)0));
1920 }
1921
1922 gar = (struct qle_ct_ga_nxt_resp *)(ga + 1);
1923 /* if the response is all zeroes, try again */
1924 if (gar->port_type_id == 0 && gar->port_name == 0 &&
1925 gar->node_name == 0) {
1926 DPRINTF(QLE_D_PORT, "%s: GA_NXT returned junk\n", DEVNAME(sc));
1927 return (NULL((void *)0));
1928 }
1929
1930 /* are we back at the start? */
1931 *lastport = betoh32(gar->port_type_id) & 0xffffff;
1932 if (*lastport == *firstport) {
1933 *lastport = 0xffffffff;
1934 return (NULL((void *)0));
1935 }
1936 if (*firstport == 0xffffffff)
1937 *firstport = *lastport;
1938
1939 DPRINTF(QLE_D_PORT, "%s: GA_NXT: port id: %06x, wwpn %llx, wwnn %llx\n",
1940 DEVNAME(sc), *lastport, betoh64(gar->port_name),
1941 betoh64(gar->node_name));
1942
1943 /* don't try to log in to ourselves */
1944 if (*lastport == sc->sc_port_id) {
1945 return (NULL((void *)0));
1946 }
1947
1948 fport = malloc(sizeof(*fport), M_DEVBUF2, M_ZERO0x0008 | M_NOWAIT0x0002);
1949 if (fport == NULL((void *)0)) {
1950 printf("%s: failed to allocate a port struct\n",
1951 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1952 *lastport = 0xffffffff;
1953 return (NULL((void *)0));
1954 }
1955 fport->port_name = betoh64(gar->port_name);
1956 fport->node_name = betoh64(gar->node_name);
1957 fport->location = QLE_LOCATION_PORT_ID(*lastport);
1958 fport->portid = *lastport;
1959 return (fport);
1960}
1961
1962int
1963qle_fabric_plogx(struct qle_softc *sc, struct qle_fc_port *port, int flags,
1964 u_int32_t *info)
1965{
1966 struct qle_iocb_plogx *iocb;
1967 u_int16_t req;
1968 u_int64_t offset;
1969 int rv;
1970
1971 mtx_enter(&sc->sc_queue_mtx);
1972
1973 req = sc->sc_next_req_id++;
1974 if (sc->sc_next_req_id == sc->sc_maxcmds)
1975 sc->sc_next_req_id = 0;
1976
1977 offset = (req * QLE_QUEUE_ENTRY_SIZE);
1978 iocb = QLE_DMA_KVA(sc->sc_requests) + offset;
1979 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(sc->sc_requests), offset,
1980 QLE_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
1981
1982 memset(iocb, 0, QLE_QUEUE_ENTRY_SIZE);
1983 iocb->entry_type = QLE_IOCB_PLOGX;
1984 iocb->entry_count = 1;
1985
1986 iocb->req_handle = 7;
1987 htolem16(&iocb->req_nport_handle, port->loopid);
1988 htolem16(&iocb->req_port_id_lo, port->portid);
1989 iocb->req_port_id_hi = port->portid >> 16;
1990 htolem16(&iocb->req_flags, flags);
1991
1992 DPRINTF(QLE_D_PORT, "%s: plogx loop id %d port %06x, flags %x\n",
1993 DEVNAME(sc), port->loopid, port->portid, flags);
1994 qle_dump_iocb(sc, iocb);
1995
1996 qle_write(sc, QLE_REQ_IN, sc->sc_next_req_id);
1997 sc->sc_fabric_pending = 1;
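/* qle_handle_resp() copies the PLOGX reply into sc->sc_fabric_response,
 * sets sc_fabric_pending to 2 and wakes sc_scratch (lines 1169-1174),
 * which is what ends the wait loop below. */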
1998 mtx_leave(&sc->sc_queue_mtx);
1999
2000 /* maybe put a proper timeout on this */
2001 rv = 0;
Value stored to 'rv' is never read
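(The zero stored here is overwritten unconditionally at line 2017 by rv = lemtoh16(&iocb->req_status) before rv is ever read, so the initialization is a dead store. The matching rv = 0 at line 1872 in qle_ct_pass_through() is live: it is tested at line 1885 before the DMA sync.)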
2002 while (sc->sc_fabric_pending == 1) {
2003 if (sc->sc_scsibus == NULL) {
2004 u_int16_t isr, info;
2005
2006 delay(100);
2007 if (qle_read_isr(sc, &isr, &info) != 0)
2008 qle_handle_intr(sc, isr, info);
2009 } else {
2010 tsleep_nsec(sc->sc_scratch, PRIBIO, "qle_fabric",
2011 SEC_TO_NSEC(1));
2012 }
2013 }
2014 sc->sc_fabric_pending = 0;
2015
2016 iocb = (struct qle_iocb_plogx *)&sc->sc_fabric_response;
2017 rv = lemtoh16(&iocb->req_status);
2018 if (rv == QLE_PLOGX_ERROR) {
2019 rv = lemtoh32(&iocb->req_ioparms[0]);
2020 *info = lemtoh32(&iocb->req_ioparms[1]);
2021 }
2022
2023 return (rv);
2024}
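A minimal, compilable sketch of the pattern behind this report and the smallest cleanup (dropping the unread initialization); read_status() and both plogx_*() functions are illustrative stand-ins, not part of qle.c:

#include <stdio.h>

/* stand-in for lemtoh16(&iocb->req_status) */
static int
read_status(void)
{
	return 0x31;
}

/* shape of the code flagged above: the first store to rv is never read */
static int
plogx_as_flagged(void)
{
	int rv;

	rv = 0;			/* dead store */
	/* ... wait for sc_fabric_pending to leave state 1 ... */
	rv = read_status();	/* overwrites rv before any read */
	return rv;
}

/* possible cleanup: assign rv only once the response is available */
static int
plogx_cleaned(void)
{
	int rv;

	/* ... wait for sc_fabric_pending to leave state 1 ... */
	rv = read_status();
	return rv;
}

int
main(void)
{
	printf("%x %x\n", plogx_as_flagged(), plogx_cleaned());
	return 0;
}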
2025
2026int
2027qle_fabric_plogi(struct qle_softc *sc, struct qle_fc_port *port)
2028{
2029 u_int32_t info;
2030 int err, loopid;
2031
2032 loopid = 0;
2033retry:
2034 if (port->loopid == 0) {
2035
2036 mtx_enter(&sc->sc_port_mtx);
2037 loopid = qle_get_loop_id(sc, loopid);
2038 mtx_leave(&sc->sc_port_mtx);
2039 if (loopid == -1) {
2040 printf("%s: ran out of loop ids\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2041 return (1);
2042 }
2043
2044 port->loopid = loopid;
2045 }
2046
2047 err = qle_fabric_plogx(sc, port, QLE_PLOGX_LOGIN0x0000, &info);
2048 switch (err) {
2049 case 0:
2050 DPRINTF(QLE_D_PORT, "%s: logged in to %06x as %d\n",
2051 DEVNAME(sc), port->portid, port->loopid);
2052 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN2;
2053 return (0);
2054
2055 case QLE_PLOGX_ERROR_PORT_ID_USED0x1A:
2056 DPRINTF(QLE_D_PORT, "%s: already logged in to %06x as %d\n",
2057 DEVNAME(sc), port->portid, info);
2058 port->loopid = info;
2059 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN2;
2060 return (0);
2061
2062 case QLE_PLOGX_ERROR_HANDLE_USED0x1B:
2063 if (qle_add_logged_in_port(sc, loopid, info)) {
2064 return (1);
2065 }
2066 port->loopid = 0;
2067 loopid++;
2068 goto retry;
2069
2070 default:
2071 DPRINTF(QLE_D_PORT, "%s: error %x logging in to port %06x\n",
2072 DEVNAME(sc), err, port->portid);
2073 port->loopid = 0;
2074 return (1);
2075 }
2076}
2077
2078void
2079qle_fabric_plogo(struct qle_softc *sc, struct qle_fc_port *port)
2080{
2081 int err;
2082 u_int32_t info;
2083
2084 /*
2085 * we only log out if we can't see the port any more, so we always
2086 * want to do an explicit logout and free the n-port handle.
2087 */
2088 err = qle_fabric_plogx(sc, port, QLE_PLOGX_LOGOUT0x0008 |
2089 QLE_PLOGX_LOGOUT_EXPLICIT0x0040 | QLE_PLOGX_LOGOUT_FREE_HANDLE0x0080, &info);
2090 if (err == 0) {
2091 DPRINTF(QLE_D_PORT, "%s: logged out of port %06x\n",
2092 DEVNAME(sc), port->portid);
2093 } else {
2094 DPRINTF(QLE_D_PORT, "%s: failed to log out of port %06x: "
2095 "%x %x\n", DEVNAME(sc), port->portid, err, info);
2096 }
2097}
2098
2099void
2100qle_deferred_update(void *xsc)
2101{
2102 struct qle_softc *sc = xsc;
2103 task_add(sc->sc_update_taskq, &sc->sc_update_task);
2104}
2105
2106void
2107qle_do_update(void *xsc)
2108{
2109 struct qle_softc *sc = xsc;
2110 int firstport, lastport;
2111 struct qle_fc_port *port, *fport;
2112
2113 DPRINTF(QLE_D_PORT, "%s: updating\n", DEVNAME(sc));
2114 while (sc->sc_update_tasks != 0) {
2115 if (sc->sc_update_tasks & QLE_UPDATE_TASK_CLEAR_ALL0x00000001) {
2116 TAILQ_HEAD(, qle_fc_port) detach;
2117 DPRINTF(QLE_D_PORT, "%s: detaching everything\n",
2118 DEVNAME(sc));
2119
2120 mtx_enter(&sc->sc_port_mtx);
2121 qle_clear_port_lists(sc);
2122 TAILQ_INIT(&detach);
2123 TAILQ_CONCAT(&detach, &sc->sc_ports, ports);
2124 mtx_leave(&sc->sc_port_mtx);
2125
2126 while (!TAILQ_EMPTY(&detach)) {
2127 port = TAILQ_FIRST(&detach);
2128 TAILQ_REMOVE(&detach, port, ports);
2129 if (port->flags & QLE_PORT_FLAG_IS_TARGET) {
2130 scsi_detach_target(sc->sc_scsibus,
2131 port->loopid, DETACH_FORCE |
2132 DETACH_QUIET);
2133 sc->sc_targets[port->loopid] = NULL;
2134 }
2135 if (port->location & QLE_LOCATION_FABRIC)
2136 qle_fabric_plogo(sc, port);
2137
2138 free(port, M_DEVBUF, sizeof *port);
2139 }
2140
2141 qle_update_done(sc, QLE_UPDATE_TASK_CLEAR_ALL0x00000001);
2142 continue;
2143 }
2144
2145 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SOFTRESET0x00000002) {
2146 DPRINTF(QLE_D_IO, "%s: attempting softreset\n",
2147 DEVNAME(sc));
2148 if (qle_softreset(sc) != 0) {
2149 DPRINTF(QLE_D_IO, "%s: couldn't softreset\n",
2150 DEVNAME(sc));
2151 }
2152 qle_update_done(sc, QLE_UPDATE_TASK_SOFTRESET0x00000002);
2153 continue;
2154 }
2155
2156 if (sc->sc_update_tasks & QLE_UPDATE_TASK_UPDATE_TOPO0x00000004) {
2157 DPRINTF(QLE_D_PORT, "%s: updating topology\n",
2158 DEVNAME(sc));
2159 qle_update_topology(sc);
2160 qle_update_done(sc, QLE_UPDATE_TASK_UPDATE_TOPO0x00000004);
2161 continue;
2162 }
2163
2164 if (sc->sc_update_tasks & QLE_UPDATE_TASK_GET_PORT_LIST0x00000008) {
2165 DPRINTF(QLE_D_PORT, "%s: getting port name list\n",
2166 DEVNAME(sc));
2167 mtx_enter(&sc->sc_port_mtx);
2168 qle_clear_port_lists(sc);
2169 mtx_leave(&sc->sc_port_mtx);
2170
2171 qle_get_port_name_list(sc, QLE_LOCATION_LOOP(1 << 24) |
2172 QLE_LOCATION_FABRIC(2 << 24));
2173 mtx_enter(&sc->sc_port_mtx);
2174 TAILQ_FOREACH(port, &sc->sc_ports, ports) {
2175 TAILQ_INSERT_TAIL(&sc->sc_ports_gone, port,
2176 update);
2177 if (port->location & QLE_LOCATION_FABRIC) {
2178 port->flags |=
2179 QLE_PORT_FLAG_NEEDS_LOGIN;
2180 }
2181 }
2182
2183 /* take care of ports that haven't changed first */
2184 TAILQ_FOREACH(fport, &sc->sc_ports_found, update) {
2185 port = sc->sc_targets[fport->loopid];
2186 if (port == NULL || fport->port_name !=
2187 port->port_name) {
2188 /* new or changed port, handled later */
2189 continue;
2190 }
2191
2192 /*
2193 * the port hasn't been logged out, which
2194 * means we don't need to log in again, and,
2195 * for loop ports, that the port still exists
2196 */
2197 port->flags &= ~QLE_PORT_FLAG_NEEDS_LOGIN;
2198 if (port->location & QLE_LOCATION_LOOP)
2199 TAILQ_REMOVE(&sc->sc_ports_gone,
2200 port, update);
2201
2202 fport->location = 0;
2203 }
2204 mtx_leave(&sc->sc_port_mtx);
2205 qle_update_start(sc, QLE_UPDATE_TASK_PORT_LIST0x00000010);
2206 qle_update_done(sc, QLE_UPDATE_TASK_GET_PORT_LIST0x00000008);
2207 continue;
2208 }
2209
2210 if (sc->sc_update_tasks & QLE_UPDATE_TASK_PORT_LIST0x00000010) {
2211 mtx_enter(&sc->sc_port_mtx);
2212 fport = TAILQ_FIRST(&sc->sc_ports_found);
2213 if (fport != NULL) {
2214 TAILQ_REMOVE(&sc->sc_ports_found, fport,
2215 update);
2216 }
2217 mtx_leave(&sc->sc_port_mtx);
2218
2219 if (fport == NULL((void *)0)) {
2220 DPRINTF(QLE_D_PORT, "%s: done with ports\n",
2221 DEVNAME(sc));
2222 qle_update_done(sc,
2223 QLE_UPDATE_TASK_PORT_LIST0x00000010);
2224 qle_update_start(sc,
2225 QLE_UPDATE_TASK_SCAN_FABRIC0x00000020);
2226 } else if (fport->location & QLE_LOCATION_LOOP(1 << 24)) {
2227 DPRINTF(QLE_D_PORT, "%s: loop port %04x\n",
2228 DEVNAME(sc), fport->loopid);
2229 if (qle_add_loop_port(sc, fport) != 0)
2230 free(fport, M_DEVBUF2, sizeof *port);
2231 } else if (fport->location & QLE_LOCATION_FABRIC(2 << 24)) {
2232 qle_add_fabric_port(sc, fport);
2233 } else {
2234 /* already processed */
2235 free(fport, M_DEVBUF2, sizeof *port);
2236 }
2237 continue;
2238 }
2239
2240 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SCAN_FABRIC0x00000020) {
2241 DPRINTF(QLE_D_PORT, "%s: starting fabric scan\n",
2242 DEVNAME(sc));
2243 lastport = sc->sc_port_id;
2244 firstport = 0xffffffff;
2245 if (qle_update_fabric(sc))
2246 qle_update_start(sc,
2247 QLE_UPDATE_TASK_SCANNING_FABRIC0x00000040);
2248 else
2249 qle_update_start(sc,
2250 QLE_UPDATE_TASK_ATTACH_TARGET0x00000400 |
2251 QLE_UPDATE_TASK_DETACH_TARGET0x00000200);
2252
2253 qle_update_done(sc, QLE_UPDATE_TASK_SCAN_FABRIC0x00000020);
2254 continue;
2255 }
2256
2257 if (sc->sc_update_tasks & QLE_UPDATE_TASK_SCANNING_FABRIC0x00000040) {
2258 fport = qle_next_fabric_port(sc, &firstport, &lastport);
2259 if (fport != NULL((void *)0)) {
2260 int disp;
2261
2262 mtx_enter(&sc->sc_port_mtx);
2263 disp = qle_classify_port(sc, fport->location,
2264 fport->port_name, fport->node_name, &port);
2265 switch (disp) {
2266 case QLE_PORT_DISP_CHANGED:
2267 case QLE_PORT_DISP_MOVED:
2268 /* we'll log out the old port later */
2269 case QLE_PORT_DISP_NEW:
2270 DPRINTF(QLE_D_PORT, "%s: new port "
2271 "%06x\n", DEVNAME(sc),
2272 fport->portid);
2273 TAILQ_INSERT_TAIL(&sc->sc_ports_found,
2274 fport, update);
2275 break;
2276 case QLE_PORT_DISP_DUP:
2277 free(fport, M_DEVBUF, sizeof *port);
2278 break;
2279 case QLE_PORT_DISP_SAME:
2280 DPRINTF(QLE_D_PORT, "%s: existing port "
2281 " %06x\n", DEVNAME(sc),
2282 fport->portid);
2283 TAILQ_REMOVE(&sc->sc_ports_gone, port,
2284 update);
2285 free(fport, M_DEVBUF, sizeof *port);
2286 break;
2287 }
2288 mtx_leave(&sc->sc_port_mtx);
2289 }
2290 if (lastport == 0xffffffff) {
2291 DPRINTF(QLE_D_PORT, "%s: finished\n",
2292 DEVNAME(sc));
2293 qle_update_done(sc,
2294 QLE_UPDATE_TASK_SCANNING_FABRIC0x00000040);
2295 qle_update_start(sc,
2296 QLE_UPDATE_TASK_FABRIC_LOGIN0x00000080);
2297 }
2298 continue;
2299 }
2300
2301 if (sc->sc_update_tasks & QLE_UPDATE_TASK_FABRIC_LOGIN0x00000080) {
2302 mtx_enter(&sc->sc_port_mtx);
2303 port = TAILQ_FIRST(&sc->sc_ports_found);
2304 if (port != NULL) {
2305 TAILQ_REMOVE(&sc->sc_ports_found, port, update);
2306 }
2307 mtx_leave(&sc->sc_port_mtx);
2308
2309 if (port != NULL((void *)0)) {
2310 DPRINTF(QLE_D_PORT, "%s: found port %06x\n",
2311 DEVNAME(sc), port->portid);
2312 if (qle_fabric_plogi(sc, port) == 0) {
2313 qle_add_fabric_port(sc, port);
2314 } else {
2315 DPRINTF(QLE_D_PORT, "%s: plogi %06x "
2316 "failed\n", DEVNAME(sc),
2317 port->portid);
2318 free(port, M_DEVBUF2, sizeof *port);
2319 }
2320 } else {
2321 DPRINTF(QLE_D_PORT, "%s: done with logins\n",
2322 DEVNAME(sc));
2323 qle_update_done(sc,
2324 QLE_UPDATE_TASK_FABRIC_LOGIN0x00000080);
2325 qle_update_start(sc,
2326 QLE_UPDATE_TASK_ATTACH_TARGET0x00000400 |
2327 QLE_UPDATE_TASK_DETACH_TARGET0x00000200);
2328 }
2329 continue;
2330 }
2331
2332 if (sc->sc_update_tasks & QLE_UPDATE_TASK_FABRIC_RELOGIN0x00000100) {
2333 TAILQ_FOREACH(port, &sc->sc_ports, ports) {
2334 if (port->flags & QLE_PORT_FLAG_NEEDS_LOGIN) {
2335 qle_fabric_plogi(sc, port);
2336 break;
2337 }
2338 }
2339
2340 if (port == NULL((void *)0))
2341 qle_update_done(sc,
2342 QLE_UPDATE_TASK_FABRIC_RELOGIN0x00000100);
2343 continue;
2344 }
2345
2346 if (sc->sc_update_tasks & QLE_UPDATE_TASK_DETACH_TARGET0x00000200) {
2347 mtx_enter(&sc->sc_port_mtx);
2348 port = TAILQ_FIRST(&sc->sc_ports_gone);
2349 if (port != NULL) {
2350 sc->sc_targets[port->loopid] = NULL;
2351 TAILQ_REMOVE(&sc->sc_ports_gone, port, update);
2352 TAILQ_REMOVE(&sc->sc_ports, port, ports);
2353 }
2354 mtx_leave(&sc->sc_port_mtx);
2355
2356 if (port != NULL((void *)0)) {
2357 DPRINTF(QLE_D_PORT, "%s: detaching port %06x\n",
2358 DEVNAME(sc), port->portid);
2359 if (sc->sc_scsibus != NULL((void *)0))
2360 scsi_detach_target(sc->sc_scsibus,
2361 port->loopid, DETACH_FORCE0x01 |
2362 DETACH_QUIET0x02);
2363
2364 if (port->location & QLE_LOCATION_FABRIC(2 << 24))
2365 qle_fabric_plogo(sc, port);
2366
2367 free(port, M_DEVBUF2, sizeof *port);
2368 } else {
2369 DPRINTF(QLE_D_PORT, "%s: nothing to detach\n",
2370 DEVNAME(sc));
2371 qle_update_done(sc,
2372 QLE_UPDATE_TASK_DETACH_TARGET0x00000200);
2373 }
2374 continue;
2375 }
2376
2377 if (sc->sc_update_tasks & QLE_UPDATE_TASK_ATTACH_TARGET0x00000400) {
2378 mtx_enter(&sc->sc_port_mtx);
2379 port = TAILQ_FIRST(&sc->sc_ports_new);
2380 if (port != NULL) {
2381 TAILQ_REMOVE(&sc->sc_ports_new, port, update);
2382 TAILQ_INSERT_TAIL(&sc->sc_ports, port, ports);
2383 }
2384 mtx_leave(&sc->sc_port_mtx);
2385
2386 if (port != NULL((void *)0)) {
2387 if (sc->sc_scsibus != NULL((void *)0))
2388 scsi_probe_target(sc->sc_scsibus,
2389 port->loopid);
2390 } else {
2391 qle_update_done(sc,
2392 QLE_UPDATE_TASK_ATTACH_TARGET0x00000400);
2393 }
2394 continue;
2395 }
2396
2397 }
2398
2399 DPRINTF(QLE_D_PORT, "%s: done updating\n", DEVNAME(sc));
2400}
2401
2402int
2403qle_async(struct qle_softc *sc, u_int16_t info)
2404{
2405 switch (info) {
2406 case QLE_ASYNC_SYSTEM_ERROR0x8002:
2407 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET0x00000002);
2408 break;
2409
2410 case QLE_ASYNC_REQ_XFER_ERROR0x8003:
2411 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET0x00000002);
2412 break;
2413
2414 case QLE_ASYNC_RSP_XFER_ERROR0x8004:
2415 qle_update_start(sc, QLE_UPDATE_TASK_SOFTRESET0x00000002);
2416 break;
2417
2418 case QLE_ASYNC_LIP_OCCURRED0x8010:
2419 DPRINTF(QLE_D_INTR, "%s: lip occurred\n", DEVNAME(sc));
2420 break;
2421
2422 case QLE_ASYNC_LOOP_UP0x8011:
2423 DPRINTF(QLE_D_PORT, "%s: loop up\n", DEVNAME(sc));
2424 sc->sc_loop_up = 1;
2425 sc->sc_marker_required = 1;
2426 qle_update_defer(sc, QLE_UPDATE_TASK_UPDATE_TOPO0x00000004 |
2427 QLE_UPDATE_TASK_GET_PORT_LIST0x00000008);
2428 break;
2429
2430 case QLE_ASYNC_LOOP_DOWN0x8012:
2431 DPRINTF(QLE_D_PORT, "%s: loop down\n", DEVNAME(sc));
2432 sc->sc_loop_up = 0;
2433 qle_update_cancel(sc);
2434 qle_update_start(sc, QLE_UPDATE_TASK_CLEAR_ALL0x00000001);
2435 break;
2436
2437 case QLE_ASYNC_LIP_RESET0x8013:
2438 DPRINTF(QLE_D_PORT, "%s: lip reset\n", DEVNAME(sc));
2439 sc->sc_marker_required = 1;
2440 qle_update_defer(sc, QLE_UPDATE_TASK_FABRIC_RELOGIN0x00000100);
2441 break;
2442
2443 case QLE_ASYNC_PORT_DB_CHANGE0x8014:
2444 DPRINTF(QLE_D_PORT, "%s: port db changed %x\n", DEVNAME(sc),
2445 qle_read_mbox(sc, 1));
2446 qle_update_start(sc, QLE_UPDATE_TASK_GET_PORT_LIST0x00000008);
2447 break;
2448
2449 case QLE_ASYNC_CHANGE_NOTIFY0x8015:
2450 DPRINTF(QLE_D_PORT, "%s: name server change (%02x:%02x)\n",
2451 DEVNAME(sc), qle_read_mbox(sc, 1), qle_read_mbox(sc, 2));
2452 qle_update_start(sc, QLE_UPDATE_TASK_GET_PORT_LIST0x00000008);
2453 break;
2454
2455 case QLE_ASYNC_LIP_F80x8016:
2456 DPRINTF(QLE_D_INTR, "%s: lip f8\n", DEVNAME(sc));
2457 break;
2458
2459 case QLE_ASYNC_LOOP_INIT_ERROR0x8017:
2460 DPRINTF(QLE_D_PORT, "%s: loop initialization error: %x\n",
2461 DEVNAME(sc), qle_read_mbox(sc, 1));
2462 break;
2463
2464 case QLE_ASYNC_POINT_TO_POINT0x8030:
2465 DPRINTF(QLE_D_PORT, "%s: connected in point-to-point mode\n",
2466 DEVNAME(sc));
2467 break;
2468
2469 case QLE_ASYNC_ZIO_RESP_UPDATE0x8040:
2470 /* shouldn't happen, we don't do zio */
2471 break;
2472
2473 default:
2474 DPRINTF(QLE_D_INTR, "%s: unknown async %x\n", DEVNAME(sc), info);
2475 break;
2476 }
2477 return (1);
2478}
2479
2480void
2481qle_dump_stuff(struct qle_softc *sc, void *buf, int n)
2482{
2483#ifdef QLE_DEBUG
2484 u_int8_t *d = buf;
2485 int l;
2486
2487 if ((qledebug & QLE_D_IOCB) == 0)
2488 return;
2489
2490 printf("%s: stuff\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2491 for (l = 0; l < n; l++) {
2492 printf(" %2.2x", d[l]);
2493 if (l % 16 == 15)
2494 printf("\n");
2495 }
2496 if (n % 16 != 0)
2497 printf("\n");
2498#endif
2499}
2500
2501void
2502qle_dump_iocb(struct qle_softc *sc, void *buf)
2503{
2504#ifdef QLE_DEBUG
2505 u_int8_t *iocb = buf;
2506 int l;
2507 int b;
2508
2509 if ((qledebug & QLE_D_IOCB) == 0)
2510 return;
2511
2512 printf("%s: iocb:\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2513 for (l = 0; l < 4; l++) {
2514 for (b = 0; b < 16; b++) {
2515 printf(" %2.2x", iocb[(l*16)+b]);
2516 }
2517 printf("\n");
2518 }
2519#endif
2520}
2521
2522void
2523qle_dump_iocb_segs(struct qle_softc *sc, void *segs, int n)
2524{
2525#ifdef QLE_DEBUG
2526 u_int8_t *buf = segs;
2527 int s, b;
2528
2529 if ((qledebug & QLE_D_IOCB) == 0)
2530 return;
2531
2532 printf("%s: iocb segs:\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2533 for (s = 0; s < n; s++) {
2534 for (b = 0; b < sizeof(struct qle_iocb_seg); b++) {
2535 printf(" %2.2x", buf[(s*(sizeof(struct qle_iocb_seg)))
2536 + b]);
2537 }
2538 printf("\n");
2539 }
2540#endif
2541}
2542
2543void
2544qle_put_marker(struct qle_softc *sc, void *buf)
2545{
2546 struct qle_iocb_marker *marker = buf;
2547
2548 marker->entry_type = QLE_IOCB_MARKER0x04;
2549 marker->entry_count = 1;
2550 marker->seqno = 0;
2551 marker->flags = 0;
2552
2553 /* could be more specific here; isp(4) isn't */
2554 marker->target = 0;
2555 marker->modifier = QLE_IOCB_MARKER_SYNC_ALL2;
2556}
2557
2558void
2559qle_sge(struct qle_iocb_seg *seg, u_int64_t addr, u_int32_t len)
2560{
2561 htolem32(&seg->seg_addr_lo, addr);
2562 htolem32(&seg->seg_addr_hi, addr >> 32);
2563 htolem32(&seg->seg_len, len);
2564}
2565
2566void
2567qle_put_cmd(struct qle_softc *sc, void *buf, struct scsi_xfer *xs,
2568 struct qle_ccb *ccb, u_int32_t target_port)
2569{
2570 bus_dmamap_t dmap = ccb->ccb_dmamap;
2571 struct qle_iocb_req6 *req = buf;
2572 struct qle_fcp_cmnd *cmnd;
2573 u_int64_t fcp_cmnd_offset;
2574 u_int32_t fcp_dl;
2575 int seg;
2576 int target = xs->sc_link->target;
2577 int lun = xs->sc_link->lun;
2578 u_int16_t flags;
2579
2580 memset(req, 0, sizeof(*req));
2581 req->entry_type = QLE_IOCB_CMD_TYPE_6;
2582 req->entry_count = 1;
2583
2584 req->req_handle = ccb->ccb_id;
2585 htolem16(&req->req_nport_handle, target);
2586
2587 /*
2588 * timeout is in seconds. make sure it's at least 1 if a timeout
2589 * was specified in xs
2590 */
2591 if (xs->timeout != 0)
2592 htolem16(&req->req_timeout, MAX(1, xs->timeout/1000));
2593
2594 if (xs->datalen > 0) {
2595 flags = (xs->flags & SCSI_DATA_IN) ?
2596 QLE_IOCB_CTRL_FLAG_READ : QLE_IOCB_CTRL_FLAG_WRITE;
2597 if (dmap->dm_nsegs == 1) {
2598 qle_sge(&req->req_data_seg, dmap->dm_segs[0].ds_addr,
2599 dmap->dm_segs[0].ds_len);
2600 } else {
2601 flags |= QLE_IOCB_CTRL_FLAG_EXT_SEG;
2602 for (seg = 0; seg < dmap->dm_nsegs; seg++) {
2603 qle_sge(&ccb->ccb_segs[seg],
2604 dmap->dm_segs[seg].ds_addr,
2605 dmap->dm_segs[seg].ds_len);
2606 }
2607 qle_sge(&ccb->ccb_segs[seg++], 0, 0);
2608
2609 bus_dmamap_sync(sc->sc_dmat,
2610 QLE_DMA_MAP(sc->sc_segments), ccb->ccb_seg_offset,
2611 seg * sizeof(*ccb->ccb_segs),
2612 BUS_DMASYNC_PREWRITE);
2613
2614 qle_sge(&req->req_data_seg,
2615 QLE_DMA_DVA(sc->sc_segments) + ccb->ccb_seg_offset,
2616 seg * sizeof(struct qle_iocb_seg));
2617 }
2618
2619 htolem16(&req->req_data_seg_count, dmap->dm_nsegs);
2620 htolem32(&req->req_data_len, xs->datalen);
2621 htolem16(&req->req_ctrl_flags, flags);
2622 }
2623
2624 htobem16(&req->req_fcp_lun[0], lun);
2625 htobem16(&req->req_fcp_lun[1], lun >> 16);
2626 htolem32(&req->req_target_id, target_port & 0xffffff);
2627
2628 fcp_cmnd_offset = ccb->ccb_id * sizeof(*cmnd);
2629 /* set up FCP_CMND */
2630 cmnd = (struct qle_fcp_cmnd *)QLE_DMA_KVA(sc->sc_fcp_cmnds) +
2631 ccb->ccb_id;
2632
2633 memset(cmnd, 0, sizeof(*cmnd));
2634 htobem16(&cmnd->fcp_lun[0], lun);
2635 htobem16(&cmnd->fcp_lun[1], lun >> 16);
2636 /* cmnd->fcp_task_attr = TSK_SIMPLE; */
2637 /* cmnd->fcp_task_mgmt = 0; */
2638 memcpy(cmnd->fcp_cdb, &xs->cmd, xs->cmdlen);
2639
2640 /* FCP_DL goes after the cdb */
2641 fcp_dl = htobe32(xs->datalen);
2642 if (xs->cmdlen > 16) {
2643 htolem16(&req->req_fcp_cmnd_len, 12 + xs->cmdlen + 4);
2644 cmnd->fcp_add_cdb_len = xs->cmdlen - 16;
2645 memcpy(cmnd->fcp_cdb + xs->cmdlen, &fcp_dl, sizeof(fcp_dl));
2646 } else {
2647 htolem16(&req->req_fcp_cmnd_len, 12 + 16 + 4);
2648 cmnd->fcp_add_cdb_len = 0;
2649 memcpy(cmnd->fcp_cdb + 16, &fcp_dl, sizeof(fcp_dl));
2650 }
2651 if (xs->datalen > 0)
2652 cmnd->fcp_add_cdb_len |= (xs->flags & SCSI_DATA_IN) ? 2 : 1;
2653
2654 bus_dmamap_sync(sc->sc_dmat,
2655 QLE_DMA_MAP(sc->sc_fcp_cmnds), fcp_cmnd_offset,
2656 sizeof(*cmnd), BUS_DMASYNC_PREWRITE);
2657
2658 /* link req to cmnd */
2659 fcp_cmnd_offset += QLE_DMA_DVA(sc->sc_fcp_cmnds);
2660 htolem32(&req->req_fcp_cmnd_addr_lo, fcp_cmnd_offset);
2661 htolem32(&req->req_fcp_cmnd_addr_hi, fcp_cmnd_offset >> 32);
2662}
2663
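qle_put_cmd() places a big-endian FCP_DL word immediately after the CDB in the FCP_CMND, reserving the full 16-byte CDB area when the CDB is short. A minimal user-space sketch of that layout step, assuming only the byte placement shown above; the function and parameter names here are illustrative and not part of qle.c:

#include <stdint.h>
#include <string.h>

static void
put_fcp_dl(uint8_t *cdb_area, size_t cdblen, uint32_t datalen)
{
	uint8_t fcp_dl[4];

	/* encode the data length big-endian, as htobe32() does above */
	fcp_dl[0] = datalen >> 24;
	fcp_dl[1] = datalen >> 16;
	fcp_dl[2] = datalen >> 8;
	fcp_dl[3] = datalen;

	/* FCP_DL follows the CDB; short CDBs still reserve 16 bytes */
	if (cdblen > 16)
		memcpy(cdb_area + cdblen, fcp_dl, sizeof(fcp_dl));
	else
		memcpy(cdb_area + 16, fcp_dl, sizeof(fcp_dl));
}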
2664int
2665qle_load_fwchunk(struct qle_softc *sc, struct qle_dmamem *mem,
2666 const u_int32_t *src)
2667{
2668 u_int32_t dest, done, total;
2669 int i;
2670
2671 dest = src[2];
2672 done = 0;
2673 total = src[3];
2674
2675 while (done < total) {
2676 u_int32_t *copy;
2677 u_int32_t words;
2678
2679 /* limit transfer size otherwise it just doesn't work */
2680 words = MIN(total - done, 1 << 10);
2681 copy = QLE_DMA_KVA(mem);
2682 for (i = 0; i < words; i++) {
2683 htolem32(&copy[i], src[done++]);
2684 }
2685 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, words * 4,
2686 BUS_DMASYNC_PREWRITE);
2687
2688 sc->sc_mbox[0] = QLE_MBOX_LOAD_RISC_RAM;
2689 sc->sc_mbox[1] = dest;
2690 sc->sc_mbox[4] = words >> 16;
2691 sc->sc_mbox[5] = words & 0xffff;
2692 sc->sc_mbox[8] = dest >> 16;
2693 qle_mbox_putaddr(sc->sc_mbox, mem);
2694 if (qle_mbox(sc, 0x01ff)) {
2695 printf("firmware load failed\n");
2696 return (1);
2697 }
2698 bus_dmamap_sync(sc->sc_dmat, QLE_DMA_MAP(mem), 0, words * 4,
2699 BUS_DMASYNC_POSTWRITE);
2700
2701 dest += words;
2702 }
2703
2704 return (qle_verify_firmware(sc, src[2]));
2705}
2706
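qle_load_fwchunk() copies at most 1024 words per iteration, as the comment above notes, advancing both the source index and the RISC RAM destination until the chunk is exhausted. A standalone sketch of that chunking arithmetic only, with a hypothetical copy_words() callback standing in for the LOAD_RISC_RAM mailbox command:

#include <stdint.h>

#define CHUNK_WORDS	(1 << 10)	/* same 1024-word cap as above */

static int
load_in_chunks(const uint32_t *src, uint32_t total, uint32_t dest,
    int (*copy_words)(uint32_t dest, const uint32_t *words, uint32_t n))
{
	uint32_t done = 0;

	while (done < total) {
		uint32_t words = total - done;

		if (words > CHUNK_WORDS)
			words = CHUNK_WORDS;
		if (copy_words(dest, src + done, words) != 0)
			return (1);	/* transfer failed */
		done += words;
		dest += words;
	}
	return (0);
}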
2707int
2708qle_load_firmware_chunks(struct qle_softc *sc, const u_int32_t *fw)
2709{
2710 struct qle_dmamem *mem;
2711 int res = 0;
2712
2713 mem = qle_dmamem_alloc(sc, 65536);
2714 for (;;) {
2715 if (qle_load_fwchunk(sc, mem, fw)) {
2716 res = 1;
2717 break;
2718 }
2719 if (fw[1] == 0)
2720 break;
2721 fw += fw[3];
2722 }
2723
2724 qle_dmamem_free(sc, mem);
2725 return (res);
2726}
2727
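The loop in qle_load_firmware_chunks() walks the firmware image chunk by chunk. A sketch of that walk, assuming (as the loop implies, not stated elsewhere in the source) that word 3 of each chunk header is its length in 32-bit words and a zero in word 1 marks the final chunk:

#include <stdint.h>

static int
for_each_chunk(const uint32_t *fw, int (*load)(const uint32_t *chunk))
{
	for (;;) {
		if (load(fw) != 0)
			return (1);
		if (fw[1] == 0)		/* last chunk */
			break;
		fw += fw[3];		/* advance by this chunk's word count */
	}
	return (0);
}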
2728u_int32_t
2729qle_read_ram_word(struct qle_softc *sc, u_int32_t addr)
2730{
2731 sc->sc_mbox[0] = QLE_MBOX_READ_RISC_RAM;
2732 sc->sc_mbox[1] = addr & 0xffff;
2733 sc->sc_mbox[8] = addr >> 16;
2734 if (qle_mbox(sc, 0x0103)) {
2735 return (0);
2736 }
2737 return ((sc->sc_mbox[3] << 16) | sc->sc_mbox[2]);
2738}
2739
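The mailbox interface is 16 bits wide, so qle_read_ram_word() splits the RISC RAM address across two registers and reassembles the 32-bit result from two halves. A user-space sketch of just that packing, with the mbox[] array as a stand-in for the real mailbox registers:

#include <stdint.h>

static uint32_t
read_ram_word_sketch(uint16_t mbox[16], uint32_t addr)
{
	mbox[1] = addr & 0xffff;	/* low 16 bits of the address */
	mbox[8] = addr >> 16;		/* high 16 bits of the address */

	/* ... the mailbox command would run here ... */

	/* mbox[2] holds the low half of the word, mbox[3] the high half */
	return (((uint32_t)mbox[3] << 16) | mbox[2]);
}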
2740int
2741qle_verify_firmware(struct qle_softc *sc, u_int32_t addr)
2742{
2743 /*
2744 * QLE_MBOX_VERIFY_CSUM requires at least the firmware header
2745 * to be correct, otherwise it wanders all over ISP memory and
2746 * gets lost. Check that chunk address (addr+2) is right and
2747 * size (addr+3) is plausible first.
2748 */
2749 if ((qle_read_ram_word(sc, addr+2) != addr) ||
2750 (qle_read_ram_word(sc, addr+3) > 0xffff)) {
2751 return (1);
2752 }
2753
2754 sc->sc_mbox[0] = QLE_MBOX_VERIFY_CSUM;
2755 sc->sc_mbox[1] = addr >> 16;
2756 sc->sc_mbox[2] = addr;
2757 if (qle_mbox(sc, 0x0007)) {
2758 return (1);
2759 }
2760 return (0);
2761}
2762
2763int
2764qle_read_nvram(struct qle_softc *sc)
2765{
2766 u_int32_t data[sizeof(sc->sc_nvram) / 4];
2767 u_int32_t csum, tmp, v;
2768 int i, base, l;
2769
2770 switch (sc->sc_isp_gen) {
2771 case QLE_GEN_ISP24XX:
2772 base = 0x7ffe0080;
2773 break;
2774 case QLE_GEN_ISP25XX:
2775 base = 0x7ff48080;
2776 break;
2777 }
2778 base += sc->sc_port * 0x100;
2779
2780 csum = 0;
2781 for (i = 0; i < nitems(data); i++) {
2782 data[i] = 0xffffffff;
2783 qle_write(sc, QLE_FLASH_NVRAM_ADDR, base + i);
2784 for (l = 0; l < 5000; l++) {
2785 delay(10);
2786 tmp = qle_read(sc, QLE_FLASH_NVRAM_ADDR);
2787 if (tmp & (1U << 31)) {
2788 v = qle_read(sc, QLE_FLASH_NVRAM_DATA);
2789 csum += v;
2790 data[i] = letoh32(v);
2791 break;
2792 }
2793 }
2794 }
2795
2796 bcopy(data, &sc->sc_nvram, sizeof(sc->sc_nvram));
2797 /* id field should be 'ISP' */
2798 if (sc->sc_nvram.id[0] != 'I' || sc->sc_nvram.id[1] != 'S' ||
2799 sc->sc_nvram.id[2] != 'P' || csum != 0) {
2800 printf("%s: nvram corrupt\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2801 return (1);
2802 }
2803 return (0);
2804}
2805
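qle_read_nvram() accepts the NVRAM only if the 32-bit words (summed as read, before byte-swapping) wrap to zero and the id field spells "ISP". A standalone sketch of that acceptance test, using plain user-space types rather than the driver's structures:

#include <stdint.h>
#include <stddef.h>

static int
nvram_looks_valid(const uint32_t *words, size_t nwords, const char *id)
{
	uint32_t csum = 0;
	size_t i;

	for (i = 0; i < nwords; i++)
		csum += words[i];	/* unsigned sum wraps on overflow */

	return (csum == 0 &&
	    id[0] == 'I' && id[1] == 'S' && id[2] == 'P');
}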
2806struct qle_dmamem *
2807qle_dmamem_alloc(struct qle_softc *sc, size_t size)
2808{
2809 struct qle_dmamem *m;
2810 int nsegs;
2811
2812 m = malloc(sizeof(*m), M_DEVBUF, M_NOWAIT | M_ZERO);
2813 if (m == NULL)
2814 return (NULL);
2815
2816 m->qdm_size = size;
2817
2818 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
2819 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->qdm_map) != 0)
2820 goto qdmfree;
2821
2822 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->qdm_seg, 1,
2823 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO) != 0)
2824 goto destroy;
2825
2826 if (bus_dmamem_map(sc->sc_dmat, &m->qdm_seg, nsegs, size, &m->qdm_kva,
2827 BUS_DMA_NOWAIT) != 0)
2828 goto free;
2829
2830 if (bus_dmamap_load(sc->sc_dmat, m->qdm_map, m->qdm_kva, size, NULL,
2831 BUS_DMA_NOWAIT) != 0)
2832 goto unmap;
2833
2834 return (m);
2835
2836unmap:
2837 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
2838free:
2839 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
2840destroy:
2841 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
2842qdmfree:
2843 free(m, M_DEVBUF, sizeof *m);
2844
2845 return (NULL);
2846}
2847
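qle_dmamem_alloc() acquires its resources in order (map, memory, mapping, load) and unwinds them in reverse order through labels when a later step fails. A minimal user-space sketch of the same goto-unwind shape, using ordinary malloc/fopen resources rather than bus_dma; the names are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct ctx {
	char *buf;
	FILE *fp;
};

static int
ctx_setup(struct ctx *c, const char *path, size_t bufsize)
{
	c->buf = malloc(bufsize);
	if (c->buf == NULL)
		goto fail;
	c->fp = fopen(path, "r");
	if (c->fp == NULL)
		goto free_buf;
	return (0);		/* both resources held on success */

free_buf:
	free(c->buf);		/* undo the earlier step only */
fail:
	return (-1);
}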
2848void
2849qle_dmamem_free(struct qle_softc *sc, struct qle_dmamem *m)
2850{
2851 bus_dmamap_unload(sc->sc_dmat, m->qdm_map);
2852 bus_dmamem_unmap(sc->sc_dmat, m->qdm_kva, m->qdm_size);
2853 bus_dmamem_free(sc->sc_dmat, &m->qdm_seg, 1);
2854 bus_dmamap_destroy(sc->sc_dmat, m->qdm_map);
2855 free(m, M_DEVBUF, sizeof *m);
2856}
2857
2858int
2859qle_alloc_ccbs(struct qle_softc *sc)
2860{
2861 struct qle_ccb *ccb;
2862 u_int8_t *cmd;
2863 int i;
2864
2865 SIMPLEQ_INIT(&sc->sc_ccb_free);
2866 mtx_init(&sc->sc_ccb_mtx, IPL_BIO);
2867 mtx_init(&sc->sc_queue_mtx, IPL_BIO);
2868 mtx_init(&sc->sc_port_mtx, IPL_BIO);
2869 mtx_init(&sc->sc_mbox_mtx, IPL_BIO);
2870
2871 sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct qle_ccb),
2872 M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
2873 if (sc->sc_ccbs == NULL) {
2874 printf("%s: unable to allocate ccbs\n", DEVNAME(sc));
2875 return (1);
2876 }
2877
2878 sc->sc_requests = qle_dmamem_alloc(sc, sc->sc_maxcmds *
2879 QLE_QUEUE_ENTRY_SIZE);
2880 if (sc->sc_requests == NULL) {
2881 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc));
2882 goto free_ccbs;
2883 }
2884 sc->sc_responses = qle_dmamem_alloc(sc, sc->sc_maxcmds *
2885 QLE_QUEUE_ENTRY_SIZE);
2886 if (sc->sc_responses == NULL) {
2887 printf("%s: unable to allocate rcb dmamem\n", DEVNAME(sc));
2888 goto free_req;
2889 }
2890 sc->sc_pri_requests = qle_dmamem_alloc(sc, 8 * QLE_QUEUE_ENTRY_SIZE);
2891 if (sc->sc_pri_requests == NULL) {
2892 printf("%s: unable to allocate pri ccb dmamem\n", DEVNAME(sc));
2893 goto free_res;
2894 }
2895 sc->sc_segments = qle_dmamem_alloc(sc, sc->sc_maxcmds * QLE_MAX_SEGS *
2896 sizeof(struct qle_iocb_seg));
2897 if (sc->sc_segments == NULL) {
2898 printf("%s: unable to allocate iocb segments\n", DEVNAME(sc));
2899 goto free_pri;
2900 }
2901
2902 sc->sc_fcp_cmnds = qle_dmamem_alloc(sc, sc->sc_maxcmds *
2903 sizeof(struct qle_fcp_cmnd));
2904 if (sc->sc_fcp_cmnds == NULL) {
2905 printf("%s: unable to allocate FCP_CMNDs\n", DEVNAME(sc));
2906 goto free_seg;
2907 }
2908
2909 cmd = QLE_DMA_KVA(sc->sc_requests);
2910 memset(cmd, 0, QLE_QUEUE_ENTRY_SIZE * sc->sc_maxcmds);
2911 for (i = 0; i < sc->sc_maxcmds; i++) {
2912 ccb = &sc->sc_ccbs[i];
2913
2914 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,
2915 QLE_MAX_SEGS-1, MAXPHYS, 0,
2916 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
2917 &ccb->ccb_dmamap) != 0) {
2918 printf("%s: unable to create dma map\n", DEVNAME(sc));
2919 goto free_maps;
2920 }
2921
2922 ccb->ccb_sc = sc;
2923 ccb->ccb_id = i;
2924
2925 ccb->ccb_seg_offset = i * QLE_MAX_SEGS *
2926 sizeof(struct qle_iocb_seg);
2927 ccb->ccb_segs = QLE_DMA_KVA(sc->sc_segments) +
2928 ccb->ccb_seg_offset;
2929
2930 qle_put_ccb(sc, ccb);
2931 }
2932
2933 scsi_iopool_init(&sc->sc_iopool, sc, qle_get_ccb, qle_put_ccb);
2934 return (0);
2935
2936free_maps:
2937 while ((ccb = qle_get_ccb(sc)) != NULL)
2938 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2939
2940 qle_dmamem_free(sc, sc->sc_fcp_cmnds);
2941free_seg:
2942 qle_dmamem_free(sc, sc->sc_segments);
2943free_pri:
2944 qle_dmamem_free(sc, sc->sc_pri_requests);
2945free_res:
2946 qle_dmamem_free(sc, sc->sc_responses);
2947free_req:
2948 qle_dmamem_free(sc, sc->sc_requests);
2949free_ccbs:
2950 free(sc->sc_ccbs, M_DEVBUF, 0);
2951
2952 return (1);
2953}
2954
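Each ccb above records its id and a fixed-size slice of the shared segment allocation, computed as i * QLE_MAX_SEGS * sizeof(struct qle_iocb_seg). A plain-arithmetic sketch of that per-command slicing, with a made-up slice size and structure names standing in for the driver's:

#include <stdint.h>
#include <stddef.h>

#define SLICE_SIZE	(32 * sizeof(uint64_t))	/* stand-in for a ccb's segment area */

struct cmd_slot {
	int id;
	size_t offset;		/* byte offset into the shared block */
	void *kva;		/* this command's slice of the block */
};

static void
init_slots(struct cmd_slot *slots, int nslots, void *block)
{
	int i;

	for (i = 0; i < nslots; i++) {
		slots[i].id = i;
		slots[i].offset = (size_t)i * SLICE_SIZE;
		slots[i].kva = (char *)block + slots[i].offset;
	}
}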
2955void
2956qle_free_ccbs(struct qle_softc *sc)
2957{
2958 struct qle_ccb *ccb;
2959
2960 scsi_iopool_destroy(&sc->sc_iopool);
2961 while ((ccb = qle_get_ccb(sc)) != NULL)
2962 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap);
2963 qle_dmamem_free(sc, sc->sc_segments);
2964 qle_dmamem_free(sc, sc->sc_responses);
2965 qle_dmamem_free(sc, sc->sc_requests);
2966 free(sc->sc_ccbs, M_DEVBUF, 0);
2967}
2968
2969void *
2970qle_get_ccb(void *xsc)
2971{
2972 struct qle_softc *sc = xsc;
2973 struct qle_ccb *ccb;
2974
2975 mtx_enter(&sc->sc_ccb_mtx);
2976 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free);
2977 if (ccb != NULL) {
2978 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link);
2979 }
2980 mtx_leave(&sc->sc_ccb_mtx);
2981 return (ccb);
2982}
2983
2984void
2985qle_put_ccb(void *xsc, void *io)
2986{
2987 struct qle_softc *sc = xsc;
2988 struct qle_ccb *ccb = io;
2989
2990 ccb->ccb_xs = NULL;
2991 mtx_enter(&sc->sc_ccb_mtx);
2992 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link);
2993 mtx_leave(&sc->sc_ccb_mtx);
2994}
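qle_get_ccb() and qle_put_ccb() implement a mutex-protected free list with O(1) removal and insertion at the head. A user-space analogue of the same pattern, with pthread primitives standing in for mtx_enter()/mtx_leave() and a plain singly linked list in place of SIMPLEQ; the names are illustrative only:

#include <pthread.h>
#include <stddef.h>

struct ccb_like {
	struct ccb_like *next;
};

struct free_list {
	pthread_mutex_t mtx;
	struct ccb_like *head;
};

static struct ccb_like *
fl_get(struct free_list *fl)
{
	struct ccb_like *c;

	pthread_mutex_lock(&fl->mtx);
	c = fl->head;
	if (c != NULL)
		fl->head = c->next;	/* pop the head if the list is not empty */
	pthread_mutex_unlock(&fl->mtx);
	return (c);
}

static void
fl_put(struct free_list *fl, struct ccb_like *c)
{
	pthread_mutex_lock(&fl->mtx);
	c->next = fl->head;		/* push back at the head */
	fl->head = c;
	pthread_mutex_unlock(&fl->mtx);
}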