Bug Summary

File: dev/ic/mpi.c
Warning: line 1559, column 16
Access to field 'sg_hdr' results in a dereference of a null pointer (loaded from variable 'sge')
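For orientation, the analyzer's path (annotated below) runs through mpi_scsi_cmd() and ends with a call into mpi_load_xs(); the reported dereference of 'sge' at line 1559 sits on that path. As a minimal illustrative sketch (not the driver's mpi_load_xs() and not a proposed patch), the fragment below shows the shape of the defect being reported and the kind of NULL guard that would close the flagged path. All identifiers except the sg_hdr field name are invented for the example.

/*
 * Illustrative sketch, assuming a scatter-gather element pointer that may
 * be NULL on some path.  Writing sge->sg_hdr through such a pointer is the
 * defect class reported above.
 */
#include <stddef.h>
#include <stdint.h>

struct sge_sketch {
	uint32_t	sg_hdr;		/* header word, as in the flagged field */
	uint32_t	sg_addr_lo;
	uint32_t	sg_addr_hi;
};

static int
sge_fill_hdr(struct sge_sketch *sge, uint32_t flags, size_t len)
{
	if (sge == NULL)
		return (-1);	/* guard the path the analyzer flags */

	/* analogous to the store the report points at (mpi.c:1559) */
	sge->sg_hdr = flags | (uint32_t)len;
	return (0);
}

Whether the NULL path is reachable at runtime depends on how the request's scatter-gather list is sized against the DMA map; the sketch only mirrors the analyzer's reasoning.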

Annotated Source Code


clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name mpi.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free 
-fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/ic/mpi.c
1/* $OpenBSD: mpi.c,v 1.223 2020/09/22 19:32:52 krw Exp $ */
2
3/*
4 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include "bio.h"
21
22#include <sys/param.h>
23#include <sys/systm.h>
24#include <sys/buf.h>
25#include <sys/device.h>
26#include <sys/malloc.h>
27#include <sys/kernel.h>
28#include <sys/mutex.h>
29#include <sys/rwlock.h>
30#include <sys/sensors.h>
31#include <sys/dkio.h>
32#include <sys/task.h>
33
34#include <machine/bus.h>
35
36#include <scsi/scsi_all.h>
37#include <scsi/scsiconf.h>
38
39#include <dev/biovar.h>
40#include <dev/ic/mpireg.h>
41#include <dev/ic/mpivar.h>
42
43#ifdef MPI_DEBUG
44uint32_t mpi_debug = 0
45/* | MPI_D_CMD */
46/* | MPI_D_INTR */
47/* | MPI_D_MISC */
48/* | MPI_D_DMA */
49/* | MPI_D_IOCTL */
50/* | MPI_D_RW */
51/* | MPI_D_MEM */
52/* | MPI_D_CCB */
53/* | MPI_D_PPR */
54/* | MPI_D_RAID */
55/* | MPI_D_EVT */
56 ;
57#endif
58
59struct cfdriver mpi_cd = {
60 NULL((void *)0),
61 "mpi",
62 DV_DULL
63};
64
65void mpi_scsi_cmd(struct scsi_xfer *);
66void mpi_scsi_cmd_done(struct mpi_ccb *);
67int mpi_scsi_probe(struct scsi_link *);
68int mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
69 int);
70
71struct scsi_adapter mpi_switch = {
72 mpi_scsi_cmd, NULL((void *)0), mpi_scsi_probe, NULL((void *)0), mpi_scsi_ioctl
73};
74
75struct mpi_dmamem *mpi_dmamem_alloc(struct mpi_softc *, size_t);
76void mpi_dmamem_free(struct mpi_softc *,
77 struct mpi_dmamem *);
78int mpi_alloc_ccbs(struct mpi_softc *);
79void *mpi_get_ccb(void *);
80void mpi_put_ccb(void *, void *);
81int mpi_alloc_replies(struct mpi_softc *);
82void mpi_push_replies(struct mpi_softc *);
83void mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
84
85void mpi_start(struct mpi_softc *, struct mpi_ccb *);
86int mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
87void mpi_poll_done(struct mpi_ccb *);
88void mpi_reply(struct mpi_softc *, u_int32_t);
89
90void mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
91void mpi_wait_done(struct mpi_ccb *);
92
93int mpi_cfg_spi_port(struct mpi_softc *);
94void mpi_squash_ppr(struct mpi_softc *);
95void mpi_run_ppr(struct mpi_softc *);
96int mpi_ppr(struct mpi_softc *, struct scsi_link *,
97 struct mpi_cfg_raid_physdisk *, int, int, int);
98int mpi_inq(struct mpi_softc *, u_int16_t, int);
99
100int mpi_cfg_sas(struct mpi_softc *);
101int mpi_cfg_fc(struct mpi_softc *);
102
103void mpi_timeout_xs(void *);
104int mpi_load_xs(struct mpi_ccb *);
105
106u_int32_t mpi_read(struct mpi_softc *, bus_size_t);
107void mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
108int mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
109 u_int32_t);
110int mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
111 u_int32_t);
112
113int mpi_init(struct mpi_softc *);
114int mpi_reset_soft(struct mpi_softc *);
115int mpi_reset_hard(struct mpi_softc *);
116
117int mpi_handshake_send(struct mpi_softc *, void *, size_t);
118int mpi_handshake_recv_dword(struct mpi_softc *,
119 u_int32_t *);
120int mpi_handshake_recv(struct mpi_softc *, void *, size_t);
121
122void mpi_empty_done(struct mpi_ccb *);
123
124int mpi_iocinit(struct mpi_softc *);
125int mpi_iocfacts(struct mpi_softc *);
126int mpi_portfacts(struct mpi_softc *);
127int mpi_portenable(struct mpi_softc *);
128int mpi_cfg_coalescing(struct mpi_softc *);
129void mpi_get_raid(struct mpi_softc *);
130int mpi_fwupload(struct mpi_softc *);
131int mpi_manufacturing(struct mpi_softc *);
132int mpi_scsi_probe_virtual(struct scsi_link *);
133
134int mpi_eventnotify(struct mpi_softc *);
135void mpi_eventnotify_done(struct mpi_ccb *);
136void mpi_eventnotify_free(struct mpi_softc *,
137 struct mpi_rcb *);
138void mpi_eventack(void *, void *);
139void mpi_eventack_done(struct mpi_ccb *);
140int mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
141void mpi_evt_sas_detach(void *, void *);
142void mpi_evt_sas_detach_done(struct mpi_ccb *);
143void mpi_fc_rescan(void *);
144
145int mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
146 u_int8_t, u_int32_t, int, void *);
147int mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
148 void *, int, void *, size_t);
149
150int mpi_ioctl_cache(struct scsi_link *, u_long,
151 struct dk_cache *);
152
153#if NBIO1 > 0
154int mpi_bio_get_pg0_raid(struct mpi_softc *, int);
155int mpi_ioctl(struct device *, u_long, caddr_t);
156int mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
157int mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
158int mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
159int mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
160#ifndef SMALL_KERNEL
161int mpi_create_sensors(struct mpi_softc *);
162void mpi_refresh_sensors(void *);
163#endif /* SMALL_KERNEL */
164#endif /* NBIO > 0 */
165
166#define DEVNAME(s)((s)->sc_dev.dv_xname) ((s)->sc_dev.dv_xname)
167
168#define dwordsof(s)(sizeof(s) / sizeof(u_int32_t)) (sizeof(s) / sizeof(u_int32_t))
169
170#define mpi_read_db(s)mpi_read((s), 0x00) mpi_read((s), MPI_DOORBELL0x00)
171#define mpi_write_db(s, v)mpi_write((s), 0x00, (v)) mpi_write((s), MPI_DOORBELL0x00, (v))
172#define mpi_read_intr(s)(((s)->sc_iot)->read_4(((s)->sc_ioh), (0x30))) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \(((s)->sc_iot)->read_4(((s)->sc_ioh), (0x30)))
173 MPI_INTR_STATUS)(((s)->sc_iot)->read_4(((s)->sc_ioh), (0x30)))
174#define mpi_write_intr(s, v)mpi_write((s), 0x30, (v)) mpi_write((s), MPI_INTR_STATUS0x30, (v))
175#define mpi_pop_reply(s)(((s)->sc_iot)->read_4(((s)->sc_ioh), (0x44))) bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \(((s)->sc_iot)->read_4(((s)->sc_ioh), (0x44)))
176 MPI_REPLY_QUEUE)(((s)->sc_iot)->read_4(((s)->sc_ioh), (0x44)))
#define mpi_push_reply_db(s, v)(((s)->sc_iot)->write_4(((s)->sc_ioh), (0x44), ((v)))) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \(((s)->sc_iot)->write_4(((s)->sc_ioh), (0x44), ((v))))
 178 MPI_REPLY_QUEUE, (v))(((s)->sc_iot)->write_4(((s)->sc_ioh), (0x44), ((v))))
179
180#define mpi_wait_db_int(s)mpi_wait_ne((s), 0x30, (1<<0), 0) mpi_wait_ne((s), MPI_INTR_STATUS0x30, \
181 MPI_INTR_STATUS_DOORBELL(1<<0), 0)
182#define mpi_wait_db_ack(s)mpi_wait_eq((s), 0x30, (1<<31), 0) mpi_wait_eq((s), MPI_INTR_STATUS0x30, \
183 MPI_INTR_STATUS_IOCDOORBELL(1<<31), 0)
184
185#define MPI_PG_EXTENDED(1<<0) (1<<0)
186#define MPI_PG_POLL(1<<1) (1<<1)
187#define MPI_PG_FMT"\020" "\002POLL" "\001EXTENDED" "\020" "\002POLL" "\001EXTENDED"
188
 189#define mpi_cfg_header(_s, _t, _n, _a, _h)mpi_req_cfg_header((_s), (_t), (_n), (_a), (1<<1), (_h)) \
 190 mpi_req_cfg_header((_s), (_t), (_n), (_a), \
 191 MPI_PG_POLL(1<<1), (_h))
 192#define mpi_ecfg_header(_s, _t, _n, _a, _h)mpi_req_cfg_header((_s), (_t), (_n), (_a), (1<<1)|(1<<0), (_h)) \
 193 mpi_req_cfg_header((_s), (_t), (_n), (_a), \
 194 MPI_PG_POLL(1<<1)|MPI_PG_EXTENDED(1<<0), (_h))
 195
 196#define mpi_cfg_page(_s, _a, _h, _r, _p, _l)mpi_req_cfg_page((_s), (_a), (1<<1), (_h), (_r), (_p), (_l)) \
 197 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL(1<<1), \
 198 (_h), (_r), (_p), (_l))
 199#define mpi_ecfg_page(_s, _a, _h, _r, _p, _l)mpi_req_cfg_page((_s), (_a), (1<<1)|(1<<0), (_h), (_r), (_p), (_l)) \
 200 mpi_req_cfg_page((_s), (_a), MPI_PG_POLL(1<<1)|MPI_PG_EXTENDED(1<<0), \
 201 (_h), (_r), (_p), (_l))
202
203static inline void
204mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
205{
 206 htolem32(&sge->sg_addr_lo, dva)(*(__uint32_t *)(&sge->sg_addr_lo) = ((__uint32_t)(dva)));
 207 htolem32(&sge->sg_addr_hi, dva >> 32)(*(__uint32_t *)(&sge->sg_addr_hi) = ((__uint32_t)(dva >> 32)));
208}
209
210int
211mpi_attach(struct mpi_softc *sc)
212{
213 struct scsibus_attach_args saa;
214 struct mpi_ccb *ccb;
215
216 printf("\n");
217
 218 rw_init(&sc->sc_lock, "mpi_lock")_rw_init_flags(&sc->sc_lock, "mpi_lock", 0, ((void *)0));
219 task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);
220
221 /* disable interrupts */
222 mpi_write(sc, MPI_INTR_MASK0x34,
223 MPI_INTR_MASK_REPLY(1<<3) | MPI_INTR_MASK_DOORBELL(1<<0));
224
225 if (mpi_init(sc) != 0) {
226 printf("%s: unable to initialise\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
227 return (1);
228 }
229
230 if (mpi_iocfacts(sc) != 0) {
231 printf("%s: unable to get iocfacts\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
232 return (1);
233 }
234
235 if (mpi_alloc_ccbs(sc) != 0) {
236 /* error already printed */
237 return (1);
238 }
239
240 if (mpi_alloc_replies(sc) != 0) {
241 printf("%s: unable to allocate reply space\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
242 goto free_ccbs;
243 }
244
245 if (mpi_iocinit(sc) != 0) {
246 printf("%s: unable to send iocinit\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
247 goto free_ccbs;
248 }
249
250 /* spin until we're operational */
251 if (mpi_wait_eq(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
252 MPI_DOORBELL_STATE_OPER(0x2<<28)) != 0) {
253 printf("%s: state: 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
254 mpi_read_db(sc)mpi_read((sc), 0x00) & MPI_DOORBELL_STATE(0xf<<28));
255 printf("%s: operational state timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
256 goto free_ccbs;
257 }
258
259 mpi_push_replies(sc);
260
261 if (mpi_portfacts(sc) != 0) {
262 printf("%s: unable to get portfacts\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
263 goto free_replies;
264 }
265
266 if (mpi_cfg_coalescing(sc) != 0) {
267 printf("%s: unable to configure coalescing\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
268 goto free_replies;
269 }
270
271 switch (sc->sc_porttype) {
272 case MPI_PORTFACTS_PORTTYPE_SAS0x30:
 273 SIMPLEQ_INIT(&sc->sc_evt_scan_queue)do { (&sc->sc_evt_scan_queue)->sqh_first = ((void *)0); (&sc->sc_evt_scan_queue)->sqh_last = &(&sc->sc_evt_scan_queue)->sqh_first; } while (0);
 274 mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->sc_evt_scan_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ? 0x9 : ((0x6)))); } while (0);
275 scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
276 mpi_evt_sas_detach, sc);
277 /* FALLTHROUGH */
278 case MPI_PORTFACTS_PORTTYPE_FC0x10:
279 if (mpi_eventnotify(sc) != 0) {
280 printf("%s: unable to enable events\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
281 goto free_replies;
282 }
283 break;
284 }
285
286 if (mpi_portenable(sc) != 0) {
287 printf("%s: unable to enable port\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
288 goto free_replies;
289 }
290
291 if (mpi_fwupload(sc) != 0) {
292 printf("%s: unable to upload firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
293 goto free_replies;
294 }
295
296 if (mpi_manufacturing(sc) != 0) {
297 printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
298 goto free_replies;
299 }
300
301 switch (sc->sc_porttype) {
302 case MPI_PORTFACTS_PORTTYPE_SCSI0x01:
303 if (mpi_cfg_spi_port(sc) != 0) {
304 printf("%s: unable to configure spi\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
305 goto free_replies;
306 }
307 mpi_squash_ppr(sc);
308 break;
309 case MPI_PORTFACTS_PORTTYPE_SAS0x30:
310 if (mpi_cfg_sas(sc) != 0) {
311 printf("%s: unable to configure sas\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
312 goto free_replies;
313 }
314 break;
315 case MPI_PORTFACTS_PORTTYPE_FC0x10:
316 if (mpi_cfg_fc(sc) != 0) {
317 printf("%s: unable to configure fc\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
318 goto free_replies;
319 }
320 break;
321 }
322
323 /* get raid pages */
324 mpi_get_raid(sc);
325#if NBIO1 > 0
326 if (sc->sc_flags & MPI_F_RAID(1<<1)) {
327 if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
328 panic("%s: controller registration failed",
329 DEVNAME(sc)((sc)->sc_dev.dv_xname));
330 else {
 331 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,mpi_req_cfg_header((sc), ((0x01)), (2), (0), (1<<1), (&sc->sc_cfg_hdr))
 332 2, 0, &sc->sc_cfg_hdr)mpi_req_cfg_header((sc), ((0x01)), (2), (0), (1<<1), (&sc->sc_cfg_hdr)) != 0) {
333 panic("%s: can't get IOC page 2 hdr",
334 DEVNAME(sc)((sc)->sc_dev.dv_xname));
335 }
336
337 sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
338 4, M_TEMP127, M_WAITOK0x0001 | M_CANFAIL0x0004);
339 if (sc->sc_vol_page == NULL((void *)0)) {
340 panic("%s: can't get memory for IOC page 2, "
341 "bio disabled", DEVNAME(sc)((sc)->sc_dev.dv_xname));
342 }
343
 344 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length * 4))
 345 sc->sc_vol_page,mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length * 4))
 346 sc->sc_cfg_hdr.page_length * 4)mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length * 4)) != 0) {
347 panic("%s: can't get IOC page 2", DEVNAME(sc)((sc)->sc_dev.dv_xname));
348 }
349
350 sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
351 (sc->sc_vol_page + 1);
352
353 sc->sc_ioctl = mpi_ioctl;
354 }
355 }
356#endif /* NBIO > 0 */
357
358 saa.saa_adapter = &mpi_switch;
359 saa.saa_adapter_softc = sc;
360 saa.saa_adapter_target = sc->sc_target;
361 saa.saa_adapter_buswidth = sc->sc_buswidth;
362 saa.saa_luns = 8;
 363 saa.saa_openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16)(((sc->sc_maxcmds / sc->sc_buswidth)>(16))?(sc->sc_maxcmds / sc->sc_buswidth):(16));
364 saa.saa_pool = &sc->sc_iopool;
365 saa.saa_wwpn = sc->sc_port_wwn;
366 saa.saa_wwnn = sc->sc_node_wwn;
367 saa.saa_quirks = saa.saa_flags = 0;
368
 369 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,config_found_sm((&sc->sc_dev), (&saa), (scsiprint), ((void *)0))
 370 &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint), ((void *)0));
371
372 /* do domain validation */
373 if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI0x01)
374 mpi_run_ppr(sc);
375
376 /* enable interrupts */
377 mpi_write(sc, MPI_INTR_MASK0x34, MPI_INTR_MASK_DOORBELL(1<<0));
378
379#if NBIO1 > 0
380#ifndef SMALL_KERNEL
381 mpi_create_sensors(sc);
382#endif /* SMALL_KERNEL */
383#endif /* NBIO > 0 */
384
385 return (0);
386
387free_replies:
 388 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x02))
 389 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x02));
390 mpi_dmamem_free(sc, sc->sc_replies);
391free_ccbs:
392 while ((ccb = mpi_get_ccb(sc)) != NULL((void *)0))
 393 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb->ccb_dmamap));
394 mpi_dmamem_free(sc, sc->sc_requests);
395 free(sc->sc_ccbs, M_DEVBUF2, 0);
396
397 return(1);
398}
399
400int
401mpi_cfg_spi_port(struct mpi_softc *sc)
402{
403 struct mpi_cfg_hdr hdr;
404 struct mpi_cfg_spi_port_pg1 port;
405
406 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,mpi_req_cfg_header((sc), ((0x03)), (1), (0x0), (1<<1), (
&hdr))
407 &hdr)mpi_req_cfg_header((sc), ((0x03)), (1), (0x0), (1<<1), (
&hdr))
!= 0)
408 return (1);
409
410 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port))mpi_req_cfg_page((sc), (0x0), (1<<1), (&hdr), (1), (
&port), (sizeof(port)))
!= 0)
411 return (1);
412
413 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
414 DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
415 DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
416 DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
417 letoh32(port.port_scsi_id));
418 DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
419 DEVNAME(sc), port.target_config, letoh16(port.id_config));
420
421 if (port.port_scsi_id == sc->sc_target &&
422 port.port_resp_ids == htole16(1 << sc->sc_target)((__uint16_t)(1 << sc->sc_target)) &&
423 port.on_bus_timer_value != htole32(0x0)((__uint32_t)(0x0)))
424 return (0);
425
426 DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
427 sc->sc_target);
428 port.port_scsi_id = sc->sc_target;
429 port.port_resp_ids = htole16(1 << sc->sc_target)((__uint16_t)(1 << sc->sc_target));
430 port.on_bus_timer_value = htole32(0x07000000)((__uint32_t)(0x07000000)); /* XXX magic */
431
432 if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port))mpi_req_cfg_page((sc), (0x0), (1<<1), (&hdr), (0), (
&port), (sizeof(port)))
!= 0) {
433 printf("%s: unable to configure port scsi id\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
434 return (1);
435 }
436
437 return (0);
438}
439
440void
441mpi_squash_ppr(struct mpi_softc *sc)
442{
443 struct mpi_cfg_hdr hdr;
444 struct mpi_cfg_spi_dev_pg1 page;
445 int i;
446
447 DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
448
449 for (i = 0; i < sc->sc_buswidth; i++) {
450 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,mpi_req_cfg_header((sc), ((0x04)), (1), (i), (1<<1), (&
hdr))
451 1, i, &hdr)mpi_req_cfg_header((sc), ((0x04)), (1), (i), (1<<1), (&
hdr))
!= 0)
452 return;
453
454 if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page))mpi_req_cfg_page((sc), (i), (1<<1), (&hdr), (1), (&
page), (sizeof(page)))
!= 0)
455 return;
456
457 DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
458 "req_offset: 0x%02x req_period: 0x%02x "
459 "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
460 page.req_params1, page.req_offset, page.req_period,
461 page.req_params2, letoh32(page.configuration));
462
463 page.req_params1 = 0x0;
464 page.req_offset = 0x0;
465 page.req_period = 0x0;
466 page.req_params2 = 0x0;
467 page.configuration = htole32(0x0)((__uint32_t)(0x0));
468
469 if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page))mpi_req_cfg_page((sc), (i), (1<<1), (&hdr), (0), (&
page), (sizeof(page)))
!= 0)
470 return;
471 }
472}
473
474void
475mpi_run_ppr(struct mpi_softc *sc)
476{
477 struct mpi_cfg_hdr hdr;
478 struct mpi_cfg_spi_port_pg0 port_pg;
479 struct mpi_cfg_ioc_pg3 *physdisk_pg;
480 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk;
481 size_t pagelen;
482 struct scsi_link *link;
483 int i, tries;
484
485 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,mpi_req_cfg_header((sc), ((0x03)), (0), (0x0), (1<<1), (
&hdr))
486 &hdr)mpi_req_cfg_header((sc), ((0x03)), (0), (0x0), (1<<1), (
&hdr))
!= 0) {
487 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
488 DEVNAME(sc));
489 return;
490 }
491
492 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg))mpi_req_cfg_page((sc), (0x0), (1<<1), (&hdr), (1), (
&port_pg), (sizeof(port_pg)))
!= 0) {
493 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
494 DEVNAME(sc));
495 return;
496 }
497
498 for (i = 0; i < sc->sc_buswidth; i++) {
499 link = scsi_get_link(sc->sc_scsibus, i, 0);
500 if (link == NULL((void *)0))
501 continue;
502
503 /* do not ppr volumes */
504 if (link->flags & SDEV_VIRTUAL0x0800)
505 continue;
506
507 tries = 0;
508 while (mpi_ppr(sc, link, NULL((void *)0), port_pg.min_period,
509 port_pg.max_offset, tries) == EAGAIN35)
510 tries++;
511 }
512
513 if ((sc->sc_flags & MPI_F_RAID(1<<1)) == 0)
514 return;
515
516 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,mpi_req_cfg_header((sc), ((0x01)), (3), (0x0), (1<<1), (
&hdr))
517 &hdr)mpi_req_cfg_header((sc), ((0x01)), (3), (0x0), (1<<1), (
&hdr))
!= 0) {
518 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
519 "fetch ioc pg 3 header\n", DEVNAME(sc));
520 return;
521 }
522
523 pagelen = hdr.page_length * 4; /* dwords to bytes */
524 physdisk_pg = malloc(pagelen, M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004);
525 if (physdisk_pg == NULL((void *)0)) {
526 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
527 "allocate ioc pg 3\n", DEVNAME(sc));
528 return;
529 }
530 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
531
532 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (physdisk_pg
), (pagelen))
!= 0) {
533 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to "
534 "fetch ioc page 3\n", DEVNAME(sc));
535 goto out;
536 }
537
538 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc),
539 physdisk_pg->no_phys_disks);
540
541 for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
542 physdisk = &physdisk_list[i];
543
544 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d "
545 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
546 physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
547 physdisk->phys_disk_num);
548
549 if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
550 continue;
551
552 tries = 0;
553 while (mpi_ppr(sc, NULL((void *)0), physdisk, port_pg.min_period,
554 port_pg.max_offset, tries) == EAGAIN35)
555 tries++;
556 }
557
558out:
559 free(physdisk_pg, M_TEMP127, pagelen);
560}
561
562int
563mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
564 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
565{
566 struct mpi_cfg_hdr hdr0, hdr1;
567 struct mpi_cfg_spi_dev_pg0 pg0;
568 struct mpi_cfg_spi_dev_pg1 pg1;
569 u_int32_t address;
570 int id;
571 int raid = 0;
572
573 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
574 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
575 link->quirks);
576
577 if (try >= 3)
578 return (EIO5);
579
580 if (physdisk == NULL((void *)0)) {
581 if ((link->inqdata.device & SID_TYPE0x1f) == T_PROCESSOR0x03)
582 return (EIO5);
583
584 address = link->target;
585 id = link->target;
586 } else {
587 raid = 1;
588 address = (physdisk->phys_disk_bus << 8) |
589 (physdisk->phys_disk_id);
590 id = physdisk->phys_disk_num;
591 }
592
593 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,mpi_req_cfg_header((sc), ((0x04)), (0), (address), (1<<
1), (&hdr0))
594 address, &hdr0)mpi_req_cfg_header((sc), ((0x04)), (0), (address), (1<<
1), (&hdr0))
!= 0) {
595 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
596 DEVNAME(sc));
597 return (EIO5);
598 }
599
600 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,mpi_req_cfg_header((sc), ((0x04)), (1), (address), (1<<
1), (&hdr1))
601 address, &hdr1)mpi_req_cfg_header((sc), ((0x04)), (1), (address), (1<<
1), (&hdr1))
!= 0) {
602 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
603 DEVNAME(sc));
604 return (EIO5);
605 }
606
607#ifdef MPI_DEBUG
608 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr0), (
1), (&pg0), (sizeof(pg0)))
!= 0) {
609 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
610 DEVNAME(sc));
611 return (EIO5);
612 }
613
614 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
615 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
616 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
617 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
618#endif
619
620 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr1), (
1), (&pg1), (sizeof(pg1)))
!= 0) {
621 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
622 DEVNAME(sc));
623 return (EIO5);
624 }
625
626 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
627 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
628 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
629 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
630
631 pg1.req_params1 = 0;
632 pg1.req_offset = offset;
633 pg1.req_period = period;
634 pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH(1<<5);
635
636 if (raid || !(link->quirks & SDEV_NOSYNC0x0002)) {
637 pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE(1<<5);
638
639 switch (try) {
640 case 0: /* U320 */
641 break;
642 case 1: /* U160 */
643 pg1.req_period = 0x09;
644 break;
645 case 2: /* U80 */
646 pg1.req_period = 0x0a;
647 break;
648 }
649
650 if (pg1.req_period < 0x09) {
651 /* Ultra320: enable QAS & PACKETIZED */
652 pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS(1<<2) |
653 MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED(1<<0);
654 }
655 if (pg1.req_period < 0xa) {
656 /* >= Ultra160: enable dual xfers */
657 pg1.req_params1 |=
658 MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS(1<<1);
659 }
660 }
661
662 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
663 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
664 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
665 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
666
667 if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr1), (
0), (&pg1), (sizeof(pg1)))
!= 0) {
668 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
669 DEVNAME(sc));
670 return (EIO5);
671 }
672
673 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr1), (
1), (&pg1), (sizeof(pg1)))
!= 0) {
674 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
675 DEVNAME(sc));
676 return (EIO5);
677 }
678
679 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
680 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
681 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
682 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
683
684 if (mpi_inq(sc, id, raid) != 0) {
685 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
686 "target %d\n", DEVNAME(sc), link->target);
687 return (EIO5);
688 }
689
690 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr0), (
1), (&pg0), (sizeof(pg0)))
!= 0) {
691 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
692 "inquiry\n", DEVNAME(sc));
693 return (EIO5);
694 }
695
696 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
697 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
698 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
699 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
700
701 if (!(lemtoh32(&pg0.information)((__uint32_t)(*(__uint32_t *)(&pg0.information))) & 0x07) && (try == 0)) {
702 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
703 DEVNAME(sc));
704 return (EAGAIN35);
705 }
706
707 if ((((lemtoh32(&pg0.information)((__uint32_t)(*(__uint32_t *)(&pg0.information))) >> 8) & 0xff) > 0x09) && (try == 1)) {
708 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
709 DEVNAME(sc));
710 return (EAGAIN35);
711 }
712
713 if (lemtoh32(&pg0.information)((__uint32_t)(*(__uint32_t *)(&pg0.information))) & 0x0e) {
714 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
715 DEVNAME(sc), lemtoh32(&pg0.information));
716 return (EAGAIN35);
717 }
718
719 switch(pg0.neg_period) {
720 case 0x08:
721 period = 160;
722 break;
723 case 0x09:
724 period = 80;
725 break;
726 case 0x0a:
727 period = 40;
728 break;
729 case 0x0b:
730 period = 20;
731 break;
732 case 0x0c:
733 period = 10;
734 break;
735 default:
736 period = 0;
737 break;
738 }
739
740 printf("%s: %s %d %s at %dMHz width %dbit offset %d "
741 "QAS %d DT %d IU %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), raid ? "phys disk" : "target",
742 id, period ? "Sync" : "Async", period,
743 (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE(1<<5)) ? 16 : 8,
744 pg0.neg_offset,
745 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS(1<<2)) ? 1 : 0,
746 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS(1<<1)) ? 1 : 0,
747 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED(1<<0)) ? 1 : 0);
748
749 return (0);
750}
751
752int
753mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
754{
755 struct mpi_ccb *ccb;
756 struct scsi_inquiry inq;
757 struct inq_bundle {
758 struct mpi_msg_scsi_io io;
759 struct mpi_sge sge;
760 struct scsi_inquiry_data inqbuf;
761 struct scsi_sense_data sense;
762 } __packed__attribute__((__packed__)) *bundle;
763 struct mpi_msg_scsi_io *io;
764 struct mpi_sge *sge;
765
766 DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
767
768 memset(&inq, 0, sizeof(inq))__builtin_memset((&inq), (0), (sizeof(inq)));
769 inq.opcode = INQUIRY0x12;
770 _lto2b(sizeof(struct scsi_inquiry_data), inq.length);
771
772 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
773 if (ccb == NULL((void *)0))
774 return (1);
775
776 ccb->ccb_done = mpi_empty_done;
777
778 bundle = ccb->ccb_cmd;
779 io = &bundle->io;
780 sge = &bundle->sge;
781
782 io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH(0x16) :
783 MPI_FUNCTION_SCSI_IO_REQUEST(0x00);
784 /*
785 * bus is always 0
786 * io->bus = htole16(sc->sc_bus);
787 */
788 io->target_id = target;
789
790 io->cdb_length = sizeof(inq);
791 io->sense_buf_len = sizeof(struct scsi_sense_data);
792 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64(1<<0);
793
794 /*
795 * always lun 0
796 * io->lun[0] = htobe16(link->lun);
797 */
798
799 io->direction = MPI_SCSIIO_DIR_READ(0x2);
800 io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT(0x7);
801
802 memcpy(io->cdb, &inq, sizeof(inq))__builtin_memcpy((io->cdb), (&inq), (sizeof(inq)));
803
 804 htolem32(&io->data_length, sizeof(struct scsi_inquiry_data))(*(__uint32_t *)(&io->data_length) = ((__uint32_t)(sizeof(struct scsi_inquiry_data))));
 805
 806 htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t)(ccb->ccb_cmd_dva + __builtin_offsetof(struct inq_bundle, sense))))
 807 offsetof(struct inq_bundle, sense))(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t)(ccb->ccb_cmd_dva + __builtin_offsetof(struct inq_bundle, sense))));
808
 809 htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)((0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<30) | (0x1<<24) | (u_int32_t)sizeof(inq))))
 810 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)((0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<30) | (0x1<<24) | (u_int32_t)sizeof(inq))))
 811 (u_int32_t)sizeof(inq))(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)((0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<30) | (0x1<<24) | (u_int32_t)sizeof(inq))));
812
813 mpi_dvatosge(sge, ccb->ccb_cmd_dva +
814 offsetof(struct inq_bundle, inqbuf)__builtin_offsetof(struct inq_bundle, inqbuf));
815
816 if (mpi_poll(sc, ccb, 5000) != 0)
817 return (1);
818
819 if (ccb->ccb_rcb != NULL((void *)0))
820 mpi_push_reply(sc, ccb->ccb_rcb);
821
822 scsi_io_put(&sc->sc_iopool, ccb);
823
824 return (0);
825}
826
827int
828mpi_cfg_sas(struct mpi_softc *sc)
829{
830 struct mpi_ecfg_hdr ehdr;
831 struct mpi_cfg_sas_iou_pg1 *pg;
832 size_t pagelen;
833 int rv = 0;
834
835 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,mpi_req_cfg_header((sc), ((0x10)), (1), (0), (1<<1)|(1<<
0), (&ehdr))
836 &ehdr)mpi_req_cfg_header((sc), ((0x10)), (1), (0), (1<<1)|(1<<
0), (&ehdr))
!= 0)
837 return (0);
838
839 pagelen = lemtoh16(&ehdr.ext_page_length)((__uint16_t)(*(__uint16_t *)(&ehdr.ext_page_length))) * 4;
840 pg = malloc(pagelen, M_TEMP127, M_NOWAIT0x0002 | M_ZERO0x0008);
841 if (pg == NULL((void *)0))
842 return (ENOMEM12);
843
844 if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1)|(1<<0), (&
ehdr), (1), (pg), (pagelen))
!= 0)
845 goto out;
846
847 if (pg->max_sata_q_depth != 32) {
848 pg->max_sata_q_depth = 32;
849
850 if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1)|(1<<0), (&
ehdr), (0), (pg), (pagelen))
!= 0)
851 goto out;
852 }
853
854out:
855 free(pg, M_TEMP127, pagelen);
856 return (rv);
857}
858
859int
860mpi_cfg_fc(struct mpi_softc *sc)
861{
862 struct mpi_cfg_hdr hdr;
863 struct mpi_cfg_fc_port_pg0 pg0;
864 struct mpi_cfg_fc_port_pg1 pg1;
865
866 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,mpi_req_cfg_header((sc), ((0x05)), (0), (0), (1<<1), (&
hdr))
867 &hdr)mpi_req_cfg_header((sc), ((0x05)), (0), (0), (1<<1), (&
hdr))
!= 0) {
868 printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
869 return (1);
870 }
871
872 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (&
pg0), (sizeof(pg0)))
!= 0) {
873 printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
874 return (1);
875 }
876
877 sc->sc_port_wwn = letoh64(pg0.wwpn)((__uint64_t)(pg0.wwpn));
878 sc->sc_node_wwn = letoh64(pg0.wwnn)((__uint64_t)(pg0.wwnn));
879
880 /* configure port config more to our liking */
881 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,mpi_req_cfg_header((sc), ((0x05)), (1), (0), (1<<1), (&
hdr))
882 &hdr)mpi_req_cfg_header((sc), ((0x05)), (1), (0), (1<<1), (&
hdr))
!= 0) {
883 printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
884 return (1);
885 }
886
887 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (&
pg1), (sizeof(pg1)))
!= 0) {
888 printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
889 return (1);
890 }
891
892 SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |((pg1.flags) |= (((__uint32_t)((1<<26) | (1<<24))
)))
893 MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN))((pg1.flags) |= (((__uint32_t)((1<<26) | (1<<24))
)))
;
894
895 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (0), (&
pg1), (sizeof(pg1)))
!= 0) {
896 printf("%s: unable to set FC port page 1\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
897 return (1);
898 }
899
900 return (0);
901}
902
903void
904mpi_detach(struct mpi_softc *sc)
905{
906
907}
908
909int
910mpi_intr(void *arg)
911{
912 struct mpi_softc *sc = arg;
913 u_int32_t reg;
914 int rv = 0;
915
916 if ((mpi_read_intr(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x30))) & MPI_INTR_STATUS_REPLY(1<<3)) == 0)
917 return (rv);
918
919 while ((reg = mpi_pop_reply(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x44)))) != 0xffffffff) {
920 mpi_reply(sc, reg);
921 rv = 1;
922 }
923
924 return (rv);
925}
926
927void
928mpi_reply(struct mpi_softc *sc, u_int32_t reg)
929{
930 struct mpi_ccb *ccb;
931 struct mpi_rcb *rcb = NULL((void *)0);
932 struct mpi_msg_reply *reply = NULL((void *)0);
933 u_int32_t reply_dva;
934 int id;
935 int i;
936
937 DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
938
939 if (reg & MPI_REPLY_QUEUE_ADDRESS(1<<31)) {
940 reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK0x7fffffff) << 1;
941 i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
) /
942 MPI_REPLY_SIZE80;
943 rcb = &sc->sc_rcbs[i];
944
945 bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x02)
)
946 MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x02)
)
947 MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x02)
)
;
948
949 reply = rcb->rcb_reply;
950
951 id = lemtoh32(&reply->msg_context)((__uint32_t)(*(__uint32_t *)(&reply->msg_context)));
952 } else {
953 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK(3<<29)) {
954 case MPI_REPLY_QUEUE_TYPE_INIT(0<<29):
955 id = reg & MPI_REPLY_QUEUE_CONTEXT0x1fffffff;
956 break;
957
958 default:
959 panic("%s: unsupported context reply",
960 DEVNAME(sc)((sc)->sc_dev.dv_xname));
961 }
962 }
963
964 DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
965 DEVNAME(sc), id, reply);
966
967 ccb = &sc->sc_ccbs[id];
968
969 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x02
| 0x08))
970 ccb->ccb_offset, MPI_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x02
| 0x08))
971 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x02
| 0x08))
;
972 ccb->ccb_state = MPI_CCB_READY;
973 ccb->ccb_rcb = rcb;
974
975 ccb->ccb_done(ccb);
976}
977
978struct mpi_dmamem *
979mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
980{
981 struct mpi_dmamem *mdm;
982 int nsegs;
983
984 mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
985 if (mdm == NULL((void *)0))
986 return (NULL((void *)0));
987
988 mdm->mdm_size = size;
989
990 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&mdm->mdm_map
))
991 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&mdm->mdm_map
))
!= 0)
992 goto mdmfree;
993
994 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&mdm->mdm_seg), (1), (&
nsegs), (0x0001 | 0x1000))
995 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&mdm->mdm_seg), (1), (&
nsegs), (0x0001 | 0x1000))
!= 0)
996 goto destroy;
997
998 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&mdm
->mdm_seg), (nsegs), (size), (&mdm->mdm_kva), (0x0001
))
999 &mdm->mdm_kva, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&mdm
->mdm_seg), (nsegs), (size), (&mdm->mdm_kva), (0x0001
))
!= 0)
1000 goto free;
1001
1002 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (mdm->
mdm_map), (mdm->mdm_kva), (size), (((void *)0)), (0x0001))
1003 NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (mdm->
mdm_map), (mdm->mdm_kva), (size), (((void *)0)), (0x0001))
!= 0)
1004 goto unmap;
1005
1006 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1007 "map: %#x nsegs: %d segs: %#x kva: %x\n",
1008 DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);
1009
1010 return (mdm);
1011
1012unmap:
1013 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (mdm->
mdm_kva), (size))
;
1014free:
1015 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
mdm->mdm_seg), (1))
;
1016destroy:
1017 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (mdm
->mdm_map))
;
1018mdmfree:
1019 free(mdm, M_DEVBUF2, sizeof *mdm);
1020
1021 return (NULL((void *)0));
1022}
1023
1024void
1025mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1026{
1027 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1028
1029 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (mdm
->mdm_map))
;
1030 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (mdm->
mdm_kva), (mdm->mdm_size))
;
1031 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
mdm->mdm_seg), (1))
;
1032 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (mdm
->mdm_map))
;
1033 free(mdm, M_DEVBUF2, sizeof *mdm);
1034}
1035
1036int
1037mpi_alloc_ccbs(struct mpi_softc *sc)
1038{
1039 struct mpi_ccb *ccb;
1040 u_int8_t *cmd;
1041 int i;
1042
1043 SLIST_INIT(&sc->sc_ccb_free){ ((&sc->sc_ccb_free)->slh_first) = ((void *)0); };
1044 mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_ccb_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ?
0x9 : ((0x6)))); } while (0)
;
1045
1046 sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
1047 M_DEVBUF2, M_WAITOK0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008);
1048 if (sc->sc_ccbs == NULL((void *)0)) {
1049 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1050 return (1);
1051 }
1052
1053 sc->sc_requests = mpi_dmamem_alloc(sc,
1054 MPI_REQUEST_SIZE512 * sc->sc_maxcmds);
1055 if (sc->sc_requests == NULL((void *)0)) {
1056 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1057 goto free_ccbs;
1058 }
1059 cmd = MPI_DMA_KVA(sc->sc_requests)((void *)(sc->sc_requests)->mdm_kva);
1060 memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds)__builtin_memset((cmd), (0), (512 * sc->sc_maxcmds));
1061
1062 for (i = 0; i < sc->sc_maxcmds; i++) {
1063 ccb = &sc->sc_ccbs[i];
1064
1065 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
1066 sc->sc_max_sgl_len, MAXPHYS, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
1067 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
1068 &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
!= 0) {
1069 printf("%s: unable to create dma map\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1070 goto free_maps;
1071 }
1072
1073 ccb->ccb_sc = sc;
1074 ccb->ccb_id = i;
1075 ccb->ccb_offset = MPI_REQUEST_SIZE512 * i;
1076 ccb->ccb_state = MPI_CCB_READY;
1077
1078 ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1079 ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests)((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0].ds_addr
)
+
1080 ccb->ccb_offset;
1081
1082 DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1083 "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1084 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1085 ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1086 ccb->ccb_cmd_dva);
1087
1088 mpi_put_ccb(sc, ccb);
1089 }
1090
1091 scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1092
1093 return (0);
1094
1095free_maps:
1096 while ((ccb = mpi_get_ccb(sc)) != NULL((void *)0))
1097 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
1098
1099 mpi_dmamem_free(sc, sc->sc_requests);
1100free_ccbs:
1101 free(sc->sc_ccbs, M_DEVBUF2, 0);
1102
1103 return (1);
1104}
1105
1106void *
1107mpi_get_ccb(void *xsc)
1108{
1109 struct mpi_softc *sc = xsc;
1110 struct mpi_ccb *ccb;
1111
1112 mtx_enter(&sc->sc_ccb_mtx);
1113 ccb = SLIST_FIRST(&sc->sc_ccb_free)((&sc->sc_ccb_free)->slh_first);
1114 if (ccb != NULL((void *)0)) {
1115 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link)do { (&sc->sc_ccb_free)->slh_first = (&sc->sc_ccb_free
)->slh_first->ccb_link.sle_next; } while (0)
;
1116 ccb->ccb_state = MPI_CCB_READY;
1117 }
1118 mtx_leave(&sc->sc_ccb_mtx);
1119
1120 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1121
1122 return (ccb);
1123}
1124
1125void
1126mpi_put_ccb(void *xsc, void *io)
1127{
1128 struct mpi_softc *sc = xsc;
1129 struct mpi_ccb *ccb = io;
1130
1131 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1132
1133#ifdef DIAGNOSTIC1
1134 if (ccb->ccb_state == MPI_CCB_FREE)
1135 panic("mpi_put_ccb: double free");
1136#endif
1137
1138 ccb->ccb_state = MPI_CCB_FREE;
1139 ccb->ccb_cookie = NULL((void *)0);
1140 ccb->ccb_done = NULL((void *)0);
1141 memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE)__builtin_memset((ccb->ccb_cmd), (0), (512));
1142 mtx_enter(&sc->sc_ccb_mtx);
1143 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link)do { (ccb)->ccb_link.sle_next = (&sc->sc_ccb_free)->
slh_first; (&sc->sc_ccb_free)->slh_first = (ccb); }
while (0)
;
1144 mtx_leave(&sc->sc_ccb_mtx);
1145}
1146
1147int
1148mpi_alloc_replies(struct mpi_softc *sc)
1149{
1150 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1151
1152 sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF2,
1153 M_WAITOK0x0001|M_CANFAIL0x0004);
1154 if (sc->sc_rcbs == NULL((void *)0))
1155 return (1);
1156
1157 sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE80);
1158 if (sc->sc_replies == NULL((void *)0)) {
1159 free(sc->sc_rcbs, M_DEVBUF2, 0);
1160 return (1);
1161 }
1162
1163 return (0);
1164}
1165
1166void
1167mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1168{
1169 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x01)
)
1170 rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x01)
)
;
1171 mpi_push_reply_db(sc, rcb->rcb_reply_dva)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), (0x44), ((rcb
->rcb_reply_dva))))
;
1172}
1173
1174void
1175mpi_push_replies(struct mpi_softc *sc)
1176{
1177 struct mpi_rcb *rcb;
1178 char *kva = MPI_DMA_KVA(sc->sc_replies)((void *)(sc->sc_replies)->mdm_kva);
1179 int i;
1180
1181 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x01)
)
1182 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x01)
)
;
1183
1184 for (i = 0; i < sc->sc_repq; i++) {
1185 rcb = &sc->sc_rcbs[i];
1186
1187 rcb->rcb_reply = kva + MPI_REPLY_SIZE80 * i;
1188 rcb->rcb_offset = MPI_REPLY_SIZE80 * i;
1189 rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
+
1190 MPI_REPLY_SIZE80 * i;
1191 mpi_push_reply_db(sc, rcb->rcb_reply_dva)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), (0x44), ((rcb
->rcb_reply_dva))))
;
1192 }
1193}
1194
1195void
1196mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1197{
1198 struct mpi_msg_request *msg;
1199
1200 DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1201 ccb->ccb_cmd_dva);
1202
1203 msg = ccb->ccb_cmd;
1204 htolem32(&msg->msg_context, ccb->ccb_id)(*(__uint32_t *)(&msg->msg_context) = ((__uint32_t)(ccb
->ccb_id)))
;
1205
1206 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x01
| 0x04))
1207 ccb->ccb_offset, MPI_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x01
| 0x04))
1208 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x01
| 0x04))
;
1209
1210 ccb->ccb_state = MPI_CCB_QUEUED;
1211 bus_space_write_4(sc->sc_iot, sc->sc_ioh,((sc->sc_iot)->write_4((sc->sc_ioh), (0x40), (ccb->
ccb_cmd_dva)))
1212 MPI_REQ_QUEUE, ccb->ccb_cmd_dva)((sc->sc_iot)->write_4((sc->sc_ioh), (0x40), (ccb->
ccb_cmd_dva)))
;
1213}
1214
1215int
1216mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1217{
1218 void (*done)(struct mpi_ccb *);
1219 void *cookie;
1220 int rv = 1;
1221 u_int32_t reg;
1222
1223 DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1224 timeout);
1225
1226 done = ccb->ccb_done;
1227 cookie = ccb->ccb_cookie;
1228
1229 ccb->ccb_done = mpi_poll_done;
1230 ccb->ccb_cookie = &rv;
1231
1232 mpi_start(sc, ccb);
1233 while (rv == 1) {
1234 reg = mpi_pop_reply(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x44)));
1235 if (reg == 0xffffffff) {
1236 if (timeout-- == 0) {
1237 printf("%s: timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1238 goto timeout;
1239 }
1240
1241 delay(1000)(*delay_func)(1000);
1242 continue;
1243 }
1244
1245 mpi_reply(sc, reg);
1246 }
1247
1248 ccb->ccb_cookie = cookie;
1249 done(ccb);
1250
1251timeout:
1252 return (rv);
1253}
1254
1255void
1256mpi_poll_done(struct mpi_ccb *ccb)
1257{
1258 int *rv = ccb->ccb_cookie;
1259
1260 *rv = 0;
1261}
1262
1263void
1264mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1265{
 1266 struct mutex cookie = MUTEX_INITIALIZER(IPL_BIO){ ((void *)0), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ? 0x9 : ((0x6))), 0x0 };
1267 void (*done)(struct mpi_ccb *);
1268
1269 done = ccb->ccb_done;
1270 ccb->ccb_done = mpi_wait_done;
1271 ccb->ccb_cookie = &cookie;
1272
1273 /* XXX this will wait forever for the ccb to complete */
1274
1275 mpi_start(sc, ccb);
1276
1277 mtx_enter(&cookie);
1278 while (ccb->ccb_cookie != NULL((void *)0))
1279 msleep_nsec(ccb, &cookie, PRIBIO16, "mpiwait", INFSLP0xffffffffffffffffULL);
1280 mtx_leave(&cookie);
1281
1282 done(ccb);
1283}
1284
1285void
1286mpi_wait_done(struct mpi_ccb *ccb)
1287{
1288 struct mutex *cookie = ccb->ccb_cookie;
1289
1290 mtx_enter(cookie);
1291 ccb->ccb_cookie = NULL((void *)0);
1292 wakeup_one(ccb)wakeup_n((ccb), 1);
1293 mtx_leave(cookie);
1294}
1295
1296void
1297mpi_scsi_cmd(struct scsi_xfer *xs)
1298{
1299 struct scsi_link *link = xs->sc_link;
1300 struct mpi_softc *sc = link->bus->sb_adapter_softc;
1301 struct mpi_ccb *ccb;
1302 struct mpi_ccb_bundle *mcb;
1303 struct mpi_msg_scsi_io *io;
1304
1305 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1306
1307 KERNEL_UNLOCK()_kernel_unlock();
1308
1309 if (xs->cmdlen > MPI_CDB_LEN16) {
 1. Assuming field 'cmdlen' is <= MPI_CDB_LEN
 2. Taking false branch
1310 DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n",
1311 DEVNAME(sc), xs->cmdlen);
 1312 memset(&xs->sense, 0, sizeof(xs->sense))__builtin_memset((&xs->sense), (0), (sizeof(xs->sense)));
1313 xs->sense.error_code = SSD_ERRCODE_VALID0x80 | SSD_ERRCODE_CURRENT0x70;
1314 xs->sense.flags = SKEY_ILLEGAL_REQUEST0x05;
1315 xs->sense.add_sense_code = 0x20;
1316 xs->error = XS_SENSE1;
1317 goto done;
1318 }
1319
1320 ccb = xs->io;
1321
1322 DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1323 DEVNAME(sc), ccb->ccb_id, xs->flags);
1324
1325 ccb->ccb_cookie = xs;
1326 ccb->ccb_done = mpi_scsi_cmd_done;
1327
1328 mcb = ccb->ccb_cmd;
1329 io = &mcb->mcb_io;
1330
1331 io->function = MPI_FUNCTION_SCSI_IO_REQUEST(0x00);
1332 /*
1333 * bus is always 0
1334 * io->bus = htole16(sc->sc_bus);
1335 */
1336 io->target_id = link->target;
1337
1338 io->cdb_length = xs->cmdlen;
1339 io->sense_buf_len = sizeof(xs->sense);
1340 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64(1<<0);
1341
 1342 htobem16(&io->lun[0], link->lun)(*(__uint16_t *)(&io->lun[0]) = (__uint16_t)(__builtin_constant_p(link->lun) ? (__uint16_t)(((__uint16_t)(link->lun) & 0xffU) << 8 | ((__uint16_t)(link->lun) & 0xff00U) >> 8) : __swap16md(link->lun)));
 3. '?' condition is false
1343
1344 switch (xs->flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
 4. Control jumps to the 'default' case at line 1351
1345 case SCSI_DATA_IN0x00800:
1346 io->direction = MPI_SCSIIO_DIR_READ(0x2);
1347 break;
1348 case SCSI_DATA_OUT0x01000:
1349 io->direction = MPI_SCSIIO_DIR_WRITE(0x1);
1350 break;
1351 default:
1352 io->direction = MPI_SCSIIO_DIR_NONE(0x0);
1353 break;
 5. Execution continues on line 1356
1354 }
1355
1356 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI0x01 &&
 6. Assuming field 'sc_porttype' is equal to MPI_PORTFACTS_PORTTYPE_SCSI
1357 (link->quirks & SDEV_NOTAGS0x0008))
1358 io->tagging = MPI_SCSIIO_ATTR_UNTAGGED(0x5);
1359 else
1360 io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q(0x0);
1361
 1362 memcpy(io->cdb, &xs->cmd, xs->cmdlen)__builtin_memcpy((io->cdb), (&xs->cmd), (xs->cmdlen));
1363
 1364 htolem32(&io->data_length, xs->datalen)(*(__uint32_t *)(&io->data_length) = ((__uint32_t)(xs->datalen)));
1365
 1366 htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t)(ccb->ccb_cmd_dva + __builtin_offsetof(struct mpi_ccb_bundle, mcb_sense))))
 1367 offsetof(struct mpi_ccb_bundle, mcb_sense))(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t)(ccb->ccb_cmd_dva + __builtin_offsetof(struct mpi_ccb_bundle, mcb_sense))));
1368
1369 if (mpi_load_xs(ccb) != 0)
 7. Calling 'mpi_load_xs'
1370 goto stuffup;
1371
1372 timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1373
1374 if (xs->flags & SCSI_POLL0x00002) {
1375 if (mpi_poll(sc, ccb, xs->timeout) != 0)
1376 goto stuffup;
1377 } else
1378 mpi_start(sc, ccb);
1379
1380 KERNEL_LOCK()_kernel_lock();
1381 return;
1382
1383stuffup:
1384 xs->error = XS_DRIVER_STUFFUP2;
1385done:
1386 KERNEL_LOCK()_kernel_lock();
1387 scsi_done(xs);
1388}
1389
1390void
1391mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1392{
1393 struct mpi_softc *sc = ccb->ccb_sc;
1394 struct scsi_xfer *xs = ccb->ccb_cookie;
1395 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
1396 bus_dmamap_t dmap = ccb->ccb_dmamap;
1397 struct mpi_msg_scsi_io_error *sie;
1398
1399 if (xs->datalen != 0) {
1400 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1401 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :
1402 BUS_DMASYNC_POSTWRITE);
1403
1404 bus_dmamap_unload(sc->sc_dmat, dmap);
1405 }
1406
1407 /* timeout_del */
1408 xs->error = XS_NOERROR0;
1409 xs->resid = 0;
1410
1411 if (ccb->ccb_rcb == NULL((void *)0)) {
1412 /* no scsi error, we're ok so drop out early */
1413 xs->status = SCSI_OK0x00;
1414 KERNEL_LOCK()_kernel_lock();
1415 scsi_done(xs);
1416 KERNEL_UNLOCK()_kernel_unlock();
1417 return;
1418 }
1419
1420 sie = ccb->ccb_rcb->rcb_reply;
1421
1422 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1423 "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
1424 xs->flags);
1425 DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d "
1426 "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1427 sie->msg_length, sie->function);
1428 DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d "
1429 "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1430 sie->sense_buf_len, sie->msg_flags);
1431 DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
1432 letoh32(sie->msg_context));
1433 DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
1434 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1435 sie->scsi_state, letoh16(sie->ioc_status));
1436 DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1437 letoh32(sie->ioc_loginfo));
1438 DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
1439 letoh32(sie->transfer_count));
1440 DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
1441 letoh32(sie->sense_count));
1442 DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
1443 letoh32(sie->response_info));
1444 DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc),
1445 letoh16(sie->tag));
1446
1447 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS(1<<3))
1448 xs->status = SCSI_TERMINATED0x22;
1449 else
1450 xs->status = sie->scsi_status;
1451 xs->resid = 0;
1452
1453 switch (lemtoh16(&sie->ioc_status)((__uint16_t)(*(__uint16_t *)(&sie->ioc_status)))) {
1454 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN(0x0045):
1455 xs->resid = xs->datalen - lemtoh32(&sie->transfer_count)((__uint32_t)(*(__uint32_t *)(&sie->transfer_count)));
1456 /* FALLTHROUGH */
1457 case MPI_IOCSTATUS_SUCCESS(0x0000):
1458 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR(0x0040):
1459 switch (xs->status) {
1460 case SCSI_OK0x00:
1461 xs->error = XS_NOERROR0;
1462 break;
1463
1464 case SCSI_CHECK0x02:
1465 xs->error = XS_SENSE1;
1466 break;
1467
1468 case SCSI_BUSY0x08:
1469 case SCSI_QUEUE_FULL0x28:
1470 xs->error = XS_BUSY5;
1471 break;
1472
1473 default:
1474 xs->error = XS_DRIVER_STUFFUP2;
1475 break;
1476 }
1477 break;
1478
1479 case MPI_IOCSTATUS_BUSY(0x0002):
1480 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES(0x0006):
1481 xs->error = XS_BUSY5;
1482 break;
1483
1484 case MPI_IOCSTATUS_SCSI_INVALID_BUS(0x0041):
1485 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID(0x0042):
1486 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE(0x0043):
1487 xs->error = XS_SELTIMEOUT3;
1488 break;
1489
1490 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED(0x004B):
1491 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED(0x004C):
1492 xs->error = XS_RESET8;
1493 break;
1494
1495 default:
1496 xs->error = XS_DRIVER_STUFFUP2;
1497 break;
1498 }
1499
1500 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID(1<<0))
1501 memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense));
1502
1503 DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc),
1504 xs->error, xs->status);
1505
1506 mpi_push_reply(sc, ccb->ccb_rcb);
1507 KERNEL_LOCK()_kernel_lock();
1508 scsi_done(xs);
1509 KERNEL_UNLOCK()_kernel_unlock();
1510}
1511
1512void
1513mpi_timeout_xs(void *arg)
1514{
1515 /* XXX */
1516}
1517
1518int
1519mpi_load_xs(struct mpi_ccb *ccb)
1520{
1521 struct mpi_softc *sc = ccb->ccb_sc;
1522 struct scsi_xfer *xs = ccb->ccb_cookie;
1523 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
1524 struct mpi_msg_scsi_io *io = &mcb->mcb_io;
1525 struct mpi_sge *sge = NULL((void *)0);
8. 'sge' initialized to a null pointer value
1526 struct mpi_sge *nsge = &mcb->mcb_sgl[0];
1527 struct mpi_sge *ce = NULL((void *)0), *nce;
1528 bus_dmamap_t dmap = ccb->ccb_dmamap;
1529 u_int32_t addr, flags;
1530 int i, error;
1531
1532 if (xs->datalen == 0) {
9. Assuming field 'datalen' is not equal to 0
10. Taking false branch
1533 htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |
1534 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL);
1535 return (0);
1536 }
1537
1538 error = bus_dmamap_load(sc->sc_dmat, dmap,
1539 xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |
1540 ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK));
11. Assuming the condition is false
12. '?' condition is false
1541 if (error) {
13. Assuming 'error' is 0
14. Taking false branch
1542 printf("%s: error %d loading dmamap\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
1543 return (1);
1544 }
1545
1546 flags = MPI_SGE_FL_TYPE_SIMPLE(0x1<<28) | MPI_SGE_FL_SIZE_64(0x1<<25);
1547 if (xs->flags & SCSI_DATA_OUT0x01000)
15. Assuming the condition is false
16. Taking false branch
1548 flags |= MPI_SGE_FL_DIR_OUT(0x1<<26);
1549
1550 if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
17. Assuming field 'dm_nsegs' is > field 'sc_first_sgl_len'
18. Taking true branch
1551 ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1552 io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1553 }
1554
1555 for (i = 0; i < dmap->dm_nsegs; i++) {
19. Assuming 'i' is < field 'dm_nsegs'
20. Loop condition is true. Entering loop body
1556
1557 if (nsge == ce) {
21. Assuming 'nsge' is equal to 'ce'
22. Taking true branch
1558 nsge++;
1559 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST)((__uint32_t)((0x1<<31)));
23. Access to field 'sg_hdr' results in a dereference of a null pointer (loaded from variable 'sge'); see the stand-alone sketch after this function.
1560
1561 if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1562 nce = &nsge[sc->sc_chain_len - 1];
1563 addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1564 addr = addr << 16 |
1565 sizeof(struct mpi_sge) * sc->sc_chain_len;
1566 } else {
1567 nce = NULL((void *)0);
1568 addr = sizeof(struct mpi_sge) *
1569 (dmap->dm_nsegs - i);
1570 }
1571
1572 ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |((__uint32_t)((0x3<<28) | (0x1<<25) | addr))
1573 MPI_SGE_FL_SIZE_64 | addr)((__uint32_t)((0x3<<28) | (0x1<<25) | addr));
1574
1575 mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1576 ((u_int8_t *)nsge - (u_int8_t *)mcb));
1577
1578 ce = nce;
1579 }
1580
1581 DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc),
1582 i, dmap->dm_segs[i].ds_len,
1583 (u_int64_t)dmap->dm_segs[i].ds_addr);
1584
1585 sge = nsge++;
1586
1587 sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len)((__uint32_t)(flags | dmap->dm_segs[i].ds_len));
1588 mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1589 }
1590
1591 /* terminate list */
1592 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |
1593 MPI_SGE_FL_EOL);
1594
1595 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
1596 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
1597 BUS_DMASYNC_PREWRITE);
1598
1599 return (0);
1600}
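The warning above is the point of this report: 'sge' starts out NULL and is only assigned at the bottom of the loop, yet the path the analyzer follows (steps 17 through 23) takes the 'nsge == ce' branch on the very first iteration, which is only possible when the chain element occupies mcb_sgl[0], i.e. when sc_first_sgl_len is 1. The stand-alone program below is a minimal sketch of that pointer walk, not driver code; the struct layout, array size and the "chain element in slot 0" setup are assumptions made purely to show why the analyzer considers the dereference reachable, and the NULL check in it is one possible guard, not the committed fix.

/*
 * Minimal stand-alone sketch of the pointer walk the analyzer follows
 * through mpi_load_xs().  The struct layout, array size and the
 * "chain element in slot 0" setup (sc_first_sgl_len == 1) are assumed
 * for illustration only; this is not the driver's data path.
 */
#include <stdio.h>

struct sge {
	unsigned int sg_hdr;
};

int
main(void)
{
	struct sge sgl[4] = {{0}, {0}, {0}, {0}};
	struct sge *sge = NULL;		/* matches 'sge' in mpi_load_xs() */
	struct sge *nsge = &sgl[0];
	struct sge *ce = &sgl[0];	/* chain element in the first slot */
	int i;

	for (i = 0; i < 3; i++) {
		if (nsge == ce) {
			nsge++;
			/*
			 * mpi.c line 1559 ORs MPI_SGE_FL_LAST into
			 * sge->sg_hdr here.  On the first pass sge is
			 * still NULL, which is the reported dereference;
			 * a guard like this would sidestep it.
			 */
			if (sge == NULL) {
				printf("iteration %d: sge is still NULL\n", i);
				return (1);
			}
			sge->sg_hdr |= 0x80000000;	/* MPI_SGE_FL_LAST */
			ce = NULL;
		}
		sge = nsge++;
		sge->sg_hdr = 1;	/* stand-in for the simple element */
	}
	return (0);
}

Since sc_first_sgl_len is derived from the IOC's request frame size in mpi_iocfacts() below and is normally much larger than 1 on real hardware, this may well be a path the analyzer can construct but the driver never executes; the sketch only demonstrates the analyzer's reasoning.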
1601
1602int
1603mpi_scsi_probe_virtual(struct scsi_link *link)
1604{
1605 struct mpi_softc *sc = link->bus->sb_adapter_softc;
1606 struct mpi_cfg_hdr hdr;
1607 struct mpi_cfg_raid_vol_pg0 *rp0;
1608 int len;
1609 int rv;
1610
1611 if (!ISSET(sc->sc_flags, MPI_F_RAID)((sc->sc_flags) & ((1<<1))))
1612 return (0);
1613
1614 if (link->lun > 0)
1615 return (0);
1616
1617 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL(0x08),
1618 0, link->target, MPI_PG_POLL(1<<1), &hdr);
1619 if (rv != 0)
1620 return (0);
1621
1622 len = hdr.page_length * 4;
1623 rp0 = malloc(len, M_TEMP127, M_NOWAIT0x0002);
1624 if (rp0 == NULL((void *)0))
1625 return (ENOMEM12);
1626
1627 rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL(1<<1), &hdr, 1, rp0, len);
1628 if (rv == 0)
1629 SET(link->flags, SDEV_VIRTUAL)((link->flags) |= (0x0800));
1630
1631 free(rp0, M_TEMP127, len);
1632 return (0);
1633}
1634
1635int
1636mpi_scsi_probe(struct scsi_link *link)
1637{
1638 struct mpi_softc *sc = link->bus->sb_adapter_softc;
1639 struct mpi_ecfg_hdr ehdr;
1640 struct mpi_cfg_sas_dev_pg0 pg0;
1641 u_int32_t address;
1642 int rv;
1643
1644 rv = mpi_scsi_probe_virtual(link);
1645 if (rv != 0)
1646 return (rv);
1647
1648 if (ISSET(link->flags, SDEV_VIRTUAL)((link->flags) & (0x0800)))
1649 return (0);
1650
1651 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS0x30)
1652 return (0);
1653
1654 address = MPI_CFG_SAS_DEV_ADDR_BUS(1<<28) | link->target;
1655
1656 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,mpi_req_cfg_header((sc), ((0x12)), (0), (address), (1<<
1)|(1<<0), (&ehdr))
1657 address, &ehdr)mpi_req_cfg_header((sc), ((0x12)), (0), (address), (1<<
1)|(1<<0), (&ehdr))
!= 0)
1658 return (EIO5);
1659
1660 if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (address), (1<<1)|(1<<0), (
&ehdr), (1), (&pg0), (sizeof(pg0)))
!= 0)
1661 return (0);
1662
1663 DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1664 DEVNAME(sc), link->target);
1665 DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n",
1666 DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1667 DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc),
1668 letoh64(pg0.sas_addr));
1669 DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x "
1670 "access_status: 0x%02x\n", DEVNAME(sc),
1671 letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1672 DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x "
1673 "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1674 letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1675 DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc),
1676 letoh32(pg0.device_info));
1677 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n",
1678 DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1679
1680 if (ISSET(lemtoh32(&pg0.device_info),((((__uint32_t)(*(__uint32_t *)(&pg0.device_info)))) &
((1<<13)))
1681 MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)((((__uint32_t)(*(__uint32_t *)(&pg0.device_info)))) &
((1<<13)))
) {
1682 DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1683 DEVNAME(sc), link->target);
1684 link->flags |= SDEV_ATAPI0x0200;
1685 }
1686
1687 return (0);
1688}
1689
1690u_int32_t
1691mpi_read(struct mpi_softc *sc, bus_size_t r)
1692{
1693 u_int32_t rv;
1694
1695 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1696 BUS_SPACE_BARRIER_READ0x01);
1697 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)((sc->sc_iot)->read_4((sc->sc_ioh), (r)));
1698
1699 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1700
1701 return (rv);
1702}
1703
1704void
1705mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1706{
1707 DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1708
1709 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v)((sc->sc_iot)->write_4((sc->sc_ioh), (r), (v)));
1710 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1711 BUS_SPACE_BARRIER_WRITE0x02);
1712}
1713
1714int
1715mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1716 u_int32_t target)
1717{
1718 int i;
1719
1720 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1721 mask, target);
1722
1723 for (i = 0; i < 10000; i++) {
1724 if ((mpi_read(sc, r) & mask) == target)
1725 return (0);
1726 delay(1000)(*delay_func)(1000);
1727 }
1728
1729 return (1);
1730}
1731
1732int
1733mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1734 u_int32_t target)
1735{
1736 int i;
1737
1738 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1739 mask, target);
1740
1741 for (i = 0; i < 10000; i++) {
1742 if ((mpi_read(sc, r) & mask) != target)
1743 return (0);
1744 delay(1000)(*delay_func)(1000);
1745 }
1746
1747 return (1);
1748}
1749
1750int
1751mpi_init(struct mpi_softc *sc)
1752{
1753 u_int32_t db;
1754 int i;
1755
1756 /* spin until the IOC leaves the RESET state */
1757 if (mpi_wait_ne(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
1758 MPI_DOORBELL_STATE_RESET(0x0<<28)) != 0) {
1759 DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1760 "reset state\n", DEVNAME(sc));
1761 return (1);
1762 }
1763
1764 /* check current ownership */
1765 db = mpi_read_db(sc)mpi_read((sc), 0x00);
1766 if ((db & MPI_DOORBELL_WHOINIT(0x7<<24)) == MPI_DOORBELL_WHOINIT_PCIPEER(0x3<<24)) {
1767 DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1768 DEVNAME(sc));
1769 return (0);
1770 }
1771
1772 for (i = 0; i < 5; i++) {
1773 switch (db & MPI_DOORBELL_STATE(0xf<<28)) {
1774 case MPI_DOORBELL_STATE_READY(0x1<<28):
1775 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1776 DEVNAME(sc));
1777 return (0);
1778
1779 case MPI_DOORBELL_STATE_OPER(0x2<<28):
1780 case MPI_DOORBELL_STATE_FAULT(0x4<<28):
1781 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1782 "reset\n" , DEVNAME(sc));
1783 if (mpi_reset_soft(sc) != 0)
1784 mpi_reset_hard(sc);
1785 break;
1786
1787 case MPI_DOORBELL_STATE_RESET(0x0<<28):
1788 DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1789 "out of reset\n", DEVNAME(sc));
1790 if (mpi_wait_ne(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
1791 MPI_DOORBELL_STATE_RESET(0x0<<28)) != 0)
1792 return (1);
1793 break;
1794 }
1795 db = mpi_read_db(sc)mpi_read((sc), 0x00);
1796 }
1797
1798 return (1);
1799}
1800
1801int
1802mpi_reset_soft(struct mpi_softc *sc)
1803{
1804 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1805
1806 if (mpi_read_db(sc)mpi_read((sc), 0x00) & MPI_DOORBELL_INUSE(0x1<<27))
1807 return (1);
1808
1809 mpi_write_db(sc,mpi_write((sc), 0x00, (((((0x40)) << 24) & (0xff <<
24))))
1810 MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET))mpi_write((sc), 0x00, (((((0x40)) << 24) & (0xff <<
24))))
;
1811 if (mpi_wait_eq(sc, MPI_INTR_STATUS0x30,
1812 MPI_INTR_STATUS_IOCDOORBELL(1<<31), 0) != 0)
1813 return (1);
1814
1815 if (mpi_wait_eq(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
1816 MPI_DOORBELL_STATE_READY(0x1<<28)) != 0)
1817 return (1);
1818
1819 return (0);
1820}
1821
1822int
1823mpi_reset_hard(struct mpi_softc *sc)
1824{
1825 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1826
1827 /* enable diagnostic register */
1828 mpi_write(sc, MPI_WRITESEQ0x04, 0xff);
1829 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_10x04);
1830 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_20x0b);
1831 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_30x02);
1832 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_40x07);
1833 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_50x0d);
1834
1835 /* reset ioc */
1836 mpi_write(sc, MPI_HOSTDIAG0x08, MPI_HOSTDIAG_RESET_ADAPTER(1<<2));
1837
1838 delay(10000)(*delay_func)(10000);
1839
1840 /* disable diagnostic register */
1841 mpi_write(sc, MPI_WRITESEQ0x04, 0xff);
1842
1843 /* restore pci bits? */
1844
1845 /* firmware bits? */
1846 return (0);
1847}
1848
1849int
1850mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1851{
1852 u_int32_t *query = buf;
1853 int i;
1854
1855 /* make sure the doorbell is not in use. */
1856 if (mpi_read_db(sc)mpi_read((sc), 0x00) & MPI_DOORBELL_INUSE(0x1<<27))
1857 return (1);
1858
1859 /* clear pending doorbell interrupts */
1860 if (mpi_read_intr(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x30))) & MPI_INTR_STATUS_DOORBELL(1<<0))
1861 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1862
1863 /*
1864 * first write the doorbell with the handshake function and the
1865 * dword count.
1866 */
1867 mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |mpi_write((sc), 0x00, (((((0x42)) << 24) & (0xff <<
24)) | (((dwords) << 16) & (0xff << 16))))
1868 MPI_DOORBELL_DWORDS(dwords))mpi_write((sc), 0x00, (((((0x42)) << 24) & (0xff <<
24)) | (((dwords) << 16) & (0xff << 16))))
;
1869
1870 /*
1871 * the doorbell used bit will be set because a doorbell function has
1872 * started. Wait for the interrupt and then ack it.
1873 */
1874 if (mpi_wait_db_int(sc)mpi_wait_ne((sc), 0x30, (1<<0), 0) != 0)
1875 return (1);
1876 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1877
1878 /* poll for the acknowledgement. */
1879 if (mpi_wait_db_ack(sc)mpi_wait_eq((sc), 0x30, (1<<31), 0) != 0)
1880 return (1);
1881
1882 /* write the query through the doorbell. */
1883 for (i = 0; i < dwords; i++) {
1884 mpi_write_db(sc, htole32(query[i]))mpi_write((sc), 0x00, (((__uint32_t)(query[i]))));
1885 if (mpi_wait_db_ack(sc)mpi_wait_eq((sc), 0x30, (1<<31), 0) != 0)
1886 return (1);
1887 }
1888
1889 return (0);
1890}
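For reference, the handshake doorbell write above packs the function code into bits 31:24 and the dword count into bits 23:16, which is visible in the expanded MPI_DOORBELL_FUNCTION and MPI_DOORBELL_DWORDS macros in the listing. The short stand-alone sketch below just replays that packing with an arbitrary example count; the macro names it defines are local stand-ins, not the driver's.

/*
 * Illustrative sketch of the doorbell framing used by the handshake:
 * function code in bits 31:24, dword count in bits 23:16, matching the
 * MPI_DOORBELL_FUNCTION/MPI_DOORBELL_DWORDS expansions in the listing.
 */
#include <stdio.h>

#define DB_FUNCTION(f)	(((f) << 24) & 0xff000000U)
#define DB_DWORDS(d)	(((d) << 16) & 0x00ff0000U)

int
main(void)
{
	unsigned int handshake = 0x42;	/* MPI_FUNCTION_HANDSHAKE */
	unsigned int dwords = 14;	/* arbitrary example count */
	unsigned int db = DB_FUNCTION(handshake) | DB_DWORDS(dwords);

	printf("doorbell word: 0x%08x\n", db);
	printf("function: 0x%02x dwords: %u\n",
	    (db >> 24) & 0xff, (db >> 16) & 0xff);
	return (0);
}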
1891
1892int
1893mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1894{
1895 u_int16_t *words = (u_int16_t *)dword;
1896 int i;
1897
1898 for (i = 0; i < 2; i++) {
1899 if (mpi_wait_db_int(sc)mpi_wait_ne((sc), 0x30, (1<<0), 0) != 0)
1900 return (1);
1901 words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK)((__uint16_t)(mpi_read((sc), 0x00) & 0xffff));
1902 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1903 }
1904
1905 return (0);
1906}
1907
1908int
1909mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1910{
1911 struct mpi_msg_reply *reply = buf;
1912 u_int32_t *dbuf = buf, dummy;
1913 int i;
1914
1915 /* get the first dword so we can read the length out of the header. */
1916 if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1917 return (1);
1918
1919 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1920 DEVNAME(sc), dwords, reply->msg_length);
1921
1922 /*
1923 * the total length, in dwords, is in the message length field of the
1924 * reply header.
1925 */
1926 for (i = 1; i < MIN(dwords, reply->msg_length)(((dwords)<(reply->msg_length))?(dwords):(reply->msg_length
))
; i++) {
1927 if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1928 return (1);
1929 }
1930
1931 /* if there's extra stuff to come off the ioc, discard it */
1932 while (i++ < reply->msg_length) {
1933 if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1934 return (1);
1935 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1936 "0x%08x\n", DEVNAME(sc), dummy);
1937 }
1938
1939 /* wait for the doorbell used bit to be reset and clear the intr */
1940 if (mpi_wait_db_int(sc)mpi_wait_ne((sc), 0x30, (1<<0), 0) != 0)
1941 return (1);
1942 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1943
1944 return (0);
1945}
1946
1947void
1948mpi_empty_done(struct mpi_ccb *ccb)
1949{
1950 /* nothing to do */
1951}
1952
1953int
1954mpi_iocfacts(struct mpi_softc *sc)
1955{
1956 struct mpi_msg_iocfacts_request ifq;
1957 struct mpi_msg_iocfacts_reply ifp;
1958
1959 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1960
1961 memset(&ifq, 0, sizeof(ifq))__builtin_memset((&ifq), (0), (sizeof(ifq)));
1962 memset(&ifp, 0, sizeof(ifp))__builtin_memset((&ifp), (0), (sizeof(ifp)));
1963
1964 ifq.function = MPI_FUNCTION_IOC_FACTS(0x03);
1965 ifq.chain_offset = 0;
1966 ifq.msg_flags = 0;
1967 ifq.msg_context = htole32(0xdeadbeef)((__uint32_t)(0xdeadbeef));
1968
1969 if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)(sizeof(ifq) / sizeof(u_int32_t))) != 0) {
1970 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1971 DEVNAME(sc));
1972 return (1);
1973 }
1974
1975 if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)(sizeof(ifp) / sizeof(u_int32_t))) != 0) {
1976 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1977 DEVNAME(sc));
1978 return (1);
1979 }
1980
1981 DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n",
1982 DEVNAME(sc), ifp.function, ifp.msg_length,
1983 ifp.msg_version_maj, ifp.msg_version_min);
1984 DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
1985 "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1986 ifp.ioc_number, ifp.header_version_maj,
1987 ifp.header_version_min);
1988 DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc),
1989 letoh32(ifp.msg_context));
1990 DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n",
1991 DEVNAME(sc), letoh16(ifp.ioc_status),
1992 letoh16(ifp.ioc_exceptions));
1993 DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
1994 letoh32(ifp.ioc_loginfo));
1995 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x "
1996 "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
1997 ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
1998 DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n",
1999 DEVNAME(sc), letoh16(ifp.request_frame_size),
2000 letoh16(ifp.reply_queue_depth));
2001 DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc),
2002 letoh16(ifp.product_id));
2003 DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2004 letoh32(ifp.current_host_mfa_hi_addr));
2005 DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d "
2006 "global_credits: %d\n",
2007 DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2008 letoh16(ifp.global_credits));
2009 DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2010 letoh32(ifp.current_sense_buffer_hi_addr));
2011 DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n",
2012 DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2013 letoh16(ifp.current_reply_frame_size));
2014 DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc),
2015 letoh32(ifp.fw_image_size));
2016 DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2017 letoh32(ifp.ioc_capabilities));
2018 DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
2019 "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2020 ifp.fw_version_maj, ifp.fw_version_min,
2021 ifp.fw_version_unit, ifp.fw_version_dev);
2022 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n",
2023 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2024 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x "
2025 "addr 0x%08lx%08lx\n", DEVNAME(sc),
2026 letoh32(ifp.host_page_buffer_sge.sg_hdr),
2027 letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2028 letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2029
2030 sc->sc_fw_maj = ifp.fw_version_maj;
2031 sc->sc_fw_min = ifp.fw_version_min;
2032 sc->sc_fw_unit = ifp.fw_version_unit;
2033 sc->sc_fw_dev = ifp.fw_version_dev;
2034
2035 sc->sc_maxcmds = lemtoh16(&ifp.global_credits)((__uint16_t)(*(__uint16_t *)(&ifp.global_credits)));
2036 sc->sc_maxchdepth = ifp.max_chain_depth;
2037 sc->sc_ioc_number = ifp.ioc_number;
2038 if (sc->sc_flags & MPI_F_SPI(1<<0))
2039 sc->sc_buswidth = 16;
2040 else
2041 sc->sc_buswidth =
2042 (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2043 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT(1<<0))
2044 sc->sc_fw_len = lemtoh32(&ifp.fw_image_size)((__uint32_t)(*(__uint32_t *)(&ifp.fw_image_size)));
2045
2046 sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth))(((128)<(((__uint16_t)(*(__uint16_t *)(&ifp.reply_queue_depth
)))))?(128):(((__uint16_t)(*(__uint16_t *)(&ifp.reply_queue_depth
)))))
;
2047
2048 /*
2049 * you can fit sg elements on the end of the io cmd if they fit in the
2050 * request frame size.
2051 */
2052 sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size)((__uint16_t)(*(__uint16_t *)(&ifp.request_frame_size))) * 4) -
2053 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2054 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc),
2055 sc->sc_first_sgl_len);
2056
2057 sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size)((__uint16_t)(*(__uint16_t *)(&ifp.request_frame_size))) * 4) /
2058 sizeof(struct mpi_sge);
2059 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc),
2060 sc->sc_chain_len);
2061
2062 /* the sgl trailing the io cmd loses an entry to the chain element. */
2063 sc->sc_max_sgl_len = MPI_MAX_SGL36 - 1;
2064 /* the sgl chains lose an entry for each chain element */
2065 sc->sc_max_sgl_len -= (MPI_MAX_SGL36 - sc->sc_first_sgl_len) /
2066 sc->sc_chain_len;
2067 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc),
2068 sc->sc_max_sgl_len);
2069
2070 /* XXX we're ignoring the max chain depth */
2071
2072 return (0);
2073}
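The three SGL lengths computed at the end of mpi_iocfacts() follow directly from the request frame size reported by the IOC. The stand-alone sketch below works one example through the same arithmetic; the 32-dword frame and the 48-byte and 12-byte structure sizes are assumed for illustration only, not values read from hardware.

/*
 * Worked example of the SGL sizing done at the end of mpi_iocfacts().
 * The request frame size (32 dwords) and the structure sizes (48-byte
 * SCSI IO request, 12-byte SGE) are assumed purely for illustration.
 */
#include <stdio.h>

int
main(void)
{
	int request_frame_size = 32;	/* dwords, i.e. 128 bytes (assumed) */
	int scsi_io_size = 48;		/* assumed sizeof(struct mpi_msg_scsi_io) */
	int sge_size = 12;		/* assumed sizeof(struct mpi_sge) */
	int max_sgl = 36;		/* MPI_MAX_SGL */
	int first_sgl_len, chain_len, max_sgl_len;

	/* SGEs that fit after the IO request in the same frame */
	first_sgl_len = (request_frame_size * 4 - scsi_io_size) / sge_size;
	/* SGEs per chain frame */
	chain_len = (request_frame_size * 4) / sge_size;
	/* trailing SGL and each chain frame lose one entry to the chain element */
	max_sgl_len = (max_sgl - 1) - (max_sgl - first_sgl_len) / chain_len;

	printf("first_sgl_len=%d chain_len=%d max_sgl_len=%d\n",
	    first_sgl_len, chain_len, max_sgl_len);	/* 6, 10, 32 here */
	return (0);
}

With those assumed sizes a request frame holds 6 trailing SGEs, a chain frame holds 10, and the usable SGL length comes out to 32 of the 36-entry maximum.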
2074
2075int
2076mpi_iocinit(struct mpi_softc *sc)
2077{
2078 struct mpi_msg_iocinit_request iiq;
2079 struct mpi_msg_iocinit_reply iip;
2080 u_int32_t hi_addr;
2081
2082 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2083
2084 memset(&iiq, 0, sizeof(iiq))__builtin_memset((&iiq), (0), (sizeof(iiq)));
2085 memset(&iip, 0, sizeof(iip))__builtin_memset((&iip), (0), (sizeof(iip)));
2086
2087 iiq.function = MPI_FUNCTION_IOC_INIT(0x02);
2088 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER0x04;
2089
2090 iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2091 iiq.max_buses = 1;
2092
2093 iiq.msg_context = htole32(0xd00fd00f)((__uint32_t)(0xd00fd00f));
2094
2095 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE)((__uint16_t)(80));
2096
2097 hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests)((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0].ds_addr
)
>> 32);
2098 htolem32(&iiq.host_mfa_hi_addr, hi_addr)(*(__uint32_t *)(&iiq.host_mfa_hi_addr) = ((__uint32_t)(hi_addr
)))
;
2099 htolem32(&iiq.sense_buffer_hi_addr, hi_addr)(*(__uint32_t *)(&iiq.sense_buffer_hi_addr) = ((__uint32_t
)(hi_addr)))
;
2100
2101 iiq.msg_version_maj = 0x01;
2102 iiq.msg_version_min = 0x02;
2103
2104 iiq.hdr_version_unit = 0x0d;
2105 iiq.hdr_version_dev = 0x00;
2106
2107 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)(sizeof(iiq) / sizeof(u_int32_t))) != 0) {
2108 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2109 DEVNAME(sc));
2110 return (1);
2111 }
2112
2113 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)(sizeof(iip) / sizeof(u_int32_t))) != 0) {
2114 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2115 DEVNAME(sc));
2116 return (1);
2117 }
2118
2119 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d "
2120 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2121 iip.msg_length, iip.whoinit);
2122 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d "
2123 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2124 iip.max_buses, iip.max_devices, iip.flags);
2125 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2126 letoh32(iip.msg_context));
2127 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2128 letoh16(iip.ioc_status));
2129 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2130 letoh32(iip.ioc_loginfo));
2131
2132 return (0);
2133}
2134
2135int
2136mpi_portfacts(struct mpi_softc *sc)
2137{
2138 struct mpi_ccb *ccb;
2139 struct mpi_msg_portfacts_request *pfq;
2140 volatile struct mpi_msg_portfacts_reply *pfp;
2141 int rv = 1;
2142
2143 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2144
2145 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2146 if (ccb == NULL((void *)0)) {
2147 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2148 DEVNAME(sc));
2149 return (rv);
2150 }
2151
2152 ccb->ccb_done = mpi_empty_done;
2153 pfq = ccb->ccb_cmd;
2154
2155 pfq->function = MPI_FUNCTION_PORT_FACTS(0x05);
2156 pfq->chain_offset = 0;
2157 pfq->msg_flags = 0;
2158 pfq->port_number = 0;
2159
2160 if (mpi_poll(sc, ccb, 50000) != 0) {
2161 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2162 goto err;
2163 }
2164
2165 if (ccb->ccb_rcb == NULL((void *)0)) {
2166 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2167 DEVNAME(sc));
2168 goto err;
2169 }
2170 pfp = ccb->ccb_rcb->rcb_reply;
2171
2172 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n",
2173 DEVNAME(sc), pfp->function, pfp->msg_length);
2174 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n",
2175 DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2176 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2177 letoh32(pfp->msg_context));
2178 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2179 letoh16(pfp->ioc_status));
2180 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2181 letoh32(pfp->ioc_loginfo));
2182 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n",
2183 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2184 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n",
2185 DEVNAME(sc), letoh16(pfp->protocol_flags),
2186 letoh16(pfp->port_scsi_id));
2187 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d "
2188 "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2189 letoh16(pfp->max_persistent_ids),
2190 letoh16(pfp->max_posted_cmd_buffers));
2191 DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc),
2192 letoh16(pfp->max_lan_buckets));
2193
2194 sc->sc_porttype = pfp->port_type;
2195 if (sc->sc_target == -1)
2196 sc->sc_target = lemtoh16(&pfp->port_scsi_id)((__uint16_t)(*(__uint16_t *)(&pfp->port_scsi_id)));
2197
2198 mpi_push_reply(sc, ccb->ccb_rcb);
2199 rv = 0;
2200err:
2201 scsi_io_put(&sc->sc_iopool, ccb);
2202
2203 return (rv);
2204}
2205
2206int
2207mpi_cfg_coalescing(struct mpi_softc *sc)
2208{
2209 struct mpi_cfg_hdr hdr;
2210 struct mpi_cfg_ioc_pg1 pg;
2211 u_int32_t flags;
2212
2213 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr)mpi_req_cfg_header((sc), ((0x01)), (1), (0), (1<<1), (&
hdr))
!= 0) {
2214 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2215 DEVNAME(sc));
2216 return (1);
2217 }
2218
2219 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (&
pg), (sizeof(pg)))
!= 0) {
2220 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2221 DEVNAME(sc));
2222 return (1);
2223 }
2224
2225 DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2226 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc),
2227 letoh32(pg.flags));
2228 DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc),
2229 letoh32(pg.coalescing_timeout));
2230 DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n",
2231 DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2232
2233 flags = lemtoh32(&pg.flags)((__uint32_t)(*(__uint32_t *)(&pg.flags)));
2234 if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING)((flags) & ((1<<0))))
2235 return (0);
2236
2237 CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING))((pg.flags) &= ~(((__uint32_t)((1<<0)))));
2238 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (0), (&
pg), (sizeof(pg)))
!= 0) {
2239 DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2240 DEVNAME(sc));
2241 return (1);
2242 }
2243
2244 return (0);
2245}
2246
2247int
2248mpi_eventnotify(struct mpi_softc *sc)
2249{
2250 struct mpi_ccb *ccb;
2251 struct mpi_msg_event_request *enq;
2252
2253 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2254 if (ccb == NULL((void *)0)) {
2255 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2256 DEVNAME(sc));
2257 return (1);
2258 }
2259
2260 sc->sc_evt_ccb = ccb;
2261 SIMPLEQ_INIT(&sc->sc_evt_ack_queue)do { (&sc->sc_evt_ack_queue)->sqh_first = ((void *)
0); (&sc->sc_evt_ack_queue)->sqh_last = &(&
sc->sc_evt_ack_queue)->sqh_first; } while (0)
;
2262 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_evt_ack_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6)))); } while (0)
;
2263 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2264 mpi_eventack, sc);
2265
2266 ccb->ccb_done = mpi_eventnotify_done;
2267 enq = ccb->ccb_cmd;
2268
2269 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION(0x07);
2270 enq->chain_offset = 0;
2271 enq->event_switch = MPI_EVENT_SWITCH_ON(0x01);
2272
2273 mpi_start(sc, ccb);
2274 return (0);
2275}
2276
2277void
2278mpi_eventnotify_done(struct mpi_ccb *ccb)
2279{
2280 struct mpi_softc *sc = ccb->ccb_sc;
2281 struct mpi_rcb *rcb = ccb->ccb_rcb;
2282 struct mpi_msg_event_reply *enp = rcb->rcb_reply;
2283
2284 DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2285
2286 DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d "
2287 "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2288 letoh16(enp->data_length));
2289 DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n",
2290 DEVNAME(sc), enp->ack_required, enp->msg_flags);
2291 DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2292 letoh32(enp->msg_context));
2293 DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2294 letoh16(enp->ioc_status));
2295 DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2296 letoh32(enp->ioc_loginfo));
2297 DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc),
2298 letoh32(enp->event));
2299 DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc),
2300 letoh32(enp->event_context));
2301
2302 switch (lemtoh32(&enp->event)((__uint32_t)(*(__uint32_t *)(&enp->event)))) {
2303 /* ignore these */
2304 case MPI_EVENT_EVENT_CHANGE0x0a:
2305 case MPI_EVENT_SAS_PHY_LINK_STATUS0x12:
2306 break;
2307
2308 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE0x0f:
2309 if (sc->sc_scsibus == NULL((void *)0))
2310 break;
2311
2312 if (mpi_evt_sas(sc, rcb) != 0) {
2313 /* reply is freed later on */
2314 return;
2315 }
2316 break;
2317
2318 case MPI_EVENT_RESCAN0x06:
2319 if (sc->sc_scsibus != NULL((void *)0) &&
2320 sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC0x10)
2321 task_add(systq, &sc->sc_evt_rescan);
2322 break;
2323
2324 default:
2325 DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n",
2326 DEVNAME(sc), lemtoh32(&enp->event));
2327 break;
2328 }
2329
2330 mpi_eventnotify_free(sc, rcb);
2331}
2332
2333void
2334mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2335{
2336 struct mpi_msg_event_reply *enp = rcb->rcb_reply;
2337
2338 if (enp->ack_required) {
2339 mtx_enter(&sc->sc_evt_ack_mtx);
2340 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&sc->
sc_evt_ack_queue)->sqh_last = (rcb); (&sc->sc_evt_ack_queue
)->sqh_last = &(rcb)->rcb_link.sqe_next; } while (0
)
;
2341 mtx_leave(&sc->sc_evt_ack_mtx);
2342 scsi_ioh_add(&sc->sc_evt_ack_handler);
2343 } else
2344 mpi_push_reply(sc, rcb);
2345}
2346
2347int
2348mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2349{
2350 struct mpi_evt_sas_change *ch;
2351 u_int8_t *data;
2352
2353 data = rcb->rcb_reply;
2354 data += sizeof(struct mpi_msg_event_reply);
2355 ch = (struct mpi_evt_sas_change *)data;
2356
2357 if (ch->bus != 0)
2358 return (0);
2359
2360 switch (ch->reason) {
2361 case MPI_EVT_SASCH_REASON_ADDED0x03:
2362 case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED0x06:
2363 KERNEL_LOCK()_kernel_lock();
2364 if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2365 printf("%s: unable to request attach of %d\n",
2366 DEVNAME(sc)((sc)->sc_dev.dv_xname), ch->target);
2367 }
2368 KERNEL_UNLOCK()_kernel_unlock();
2369 break;
2370
2371 case MPI_EVT_SASCH_REASON_NOT_RESPONDING0x04:
2372 KERNEL_LOCK()_kernel_lock();
2373 scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE1);
2374 KERNEL_UNLOCK()_kernel_unlock();
2375
2376 mtx_enter(&sc->sc_evt_scan_mtx);
2377 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&sc->
sc_evt_scan_queue)->sqh_last = (rcb); (&sc->sc_evt_scan_queue
)->sqh_last = &(rcb)->rcb_link.sqe_next; } while (0
)
;
2378 mtx_leave(&sc->sc_evt_scan_mtx);
2379 scsi_ioh_add(&sc->sc_evt_scan_handler);
2380
2381 /* we'll handle event ack later on */
2382 return (1);
2383
2384 case MPI_EVT_SASCH_REASON_SMART_DATA0x05:
2385 case MPI_EVT_SASCH_REASON_UNSUPPORTED0x07:
2386 case MPI_EVT_SASCH_REASON_INTERNAL_RESET0x08:
2387 break;
2388 default:
2389 printf("%s: unknown reason for SAS device status change: "
2390 "0x%02x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), ch->reason);
2391 break;
2392 }
2393
2394 return (0);
2395}
2396
2397void
2398mpi_evt_sas_detach(void *cookie, void *io)
2399{
2400 struct mpi_softc *sc = cookie;
2401 struct mpi_ccb *ccb = io;
2402 struct mpi_rcb *rcb, *next;
2403 struct mpi_msg_event_reply *enp;
2404 struct mpi_evt_sas_change *ch;
2405 struct mpi_msg_scsi_task_request *str;
2406
2407 DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2408
2409 mtx_enter(&sc->sc_evt_scan_mtx);
2410 rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue)((&sc->sc_evt_scan_queue)->sqh_first);
2411 if (rcb != NULL((void *)0)) {
2412 next = SIMPLEQ_NEXT(rcb, rcb_link)((rcb)->rcb_link.sqe_next);
2413 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link)do { if (((&sc->sc_evt_scan_queue)->sqh_first = (&
sc->sc_evt_scan_queue)->sqh_first->rcb_link.sqe_next
) == ((void *)0)) (&sc->sc_evt_scan_queue)->sqh_last
= &(&sc->sc_evt_scan_queue)->sqh_first; } while
(0)
;
2414 }
2415 mtx_leave(&sc->sc_evt_scan_mtx);
2416
2417 if (rcb == NULL((void *)0)) {
2418 scsi_io_put(&sc->sc_iopool, ccb);
2419 return;
2420 }
2421
2422 enp = rcb->rcb_reply;
2423 ch = (struct mpi_evt_sas_change *)(enp + 1);
2424
2425 ccb->ccb_done = mpi_evt_sas_detach_done;
2426 str = ccb->ccb_cmd;
2427
2428 str->target_id = ch->target;
2429 str->bus = 0;
2430 str->function = MPI_FUNCTION_SCSI_TASK_MGMT(0x01);
2431
2432 str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET(0x03);
2433
2434 mpi_eventnotify_free(sc, rcb);
2435
2436 mpi_start(sc, ccb);
2437
2438 if (next != NULL((void *)0))
2439 scsi_ioh_add(&sc->sc_evt_scan_handler);
2440}
2441
2442void
2443mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2444{
2445 struct mpi_softc *sc = ccb->ccb_sc;
2446 struct mpi_msg_scsi_task_reply *r = ccb->ccb_rcb->rcb_reply;
2447
2448 KERNEL_LOCK()_kernel_lock();
2449 if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2450 DETACH_FORCE0x01) != 0) {
2451 printf("%s: unable to request detach of %d\n",
2452 DEVNAME(sc)((sc)->sc_dev.dv_xname), r->target_id);
2453 }
2454 KERNEL_UNLOCK()_kernel_unlock();
2455
2456 mpi_push_reply(sc, ccb->ccb_rcb);
2457 scsi_io_put(&sc->sc_iopool, ccb);
2458}
2459
2460void
2461mpi_fc_rescan(void *xsc)
2462{
2463 struct mpi_softc *sc = xsc;
2464 struct mpi_cfg_hdr hdr;
2465 struct mpi_cfg_fc_device_pg0 pg;
2466 struct scsi_link *link;
2467 u_int8_t devmap[256 / NBBY8];
2468 u_int32_t id = 0xffffff;
2469 int i;
2470
2471 memset(devmap, 0, sizeof(devmap))__builtin_memset((devmap), (0), (sizeof(devmap)));
2472
2473 do {
2474 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV(0x06), 0,
2475 id, 0, &hdr) != 0) {
2476 printf("%s: header get for rescan of 0x%08x failed\n",
2477 DEVNAME(sc)((sc)->sc_dev.dv_xname), id);
2478 return;
2479 }
2480
2481 memset(&pg, 0, sizeof(pg))__builtin_memset((&pg), (0), (sizeof(pg)));
2482 if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2483 break;
2484
2485 if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID)((pg.flags) & ((1<<0))) &&
2486 pg.current_bus == 0)
2487 setbit(devmap, pg.current_target_id)((devmap)[(pg.current_target_id)>>3] |= 1<<((pg.current_target_id
)&(8 -1)))
;
2488
2489 id = lemtoh32(&pg.port_id)((__uint32_t)(*(__uint32_t *)(&pg.port_id)));
2490 } while (id <= 0xff0000);
2491
2492 for (i = 0; i < sc->sc_buswidth; i++) {
2493 link = scsi_get_link(sc->sc_scsibus, i, 0);
2494
2495 if (isset(devmap, i)((devmap)[(i)>>3] & (1<<((i)&(8 -1))))) {
2496 if (link == NULL((void *)0))
2497 scsi_probe_target(sc->sc_scsibus, i);
2498 } else {
2499 if (link != NULL((void *)0)) {
2500 scsi_activate(sc->sc_scsibus, i, -1,
2501 DVACT_DEACTIVATE1);
2502 scsi_detach_target(sc->sc_scsibus, i,
2503 DETACH_FORCE0x01);
2504 }
2505 }
2506 }
2507}
2508
2509void
2510mpi_eventack(void *cookie, void *io)
2511{
2512 struct mpi_softc *sc = cookie;
2513 struct mpi_ccb *ccb = io;
2514 struct mpi_rcb *rcb, *next;
2515 struct mpi_msg_event_reply *enp;
2516 struct mpi_msg_eventack_request *eaq;
2517
2518 DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2519
2520 mtx_enter(&sc->sc_evt_ack_mtx);
2521 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue)((&sc->sc_evt_ack_queue)->sqh_first);
2522 if (rcb != NULL((void *)0)) {
2523 next = SIMPLEQ_NEXT(rcb, rcb_link)((rcb)->rcb_link.sqe_next);
2524 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link)do { if (((&sc->sc_evt_ack_queue)->sqh_first = (&
sc->sc_evt_ack_queue)->sqh_first->rcb_link.sqe_next)
== ((void *)0)) (&sc->sc_evt_ack_queue)->sqh_last =
&(&sc->sc_evt_ack_queue)->sqh_first; } while (
0)
;
2525 }
2526 mtx_leave(&sc->sc_evt_ack_mtx);
2527
2528 if (rcb == NULL((void *)0)) {
2529 scsi_io_put(&sc->sc_iopool, ccb);
2530 return;
2531 }
2532
2533 enp = rcb->rcb_reply;
2534
2535 ccb->ccb_done = mpi_eventack_done;
2536 eaq = ccb->ccb_cmd;
2537
2538 eaq->function = MPI_FUNCTION_EVENT_ACK(0x08);
2539
2540 eaq->event = enp->event;
2541 eaq->event_context = enp->event_context;
2542
2543 mpi_push_reply(sc, rcb);
2544 mpi_start(sc, ccb);
2545
2546 if (next != NULL((void *)0))
2547 scsi_ioh_add(&sc->sc_evt_ack_handler);
2548}
2549
2550void
2551mpi_eventack_done(struct mpi_ccb *ccb)
2552{
2553 struct mpi_softc *sc = ccb->ccb_sc;
2554
2555 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2556
2557 mpi_push_reply(sc, ccb->ccb_rcb);
2558 scsi_io_put(&sc->sc_iopool, ccb);
2559}
2560
2561int
2562mpi_portenable(struct mpi_softc *sc)
2563{
2564 struct mpi_ccb *ccb;
2565 struct mpi_msg_portenable_request *peq;
2566 int rv = 0;
2567
2568 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2569
2570 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2571 if (ccb == NULL((void *)0)) {
2572 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2573 DEVNAME(sc));
2574 return (1);
2575 }
2576
2577 ccb->ccb_done = mpi_empty_done;
2578 peq = ccb->ccb_cmd;
2579
2580 peq->function = MPI_FUNCTION_PORT_ENABLE(0x06);
2581 peq->port_number = 0;
2582
2583 if (mpi_poll(sc, ccb, 50000) != 0) {
2584 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2585 return (1);
2586 }
2587
2588 if (ccb->ccb_rcb == NULL((void *)0)) {
2589 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2590 DEVNAME(sc));
2591 rv = 1;
2592 } else
2593 mpi_push_reply(sc, ccb->ccb_rcb);
2594
2595 scsi_io_put(&sc->sc_iopool, ccb);
2596
2597 return (rv);
2598}
2599
2600int
2601mpi_fwupload(struct mpi_softc *sc)
2602{
2603 struct mpi_ccb *ccb;
2604 struct {
2605 struct mpi_msg_fwupload_request req;
2606 struct mpi_sge sge;
2607 } __packed__attribute__((__packed__)) *bundle;
2608 struct mpi_msg_fwupload_reply *upp;
2609 int rv = 0;
2610
2611 if (sc->sc_fw_len == 0)
2612 return (0);
2613
2614 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2615
2616 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2617 if (sc->sc_fw == NULL((void *)0)) {
2618 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2619 DEVNAME(sc), sc->sc_fw_len);
2620 return (1);
2621 }
2622
2623 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2624 if (ccb == NULL((void *)0)) {
2625 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2626 DEVNAME(sc));
2627 goto err;
2628 }
2629
2630 ccb->ccb_done = mpi_empty_done;
2631 bundle = ccb->ccb_cmd;
2632
2633 bundle->req.function = MPI_FUNCTION_FW_UPLOAD(0x12);
2634
2635 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW(0x00);
2636
2637 bundle->req.tce.details_length = 12;
2638 htolem32(&bundle->req.tce.image_size, sc->sc_fw_len)(*(__uint32_t *)(&bundle->req.tce.image_size) = ((__uint32_t
)(sc->sc_fw_len)))
;
2639
2640 htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&bundle->sge.sg_hdr) = ((__uint32_t)(
(0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<
30) | (0x1<<24) | (u_int32_t)sc->sc_fw_len)))
2641 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |(*(__uint32_t *)(&bundle->sge.sg_hdr) = ((__uint32_t)(
(0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<
30) | (0x1<<24) | (u_int32_t)sc->sc_fw_len)))
2642 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len)(*(__uint32_t *)(&bundle->sge.sg_hdr) = ((__uint32_t)(
(0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<
30) | (0x1<<24) | (u_int32_t)sc->sc_fw_len)))
;
2643 mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw)((u_int64_t)(sc->sc_fw)->mdm_map->dm_segs[0].ds_addr
)
);
2644
2645 if (mpi_poll(sc, ccb, 50000) != 0) {
2646 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2647 goto err;
2648 }
2649
2650 if (ccb->ccb_rcb == NULL((void *)0))
2651 panic("%s: unable to do fw upload", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2652 upp = ccb->ccb_rcb->rcb_reply;
2653
2654 if (lemtoh16(&upp->ioc_status)((__uint16_t)(*(__uint16_t *)(&upp->ioc_status))) != MPI_IOCSTATUS_SUCCESS(0x0000))
2655 rv = 1;
2656
2657 mpi_push_reply(sc, ccb->ccb_rcb);
2658 scsi_io_put(&sc->sc_iopool, ccb);
2659
2660 return (rv);
2661
2662err:
2663 mpi_dmamem_free(sc, sc->sc_fw);
2664 return (1);
2665}
2666
2667int
2668mpi_manufacturing(struct mpi_softc *sc)
2669{
2670 char board_name[33];
2671 struct mpi_cfg_hdr hdr;
2672 struct mpi_cfg_manufacturing_pg0 *pg;
2673 size_t pagelen;
2674 int rv = 1;
2675
2676 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,mpi_req_cfg_header((sc), ((0x09)), (0), (0), (1<<1), (&
hdr))
2677 0, 0, &hdr)mpi_req_cfg_header((sc), ((0x09)), (0), (0), (1<<1), (&
hdr))
!= 0)
2678 return (1);
2679
2680 pagelen = hdr.page_length * 4; /* dwords to bytes */
2681 if (pagelen < sizeof(*pg))
2682 return (1);
2683
2684 pg = malloc(pagelen, M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004);
2685 if (pg == NULL((void *)0))
2686 return (1);
2687
2688 if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (pg
), (pagelen))
!= 0)
2689 goto out;
2690
2691 scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));
2692
2693 printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), board_name,
2694 sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);
2695
2696 rv = 0;
2697
2698out:
2699 free(pg, M_TEMP127, pagelen);
2700 return (rv);
2701}
2702
2703void
2704mpi_get_raid(struct mpi_softc *sc)
2705{
2706 struct mpi_cfg_hdr hdr;
2707 struct mpi_cfg_ioc_pg2 *vol_page;
2708 size_t pagelen;
2709 u_int32_t capabilities;
2710
2711 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2712
2713 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr)mpi_req_cfg_header((sc), ((0x01)), (2), (0), (1<<1), (&
hdr))
!= 0) {
2714 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header"
2715 "for IOC page 2\n", DEVNAME(sc));
2716 return;
2717 }
2718
2719 pagelen = hdr.page_length * 4; /* dwords to bytes */
2720 vol_page = malloc(pagelen, M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004);
2721 if (vol_page == NULL((void *)0)) {
2722 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2723 "space for ioc config page 2\n", DEVNAME(sc));
2724 return;
2725 }
2726
2727 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen)mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (vol_page
), (pagelen))
!= 0) {
2728 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2729 "page 2\n", DEVNAME(sc));
2730 goto out;
2731 }
2732
2733 capabilities = lemtoh32(&vol_page->capabilities)((__uint32_t)(*(__uint32_t *)(&vol_page->capabilities)
))
;
2734
2735 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x08%x\n", DEVNAME(sc),
2736 letoh32(vol_page->capabilities));
2737 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d "
2738 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2739 vol_page->active_vols, vol_page->max_vols,
2740 vol_page->active_physdisks, vol_page->max_physdisks);
2741
2742 /* don't walk the list if there is no RAID capability */
2743 if (capabilities == 0xdeadbeef) {
2744 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2745 goto out;
2746 }
2747
2748 if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID)((capabilities) & (( (1<<0) | (1<<1) | (1<<
2))))
)
2749 sc->sc_flags |= MPI_F_RAID(1<<1);
2750
2751out:
2752 free(vol_page, M_TEMP127, pagelen);
2753}
2754
2755int
2756mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2757 u_int32_t address, int flags, void *p)
2758{
2759 struct mpi_ccb *ccb;
2760 struct mpi_msg_config_request *cq;
2761 struct mpi_msg_config_reply *cp;
2762 struct mpi_cfg_hdr *hdr = p;
2763 struct mpi_ecfg_hdr *ehdr = p;
2764 int etype = 0;
2765 int rv = 0;
2766
2767 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2768 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2769 address, flags, MPI_PG_FMT);
2770
2771 ccb = scsi_io_get(&sc->sc_iopool,
2772 ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1))) ? SCSI_NOSLEEP0x00001 : 0);
2773 if (ccb == NULL((void *)0)) {
2774 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2775 DEVNAME(sc));
2776 return (1);
2777 }
2778
2779 if (ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0)))) {
2780 etype = type;
2781 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED(0x0F);
2782 }
2783
2784 cq = ccb->ccb_cmd;
2785
2786 cq->function = MPI_FUNCTION_CONFIG(0x04);
2787
2788 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER(0x00);
2789
2790 cq->config_header.page_number = number;
2791 cq->config_header.page_type = type;
2792 cq->ext_page_type = etype;
2793 htolem32(&cq->page_address, address)(*(__uint32_t *)(&cq->page_address) = ((__uint32_t)(address
)))
;
2794 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24))))
2795 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL)(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24))))
;
2796
2797 ccb->ccb_done = mpi_empty_done;
2798 if (ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1)))) {
2799 if (mpi_poll(sc, ccb, 50000) != 0) {
2800 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2801 DEVNAME(sc));
2802 return (1);
2803 }
2804 } else
2805 mpi_wait(sc, ccb);
2806
2807 if (ccb->ccb_rcb == NULL((void *)0))
2808 panic("%s: unable to fetch config header", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2809 cp = ccb->ccb_rcb->rcb_reply;
2810
2811 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2812 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2813 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2814 "msg_flags: 0x%02x\n", DEVNAME(sc),
2815 letoh16(cp->ext_page_length), cp->ext_page_type,
2816 cp->msg_flags);
2817 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2818 letoh32(cp->msg_context));
2819 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2820 letoh16(cp->ioc_status));
2821 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2822 letoh32(cp->ioc_loginfo));
2823 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2824 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2825 cp->config_header.page_version,
2826 cp->config_header.page_length,
2827 cp->config_header.page_number,
2828 cp->config_header.page_type);
2829
2830 if (lemtoh16(&cp->ioc_status)((__uint16_t)(*(__uint16_t *)(&cp->ioc_status))) != MPI_IOCSTATUS_SUCCESS(0x0000))
2831 rv = 1;
2832 else if (ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0)))) {
2833 memset(ehdr, 0, sizeof(*ehdr))__builtin_memset((ehdr), (0), (sizeof(*ehdr)));
2834 ehdr->page_version = cp->config_header.page_version;
2835 ehdr->page_number = cp->config_header.page_number;
2836 ehdr->page_type = cp->config_header.page_type;
2837 ehdr->ext_page_length = cp->ext_page_length;
2838 ehdr->ext_page_type = cp->ext_page_type;
2839 } else
2840 *hdr = cp->config_header;
2841
2842 mpi_push_reply(sc, ccb->ccb_rcb);
2843 scsi_io_put(&sc->sc_iopool, ccb);
2844
2845 return (rv);
2846}
2847
2848int
2849mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2850 void *p, int read, void *page, size_t len)
2851{
2852 struct mpi_ccb *ccb;
2853 struct mpi_msg_config_request *cq;
2854 struct mpi_msg_config_reply *cp;
2855 struct mpi_cfg_hdr *hdr = p;
2856 struct mpi_ecfg_hdr *ehdr = p;
2857 char *kva;
2858 int page_length;
2859 int rv = 0;
2860
2861 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2862 DEVNAME(sc), address, read, hdr->page_type);
2863
2864 page_length = ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0))) ?
2865 lemtoh16(&ehdr->ext_page_length)((__uint16_t)(*(__uint16_t *)(&ehdr->ext_page_length))
)
: hdr->page_length;
2866
2867 if (len > MPI_REQUEST_SIZE512 - sizeof(struct mpi_msg_config_request) ||
2868 len < page_length * 4)
2869 return (1);
2870
2871 ccb = scsi_io_get(&sc->sc_iopool,
2872 ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1))) ? SCSI_NOSLEEP0x00001 : 0);
2873 if (ccb == NULL((void *)0)) {
2874 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2875 return (1);
2876 }
2877
2878 cq = ccb->ccb_cmd;
2879
2880 cq->function = MPI_FUNCTION_CONFIG(0x04);
2881
2882 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT(0x01) :
2883 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT(0x02));
2884
2885 if (ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0)))) {
2886 cq->config_header.page_version = ehdr->page_version;
2887 cq->config_header.page_number = ehdr->page_number;
2888 cq->config_header.page_type = ehdr->page_type;
2889 cq->ext_page_len = ehdr->ext_page_length;
2890 cq->ext_page_type = ehdr->ext_page_type;
2891 } else
2892 cq->config_header = *hdr;
2893 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK(0x0f);
2894 htolem32(&cq->page_address, address)(*(__uint32_t *)(&cq->page_address) = ((__uint32_t)(address
)))
;
2895 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
2896 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
2897 (page_length * 4) |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
2898 (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT))(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
;
2899
2900 /* bounce the page via the request space to avoid more bus_dma games */
2901 mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2902 sizeof(struct mpi_msg_config_request));
2903
2904 kva = ccb->ccb_cmd;
2905 kva += sizeof(struct mpi_msg_config_request);
2906 if (!read)
2907 memcpy(kva, page, len)__builtin_memcpy((kva), (page), (len));
2908
2909 ccb->ccb_done = mpi_empty_done;
2910 if (ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1)))) {
2911 if (mpi_poll(sc, ccb, 50000) != 0) {
2912 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2913 DEVNAME(sc));
2914 return (1);
2915 }
2916 } else
2917 mpi_wait(sc, ccb);
2918
2919 if (ccb->ccb_rcb == NULL((void *)0)) {
2920 scsi_io_put(&sc->sc_iopool, ccb);
2921 return (1);
2922 }
2923 cp = ccb->ccb_rcb->rcb_reply;
2924
2925 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2926 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2927 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2928 "msg_flags: 0x%02x\n", DEVNAME(sc),
2929 letoh16(cp->ext_page_length), cp->ext_page_type,
2930 cp->msg_flags);
2931 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2932 letoh32(cp->msg_context));
2933 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2934 letoh16(cp->ioc_status));
2935 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2936 letoh32(cp->ioc_loginfo));
2937 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2938 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2939 cp->config_header.page_version,
2940 cp->config_header.page_length,
2941 cp->config_header.page_number,
2942 cp->config_header.page_type);
2943
2944 if (lemtoh16(&cp->ioc_status) != MPI_IOCSTATUS_SUCCESS)
2945 rv = 1;
2946 else if (read)
2947 memcpy(page, kva, len);
2948
2949 mpi_push_reply(sc, ccb->ccb_rcb);
2950 scsi_io_put(&sc->sc_iopool, ccb);
2951
2952 return (rv);
2953}
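mpi_cfg_page() ends here. For orientation, the configuration-page helpers are used in a two-step pattern throughout the rest of this file: fetch the page header first, then transfer the page body against that header. A condensed sketch of that pattern, modelled on the mpi_ioctl_disk() code further down (error handling abbreviated; sc and address are the identifiers used in this file):

	struct mpi_cfg_hdr hdr;
	struct mpi_cfg_raid_physdisk_pg0 pdpg0;

	/* step 1: ask the IOC for the header describing the page */
	if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0,
	    address, MPI_PG_POLL, &hdr) != 0)
		return (EIO);

	/* step 2: read the page body using that header */
	if (mpi_req_cfg_page(sc, address, MPI_PG_POLL, &hdr, 1,
	    &pdpg0, sizeof(pdpg0)) != 0)
		return (EIO);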
2954
2955int
2956mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2957{
2958 struct mpi_softc *sc = link->bus->sb_adapter_softc;
2959
2960 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2961
2962 switch (cmd) {
2963 case DIOCGCACHE:
2964 case DIOCSCACHE:
2965 if (ISSET(link->flags, SDEV_VIRTUAL)) {
2966 return (mpi_ioctl_cache(link, cmd,
2967 (struct dk_cache *)addr));
2968 }
2969 break;
2970
2971 default:
2972 if (sc->sc_ioctl)
2973 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2974
2975 break;
2976 }
2977
2978 return (ENOTTY);
2979}
2980
2981int
2982mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2983{
2984 struct mpi_softc *sc = link->bus->sb_adapter_softc;
2985 struct mpi_ccb *ccb;
2986 int len, rv;
2987 struct mpi_cfg_hdr hdr;
2988 struct mpi_cfg_raid_vol_pg0 *rpg0;
2989 int enabled;
2990 struct mpi_msg_raid_action_request *req;
2991 struct mpi_msg_raid_action_reply *rep;
2992 struct mpi_raid_settings settings;
2993
2994 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
2995 link->target, MPI_PG_POLL, &hdr);
2996 if (rv != 0)
2997 return (EIO);
2998
2999 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3000 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3001 rpg0 = malloc(len, M_TEMP, M_NOWAIT);
3002 if (rpg0 == NULL)
3003 return (ENOMEM);
3004
3005 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL, &hdr, 1,
3006 rpg0, len) != 0) {
3007 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3008 DEVNAME(sc));
3009 rv = EIO;
3010 goto done;
3011 }
3012
3013 enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),
3014 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN) ? 1 : 0;
3015
3016 if (cmd == DIOCGCACHE) {
3017 dc->wrcache = enabled;
3018 dc->rdcache = 0;
3019 goto done;
3020 } /* else DIOCSCACHE */
3021
3022 if (dc->rdcache) {
3023 rv = EOPNOTSUPP;
3024 goto done;
3025 }
3026
3027 if (((dc->wrcache) ? 1 : 0) == enabled)
3028 goto done;
3029
3030 settings = rpg0->settings;
3031 if (dc->wrcache) {
3032 SET(settings.volume_settings,
3033 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3034 } else {
3035 CLR(settings.volume_settings,
3036 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN));
3037 }
3038
3039 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP);
3040 if (ccb == NULL) {
3041 rv = ENOMEM;
3042 goto done;
3043 }
3044
3045 req = ccb->ccb_cmd;
3046 req->function = MPI_FUNCTION_RAID_ACTION;
3047 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS;
3048 req->vol_id = rpg0->volume_id;
3049 req->vol_bus = rpg0->volume_bus;
3050
3051 memcpy(&req->data_word, &settings, sizeof(req->data_word));
3052 ccb->ccb_done = mpi_empty_done;
3053 if (mpi_poll(sc, ccb, 50000) != 0) {
3054 rv = EIO;
3055 goto done;
3056 }
3057
3058 rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb;
3059 if (rep == NULL)
3060 panic("%s: raid volume settings change failed", DEVNAME(sc));
3061
3062 switch (lemtoh16(&rep->action_status)) {
3063 case MPI_RAID_ACTION_STATUS_OK:
3064 rv = 0;
3065 break;
3066 default:
3067 rv = EIO;
3068 break;
3069 }
3070
3071 mpi_push_reply(sc, ccb->ccb_rcb);
3072 scsi_io_put(&sc->sc_iopool, ccb);
3073
3074done:
3075 free(rpg0, M_TEMP, len);
3076 return (rv);
3077}
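mpi_ioctl_cache() is reached from userland through the disk ioctls handled by mpi_scsi_ioctl() above. A minimal userland sketch of toggling the volume write cache through that path, assuming the RAID volume attached as sd0 (illustration only, not part of the driver; error handling kept minimal):

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <unistd.h>

int
set_wrcache(int enable)
{
	struct dk_cache dc;
	int fd, rv = -1;

	if ((fd = open("/dev/rsd0c", O_RDWR)) == -1)
		return (-1);
	if (ioctl(fd, DIOCGCACHE, &dc) != -1) {
		dc.wrcache = enable;
		dc.rdcache = 0;	/* read cache toggling is rejected with EOPNOTSUPP above */
		rv = ioctl(fd, DIOCSCACHE, &dc);
	}
	close(fd);
	return (rv);
}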
3078
3079#if NBIO > 0
3080int
3081mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3082{
3083 int len, rv = EINVAL;
3084 u_int32_t address;
3085 struct mpi_cfg_hdr hdr;
3086 struct mpi_cfg_raid_vol_pg0 *rpg0;
3087
3088 /* get IOC page 2 */
3089 if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3090 sc->sc_cfg_hdr.page_length * 4) != 0) {
3091 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3092 "fetch IOC page 2\n", DEVNAME(sc));
3093 goto done;
3094 }
3095
3096 /* XXX return something other than EINVAL to indicate id is in hot-spare range */
3097 if (id > sc->sc_vol_page->active_vols) {
3098 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3099 "id: %d\n", DEVNAME(sc), id);
3100 goto done;
3101 }
3102
3103 /* replace current buffer with new one */
3104 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3105 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3106 rpg0 = malloc(len, M_DEVBUF, M_WAITOK | M_CANFAIL);
3107 if (rpg0 == NULL) {
3108 printf("%s: can't get memory for RAID page 0, "
3109 "bio disabled\n", DEVNAME(sc));
3110 goto done;
3111 }
3112 if (sc->sc_rpg0)
3113 free(sc->sc_rpg0, M_DEVBUF, 0);
3114 sc->sc_rpg0 = rpg0;
3115
3116 /* get raid vol page 0 */
3117 address = sc->sc_vol_list[id].vol_id |
3118 (sc->sc_vol_list[id].vol_bus << 8);
3119 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
3120 address, 0, &hdr) != 0)
3121 goto done;
3122 if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3123 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3124 DEVNAME(sc));
3125 goto done;
3126 }
3127
3128 rv = 0;
3129done:
3130 return (rv);
3131}
3132
3133int
3134mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3135{
3136 struct mpi_softc *sc = (struct mpi_softc *)dev;
3137 int error = 0;
3138
3139 DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3140
3141 /* make sure we have bio enabled */
3142 if (sc->sc_ioctl != mpi_ioctl)
3143 return (EINVAL);
3144
3145 rw_enter_write(&sc->sc_lock);
3146
3147 switch (cmd) {
3148 case BIOCINQ:
3149 DNPRINTF(MPI_D_IOCTL, "inq\n");
3150 error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3151 break;
3152
3153 case BIOCVOL:
3154 DNPRINTF(MPI_D_IOCTL, "vol\n");
3155 error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3156 break;
3157
3158 case BIOCDISK:
3159 DNPRINTF(MPI_D_IOCTL, "disk\n");
3160 error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3161 break;
3162
3163 case BIOCALARM:
3164 DNPRINTF(MPI_D_IOCTL, "alarm\n");
3165 break;
3166
3167 case BIOCBLINK:
3168 DNPRINTF(MPI_D_IOCTL, "blink\n");
3169 break;
3170
3171 case BIOCSETSTATE:
3172 DNPRINTF(MPI_D_IOCTL, "setstate\n");
3173 error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3174 break;
3175
3176 default:
3177 DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3178 error = ENOTTY;
3179 }
3180
3181 rw_exit_write(&sc->sc_lock);
3182
3183 return (error);
3184}
3185
3186int
3187mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3188{
3189 if (!(sc->sc_flags & MPI_F_RAID)) {
3190 bi->bi_novol = 0;
3191 bi->bi_nodisk = 0;
3192 }
3193
3194 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3195 sc->sc_cfg_hdr.page_length * 4) != 0) {
3196 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3197 "page 2\n", DEVNAME(sc));
3199 return (EINVAL);
3199 }
3200
3201 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d "
3202 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3203 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3204 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3205
3206 bi->bi_novol = sc->sc_vol_page->active_vols;
3207 bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3208 strlcpy(bi->bi_dev, DEVNAME(sc), sizeof(bi->bi_dev));
3209
3210 return (0);
3211}
3212
3213int
3214mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3215{
3216 int i, vol, id, rv = EINVAL;
3217 struct device *dev;
3218 struct scsi_link *link;
3219 struct mpi_cfg_raid_vol_pg0 *rpg0;
3220 char *vendp;
3221
3222 id = bv->bv_volid;
3223 if (mpi_bio_get_pg0_raid(sc, id))
3224 goto done;
3225
3226 if (id > sc->sc_vol_page->active_vols)
3227 return (EINVAL); /* XXX deal with hot spares */
3228
3229 rpg0 = sc->sc_rpg0;
3230 if (rpg0 == NULL)
3231 goto done;
3232
3233 /* determine status */
3234 switch (rpg0->volume_state) {
3235 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3236 bv->bv_status = BIOC_SVONLINE;
3237 break;
3238 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3239 bv->bv_status = BIOC_SVDEGRADED;
3240 break;
3241 case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3242 case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3243 bv->bv_status = BIOC_SVOFFLINE;
3244 break;
3245 default:
3246 bv->bv_status = BIOC_SVINVALID;
3247 }
3248
3249 /* override status if scrubbing or something */
3250 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING)
3251 bv->bv_status = BIOC_SVREBUILD;
3252
3253 bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba) * 512;
3254
3255 switch (sc->sc_vol_list[id].vol_type) {
3256 case MPI_CFG_RAID_TYPE_RAID_IS:
3257 bv->bv_level = 0;
3258 break;
3259 case MPI_CFG_RAID_TYPE_RAID_IME:
3260 case MPI_CFG_RAID_TYPE_RAID_IM:
3261 bv->bv_level = 1;
3262 break;
3263 case MPI_CFG_RAID_TYPE_RAID_5:
3264 bv->bv_level = 5;
3265 break;
3266 case MPI_CFG_RAID_TYPE_RAID_6:
3267 bv->bv_level = 6;
3268 break;
3269 case MPI_CFG_RAID_TYPE_RAID_10:
3270 bv->bv_level = 10;
3271 break;
3272 case MPI_CFG_RAID_TYPE_RAID_50:
3273 bv->bv_level = 50;
3274 break;
3275 default:
3276 bv->bv_level = -1;
3277 }
3278
3279 bv->bv_nodisk = rpg0->num_phys_disks;
3280
3281 for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3282 link = scsi_get_link(sc->sc_scsibus, i, 0);
3284 if (link == NULL)
3284 continue;
3285
3286 /* skip if not a virtual disk */
3287 if (!(link->flags & SDEV_VIRTUAL))
3288 continue;
3289
3290 vol++;
3291 /* are we it? */
3292 if (vol == bv->bv_volid) {
3293 dev = link->device_softc;
3294 vendp = link->inqdata.vendor;
3295 memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor);
3296 bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3297 strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3298 break;
3299 }
3300 }
3301 rv = 0;
3302done:
3303 return (rv);
3304}
3305
3306int
3307mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3308{
3309 int pdid, id, rv = EINVAL;
3310 u_int32_t address;
3311 struct mpi_cfg_hdr hdr;
3312 struct mpi_cfg_raid_vol_pg0 *rpg0;
3313 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3314 struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3315
3316 id = bd->bd_volid;
3317 if (mpi_bio_get_pg0_raid(sc, id))
3318 goto done;
3319
3320 if (id > sc->sc_vol_page->active_vols)
3321 return (EINVAL); /* XXX deal with hot spares */
3322
3323 rpg0 = sc->sc_rpg0;
3324 if (rpg0 == NULL)
3325 goto done;
3326
3327 pdid = bd->bd_diskid;
3328 if (pdid > rpg0->num_phys_disks)
3329 goto done;
3330 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3331 physdisk += pdid;
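	/*
	 * The per-disk entries are laid out directly after the fixed
	 * struct mpi_cfg_raid_vol_pg0 header in the buffer allocated by
	 * mpi_bio_get_pg0_raid() (sizeof(*rpg0) plus max_physdisks entries),
	 * so advancing past rpg0 + 1 by pdid selects the requested disk.
	 */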
3332
3333 /* get raid phys disk page 0 */
3334 address = physdisk->phys_disk_num;
3335 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,
3336 &hdr) != 0)
3337 goto done;
3338 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)) {
3339 bd->bd_status = BIOC_SDFAILED;
3340 return (0);
3341 }
3342 bd->bd_channel = pdpg0.phys_disk_bus;
3343 bd->bd_target = pdpg0.phys_disk_id;
3344 bd->bd_lun = 0;
3345 bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba) * 512;
3346 strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3347
3348 switch (pdpg0.phys_disk_state) {
3349 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE:
3350 bd->bd_status = BIOC_SDONLINE;
3351 break;
3352 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING:
3353 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED:
3354 bd->bd_status = BIOC_SDFAILED;
3355 break;
3356 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL:
3357 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER:
3358 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
3359 bd->bd_status = BIOC_SDOFFLINE;
3360 break;
3361 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT:
3362 bd->bd_status = BIOC_SDSCRUB;
3363 break;
3364 case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT:
3365 default:
3366 bd->bd_status = BIOC_SDINVALID;
3367 break;
3368 }
3369
3370 /* XXX figure this out */
3371 /* bd_serial[32]; */
3372 /* bd_procdev[16]; */
3373
3374 rv = 0;
3375done:
3376 return (rv);
3377}
3378
3379int
3380mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3381{
3382 return (ENOTTY);
3383}
3384
3385#ifndef SMALL_KERNEL
3386int
3387mpi_create_sensors(struct mpi_softc *sc)
3388{
3389 struct device *dev;
3390 struct scsi_link *link;
3391 int i, vol, nsensors;
3392
3393 /* count volumes */
3394 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3395 link = scsi_get_link(sc->sc_scsibus, i, 0);
3396 if (link == NULL)
3397 continue;
3398 /* skip if not a virtual disk */
3399 if (!(link->flags & SDEV_VIRTUAL))
3400 continue;
3401
3402 vol++;
3403 }
3404 if (vol == 0)
3405 return (0);
3406
3407 sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3408 M_DEVBUF, M_NOWAIT | M_ZERO);
3409 if (sc->sc_sensors == NULL)
3410 return (1);
3411 nsensors = vol;
3412
3413 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
3414 sizeof(sc->sc_sensordev.xname));
3415
3416 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3417 link = scsi_get_link(sc->sc_scsibus, i, 0);
3418 if (link == NULL)
3419 continue;
3420 /* skip if not a virtual disk */
3421 if (!(link->flags & SDEV_VIRTUAL))
3422 continue;
3423
3424 dev = link->device_softc;
3425 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3426 sizeof(sc->sc_sensors[vol].desc));
3427 sc->sc_sensors[vol].type = SENSOR_DRIVE;
3428 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3429 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3430
3431 vol++;
3432 }
3433
3434 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL)
3435 goto bad;
3436
3437 sensordev_install(&sc->sc_sensordev);
3438
3439 return (0);
3440
3441bad:
3442 free(sc->sc_sensors, M_DEVBUF, nsensors * sizeof(struct ksensor));
3443 return (1);
3444}
3445
3446void
3447mpi_refresh_sensors(void *arg)
3448{
3449 int i, vol;
3450 struct scsi_link *link;
3451 struct mpi_softc *sc = arg;
3452 struct mpi_cfg_raid_vol_pg0 *rpg0;
3453
3454 rw_enter_write(&sc->sc_lock);
3455
3456 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3457 link = scsi_get_link(sc->sc_scsibus, i, 0);
3458 if (link == NULL)
3459 continue;
3460 /* skip if not a virtual disk */
3461 if (!(link->flags & SDEV_VIRTUAL))
3462 continue;
3463
3464 if (mpi_bio_get_pg0_raid(sc, vol))
3465 continue;
3466
3467 rpg0 = sc->sc_rpg0;
3468 if (rpg0 == NULL)
3469 goto done;
3470
3471 /* determine status */
3472 switch (rpg0->volume_state) {
3473 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL:
3474 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE;
3475 sc->sc_sensors[vol].status = SENSOR_S_OK;
3476 break;
3477 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED:
3478 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL;
3479 sc->sc_sensors[vol].status = SENSOR_S_WARN;
3480 break;
3481 case MPI_CFG_RAID_VOL_0_STATE_FAILED:
3482 case MPI_CFG_RAID_VOL_0_STATE_MISSING:
3483 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL;
3484 sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3485 break;
3486 default:
3487 sc->sc_sensors[vol].value = 0; /* unknown */
3488 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3489 }
3490
3491 /* override status if scrubbing or something */
3492 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING) {
3493 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD;
3494 sc->sc_sensors[vol].status = SENSOR_S_WARN;
3495 }
3496
3497 vol++;
3498 }
3499done:
3500 rw_exit_write(&sc->sc_lock);
3501}
3502#endif /* SMALL_KERNEL */
3503#endif /* NBIO > 0 */