Bug Summary

File: dev/ic/mpi.c
Warning: line 1594, column 14
Access to field 'sg_hdr' results in a dereference of a null pointer (loaded from variable 'sge')

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name mpi.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I 
/usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/ic/mpi.c
1/* $OpenBSD: mpi.c,v 1.226 2023/07/06 10:17:43 visa Exp $ */
2
3/*
4 * Copyright (c) 2005, 2006, 2009 David Gwynne <dlg@openbsd.org>
5 * Copyright (c) 2005, 2008, 2009 Marco Peereboom <marco@openbsd.org>
6 *
7 * Permission to use, copy, modify, and distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 */
19
20#include "bio.h"
21
22#include <sys/param.h>
23#include <sys/systm.h>
24#include <sys/buf.h>
25#include <sys/device.h>
26#include <sys/malloc.h>
27#include <sys/kernel.h>
28#include <sys/mutex.h>
29#include <sys/rwlock.h>
30#include <sys/sensors.h>
31#include <sys/dkio.h>
32#include <sys/task.h>
33
34#include <machine/bus.h>
35
36#include <scsi/scsi_all.h>
37#include <scsi/scsiconf.h>
38
39#include <dev/biovar.h>
40#include <dev/ic/mpireg.h>
41#include <dev/ic/mpivar.h>
42
#ifdef MPI_DEBUG
/*
 * Debug category mask; uncomment individual MPI_D_* flags below to
 * enable that category's DNPRINTF output at compile time.
 */
uint32_t mpi_debug = 0
/*	| MPI_D_CMD */
/*	| MPI_D_INTR */
/*	| MPI_D_MISC */
/*	| MPI_D_DMA */
/*	| MPI_D_IOCTL */
/*	| MPI_D_RW */
/*	| MPI_D_MEM */
/*	| MPI_D_CCB */
/*	| MPI_D_PPR */
/*	| MPI_D_RAID */
/*	| MPI_D_EVT */
	;
#endif
58
59struct cfdriver mpi_cd = {
60 NULL((void *)0),
61 "mpi",
62 DV_DULL
63};
64
65void mpi_scsi_cmd(struct scsi_xfer *);
66void mpi_scsi_cmd_done(struct mpi_ccb *);
67int mpi_scsi_probe(struct scsi_link *);
68int mpi_scsi_ioctl(struct scsi_link *, u_long, caddr_t,
69 int);
70
71const struct scsi_adapter mpi_switch = {
72 mpi_scsi_cmd, NULL((void *)0), mpi_scsi_probe, NULL((void *)0), mpi_scsi_ioctl
73};
74
75struct mpi_dmamem *mpi_dmamem_alloc(struct mpi_softc *, size_t);
76void mpi_dmamem_free(struct mpi_softc *,
77 struct mpi_dmamem *);
78int mpi_alloc_ccbs(struct mpi_softc *);
79void *mpi_get_ccb(void *);
80void mpi_put_ccb(void *, void *);
81int mpi_alloc_replies(struct mpi_softc *);
82void mpi_push_replies(struct mpi_softc *);
83void mpi_push_reply(struct mpi_softc *, struct mpi_rcb *);
84
85void mpi_start(struct mpi_softc *, struct mpi_ccb *);
86int mpi_poll(struct mpi_softc *, struct mpi_ccb *, int);
87void mpi_poll_done(struct mpi_ccb *);
88void mpi_reply(struct mpi_softc *, u_int32_t);
89
90void mpi_wait(struct mpi_softc *sc, struct mpi_ccb *);
91void mpi_wait_done(struct mpi_ccb *);
92
93int mpi_cfg_spi_port(struct mpi_softc *);
94void mpi_squash_ppr(struct mpi_softc *);
95void mpi_run_ppr(struct mpi_softc *);
96int mpi_ppr(struct mpi_softc *, struct scsi_link *,
97 struct mpi_cfg_raid_physdisk *, int, int, int);
98int mpi_inq(struct mpi_softc *, u_int16_t, int);
99
100int mpi_cfg_sas(struct mpi_softc *);
101int mpi_cfg_fc(struct mpi_softc *);
102
103void mpi_timeout_xs(void *);
104int mpi_load_xs(struct mpi_ccb *);
105
106u_int32_t mpi_read(struct mpi_softc *, bus_size_t);
107void mpi_write(struct mpi_softc *, bus_size_t, u_int32_t);
108int mpi_wait_eq(struct mpi_softc *, bus_size_t, u_int32_t,
109 u_int32_t);
110int mpi_wait_ne(struct mpi_softc *, bus_size_t, u_int32_t,
111 u_int32_t);
112
113int mpi_init(struct mpi_softc *);
114int mpi_reset_soft(struct mpi_softc *);
115int mpi_reset_hard(struct mpi_softc *);
116
117int mpi_handshake_send(struct mpi_softc *, void *, size_t);
118int mpi_handshake_recv_dword(struct mpi_softc *,
119 u_int32_t *);
120int mpi_handshake_recv(struct mpi_softc *, void *, size_t);
121
122void mpi_empty_done(struct mpi_ccb *);
123
124int mpi_iocinit(struct mpi_softc *);
125int mpi_iocfacts(struct mpi_softc *);
126int mpi_portfacts(struct mpi_softc *);
127int mpi_portenable(struct mpi_softc *);
128int mpi_cfg_coalescing(struct mpi_softc *);
129void mpi_get_raid(struct mpi_softc *);
130int mpi_fwupload(struct mpi_softc *);
131int mpi_manufacturing(struct mpi_softc *);
132int mpi_scsi_probe_virtual(struct scsi_link *);
133
134int mpi_eventnotify(struct mpi_softc *);
135void mpi_eventnotify_done(struct mpi_ccb *);
136void mpi_eventnotify_free(struct mpi_softc *,
137 struct mpi_rcb *);
138void mpi_eventack(void *, void *);
139void mpi_eventack_done(struct mpi_ccb *);
140int mpi_evt_sas(struct mpi_softc *, struct mpi_rcb *);
141void mpi_evt_sas_detach(void *, void *);
142void mpi_evt_sas_detach_done(struct mpi_ccb *);
143void mpi_fc_rescan(void *);
144
145int mpi_req_cfg_header(struct mpi_softc *, u_int8_t,
146 u_int8_t, u_int32_t, int, void *);
147int mpi_req_cfg_page(struct mpi_softc *, u_int32_t, int,
148 void *, int, void *, size_t);
149
150int mpi_ioctl_cache(struct scsi_link *, u_long,
151 struct dk_cache *);
152
153#if NBIO1 > 0
154int mpi_bio_get_pg0_raid(struct mpi_softc *, int);
155int mpi_ioctl(struct device *, u_long, caddr_t);
156int mpi_ioctl_inq(struct mpi_softc *, struct bioc_inq *);
157int mpi_ioctl_vol(struct mpi_softc *, struct bioc_vol *);
158int mpi_ioctl_disk(struct mpi_softc *, struct bioc_disk *);
159int mpi_ioctl_setstate(struct mpi_softc *, struct bioc_setstate *);
160#ifndef SMALL_KERNEL
161int mpi_create_sensors(struct mpi_softc *);
162void mpi_refresh_sensors(void *);
163#endif /* SMALL_KERNEL */
164#endif /* NBIO > 0 */
165
#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

/* Register access shorthand for the doorbell/interrupt/reply registers. */
#define mpi_read_db(s)		mpi_read((s), MPI_DOORBELL)
#define mpi_write_db(s, v)	mpi_write((s), MPI_DOORBELL, (v))
#define mpi_read_intr(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
				    MPI_INTR_STATUS)
#define mpi_write_intr(s, v)	mpi_write((s), MPI_INTR_STATUS, (v))
#define mpi_pop_reply(s)	bus_space_read_4((s)->sc_iot, (s)->sc_ioh, \
				    MPI_REPLY_QUEUE)
#define mpi_push_reply_db(s, v) bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
				    MPI_REPLY_QUEUE, (v))

#define mpi_wait_db_int(s)	mpi_wait_ne((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_DOORBELL, 0)
#define mpi_wait_db_ack(s)	mpi_wait_eq((s), MPI_INTR_STATUS, \
				    MPI_INTR_STATUS_IOCDOORBELL, 0)

/* Flags for mpi_req_cfg_header()/mpi_req_cfg_page(). */
#define MPI_PG_EXTENDED		(1<<0)
#define MPI_PG_POLL		(1<<1)
#define MPI_PG_FMT		"\020" "\002POLL" "\001EXTENDED"

/* Polled configuration page accessors (regular and extended pages). */
#define mpi_cfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL, (_h))
#define mpi_ecfg_header(_s, _t, _n, _a, _h) \
	mpi_req_cfg_header((_s), (_t), (_n), (_a), \
	    MPI_PG_POLL|MPI_PG_EXTENDED, (_h))

#define mpi_cfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL, \
	    (_h), (_r), (_p), (_l))
#define mpi_ecfg_page(_s, _a, _h, _r, _p, _l) \
	mpi_req_cfg_page((_s), (_a), MPI_PG_POLL|MPI_PG_EXTENDED, \
	    (_h), (_r), (_p), (_l))
202
203static inline void
204mpi_dvatosge(struct mpi_sge *sge, u_int64_t dva)
205{
206 htolem32(&sge->sg_addr_lo, dva)(*(__uint32_t *)(&sge->sg_addr_lo) = ((__uint32_t)(dva
)))
;
207 htolem32(&sge->sg_addr_hi, dva >> 32)(*(__uint32_t *)(&sge->sg_addr_hi) = ((__uint32_t)(dva
>> 32)))
;
208}
209
210int
211mpi_attach(struct mpi_softc *sc)
212{
213 struct scsibus_attach_args saa;
214 struct mpi_ccb *ccb;
215
216 printf("\n");
217
218 rw_init(&sc->sc_lock, "mpi_lock")_rw_init_flags(&sc->sc_lock, "mpi_lock", 0, ((void *)0
))
;
219 task_set(&sc->sc_evt_rescan, mpi_fc_rescan, sc);
220
221 /* disable interrupts */
222 mpi_write(sc, MPI_INTR_MASK0x34,
223 MPI_INTR_MASK_REPLY(1<<3) | MPI_INTR_MASK_DOORBELL(1<<0));
224
225 if (mpi_init(sc) != 0) {
226 printf("%s: unable to initialise\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
227 return (1);
228 }
229
230 if (mpi_iocfacts(sc) != 0) {
231 printf("%s: unable to get iocfacts\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
232 return (1);
233 }
234
235 if (mpi_alloc_ccbs(sc) != 0) {
236 /* error already printed */
237 return (1);
238 }
239
240 if (mpi_alloc_replies(sc) != 0) {
241 printf("%s: unable to allocate reply space\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
242 goto free_ccbs;
243 }
244
245 if (mpi_iocinit(sc) != 0) {
246 printf("%s: unable to send iocinit\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
247 goto free_ccbs;
248 }
249
250 /* spin until we're operational */
251 if (mpi_wait_eq(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
252 MPI_DOORBELL_STATE_OPER(0x2<<28)) != 0) {
253 printf("%s: state: 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
254 mpi_read_db(sc)mpi_read((sc), 0x00) & MPI_DOORBELL_STATE(0xf<<28));
255 printf("%s: operational state timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
256 goto free_ccbs;
257 }
258
259 mpi_push_replies(sc);
260
261 if (mpi_portfacts(sc) != 0) {
262 printf("%s: unable to get portfacts\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
263 goto free_replies;
264 }
265
266 if (mpi_cfg_coalescing(sc) != 0) {
267 printf("%s: unable to configure coalescing\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
268 goto free_replies;
269 }
270
271 switch (sc->sc_porttype) {
272 case MPI_PORTFACTS_PORTTYPE_SAS0x30:
273 SIMPLEQ_INIT(&sc->sc_evt_scan_queue)do { (&sc->sc_evt_scan_queue)->sqh_first = ((void *
)0); (&sc->sc_evt_scan_queue)->sqh_last = &(&
sc->sc_evt_scan_queue)->sqh_first; } while (0)
;
274 mtx_init(&sc->sc_evt_scan_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_evt_scan_mtx), ((((0x3)) > 0x0 && ((0x3)) < 0x9
) ? 0x9 : ((0x3)))); } while (0)
;
275 scsi_ioh_set(&sc->sc_evt_scan_handler, &sc->sc_iopool,
276 mpi_evt_sas_detach, sc);
277 /* FALLTHROUGH */
278 case MPI_PORTFACTS_PORTTYPE_FC0x10:
279 if (mpi_eventnotify(sc) != 0) {
280 printf("%s: unable to enable events\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
281 goto free_replies;
282 }
283 break;
284 }
285
286 if (mpi_portenable(sc) != 0) {
287 printf("%s: unable to enable port\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
288 goto free_replies;
289 }
290
291 if (mpi_fwupload(sc) != 0) {
292 printf("%s: unable to upload firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
293 goto free_replies;
294 }
295
296 if (mpi_manufacturing(sc) != 0) {
297 printf("%s: unable to fetch manufacturing info\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
298 goto free_replies;
299 }
300
301 switch (sc->sc_porttype) {
302 case MPI_PORTFACTS_PORTTYPE_SCSI0x01:
303 if (mpi_cfg_spi_port(sc) != 0) {
304 printf("%s: unable to configure spi\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
305 goto free_replies;
306 }
307 mpi_squash_ppr(sc);
308 break;
309 case MPI_PORTFACTS_PORTTYPE_SAS0x30:
310 if (mpi_cfg_sas(sc) != 0) {
311 printf("%s: unable to configure sas\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
312 goto free_replies;
313 }
314 break;
315 case MPI_PORTFACTS_PORTTYPE_FC0x10:
316 if (mpi_cfg_fc(sc) != 0) {
317 printf("%s: unable to configure fc\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
318 goto free_replies;
319 }
320 break;
321 }
322
323 /* get raid pages */
324 mpi_get_raid(sc);
325#if NBIO1 > 0
326 if (sc->sc_flags & MPI_F_RAID(1<<1)) {
327 if (bio_register(&sc->sc_dev, mpi_ioctl) != 0)
328 panic("%s: controller registration failed",
329 DEVNAME(sc)((sc)->sc_dev.dv_xname));
330 else {
331 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC,mpi_req_cfg_header((sc), ((0x01)), (2), (0), (1<<1), (&
sc->sc_cfg_hdr))
332 2, 0, &sc->sc_cfg_hdr)mpi_req_cfg_header((sc), ((0x01)), (2), (0), (1<<1), (&
sc->sc_cfg_hdr))
!= 0) {
333 panic("%s: can't get IOC page 2 hdr",
334 DEVNAME(sc)((sc)->sc_dev.dv_xname));
335 }
336
337 sc->sc_vol_page = mallocarray(sc->sc_cfg_hdr.page_length,
338 4, M_TEMP127, M_WAITOK0x0001 | M_CANFAIL0x0004);
339 if (sc->sc_vol_page == NULL((void *)0)) {
340 panic("%s: can't get memory for IOC page 2, "
341 "bio disabled", DEVNAME(sc)((sc)->sc_dev.dv_xname));
342 }
343
344 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1,mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr
), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length *
4))
345 sc->sc_vol_page,mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr
), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length *
4))
346 sc->sc_cfg_hdr.page_length * 4)mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr
), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length *
4))
!= 0) {
347 panic("%s: can't get IOC page 2", DEVNAME(sc)((sc)->sc_dev.dv_xname));
348 }
349
350 sc->sc_vol_list = (struct mpi_cfg_raid_vol *)
351 (sc->sc_vol_page + 1);
352
353 sc->sc_ioctl = mpi_ioctl;
354 }
355 }
356#endif /* NBIO > 0 */
357
358 saa.saa_adapter = &mpi_switch;
359 saa.saa_adapter_softc = sc;
360 saa.saa_adapter_target = sc->sc_target;
361 saa.saa_adapter_buswidth = sc->sc_buswidth;
362 saa.saa_luns = 8;
363 saa.saa_openings = MAX(sc->sc_maxcmds / sc->sc_buswidth, 16)(((sc->sc_maxcmds / sc->sc_buswidth)>(16))?(sc->sc_maxcmds
/ sc->sc_buswidth):(16))
;
364 saa.saa_pool = &sc->sc_iopool;
365 saa.saa_wwpn = sc->sc_port_wwn;
366 saa.saa_wwnn = sc->sc_node_wwn;
367 saa.saa_quirks = saa.saa_flags = 0;
368
369 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
370 &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
;
371
372 /* do domain validation */
373 if (sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_SCSI0x01)
374 mpi_run_ppr(sc);
375
376 /* enable interrupts */
377 mpi_write(sc, MPI_INTR_MASK0x34, MPI_INTR_MASK_DOORBELL(1<<0));
378
379#if NBIO1 > 0
380#ifndef SMALL_KERNEL
381 mpi_create_sensors(sc);
382#endif /* SMALL_KERNEL */
383#endif /* NBIO > 0 */
384
385 return (0);
386
387free_replies:
388 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x02)
)
389 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x02)
)
;
390 mpi_dmamem_free(sc, sc->sc_replies);
391free_ccbs:
392 while ((ccb = mpi_get_ccb(sc)) != NULL((void *)0))
393 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
394 mpi_dmamem_free(sc, sc->sc_requests);
395 free(sc->sc_ccbs, M_DEVBUF2, 0);
396
397 return(1);
398}
399
400int
401mpi_cfg_spi_port(struct mpi_softc *sc)
402{
403 struct mpi_cfg_hdr hdr;
404 struct mpi_cfg_spi_port_pg1 port;
405
406 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 1, 0x0,mpi_req_cfg_header((sc), ((0x03)), (1), (0x0), (1<<1), (
&hdr))
407 &hdr)mpi_req_cfg_header((sc), ((0x03)), (1), (0x0), (1<<1), (
&hdr))
!= 0)
408 return (1);
409
410 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port, sizeof(port))mpi_req_cfg_page((sc), (0x0), (1<<1), (&hdr), (1), (
&port), (sizeof(port)))
!= 0)
411 return (1);
412
413 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_spi_port_pg1\n", DEVNAME(sc));
414 DNPRINTF(MPI_D_MISC, "%s: port_scsi_id: %d port_resp_ids 0x%04x\n",
415 DEVNAME(sc), port.port_scsi_id, letoh16(port.port_resp_ids));
416 DNPRINTF(MPI_D_MISC, "%s: on_bus_timer_value: 0x%08x\n", DEVNAME(sc),
417 letoh32(port.port_scsi_id));
418 DNPRINTF(MPI_D_MISC, "%s: target_config: 0x%02x id_config: 0x%04x\n",
419 DEVNAME(sc), port.target_config, letoh16(port.id_config));
420
421 if (port.port_scsi_id == sc->sc_target &&
422 port.port_resp_ids == htole16(1 << sc->sc_target)((__uint16_t)(1 << sc->sc_target)) &&
423 port.on_bus_timer_value != htole32(0x0)((__uint32_t)(0x0)))
424 return (0);
425
426 DNPRINTF(MPI_D_MISC, "%s: setting port scsi id to %d\n", DEVNAME(sc),
427 sc->sc_target);
428 port.port_scsi_id = sc->sc_target;
429 port.port_resp_ids = htole16(1 << sc->sc_target)((__uint16_t)(1 << sc->sc_target));
430 port.on_bus_timer_value = htole32(0x07000000)((__uint32_t)(0x07000000)); /* XXX magic */
431
432 if (mpi_cfg_page(sc, 0x0, &hdr, 0, &port, sizeof(port))mpi_req_cfg_page((sc), (0x0), (1<<1), (&hdr), (0), (
&port), (sizeof(port)))
!= 0) {
433 printf("%s: unable to configure port scsi id\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
434 return (1);
435 }
436
437 return (0);
438}
439
440void
441mpi_squash_ppr(struct mpi_softc *sc)
442{
443 struct mpi_cfg_hdr hdr;
444 struct mpi_cfg_spi_dev_pg1 page;
445 int i;
446
447 DNPRINTF(MPI_D_PPR, "%s: mpi_squash_ppr\n", DEVNAME(sc));
448
449 for (i = 0; i < sc->sc_buswidth; i++) {
450 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV,mpi_req_cfg_header((sc), ((0x04)), (1), (i), (1<<1), (&
hdr))
451 1, i, &hdr)mpi_req_cfg_header((sc), ((0x04)), (1), (i), (1<<1), (&
hdr))
!= 0)
452 return;
453
454 if (mpi_cfg_page(sc, i, &hdr, 1, &page, sizeof(page))mpi_req_cfg_page((sc), (i), (1<<1), (&hdr), (1), (&
page), (sizeof(page)))
!= 0)
455 return;
456
457 DNPRINTF(MPI_D_PPR, "%s: target: %d req_params1: 0x%02x "
458 "req_offset: 0x%02x req_period: 0x%02x "
459 "req_params2: 0x%02x conf: 0x%08x\n", DEVNAME(sc), i,
460 page.req_params1, page.req_offset, page.req_period,
461 page.req_params2, letoh32(page.configuration));
462
463 page.req_params1 = 0x0;
464 page.req_offset = 0x0;
465 page.req_period = 0x0;
466 page.req_params2 = 0x0;
467 page.configuration = htole32(0x0)((__uint32_t)(0x0));
468
469 if (mpi_cfg_page(sc, i, &hdr, 0, &page, sizeof(page))mpi_req_cfg_page((sc), (i), (1<<1), (&hdr), (0), (&
page), (sizeof(page)))
!= 0)
470 return;
471 }
472}
473
474void
475mpi_run_ppr(struct mpi_softc *sc)
476{
477 struct mpi_cfg_hdr hdr;
478 struct mpi_cfg_spi_port_pg0 port_pg;
479 struct mpi_cfg_ioc_pg3 *physdisk_pg;
480 struct mpi_cfg_raid_physdisk *physdisk_list, *physdisk;
481 size_t pagelen;
482 struct scsi_link *link;
483 int i, tries;
484
485 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_PORT, 0, 0x0,mpi_req_cfg_header((sc), ((0x03)), (0), (0x0), (1<<1), (
&hdr))
486 &hdr)mpi_req_cfg_header((sc), ((0x03)), (0), (0x0), (1<<1), (
&hdr))
!= 0) {
487 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch header\n",
488 DEVNAME(sc));
489 return;
490 }
491
492 if (mpi_cfg_page(sc, 0x0, &hdr, 1, &port_pg, sizeof(port_pg))mpi_req_cfg_page((sc), (0x0), (1<<1), (&hdr), (1), (
&port_pg), (sizeof(port_pg)))
!= 0) {
493 DNPRINTF(MPI_D_PPR, "%s: mpi_run_ppr unable to fetch page\n",
494 DEVNAME(sc));
495 return;
496 }
497
498 for (i = 0; i < sc->sc_buswidth; i++) {
499 link = scsi_get_link(sc->sc_scsibus, i, 0);
500 if (link == NULL((void *)0))
501 continue;
502
503 /* do not ppr volumes */
504 if (link->flags & SDEV_VIRTUAL0x0800)
505 continue;
506
507 tries = 0;
508 while (mpi_ppr(sc, link, NULL((void *)0), port_pg.min_period,
509 port_pg.max_offset, tries) == EAGAIN35)
510 tries++;
511 }
512
513 if ((sc->sc_flags & MPI_F_RAID(1<<1)) == 0)
514 return;
515
516 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 3, 0x0,mpi_req_cfg_header((sc), ((0x01)), (3), (0x0), (1<<1), (
&hdr))
517 &hdr)mpi_req_cfg_header((sc), ((0x01)), (3), (0x0), (1<<1), (
&hdr))
!= 0) {
518 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
519 "fetch ioc pg 3 header\n", DEVNAME(sc));
520 return;
521 }
522
523 pagelen = hdr.page_length * 4; /* dwords to bytes */
524 physdisk_pg = malloc(pagelen, M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004);
525 if (physdisk_pg == NULL((void *)0)) {
526 DNPRINTF(MPI_D_RAID|MPI_D_PPR, "%s: mpi_run_ppr unable to "
527 "allocate ioc pg 3\n", DEVNAME(sc));
528 return;
529 }
530 physdisk_list = (struct mpi_cfg_raid_physdisk *)(physdisk_pg + 1);
531
532 if (mpi_cfg_page(sc, 0, &hdr, 1, physdisk_pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (physdisk_pg
), (pagelen))
!= 0) {
533 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: mpi_run_ppr unable to "
534 "fetch ioc page 3\n", DEVNAME(sc));
535 goto out;
536 }
537
538 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: no_phys_disks: %d\n", DEVNAME(sc),
539 physdisk_pg->no_phys_disks);
540
541 for (i = 0; i < physdisk_pg->no_phys_disks; i++) {
542 physdisk = &physdisk_list[i];
543
544 DNPRINTF(MPI_D_PPR|MPI_D_PPR, "%s: id: %d bus: %d ioc: %d "
545 "num: %d\n", DEVNAME(sc), physdisk->phys_disk_id,
546 physdisk->phys_disk_bus, physdisk->phys_disk_ioc,
547 physdisk->phys_disk_num);
548
549 if (physdisk->phys_disk_ioc != sc->sc_ioc_number)
550 continue;
551
552 tries = 0;
553 while (mpi_ppr(sc, NULL((void *)0), physdisk, port_pg.min_period,
554 port_pg.max_offset, tries) == EAGAIN35)
555 tries++;
556 }
557
558out:
559 free(physdisk_pg, M_TEMP127, pagelen);
560}
561
562int
563mpi_ppr(struct mpi_softc *sc, struct scsi_link *link,
564 struct mpi_cfg_raid_physdisk *physdisk, int period, int offset, int try)
565{
566 struct mpi_cfg_hdr hdr0, hdr1;
567 struct mpi_cfg_spi_dev_pg0 pg0;
568 struct mpi_cfg_spi_dev_pg1 pg1;
569 u_int32_t address;
570 int id;
571 int raid = 0;
572
573 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr period: %d offset: %d try: %d "
574 "link quirks: 0x%x\n", DEVNAME(sc), period, offset, try,
575 link->quirks);
576
577 if (try >= 3)
578 return (EIO5);
579
580 if (physdisk == NULL((void *)0)) {
581 if ((link->inqdata.device & SID_TYPE0x1f) == T_PROCESSOR0x03)
582 return (EIO5);
583
584 address = link->target;
585 id = link->target;
586 } else {
587 raid = 1;
588 address = (physdisk->phys_disk_bus << 8) |
589 (physdisk->phys_disk_id);
590 id = physdisk->phys_disk_num;
591 }
592
593 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 0,mpi_req_cfg_header((sc), ((0x04)), (0), (address), (1<<
1), (&hdr0))
594 address, &hdr0)mpi_req_cfg_header((sc), ((0x04)), (0), (address), (1<<
1), (&hdr0))
!= 0) {
595 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 0\n",
596 DEVNAME(sc));
597 return (EIO5);
598 }
599
600 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_SCSI_SPI_DEV, 1,mpi_req_cfg_header((sc), ((0x04)), (1), (address), (1<<
1), (&hdr1))
601 address, &hdr1)mpi_req_cfg_header((sc), ((0x04)), (1), (address), (1<<
1), (&hdr1))
!= 0) {
602 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch header 1\n",
603 DEVNAME(sc));
604 return (EIO5);
605 }
606
607#ifdef MPI_DEBUG
608 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr0), (
1), (&pg0), (sizeof(pg0)))
!= 0) {
609 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 0\n",
610 DEVNAME(sc));
611 return (EIO5);
612 }
613
614 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
615 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
616 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
617 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
618#endif
619
620 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr1), (
1), (&pg1), (sizeof(pg1)))
!= 0) {
621 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to fetch page 1\n",
622 DEVNAME(sc));
623 return (EIO5);
624 }
625
626 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
627 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
628 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
629 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
630
631 pg1.req_params1 = 0;
632 pg1.req_offset = offset;
633 pg1.req_period = period;
634 pg1.req_params2 &= ~MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH(1<<5);
635
636 if (raid || !(link->quirks & SDEV_NOSYNC0x0002)) {
637 pg1.req_params2 |= MPI_CFG_SPI_DEV_1_REQPARAMS_WIDTH_WIDE(1<<5);
638
639 switch (try) {
640 case 0: /* U320 */
641 break;
642 case 1: /* U160 */
643 pg1.req_period = 0x09;
644 break;
645 case 2: /* U80 */
646 pg1.req_period = 0x0a;
647 break;
648 }
649
650 if (pg1.req_period < 0x09) {
651 /* Ultra320: enable QAS & PACKETIZED */
652 pg1.req_params1 |= MPI_CFG_SPI_DEV_1_REQPARAMS_QAS(1<<2) |
653 MPI_CFG_SPI_DEV_1_REQPARAMS_PACKETIZED(1<<0);
654 }
655 if (pg1.req_period < 0xa) {
656 /* >= Ultra160: enable dual xfers */
657 pg1.req_params1 |=
658 MPI_CFG_SPI_DEV_1_REQPARAMS_DUALXFERS(1<<1);
659 }
660 }
661
662 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
663 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
664 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
665 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
666
667 if (mpi_cfg_page(sc, address, &hdr1, 0, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr1), (
0), (&pg1), (sizeof(pg1)))
!= 0) {
668 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to write page 1\n",
669 DEVNAME(sc));
670 return (EIO5);
671 }
672
673 if (mpi_cfg_page(sc, address, &hdr1, 1, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr1), (
1), (&pg1), (sizeof(pg1)))
!= 0) {
674 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 1\n",
675 DEVNAME(sc));
676 return (EIO5);
677 }
678
679 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 1 req_params1: 0x%02x "
680 "req_offset: 0x%02x req_period: 0x%02x req_params2: 0x%02x "
681 "conf: 0x%08x\n", DEVNAME(sc), pg1.req_params1, pg1.req_offset,
682 pg1.req_period, pg1.req_params2, letoh32(pg1.configuration));
683
684 if (mpi_inq(sc, id, raid) != 0) {
685 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to do inquiry against "
686 "target %d\n", DEVNAME(sc), link->target);
687 return (EIO5);
688 }
689
690 if (mpi_cfg_page(sc, address, &hdr0, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (address), (1<<1), (&hdr0), (
1), (&pg0), (sizeof(pg0)))
!= 0) {
691 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr unable to read page 0 after "
692 "inquiry\n", DEVNAME(sc));
693 return (EIO5);
694 }
695
696 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr dev pg 0 neg_params1: 0x%02x "
697 "neg_offset: %d neg_period: 0x%02x neg_params2: 0x%02x "
698 "info: 0x%08x\n", DEVNAME(sc), pg0.neg_params1, pg0.neg_offset,
699 pg0.neg_period, pg0.neg_params2, letoh32(pg0.information));
700
701 if (!(lemtoh32(&pg0.information)((__uint32_t)(*(__uint32_t *)(&pg0.information))) & 0x07) && (try == 0)) {
702 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U320 ppr rejected\n",
703 DEVNAME(sc));
704 return (EAGAIN35);
705 }
706
707 if ((((lemtoh32(&pg0.information)((__uint32_t)(*(__uint32_t *)(&pg0.information))) >> 8) & 0xff) > 0x09) && (try == 1)) {
708 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr U160 ppr rejected\n",
709 DEVNAME(sc));
710 return (EAGAIN35);
711 }
712
713 if (lemtoh32(&pg0.information)((__uint32_t)(*(__uint32_t *)(&pg0.information))) & 0x0e) {
714 DNPRINTF(MPI_D_PPR, "%s: mpi_ppr ppr rejected: %0x\n",
715 DEVNAME(sc), lemtoh32(&pg0.information));
716 return (EAGAIN35);
717 }
718
719 switch(pg0.neg_period) {
720 case 0x08:
721 period = 160;
722 break;
723 case 0x09:
724 period = 80;
725 break;
726 case 0x0a:
727 period = 40;
728 break;
729 case 0x0b:
730 period = 20;
731 break;
732 case 0x0c:
733 period = 10;
734 break;
735 default:
736 period = 0;
737 break;
738 }
739
740 printf("%s: %s %d %s at %dMHz width %dbit offset %d "
741 "QAS %d DT %d IU %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), raid ? "phys disk" : "target",
742 id, period ? "Sync" : "Async", period,
743 (pg0.neg_params2 & MPI_CFG_SPI_DEV_0_NEGPARAMS_WIDTH_WIDE(1<<5)) ? 16 : 8,
744 pg0.neg_offset,
745 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_QAS(1<<2)) ? 1 : 0,
746 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_DUALXFERS(1<<1)) ? 1 : 0,
747 (pg0.neg_params1 & MPI_CFG_SPI_DEV_0_NEGPARAMS_PACKETIZED(1<<0)) ? 1 : 0);
748
749 return (0);
750}
751
752int
753mpi_inq(struct mpi_softc *sc, u_int16_t target, int physdisk)
754{
755 struct mpi_ccb *ccb;
756 struct scsi_inquiry inq;
757 struct inq_bundle {
758 struct mpi_msg_scsi_io io;
759 struct mpi_sge sge;
760 struct scsi_inquiry_data inqbuf;
761 struct scsi_sense_data sense;
762 } __packed__attribute__((__packed__)) *bundle;
763 struct mpi_msg_scsi_io *io;
764 struct mpi_sge *sge;
765
766 DNPRINTF(MPI_D_PPR, "%s: mpi_inq\n", DEVNAME(sc));
767
768 memset(&inq, 0, sizeof(inq))__builtin_memset((&inq), (0), (sizeof(inq)));
769 inq.opcode = INQUIRY0x12;
770 _lto2b(sizeof(struct scsi_inquiry_data), inq.length);
771
772 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
773 if (ccb == NULL((void *)0))
774 return (1);
775
776 ccb->ccb_done = mpi_empty_done;
777
778 bundle = ccb->ccb_cmd;
779 io = &bundle->io;
780 sge = &bundle->sge;
781
782 io->function = physdisk ? MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH(0x16) :
783 MPI_FUNCTION_SCSI_IO_REQUEST(0x00);
784 /*
785 * bus is always 0
786 * io->bus = htole16(sc->sc_bus);
787 */
788 io->target_id = target;
789
790 io->cdb_length = sizeof(inq);
791 io->sense_buf_len = sizeof(struct scsi_sense_data);
792 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64(1<<0);
793
794 /*
795 * always lun 0
796 * io->lun[0] = htobe16(link->lun);
797 */
798
799 io->direction = MPI_SCSIIO_DIR_READ(0x2);
800 io->tagging = MPI_SCSIIO_ATTR_NO_DISCONNECT(0x7);
801
802 memcpy(io->cdb, &inq, sizeof(inq))__builtin_memcpy((io->cdb), (&inq), (sizeof(inq)));
803
804 htolem32(&io->data_length, sizeof(struct scsi_inquiry_data))(*(__uint32_t *)(&io->data_length) = ((__uint32_t)(sizeof
(struct scsi_inquiry_data))))
;
805
806 htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t
)(ccb->ccb_cmd_dva + __builtin_offsetof(struct inq_bundle,
sense))))
807 offsetof(struct inq_bundle, sense))(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t
)(ccb->ccb_cmd_dva + __builtin_offsetof(struct inq_bundle,
sense))))
;
808
809 htolem32(&sge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE | MPI_SGE_FL_SIZE_64 |(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)((0x1<<
28) | (0x1<<25) | (0x1<<31) | (0x1<<30) | (
0x1<<24) | (u_int32_t)sizeof(inq))))
810 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)((0x1<<
28) | (0x1<<25) | (0x1<<31) | (0x1<<30) | (
0x1<<24) | (u_int32_t)sizeof(inq))))
811 (u_int32_t)sizeof(inq))(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)((0x1<<
28) | (0x1<<25) | (0x1<<31) | (0x1<<30) | (
0x1<<24) | (u_int32_t)sizeof(inq))))
;
812
813 mpi_dvatosge(sge, ccb->ccb_cmd_dva +
814 offsetof(struct inq_bundle, inqbuf)__builtin_offsetof(struct inq_bundle, inqbuf));
815
816 if (mpi_poll(sc, ccb, 5000) != 0)
817 return (1);
818
819 if (ccb->ccb_rcb != NULL((void *)0))
820 mpi_push_reply(sc, ccb->ccb_rcb);
821
822 scsi_io_put(&sc->sc_iopool, ccb);
823
824 return (0);
825}
826
827int
828mpi_cfg_sas(struct mpi_softc *sc)
829{
830 struct mpi_ecfg_hdr ehdr;
831 struct mpi_cfg_sas_iou_pg1 *pg;
832 size_t pagelen;
833 int rv = 0;
834
835 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_IO_UNIT, 1, 0,mpi_req_cfg_header((sc), ((0x10)), (1), (0), (1<<1)|(1<<
0), (&ehdr))
836 &ehdr)mpi_req_cfg_header((sc), ((0x10)), (1), (0), (1<<1)|(1<<
0), (&ehdr))
!= 0)
837 return (0);
838
839 pagelen = lemtoh16(&ehdr.ext_page_length)((__uint16_t)(*(__uint16_t *)(&ehdr.ext_page_length))) * 4;
840 pg = malloc(pagelen, M_TEMP127, M_NOWAIT0x0002 | M_ZERO0x0008);
841 if (pg == NULL((void *)0))
842 return (ENOMEM12);
843
844 if (mpi_ecfg_page(sc, 0, &ehdr, 1, pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1)|(1<<0), (&
ehdr), (1), (pg), (pagelen))
!= 0)
845 goto out;
846
847 if (pg->max_sata_q_depth != 32) {
848 pg->max_sata_q_depth = 32;
849
850 if (mpi_ecfg_page(sc, 0, &ehdr, 0, pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1)|(1<<0), (&
ehdr), (0), (pg), (pagelen))
!= 0)
851 goto out;
852 }
853
854out:
855 free(pg, M_TEMP127, pagelen);
856 return (rv);
857}
858
859int
860mpi_cfg_fc(struct mpi_softc *sc)
861{
862 struct mpi_cfg_hdr hdr;
863 struct mpi_cfg_fc_port_pg0 pg0;
864 struct mpi_cfg_fc_port_pg1 pg1;
865
866 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 0, 0,mpi_req_cfg_header((sc), ((0x05)), (0), (0), (1<<1), (&
hdr))
867 &hdr)mpi_req_cfg_header((sc), ((0x05)), (0), (0), (1<<1), (&
hdr))
!= 0) {
868 printf("%s: unable to fetch FC port header 0\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
869 return (1);
870 }
871
872 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (&
pg0), (sizeof(pg0)))
!= 0) {
873 printf("%s: unable to fetch FC port page 0\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
874 return (1);
875 }
876
877 sc->sc_port_wwn = letoh64(pg0.wwpn)((__uint64_t)(pg0.wwpn));
878 sc->sc_node_wwn = letoh64(pg0.wwnn)((__uint64_t)(pg0.wwnn));
879
880 /* configure port config more to our liking */
881 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_PORT, 1, 0,mpi_req_cfg_header((sc), ((0x05)), (1), (0), (1<<1), (&
hdr))
882 &hdr)mpi_req_cfg_header((sc), ((0x05)), (1), (0), (1<<1), (&
hdr))
!= 0) {
883 printf("%s: unable to fetch FC port header 1\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
884 return (1);
885 }
886
887 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (&
pg1), (sizeof(pg1)))
!= 0) {
888 printf("%s: unable to fetch FC port page 1\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
889 return (1);
890 }
891
892 SET(pg1.flags, htole32(MPI_CFG_FC_PORT_0_FLAGS_IMMEDIATE_ERROR |((pg1.flags) |= (((__uint32_t)((1<<26) | (1<<24))
)))
893 MPI_CFG_FC_PORT_0_FLAGS_VERBOSE_RESCAN))((pg1.flags) |= (((__uint32_t)((1<<26) | (1<<24))
)))
;
894
895 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg1, sizeof(pg1))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (0), (&
pg1), (sizeof(pg1)))
!= 0) {
896 printf("%s: unable to set FC port page 1\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
897 return (1);
898 }
899
900 return (0);
901}
902
void
mpi_detach(struct mpi_softc *sc)
{
	/* Intentionally empty: no per-device teardown is implemented. */
}
908
909int
910mpi_intr(void *arg)
911{
912 struct mpi_softc *sc = arg;
913 u_int32_t reg;
914 int rv = 0;
915
916 if ((mpi_read_intr(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x30))) & MPI_INTR_STATUS_REPLY(1<<3)) == 0)
917 return (rv);
918
919 while ((reg = mpi_pop_reply(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x44)))) != 0xffffffff) {
920 mpi_reply(sc, reg);
921 rv = 1;
922 }
923
924 return (rv);
925}
926
927void
928mpi_reply(struct mpi_softc *sc, u_int32_t reg)
929{
930 struct mpi_ccb *ccb;
931 struct mpi_rcb *rcb = NULL((void *)0);
932 struct mpi_msg_reply *reply = NULL((void *)0);
933 u_int32_t reply_dva;
934 int id;
935 int i;
936
937 DNPRINTF(MPI_D_INTR, "%s: mpi_reply reg: 0x%08x\n", DEVNAME(sc), reg);
938
939 if (reg & MPI_REPLY_QUEUE_ADDRESS(1<<31)) {
940 reply_dva = (reg & MPI_REPLY_QUEUE_ADDRESS_MASK0x7fffffff) << 1;
941 i = (reply_dva - (u_int32_t)MPI_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
) /
942 MPI_REPLY_SIZE80;
943 rcb = &sc->sc_rcbs[i];
944
945 bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x02)
)
946 MPI_DMA_MAP(sc->sc_replies), rcb->rcb_offset,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x02)
)
947 MPI_REPLY_SIZE, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x02)
)
;
948
949 reply = rcb->rcb_reply;
950
951 id = lemtoh32(&reply->msg_context)((__uint32_t)(*(__uint32_t *)(&reply->msg_context)));
952 } else {
953 switch (reg & MPI_REPLY_QUEUE_TYPE_MASK(3<<29)) {
954 case MPI_REPLY_QUEUE_TYPE_INIT(0<<29):
955 id = reg & MPI_REPLY_QUEUE_CONTEXT0x1fffffff;
956 break;
957
958 default:
959 panic("%s: unsupported context reply",
960 DEVNAME(sc)((sc)->sc_dev.dv_xname));
961 }
962 }
963
964 DNPRINTF(MPI_D_INTR, "%s: mpi_reply id: %d reply: %p\n",
965 DEVNAME(sc), id, reply);
966
967 ccb = &sc->sc_ccbs[id];
968
969 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x02
| 0x08))
970 ccb->ccb_offset, MPI_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x02
| 0x08))
971 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x02
| 0x08))
;
972 ccb->ccb_state = MPI_CCB_READY;
973 ccb->ccb_rcb = rcb;
974
975 ccb->ccb_done(ccb);
976}
977
978struct mpi_dmamem *
979mpi_dmamem_alloc(struct mpi_softc *sc, size_t size)
980{
981 struct mpi_dmamem *mdm;
982 int nsegs;
983
984 mdm = malloc(sizeof(struct mpi_dmamem), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
985 if (mdm == NULL((void *)0))
986 return (NULL((void *)0));
987
988 mdm->mdm_size = size;
989
990 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&mdm->mdm_map
))
991 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&mdm->mdm_map
))
!= 0)
992 goto mdmfree;
993
994 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&mdm->mdm_seg), (1), (&
nsegs), (0x0001 | 0x1000))
995 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&mdm->mdm_seg), (1), (&
nsegs), (0x0001 | 0x1000))
!= 0)
996 goto destroy;
997
998 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&mdm
->mdm_seg), (nsegs), (size), (&mdm->mdm_kva), (0x0001
))
999 &mdm->mdm_kva, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&mdm
->mdm_seg), (nsegs), (size), (&mdm->mdm_kva), (0x0001
))
!= 0)
1000 goto free;
1001
1002 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (mdm->
mdm_map), (mdm->mdm_kva), (size), (((void *)0)), (0x0001))
1003 NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (mdm->
mdm_map), (mdm->mdm_kva), (size), (((void *)0)), (0x0001))
!= 0)
1004 goto unmap;
1005
1006 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_alloc size: %d mdm: %#x "
1007 "map: %#x nsegs: %d segs: %#x kva: %x\n",
1008 DEVNAME(sc), size, mdm->mdm_map, nsegs, mdm->mdm_seg, mdm->mdm_kva);
1009
1010 return (mdm);
1011
1012unmap:
1013 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (mdm->
mdm_kva), (size))
;
1014free:
1015 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
mdm->mdm_seg), (1))
;
1016destroy:
1017 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (mdm
->mdm_map))
;
1018mdmfree:
1019 free(mdm, M_DEVBUF2, sizeof *mdm);
1020
1021 return (NULL((void *)0));
1022}
1023
1024void
1025mpi_dmamem_free(struct mpi_softc *sc, struct mpi_dmamem *mdm)
1026{
1027 DNPRINTF(MPI_D_MEM, "%s: mpi_dmamem_free %#x\n", DEVNAME(sc), mdm);
1028
1029 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (mdm
->mdm_map))
;
1030 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (mdm->
mdm_kva), (mdm->mdm_size))
;
1031 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
mdm->mdm_seg), (1))
;
1032 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (mdm
->mdm_map))
;
1033 free(mdm, M_DEVBUF2, sizeof *mdm);
1034}
1035
1036int
1037mpi_alloc_ccbs(struct mpi_softc *sc)
1038{
1039 struct mpi_ccb *ccb;
1040 u_int8_t *cmd;
1041 int i;
1042
1043 SLIST_INIT(&sc->sc_ccb_free){ ((&sc->sc_ccb_free)->slh_first) = ((void *)0); };
1044 mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_ccb_mtx), ((((0x3)) > 0x0 && ((0x3)) < 0x9) ?
0x9 : ((0x3)))); } while (0)
;
1045
1046 sc->sc_ccbs = mallocarray(sc->sc_maxcmds, sizeof(struct mpi_ccb),
1047 M_DEVBUF2, M_WAITOK0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008);
1048 if (sc->sc_ccbs == NULL((void *)0)) {
1049 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1050 return (1);
1051 }
1052
1053 sc->sc_requests = mpi_dmamem_alloc(sc,
1054 MPI_REQUEST_SIZE512 * sc->sc_maxcmds);
1055 if (sc->sc_requests == NULL((void *)0)) {
1056 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1057 goto free_ccbs;
1058 }
1059 cmd = MPI_DMA_KVA(sc->sc_requests)((void *)(sc->sc_requests)->mdm_kva);
1060 memset(cmd, 0, MPI_REQUEST_SIZE * sc->sc_maxcmds)__builtin_memset((cmd), (0), (512 * sc->sc_maxcmds));
1061
1062 for (i = 0; i < sc->sc_maxcmds; i++) {
1063 ccb = &sc->sc_ccbs[i];
1064
1065 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
1066 sc->sc_max_sgl_len, MAXPHYS, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
1067 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
1068 &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl_len), ((64 * 1024)), (0), (0x0001
| 0x0002), (&ccb->ccb_dmamap))
!= 0) {
1069 printf("%s: unable to create dma map\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1070 goto free_maps;
1071 }
1072
1073 ccb->ccb_sc = sc;
1074 ccb->ccb_id = i;
1075 ccb->ccb_offset = MPI_REQUEST_SIZE512 * i;
1076 ccb->ccb_state = MPI_CCB_READY;
1077
1078 ccb->ccb_cmd = &cmd[ccb->ccb_offset];
1079 ccb->ccb_cmd_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_requests)((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0].ds_addr
)
+
1080 ccb->ccb_offset;
1081
1082 DNPRINTF(MPI_D_CCB, "%s: mpi_alloc_ccbs(%d) ccb: %#x map: %#x "
1083 "sc: %#x id: %#x offs: %#x cmd: %#x dva: %#x\n",
1084 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
1085 ccb->ccb_id, ccb->ccb_offset, ccb->ccb_cmd,
1086 ccb->ccb_cmd_dva);
1087
1088 mpi_put_ccb(sc, ccb);
1089 }
1090
1091 scsi_iopool_init(&sc->sc_iopool, sc, mpi_get_ccb, mpi_put_ccb);
1092
1093 return (0);
1094
1095free_maps:
1096 while ((ccb = mpi_get_ccb(sc)) != NULL((void *)0))
1097 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
1098
1099 mpi_dmamem_free(sc, sc->sc_requests);
1100free_ccbs:
1101 free(sc->sc_ccbs, M_DEVBUF2, 0);
1102
1103 return (1);
1104}
1105
1106void *
1107mpi_get_ccb(void *xsc)
1108{
1109 struct mpi_softc *sc = xsc;
1110 struct mpi_ccb *ccb;
1111
1112 mtx_enter(&sc->sc_ccb_mtx);
1113 ccb = SLIST_FIRST(&sc->sc_ccb_free)((&sc->sc_ccb_free)->slh_first);
1114 if (ccb != NULL((void *)0)) {
1115 SLIST_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link)do { (&sc->sc_ccb_free)->slh_first = (&sc->sc_ccb_free
)->slh_first->ccb_link.sle_next; } while (0)
;
1116 ccb->ccb_state = MPI_CCB_READY;
1117 }
1118 mtx_leave(&sc->sc_ccb_mtx);
1119
1120 DNPRINTF(MPI_D_CCB, "%s: mpi_get_ccb %p\n", DEVNAME(sc), ccb);
1121
1122 return (ccb);
1123}
1124
1125void
1126mpi_put_ccb(void *xsc, void *io)
1127{
1128 struct mpi_softc *sc = xsc;
1129 struct mpi_ccb *ccb = io;
1130
1131 DNPRINTF(MPI_D_CCB, "%s: mpi_put_ccb %p\n", DEVNAME(sc), ccb);
1132
1133#ifdef DIAGNOSTIC1
1134 if (ccb->ccb_state == MPI_CCB_FREE)
1135 panic("mpi_put_ccb: double free");
1136#endif
1137
1138 ccb->ccb_state = MPI_CCB_FREE;
1139 ccb->ccb_cookie = NULL((void *)0);
1140 ccb->ccb_done = NULL((void *)0);
1141 memset(ccb->ccb_cmd, 0, MPI_REQUEST_SIZE)__builtin_memset((ccb->ccb_cmd), (0), (512));
1142 mtx_enter(&sc->sc_ccb_mtx);
1143 SLIST_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link)do { (ccb)->ccb_link.sle_next = (&sc->sc_ccb_free)->
slh_first; (&sc->sc_ccb_free)->slh_first = (ccb); }
while (0)
;
1144 mtx_leave(&sc->sc_ccb_mtx);
1145}
1146
1147int
1148mpi_alloc_replies(struct mpi_softc *sc)
1149{
1150 DNPRINTF(MPI_D_MISC, "%s: mpi_alloc_replies\n", DEVNAME(sc));
1151
1152 sc->sc_rcbs = mallocarray(sc->sc_repq, sizeof(struct mpi_rcb), M_DEVBUF2,
1153 M_WAITOK0x0001|M_CANFAIL0x0004);
1154 if (sc->sc_rcbs == NULL((void *)0))
1155 return (1);
1156
1157 sc->sc_replies = mpi_dmamem_alloc(sc, sc->sc_repq * MPI_REPLY_SIZE80);
1158 if (sc->sc_replies == NULL((void *)0)) {
1159 free(sc->sc_rcbs, M_DEVBUF2, 0);
1160 return (1);
1161 }
1162
1163 return (0);
1164}
1165
1166void
1167mpi_push_reply(struct mpi_softc *sc, struct mpi_rcb *rcb)
1168{
1169 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x01)
)
1170 rcb->rcb_offset, MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (rcb->rcb_offset), (80), (0x01)
)
;
1171 mpi_push_reply_db(sc, rcb->rcb_reply_dva)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), (0x44), ((rcb
->rcb_reply_dva))))
;
1172}
1173
1174void
1175mpi_push_replies(struct mpi_softc *sc)
1176{
1177 struct mpi_rcb *rcb;
1178 char *kva = MPI_DMA_KVA(sc->sc_replies)((void *)(sc->sc_replies)->mdm_kva);
1179 int i;
1180
1181 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_replies), 0,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x01)
)
1182 sc->sc_repq * MPI_REPLY_SIZE, BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_repq * 80), (0x01)
)
;
1183
1184 for (i = 0; i < sc->sc_repq; i++) {
1185 rcb = &sc->sc_rcbs[i];
1186
1187 rcb->rcb_reply = kva + MPI_REPLY_SIZE80 * i;
1188 rcb->rcb_offset = MPI_REPLY_SIZE80 * i;
1189 rcb->rcb_reply_dva = (u_int32_t)MPI_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
+
1190 MPI_REPLY_SIZE80 * i;
1191 mpi_push_reply_db(sc, rcb->rcb_reply_dva)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), (0x44), ((rcb
->rcb_reply_dva))))
;
1192 }
1193}
1194
1195void
1196mpi_start(struct mpi_softc *sc, struct mpi_ccb *ccb)
1197{
1198 struct mpi_msg_request *msg;
1199
1200 DNPRINTF(MPI_D_RW, "%s: mpi_start %#x\n", DEVNAME(sc),
1201 ccb->ccb_cmd_dva);
1202
1203 msg = ccb->ccb_cmd;
1204 htolem32(&msg->msg_context, ccb->ccb_id)(*(__uint32_t *)(&msg->msg_context) = ((__uint32_t)(ccb
->ccb_id)))
;
1205
1206 bus_dmamap_sync(sc->sc_dmat, MPI_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x01
| 0x04))
1207 ccb->ccb_offset, MPI_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x01
| 0x04))
1208 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (512), (0x01
| 0x04))
;
1209
1210 ccb->ccb_state = MPI_CCB_QUEUED;
1211 bus_space_write_4(sc->sc_iot, sc->sc_ioh,((sc->sc_iot)->write_4((sc->sc_ioh), (0x40), (ccb->
ccb_cmd_dva)))
1212 MPI_REQ_QUEUE, ccb->ccb_cmd_dva)((sc->sc_iot)->write_4((sc->sc_ioh), (0x40), (ccb->
ccb_cmd_dva)))
;
1213}
1214
1215int
1216mpi_poll(struct mpi_softc *sc, struct mpi_ccb *ccb, int timeout)
1217{
1218 void (*done)(struct mpi_ccb *);
1219 void *cookie;
1220 int rv = 1;
1221 u_int32_t reg;
1222
1223 DNPRINTF(MPI_D_INTR, "%s: mpi_poll timeout %d\n", DEVNAME(sc),
1224 timeout);
1225
1226 done = ccb->ccb_done;
1227 cookie = ccb->ccb_cookie;
1228
1229 ccb->ccb_done = mpi_poll_done;
1230 ccb->ccb_cookie = &rv;
1231
1232 mpi_start(sc, ccb);
1233 while (rv == 1) {
1234 reg = mpi_pop_reply(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x44)));
1235 if (reg == 0xffffffff) {
1236 if (timeout-- == 0) {
1237 printf("%s: timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1238 goto timeout;
1239 }
1240
1241 delay(1000)(*delay_func)(1000);
1242 continue;
1243 }
1244
1245 mpi_reply(sc, reg);
1246 }
1247
1248 ccb->ccb_cookie = cookie;
1249 done(ccb);
1250
1251timeout:
1252 return (rv);
1253}
1254
1255void
1256mpi_poll_done(struct mpi_ccb *ccb)
1257{
1258 int *rv = ccb->ccb_cookie;
1259
1260 *rv = 0;
1261}
1262
1263void
1264mpi_wait(struct mpi_softc *sc, struct mpi_ccb *ccb)
1265{
1266 struct mutex cookie;
1267 void (*done)(struct mpi_ccb *);
1268
1269 mtx_init(&cookie, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&cookie)
, ((((0x3)) > 0x0 && ((0x3)) < 0x9) ? 0x9 : ((0x3
)))); } while (0)
;
1270
1271 done = ccb->ccb_done;
1272 ccb->ccb_done = mpi_wait_done;
1273 ccb->ccb_cookie = &cookie;
1274
1275 /* XXX this will wait forever for the ccb to complete */
1276
1277 mpi_start(sc, ccb);
1278
1279 mtx_enter(&cookie);
1280 while (ccb->ccb_cookie != NULL((void *)0))
1281 msleep_nsec(ccb, &cookie, PRIBIO16, "mpiwait", INFSLP0xffffffffffffffffULL);
1282 mtx_leave(&cookie);
1283
1284 done(ccb);
1285}
1286
1287void
1288mpi_wait_done(struct mpi_ccb *ccb)
1289{
1290 struct mutex *cookie = ccb->ccb_cookie;
1291
1292 mtx_enter(cookie);
1293 ccb->ccb_cookie = NULL((void *)0);
1294 wakeup_one(ccb)wakeup_n((ccb), 1);
1295 mtx_leave(cookie);
1296}
1297
1298void
1299mpi_scsi_cmd(struct scsi_xfer *xs)
1300{
1301 struct scsi_link *link = xs->sc_link;
1302 struct mpi_softc *sc = link->bus->sb_adapter_softc;
1303 struct mpi_ccb *ccb;
1304 struct mpi_ccb_bundle *mcb;
1305 struct mpi_msg_scsi_io *io;
1306
1307 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd\n", DEVNAME(sc));
1308
1309 KERNEL_UNLOCK()_kernel_unlock();
1310
1311 if (xs->cmdlen > MPI_CDB_LEN16) {
1
Assuming field 'cmdlen' is <= MPI_CDB_LEN
2
Taking false branch
1312 DNPRINTF(MPI_D_CMD, "%s: CBD too big %d\n",
1313 DEVNAME(sc), xs->cmdlen);
1314 memset(&xs->sense, 0, sizeof(xs->sense))__builtin_memset((&xs->sense), (0), (sizeof(xs->sense
)))
;
1315 xs->sense.error_code = SSD_ERRCODE_VALID0x80 | SSD_ERRCODE_CURRENT0x70;
1316 xs->sense.flags = SKEY_ILLEGAL_REQUEST0x05;
1317 xs->sense.add_sense_code = 0x20;
1318 xs->error = XS_SENSE1;
1319 goto done;
1320 }
1321
1322 ccb = xs->io;
1323
1324 DNPRINTF(MPI_D_CMD, "%s: ccb_id: %d xs->flags: 0x%x\n",
1325 DEVNAME(sc), ccb->ccb_id, xs->flags);
1326
1327 ccb->ccb_cookie = xs;
1328 ccb->ccb_done = mpi_scsi_cmd_done;
1329
1330 mcb = ccb->ccb_cmd;
1331 io = &mcb->mcb_io;
1332
1333 io->function = MPI_FUNCTION_SCSI_IO_REQUEST(0x00);
1334 /*
1335 * bus is always 0
1336 * io->bus = htole16(sc->sc_bus);
1337 */
1338 io->target_id = link->target;
1339
1340 io->cdb_length = xs->cmdlen;
1341 io->sense_buf_len = sizeof(xs->sense);
1342 io->msg_flags = MPI_SCSIIO_SENSE_BUF_ADDR_WIDTH_64(1<<0);
1343
1344 htobem16(&io->lun[0], link->lun)(*(__uint16_t *)(&io->lun[0]) = (__uint16_t)(__builtin_constant_p
(link->lun) ? (__uint16_t)(((__uint16_t)(link->lun) &
0xffU) << 8 | ((__uint16_t)(link->lun) & 0xff00U
) >> 8) : __swap16md(link->lun)))
;
3
'?' condition is false
1345
1346 switch (xs->flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
4
Control jumps to the 'default' case at line 1353
1347 case SCSI_DATA_IN0x00800:
1348 io->direction = MPI_SCSIIO_DIR_READ(0x2);
1349 break;
1350 case SCSI_DATA_OUT0x01000:
1351 io->direction = MPI_SCSIIO_DIR_WRITE(0x1);
1352 break;
1353 default:
1354 io->direction = MPI_SCSIIO_DIR_NONE(0x0);
1355 break;
1356 }
1357
1358 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SCSI0x01 &&
5
Assuming field 'sc_porttype' is equal to MPI_PORTFACTS_PORTTYPE_SCSI
1359 (link->quirks & SDEV_NOTAGS0x0008))
1360 io->tagging = MPI_SCSIIO_ATTR_UNTAGGED(0x5);
1361 else
1362 io->tagging = MPI_SCSIIO_ATTR_SIMPLE_Q(0x0);
1363
1364 memcpy(io->cdb, &xs->cmd, xs->cmdlen)__builtin_memcpy((io->cdb), (&xs->cmd), (xs->cmdlen
))
;
1365
1366 htolem32(&io->data_length, xs->datalen)(*(__uint32_t *)(&io->data_length) = ((__uint32_t)(xs->
datalen)))
;
1367
1368 htolem32(&io->sense_buf_low_addr, ccb->ccb_cmd_dva +(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t
)(ccb->ccb_cmd_dva + __builtin_offsetof(struct mpi_ccb_bundle
, mcb_sense))))
1369 offsetof(struct mpi_ccb_bundle, mcb_sense))(*(__uint32_t *)(&io->sense_buf_low_addr) = ((__uint32_t
)(ccb->ccb_cmd_dva + __builtin_offsetof(struct mpi_ccb_bundle
, mcb_sense))))
;
1370
1371 if (mpi_load_xs(ccb) != 0)
6
Calling 'mpi_load_xs'
1372 goto stuffup;
1373
1374 timeout_set(&xs->stimeout, mpi_timeout_xs, ccb);
1375
1376 if (xs->flags & SCSI_POLL0x00002) {
1377 if (mpi_poll(sc, ccb, xs->timeout) != 0)
1378 goto stuffup;
1379 } else
1380 mpi_start(sc, ccb);
1381
1382 KERNEL_LOCK()_kernel_lock();
1383 return;
1384
1385stuffup:
1386 xs->error = XS_DRIVER_STUFFUP2;
1387done:
1388 KERNEL_LOCK()_kernel_lock();
1389 scsi_done(xs);
1390}
1391
1392void
1393mpi_scsi_cmd_done(struct mpi_ccb *ccb)
1394{
1395 struct mpi_softc *sc = ccb->ccb_sc;
1396 struct scsi_xfer *xs = ccb->ccb_cookie;
1397 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
1398 bus_dmamap_t dmap = ccb->ccb_dmamap;
1399 struct mpi_msg_scsi_io_error *sie;
1400
1401 if (xs->datalen != 0) {
1402 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x02 : 0x08))
1403 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x02 : 0x08))
1404 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x02 : 0x08))
;
1405
1406 bus_dmamap_unload(sc->sc_dmat, dmap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (dmap
))
;
1407 }
1408
1409 /* timeout_del */
1410 xs->error = XS_NOERROR0;
1411 xs->resid = 0;
1412
1413 if (ccb->ccb_rcb == NULL((void *)0)) {
1414 /* no scsi error, we're ok so drop out early */
1415 xs->status = SCSI_OK0x00;
1416 KERNEL_LOCK()_kernel_lock();
1417 scsi_done(xs);
1418 KERNEL_UNLOCK()_kernel_unlock();
1419 return;
1420 }
1421
1422 sie = ccb->ccb_rcb->rcb_reply;
1423
1424 DNPRINTF(MPI_D_CMD, "%s: mpi_scsi_cmd_done xs cmd: 0x%02x len: %d "
1425 "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
1426 xs->flags);
1427 DNPRINTF(MPI_D_CMD, "%s: target_id: %d bus: %d msg_length: %d "
1428 "function: 0x%02x\n", DEVNAME(sc), sie->target_id, sie->bus,
1429 sie->msg_length, sie->function);
1430 DNPRINTF(MPI_D_CMD, "%s: cdb_length: %d sense_buf_length: %d "
1431 "msg_flags: 0x%02x\n", DEVNAME(sc), sie->cdb_length,
1432 sie->sense_buf_len, sie->msg_flags);
1433 DNPRINTF(MPI_D_CMD, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
1434 letoh32(sie->msg_context));
1435 DNPRINTF(MPI_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
1436 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
1437 sie->scsi_state, letoh16(sie->ioc_status));
1438 DNPRINTF(MPI_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1439 letoh32(sie->ioc_loginfo));
1440 DNPRINTF(MPI_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
1441 letoh32(sie->transfer_count));
1442 DNPRINTF(MPI_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
1443 letoh32(sie->sense_count));
1444 DNPRINTF(MPI_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
1445 letoh32(sie->response_info));
1446 DNPRINTF(MPI_D_CMD, "%s: tag: 0x%04x\n", DEVNAME(sc),
1447 letoh16(sie->tag));
1448
1449 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_NO_SCSI_STATUS(1<<3))
1450 xs->status = SCSI_TERMINATED0x22;
1451 else
1452 xs->status = sie->scsi_status;
1453 xs->resid = 0;
1454
1455 switch (lemtoh16(&sie->ioc_status)((__uint16_t)(*(__uint16_t *)(&sie->ioc_status)))) {
1456 case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN(0x0045):
1457 xs->resid = xs->datalen - lemtoh32(&sie->transfer_count)((__uint32_t)(*(__uint32_t *)(&sie->transfer_count)));
1458 /* FALLTHROUGH */
1459 case MPI_IOCSTATUS_SUCCESS(0x0000):
1460 case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR(0x0040):
1461 switch (xs->status) {
1462 case SCSI_OK0x00:
1463 xs->error = XS_NOERROR0;
1464 break;
1465
1466 case SCSI_CHECK0x02:
1467 xs->error = XS_SENSE1;
1468 break;
1469
1470 case SCSI_BUSY0x08:
1471 case SCSI_QUEUE_FULL0x28:
1472 xs->error = XS_BUSY5;
1473 break;
1474
1475 default:
1476 xs->error = XS_DRIVER_STUFFUP2;
1477 break;
1478 }
1479 break;
1480
1481 case MPI_IOCSTATUS_BUSY(0x0002):
1482 case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES(0x0006):
1483 xs->error = XS_BUSY5;
1484 break;
1485
1486 case MPI_IOCSTATUS_SCSI_INVALID_BUS(0x0041):
1487 case MPI_IOCSTATUS_SCSI_INVALID_TARGETID(0x0042):
1488 case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE(0x0043):
1489 xs->error = XS_SELTIMEOUT3;
1490 break;
1491
1492 case MPI_IOCSTATUS_SCSI_IOC_TERMINATED(0x004B):
1493 case MPI_IOCSTATUS_SCSI_EXT_TERMINATED(0x004C):
1494 xs->error = XS_RESET8;
1495 break;
1496
1497 default:
1498 xs->error = XS_DRIVER_STUFFUP2;
1499 break;
1500 }
1501
1502 if (sie->scsi_state & MPI_SCSIIO_ERR_STATE_AUTOSENSE_VALID(1<<0))
1503 memcpy(&xs->sense, &mcb->mcb_sense, sizeof(xs->sense))__builtin_memcpy((&xs->sense), (&mcb->mcb_sense
), (sizeof(xs->sense)))
;
1504
1505 DNPRINTF(MPI_D_CMD, "%s: xs err: 0x%02x status: %d\n", DEVNAME(sc),
1506 xs->error, xs->status);
1507
1508 mpi_push_reply(sc, ccb->ccb_rcb);
1509 KERNEL_LOCK()_kernel_lock();
1510 scsi_done(xs);
1511 KERNEL_UNLOCK()_kernel_unlock();
1512}
1513
void
mpi_timeout_xs(void *arg)
{
	/* XXX not implemented: command timeouts are currently ignored. */
}
1519
1520int
1521mpi_load_xs(struct mpi_ccb *ccb)
1522{
1523 struct mpi_softc *sc = ccb->ccb_sc;
1524 struct scsi_xfer *xs = ccb->ccb_cookie;
1525 struct mpi_ccb_bundle *mcb = ccb->ccb_cmd;
1526 struct mpi_msg_scsi_io *io = &mcb->mcb_io;
1527 struct mpi_sge *sge = NULL((void *)0);
7
'sge' initialized to a null pointer value
1528 struct mpi_sge *nsge = &mcb->mcb_sgl[0];
1529 struct mpi_sge *ce = NULL((void *)0), *nce;
1530 bus_dmamap_t dmap = ccb->ccb_dmamap;
1531 u_int32_t addr, flags;
1532 int i, error;
1533
1534 if (xs->datalen == 0) {
8
Assuming field 'datalen' is not equal to 0
9
Taking false branch
1535 htolem32(&nsge->sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&nsge->sg_hdr) = ((__uint32_t)((0x1<<
28) | (0x1<<31) | (0x1<<30) | (0x1<<24))))
1536 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL)(*(__uint32_t *)(&nsge->sg_hdr) = ((__uint32_t)((0x1<<
28) | (0x1<<31) | (0x1<<30) | (0x1<<24))))
;
1537 return (0);
1538 }
1539
1540 error = bus_dmamap_load(sc->sc_dmat, dmap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), (0x0100 | (
(xs->flags & 0x00001) ? 0x0001 : 0x0000)))
10
Assuming the condition is false
11
'?' condition is false
1541 xs->data, xs->datalen, NULL, BUS_DMA_STREAMING |(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), (0x0100 | (
(xs->flags & 0x00001) ? 0x0001 : 0x0000)))
1542 ((xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK))(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), (0x0100 | (
(xs->flags & 0x00001) ? 0x0001 : 0x0000)))
;
1543 if (error) {
12
Assuming 'error' is 0
13
Taking false branch
1544 printf("%s: error %d loading dmamap\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
1545 return (1);
1546 }
1547
1548 flags = MPI_SGE_FL_TYPE_SIMPLE(0x1<<28) | MPI_SGE_FL_SIZE_64(0x1<<25);
1549 if (xs->flags & SCSI_DATA_OUT0x01000)
14
Assuming the condition is false
15
Taking false branch
1550 flags |= MPI_SGE_FL_DIR_OUT(0x1<<26);
1551
1552 if (dmap->dm_nsegs > sc->sc_first_sgl_len) {
16
Assuming field 'dm_nsegs' is <= field 'sc_first_sgl_len'
17
Taking false branch
1553 ce = &mcb->mcb_sgl[sc->sc_first_sgl_len - 1];
1554 io->chain_offset = (u_int32_t *)ce - (u_int32_t *)io;
1555 }
1556
1557 for (i = 0; i < dmap->dm_nsegs; i++) {
18
Assuming 'i' is >= field 'dm_nsegs'
19
Loop condition is false. Execution continues on line 1594
1558
1559 if (nsge == ce) {
1560 nsge++;
1561 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST)((__uint32_t)((0x1<<31)));
1562
1563 if ((dmap->dm_nsegs - i) > sc->sc_chain_len) {
1564 nce = &nsge[sc->sc_chain_len - 1];
1565 addr = (u_int32_t *)nce - (u_int32_t *)nsge;
1566 addr = addr << 16 |
1567 sizeof(struct mpi_sge) * sc->sc_chain_len;
1568 } else {
1569 nce = NULL((void *)0);
1570 addr = sizeof(struct mpi_sge) *
1571 (dmap->dm_nsegs - i);
1572 }
1573
1574 ce->sg_hdr = htole32(MPI_SGE_FL_TYPE_CHAIN |((__uint32_t)((0x3<<28) | (0x1<<25) | addr))
1575 MPI_SGE_FL_SIZE_64 | addr)((__uint32_t)((0x3<<28) | (0x1<<25) | addr));
1576
1577 mpi_dvatosge(ce, ccb->ccb_cmd_dva +
1578 ((u_int8_t *)nsge - (u_int8_t *)mcb));
1579
1580 ce = nce;
1581 }
1582
1583 DNPRINTF(MPI_D_DMA, "%s: %d: %d 0x%016llx\n", DEVNAME(sc),
1584 i, dmap->dm_segs[i].ds_len,
1585 (u_int64_t)dmap->dm_segs[i].ds_addr);
1586
1587 sge = nsge++;
1588
1589 sge->sg_hdr = htole32(flags | dmap->dm_segs[i].ds_len)((__uint32_t)(flags | dmap->dm_segs[i].ds_len));
1590 mpi_dvatosge(sge, dmap->dm_segs[i].ds_addr);
1591 }
1592
1593 /* terminate list */
1594 sge->sg_hdr |= htole32(MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |((__uint32_t)((0x1<<31) | (0x1<<30) | (0x1<<
24)))
20
Access to field 'sg_hdr' results in a dereference of a null pointer (loaded from variable 'sge')
1595 MPI_SGE_FL_EOL)((__uint32_t)((0x1<<31) | (0x1<<30) | (0x1<<
24)))
;
1596
1597 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
1598 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
1599 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
;
1600
1601 return (0);
1602}
1603
1604int
1605mpi_scsi_probe_virtual(struct scsi_link *link)
1606{
1607 struct mpi_softc *sc = link->bus->sb_adapter_softc;
1608 struct mpi_cfg_hdr hdr;
1609 struct mpi_cfg_raid_vol_pg0 *rp0;
1610 int len;
1611 int rv;
1612
1613 if (!ISSET(sc->sc_flags, MPI_F_RAID)((sc->sc_flags) & ((1<<1))))
1614 return (0);
1615
1616 if (link->lun > 0)
1617 return (0);
1618
1619 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL(0x08),
1620 0, link->target, MPI_PG_POLL(1<<1), &hdr);
1621 if (rv != 0)
1622 return (0);
1623
1624 len = hdr.page_length * 4;
1625 rp0 = malloc(len, M_TEMP127, M_NOWAIT0x0002);
1626 if (rp0 == NULL((void *)0))
1627 return (ENOMEM12);
1628
1629 rv = mpi_req_cfg_page(sc, link->target, MPI_PG_POLL(1<<1), &hdr, 1, rp0, len);
1630 if (rv == 0)
1631 SET(link->flags, SDEV_VIRTUAL)((link->flags) |= (0x0800));
1632
1633 free(rp0, M_TEMP127, len);
1634 return (0);
1635}
1636
1637int
1638mpi_scsi_probe(struct scsi_link *link)
1639{
1640 struct mpi_softc *sc = link->bus->sb_adapter_softc;
1641 struct mpi_ecfg_hdr ehdr;
1642 struct mpi_cfg_sas_dev_pg0 pg0;
1643 u_int32_t address;
1644 int rv;
1645
1646 rv = mpi_scsi_probe_virtual(link);
1647 if (rv != 0)
1648 return (rv);
1649
1650 if (ISSET(link->flags, SDEV_VIRTUAL)((link->flags) & (0x0800)))
1651 return (0);
1652
1653 if (sc->sc_porttype != MPI_PORTFACTS_PORTTYPE_SAS0x30)
1654 return (0);
1655
1656 address = MPI_CFG_SAS_DEV_ADDR_BUS(1<<28) | link->target;
1657
1658 if (mpi_ecfg_header(sc, MPI_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE, 0,mpi_req_cfg_header((sc), ((0x12)), (0), (address), (1<<
1)|(1<<0), (&ehdr))
1659 address, &ehdr)mpi_req_cfg_header((sc), ((0x12)), (0), (address), (1<<
1)|(1<<0), (&ehdr))
!= 0)
1660 return (EIO5);
1661
1662 if (mpi_ecfg_page(sc, address, &ehdr, 1, &pg0, sizeof(pg0))mpi_req_cfg_page((sc), (address), (1<<1)|(1<<0), (
&ehdr), (1), (&pg0), (sizeof(pg0)))
!= 0)
1663 return (0);
1664
1665 DNPRINTF(MPI_D_MISC, "%s: mpi_scsi_probe sas dev pg 0 for target %d:\n",
1666 DEVNAME(sc), link->target);
1667 DNPRINTF(MPI_D_MISC, "%s: slot: 0x%04x enc_handle: 0x%04x\n",
1668 DEVNAME(sc), letoh16(pg0.slot), letoh16(pg0.enc_handle));
1669 DNPRINTF(MPI_D_MISC, "%s: sas_addr: 0x%016llx\n", DEVNAME(sc),
1670 letoh64(pg0.sas_addr));
1671 DNPRINTF(MPI_D_MISC, "%s: parent_dev_handle: 0x%04x phy_num: 0x%02x "
1672 "access_status: 0x%02x\n", DEVNAME(sc),
1673 letoh16(pg0.parent_dev_handle), pg0.phy_num, pg0.access_status);
1674 DNPRINTF(MPI_D_MISC, "%s: dev_handle: 0x%04x "
1675 "bus: 0x%02x target: 0x%02x\n", DEVNAME(sc),
1676 letoh16(pg0.dev_handle), pg0.bus, pg0.target);
1677 DNPRINTF(MPI_D_MISC, "%s: device_info: 0x%08x\n", DEVNAME(sc),
1678 letoh32(pg0.device_info));
1679 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%04x physical_port: 0x%02x\n",
1680 DEVNAME(sc), letoh16(pg0.flags), pg0.physical_port);
1681
1682 if (ISSET(lemtoh32(&pg0.device_info),((((__uint32_t)(*(__uint32_t *)(&pg0.device_info)))) &
((1<<13)))
1683 MPI_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)((((__uint32_t)(*(__uint32_t *)(&pg0.device_info)))) &
((1<<13)))
) {
1684 DNPRINTF(MPI_D_MISC, "%s: target %d is an ATAPI device\n",
1685 DEVNAME(sc), link->target);
1686 link->flags |= SDEV_ATAPI0x0200;
1687 }
1688
1689 return (0);
1690}
1691
1692u_int32_t
1693mpi_read(struct mpi_softc *sc, bus_size_t r)
1694{
1695 u_int32_t rv;
1696
1697 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1698 BUS_SPACE_BARRIER_READ0x01);
1699 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)((sc->sc_iot)->read_4((sc->sc_ioh), (r)));
1700
1701 DNPRINTF(MPI_D_RW, "%s: mpi_read %#x %#x\n", DEVNAME(sc), r, rv);
1702
1703 return (rv);
1704}
1705
1706void
1707mpi_write(struct mpi_softc *sc, bus_size_t r, u_int32_t v)
1708{
1709 DNPRINTF(MPI_D_RW, "%s: mpi_write %#x %#x\n", DEVNAME(sc), r, v);
1710
1711 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v)((sc->sc_iot)->write_4((sc->sc_ioh), (r), (v)));
1712 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1713 BUS_SPACE_BARRIER_WRITE0x02);
1714}
1715
1716int
1717mpi_wait_eq(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1718 u_int32_t target)
1719{
1720 int i;
1721
1722 DNPRINTF(MPI_D_RW, "%s: mpi_wait_eq %#x %#x %#x\n", DEVNAME(sc), r,
1723 mask, target);
1724
1725 for (i = 0; i < 10000; i++) {
1726 if ((mpi_read(sc, r) & mask) == target)
1727 return (0);
1728 delay(1000)(*delay_func)(1000);
1729 }
1730
1731 return (1);
1732}
1733
1734int
1735mpi_wait_ne(struct mpi_softc *sc, bus_size_t r, u_int32_t mask,
1736 u_int32_t target)
1737{
1738 int i;
1739
1740 DNPRINTF(MPI_D_RW, "%s: mpi_wait_ne %#x %#x %#x\n", DEVNAME(sc), r,
1741 mask, target);
1742
1743 for (i = 0; i < 10000; i++) {
1744 if ((mpi_read(sc, r) & mask) != target)
1745 return (0);
1746 delay(1000)(*delay_func)(1000);
1747 }
1748
1749 return (1);
1750}
1751
1752int
1753mpi_init(struct mpi_softc *sc)
1754{
1755 u_int32_t db;
1756 int i;
1757
1758 /* spin until the IOC leaves the RESET state */
1759 if (mpi_wait_ne(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
1760 MPI_DOORBELL_STATE_RESET(0x0<<28)) != 0) {
1761 DNPRINTF(MPI_D_MISC, "%s: mpi_init timeout waiting to leave "
1762 "reset state\n", DEVNAME(sc));
1763 return (1);
1764 }
1765
1766 /* check current ownership */
1767 db = mpi_read_db(sc)mpi_read((sc), 0x00);
1768 if ((db & MPI_DOORBELL_WHOINIT(0x7<<24)) == MPI_DOORBELL_WHOINIT_PCIPEER(0x3<<24)) {
1769 DNPRINTF(MPI_D_MISC, "%s: mpi_init initialised by pci peer\n",
1770 DEVNAME(sc));
1771 return (0);
1772 }
1773
1774 for (i = 0; i < 5; i++) {
1775 switch (db & MPI_DOORBELL_STATE(0xf<<28)) {
1776 case MPI_DOORBELL_STATE_READY(0x1<<28):
1777 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is ready\n",
1778 DEVNAME(sc));
1779 return (0);
1780
1781 case MPI_DOORBELL_STATE_OPER(0x2<<28):
1782 case MPI_DOORBELL_STATE_FAULT(0x4<<28):
1783 DNPRINTF(MPI_D_MISC, "%s: mpi_init ioc is being "
1784 "reset\n" , DEVNAME(sc));
1785 if (mpi_reset_soft(sc) != 0)
1786 mpi_reset_hard(sc);
1787 break;
1788
1789 case MPI_DOORBELL_STATE_RESET(0x0<<28):
1790 DNPRINTF(MPI_D_MISC, "%s: mpi_init waiting to come "
1791 "out of reset\n", DEVNAME(sc));
1792 if (mpi_wait_ne(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
1793 MPI_DOORBELL_STATE_RESET(0x0<<28)) != 0)
1794 return (1);
1795 break;
1796 }
1797 db = mpi_read_db(sc)mpi_read((sc), 0x00);
1798 }
1799
1800 return (1);
1801}
1802
1803int
1804mpi_reset_soft(struct mpi_softc *sc)
1805{
1806 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_soft\n", DEVNAME(sc));
1807
1808 if (mpi_read_db(sc)mpi_read((sc), 0x00) & MPI_DOORBELL_INUSE(0x1<<27))
1809 return (1);
1810
1811 mpi_write_db(sc,mpi_write((sc), 0x00, (((((0x40)) << 24) & (0xff <<
24))))
1812 MPI_DOORBELL_FUNCTION(MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET))mpi_write((sc), 0x00, (((((0x40)) << 24) & (0xff <<
24))))
;
1813 if (mpi_wait_eq(sc, MPI_INTR_STATUS0x30,
1814 MPI_INTR_STATUS_IOCDOORBELL(1<<31), 0) != 0)
1815 return (1);
1816
1817 if (mpi_wait_eq(sc, MPI_DOORBELL0x00, MPI_DOORBELL_STATE(0xf<<28),
1818 MPI_DOORBELL_STATE_READY(0x1<<28)) != 0)
1819 return (1);
1820
1821 return (0);
1822}
1823
1824int
1825mpi_reset_hard(struct mpi_softc *sc)
1826{
1827 DNPRINTF(MPI_D_MISC, "%s: mpi_reset_hard\n", DEVNAME(sc));
1828
1829 /* enable diagnostic register */
1830 mpi_write(sc, MPI_WRITESEQ0x04, 0xff);
1831 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_10x04);
1832 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_20x0b);
1833 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_30x02);
1834 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_40x07);
1835 mpi_write(sc, MPI_WRITESEQ0x04, MPI_WRITESEQ_50x0d);
1836
1837 /* reset ioc */
1838 mpi_write(sc, MPI_HOSTDIAG0x08, MPI_HOSTDIAG_RESET_ADAPTER(1<<2));
1839
1840 delay(10000)(*delay_func)(10000);
1841
1842 /* disable diagnostic register */
1843 mpi_write(sc, MPI_WRITESEQ0x04, 0xff);
1844
1845 /* restore pci bits? */
1846
1847 /* firmware bits? */
1848 return (0);
1849}
1850
1851int
1852mpi_handshake_send(struct mpi_softc *sc, void *buf, size_t dwords)
1853{
1854 u_int32_t *query = buf;
1855 int i;
1856
1857 /* make sure the doorbell is not in use. */
1858 if (mpi_read_db(sc)mpi_read((sc), 0x00) & MPI_DOORBELL_INUSE(0x1<<27))
1859 return (1);
1860
1861 /* clear pending doorbell interrupts */
1862 if (mpi_read_intr(sc)(((sc)->sc_iot)->read_4(((sc)->sc_ioh), (0x30))) & MPI_INTR_STATUS_DOORBELL(1<<0))
1863 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1864
1865 /*
1866 * first write the doorbell with the handshake function and the
1867 * dword count.
1868 */
1869 mpi_write_db(sc, MPI_DOORBELL_FUNCTION(MPI_FUNCTION_HANDSHAKE) |mpi_write((sc), 0x00, (((((0x42)) << 24) & (0xff <<
24)) | (((dwords) << 16) & (0xff << 16))))
1870 MPI_DOORBELL_DWORDS(dwords))mpi_write((sc), 0x00, (((((0x42)) << 24) & (0xff <<
24)) | (((dwords) << 16) & (0xff << 16))))
;
1871
1872 /*
1873 * the doorbell used bit will be set because a doorbell function has
1874 * started. Wait for the interrupt and then ack it.
1875 */
1876 if (mpi_wait_db_int(sc)mpi_wait_ne((sc), 0x30, (1<<0), 0) != 0)
1877 return (1);
1878 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1879
1880 /* poll for the acknowledgement. */
1881 if (mpi_wait_db_ack(sc)mpi_wait_eq((sc), 0x30, (1<<31), 0) != 0)
1882 return (1);
1883
1884 /* write the query through the doorbell. */
1885 for (i = 0; i < dwords; i++) {
1886 mpi_write_db(sc, htole32(query[i]))mpi_write((sc), 0x00, (((__uint32_t)(query[i]))));
1887 if (mpi_wait_db_ack(sc)mpi_wait_eq((sc), 0x30, (1<<31), 0) != 0)
1888 return (1);
1889 }
1890
1891 return (0);
1892}
1893
1894int
1895mpi_handshake_recv_dword(struct mpi_softc *sc, u_int32_t *dword)
1896{
1897 u_int16_t *words = (u_int16_t *)dword;
1898 int i;
1899
1900 for (i = 0; i < 2; i++) {
1901 if (mpi_wait_db_int(sc)mpi_wait_ne((sc), 0x30, (1<<0), 0) != 0)
1902 return (1);
1903 words[i] = letoh16(mpi_read_db(sc) & MPI_DOORBELL_DATA_MASK)((__uint16_t)(mpi_read((sc), 0x00) & 0xffff));
1904 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1905 }
1906
1907 return (0);
1908}
1909
1910int
1911mpi_handshake_recv(struct mpi_softc *sc, void *buf, size_t dwords)
1912{
1913 struct mpi_msg_reply *reply = buf;
1914 u_int32_t *dbuf = buf, dummy;
1915 int i;
1916
1917 /* get the first dword so we can read the length out of the header. */
1918 if (mpi_handshake_recv_dword(sc, &dbuf[0]) != 0)
1919 return (1);
1920
1921 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dwords: %d reply: %d\n",
1922 DEVNAME(sc), dwords, reply->msg_length);
1923
1924 /*
1925 * the total length, in dwords, is in the message length field of the
1926 * reply header.
1927 */
1928 for (i = 1; i < MIN(dwords, reply->msg_length)(((dwords)<(reply->msg_length))?(dwords):(reply->msg_length
))
; i++) {
1929 if (mpi_handshake_recv_dword(sc, &dbuf[i]) != 0)
1930 return (1);
1931 }
1932
1933 /* if there's extra stuff to come off the ioc, discard it */
1934 while (i++ < reply->msg_length) {
1935 if (mpi_handshake_recv_dword(sc, &dummy) != 0)
1936 return (1);
1937 DNPRINTF(MPI_D_CMD, "%s: mpi_handshake_recv dummy read: "
1938 "0x%08x\n", DEVNAME(sc), dummy);
1939 }
1940
1941 /* wait for the doorbell used bit to be reset and clear the intr */
1942 if (mpi_wait_db_int(sc)mpi_wait_ne((sc), 0x30, (1<<0), 0) != 0)
1943 return (1);
1944 mpi_write_intr(sc, 0)mpi_write((sc), 0x30, (0));
1945
1946 return (0);
1947}
1948
/* completion handler for commands whose replies need no processing */
void
mpi_empty_done(struct mpi_ccb *ccb)
{
	/* nothing to do */
}
1954
1955int
1956mpi_iocfacts(struct mpi_softc *sc)
1957{
1958 struct mpi_msg_iocfacts_request ifq;
1959 struct mpi_msg_iocfacts_reply ifp;
1960
1961 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts\n", DEVNAME(sc));
1962
1963 memset(&ifq, 0, sizeof(ifq))__builtin_memset((&ifq), (0), (sizeof(ifq)));
1964 memset(&ifp, 0, sizeof(ifp))__builtin_memset((&ifp), (0), (sizeof(ifp)));
1965
1966 ifq.function = MPI_FUNCTION_IOC_FACTS(0x03);
1967 ifq.chain_offset = 0;
1968 ifq.msg_flags = 0;
1969 ifq.msg_context = htole32(0xdeadbeef)((__uint32_t)(0xdeadbeef));
1970
1971 if (mpi_handshake_send(sc, &ifq, dwordsof(ifq)(sizeof(ifq) / sizeof(u_int32_t))) != 0) {
1972 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts send failed\n",
1973 DEVNAME(sc));
1974 return (1);
1975 }
1976
1977 if (mpi_handshake_recv(sc, &ifp, dwordsof(ifp)(sizeof(ifp) / sizeof(u_int32_t))) != 0) {
1978 DNPRINTF(MPI_D_MISC, "%s: mpi_iocfacts recv failed\n",
1979 DEVNAME(sc));
1980 return (1);
1981 }
1982
1983 DNPRINTF(MPI_D_MISC, "%s: func: 0x%02x len: %d msgver: %d.%d\n",
1984 DEVNAME(sc), ifp.function, ifp.msg_length,
1985 ifp.msg_version_maj, ifp.msg_version_min);
1986 DNPRINTF(MPI_D_MISC, "%s: msgflags: 0x%02x iocnumber: 0x%02x "
1987 "hdrver: %d.%d\n", DEVNAME(sc), ifp.msg_flags,
1988 ifp.ioc_number, ifp.header_version_maj,
1989 ifp.header_version_min);
1990 DNPRINTF(MPI_D_MISC, "%s: message context: 0x%08x\n", DEVNAME(sc),
1991 letoh32(ifp.msg_context));
1992 DNPRINTF(MPI_D_MISC, "%s: iocstatus: 0x%04x ioexcept: 0x%04x\n",
1993 DEVNAME(sc), letoh16(ifp.ioc_status),
1994 letoh16(ifp.ioc_exceptions));
1995 DNPRINTF(MPI_D_MISC, "%s: iocloginfo: 0x%08x\n", DEVNAME(sc),
1996 letoh32(ifp.ioc_loginfo));
1997 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%02x blocksize: %d whoinit: 0x%02x "
1998 "maxchdepth: %d\n", DEVNAME(sc), ifp.flags,
1999 ifp.block_size, ifp.whoinit, ifp.max_chain_depth);
2000 DNPRINTF(MPI_D_MISC, "%s: reqfrsize: %d replyqdepth: %d\n",
2001 DEVNAME(sc), letoh16(ifp.request_frame_size),
2002 letoh16(ifp.reply_queue_depth));
2003 DNPRINTF(MPI_D_MISC, "%s: productid: 0x%04x\n", DEVNAME(sc),
2004 letoh16(ifp.product_id));
2005 DNPRINTF(MPI_D_MISC, "%s: hostmfahiaddr: 0x%08x\n", DEVNAME(sc),
2006 letoh32(ifp.current_host_mfa_hi_addr));
2007 DNPRINTF(MPI_D_MISC, "%s: event_state: 0x%02x number_of_ports: %d "
2008 "global_credits: %d\n",
2009 DEVNAME(sc), ifp.event_state, ifp.number_of_ports,
2010 letoh16(ifp.global_credits));
2011 DNPRINTF(MPI_D_MISC, "%s: sensebufhiaddr: 0x%08x\n", DEVNAME(sc),
2012 letoh32(ifp.current_sense_buffer_hi_addr));
2013 DNPRINTF(MPI_D_MISC, "%s: maxbus: %d maxdev: %d replyfrsize: %d\n",
2014 DEVNAME(sc), ifp.max_buses, ifp.max_devices,
2015 letoh16(ifp.current_reply_frame_size));
2016 DNPRINTF(MPI_D_MISC, "%s: fw_image_size: %d\n", DEVNAME(sc),
2017 letoh32(ifp.fw_image_size));
2018 DNPRINTF(MPI_D_MISC, "%s: ioc_capabilities: 0x%08x\n", DEVNAME(sc),
2019 letoh32(ifp.ioc_capabilities));
2020 DNPRINTF(MPI_D_MISC, "%s: fw_version: %d.%d fw_version_unit: 0x%02x "
2021 "fw_version_dev: 0x%02x\n", DEVNAME(sc),
2022 ifp.fw_version_maj, ifp.fw_version_min,
2023 ifp.fw_version_unit, ifp.fw_version_dev);
2024 DNPRINTF(MPI_D_MISC, "%s: hi_priority_queue_depth: 0x%04x\n",
2025 DEVNAME(sc), letoh16(ifp.hi_priority_queue_depth));
2026 DNPRINTF(MPI_D_MISC, "%s: host_page_buffer_sge: hdr: 0x%08x "
2027 "addr 0x%08lx%08lx\n", DEVNAME(sc),
2028 letoh32(ifp.host_page_buffer_sge.sg_hdr),
2029 letoh32(ifp.host_page_buffer_sge.sg_addr_hi),
2030 letoh32(ifp.host_page_buffer_sge.sg_addr_lo));
2031
2032 sc->sc_fw_maj = ifp.fw_version_maj;
2033 sc->sc_fw_min = ifp.fw_version_min;
2034 sc->sc_fw_unit = ifp.fw_version_unit;
2035 sc->sc_fw_dev = ifp.fw_version_dev;
2036
2037 sc->sc_maxcmds = lemtoh16(&ifp.global_credits)((__uint16_t)(*(__uint16_t *)(&ifp.global_credits)));
2038 sc->sc_maxchdepth = ifp.max_chain_depth;
2039 sc->sc_ioc_number = ifp.ioc_number;
2040 if (sc->sc_flags & MPI_F_SPI(1<<0))
2041 sc->sc_buswidth = 16;
2042 else
2043 sc->sc_buswidth =
2044 (ifp.max_devices == 0) ? 256 : ifp.max_devices;
2045 if (ifp.flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT(1<<0))
2046 sc->sc_fw_len = lemtoh32(&ifp.fw_image_size)((__uint32_t)(*(__uint32_t *)(&ifp.fw_image_size)));
2047
2048 sc->sc_repq = MIN(MPI_REPLYQ_DEPTH, lemtoh16(&ifp.reply_queue_depth))(((128)<(((__uint16_t)(*(__uint16_t *)(&ifp.reply_queue_depth
)))))?(128):(((__uint16_t)(*(__uint16_t *)(&ifp.reply_queue_depth
)))))
;
2049
2050 /*
2051 * you can fit sg elements on the end of the io cmd if they fit in the
2052 * request frame size.
2053 */
2054 sc->sc_first_sgl_len = ((lemtoh16(&ifp.request_frame_size)((__uint16_t)(*(__uint16_t *)(&ifp.request_frame_size))) * 4) -
2055 sizeof(struct mpi_msg_scsi_io)) / sizeof(struct mpi_sge);
2056 DNPRINTF(MPI_D_MISC, "%s: first sgl len: %d\n", DEVNAME(sc),
2057 sc->sc_first_sgl_len);
2058
2059 sc->sc_chain_len = (lemtoh16(&ifp.request_frame_size)((__uint16_t)(*(__uint16_t *)(&ifp.request_frame_size))) * 4) /
2060 sizeof(struct mpi_sge);
2061 DNPRINTF(MPI_D_MISC, "%s: chain len: %d\n", DEVNAME(sc),
2062 sc->sc_chain_len);
2063
2064 /* the sgl tailing the io cmd loses an entry to the chain element. */
2065 sc->sc_max_sgl_len = MPI_MAX_SGL36 - 1;
2066 /* the sgl chains lose an entry for each chain element */
2067 sc->sc_max_sgl_len -= (MPI_MAX_SGL36 - sc->sc_first_sgl_len) /
2068 sc->sc_chain_len;
2069 DNPRINTF(MPI_D_MISC, "%s: max sgl len: %d\n", DEVNAME(sc),
2070 sc->sc_max_sgl_len);
2071
2072 /* XXX we're ignoring the max chain depth */
2073
2074 return (0);
2075}
2076
2077int
2078mpi_iocinit(struct mpi_softc *sc)
2079{
2080 struct mpi_msg_iocinit_request iiq;
2081 struct mpi_msg_iocinit_reply iip;
2082 u_int32_t hi_addr;
2083
2084 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit\n", DEVNAME(sc));
2085
2086 memset(&iiq, 0, sizeof(iiq))__builtin_memset((&iiq), (0), (sizeof(iiq)));
2087 memset(&iip, 0, sizeof(iip))__builtin_memset((&iip), (0), (sizeof(iip)));
2088
2089 iiq.function = MPI_FUNCTION_IOC_INIT(0x02);
2090 iiq.whoinit = MPI_WHOINIT_HOST_DRIVER0x04;
2091
2092 iiq.max_devices = (sc->sc_buswidth == 256) ? 0 : sc->sc_buswidth;
2093 iiq.max_buses = 1;
2094
2095 iiq.msg_context = htole32(0xd00fd00f)((__uint32_t)(0xd00fd00f));
2096
2097 iiq.reply_frame_size = htole16(MPI_REPLY_SIZE)((__uint16_t)(80));
2098
2099 hi_addr = (u_int32_t)(MPI_DMA_DVA(sc->sc_requests)((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0].ds_addr
)
>> 32);
2100 htolem32(&iiq.host_mfa_hi_addr, hi_addr)(*(__uint32_t *)(&iiq.host_mfa_hi_addr) = ((__uint32_t)(hi_addr
)))
;
2101 htolem32(&iiq.sense_buffer_hi_addr, hi_addr)(*(__uint32_t *)(&iiq.sense_buffer_hi_addr) = ((__uint32_t
)(hi_addr)))
;
2102
2103 iiq.msg_version_maj = 0x01;
2104 iiq.msg_version_min = 0x02;
2105
2106 iiq.hdr_version_unit = 0x0d;
2107 iiq.hdr_version_dev = 0x00;
2108
2109 if (mpi_handshake_send(sc, &iiq, dwordsof(iiq)(sizeof(iiq) / sizeof(u_int32_t))) != 0) {
2110 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit send failed\n",
2111 DEVNAME(sc));
2112 return (1);
2113 }
2114
2115 if (mpi_handshake_recv(sc, &iip, dwordsof(iip)(sizeof(iip) / sizeof(u_int32_t))) != 0) {
2116 DNPRINTF(MPI_D_MISC, "%s: mpi_iocinit recv failed\n",
2117 DEVNAME(sc));
2118 return (1);
2119 }
2120
2121 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d "
2122 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
2123 iip.msg_length, iip.whoinit);
2124 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x max_buses: %d "
2125 "max_devices: %d flags: 0x%02x\n", DEVNAME(sc), iip.msg_flags,
2126 iip.max_buses, iip.max_devices, iip.flags);
2127 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2128 letoh32(iip.msg_context));
2129 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2130 letoh16(iip.ioc_status));
2131 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2132 letoh32(iip.ioc_loginfo));
2133
2134 return (0);
2135}
2136
2137int
2138mpi_portfacts(struct mpi_softc *sc)
2139{
2140 struct mpi_ccb *ccb;
2141 struct mpi_msg_portfacts_request *pfq;
2142 volatile struct mpi_msg_portfacts_reply *pfp;
2143 int rv = 1;
2144
2145 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts\n", DEVNAME(sc));
2146
2147 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2148 if (ccb == NULL((void *)0)) {
2149 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts ccb_get\n",
2150 DEVNAME(sc));
2151 return (rv);
2152 }
2153
2154 ccb->ccb_done = mpi_empty_done;
2155 pfq = ccb->ccb_cmd;
2156
2157 pfq->function = MPI_FUNCTION_PORT_FACTS(0x05);
2158 pfq->chain_offset = 0;
2159 pfq->msg_flags = 0;
2160 pfq->port_number = 0;
2161
2162 if (mpi_poll(sc, ccb, 50000) != 0) {
2163 DNPRINTF(MPI_D_MISC, "%s: mpi_portfacts poll\n", DEVNAME(sc));
2164 goto err;
2165 }
2166
2167 if (ccb->ccb_rcb == NULL((void *)0)) {
2168 DNPRINTF(MPI_D_MISC, "%s: empty portfacts reply\n",
2169 DEVNAME(sc));
2170 goto err;
2171 }
2172 pfp = ccb->ccb_rcb->rcb_reply;
2173
2174 DNPRINTF(MPI_D_MISC, "%s: function: 0x%02x msg_length: %d\n",
2175 DEVNAME(sc), pfp->function, pfp->msg_length);
2176 DNPRINTF(MPI_D_MISC, "%s: msg_flags: 0x%02x port_number: %d\n",
2177 DEVNAME(sc), pfp->msg_flags, pfp->port_number);
2178 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2179 letoh32(pfp->msg_context));
2180 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2181 letoh16(pfp->ioc_status));
2182 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2183 letoh32(pfp->ioc_loginfo));
2184 DNPRINTF(MPI_D_MISC, "%s: max_devices: %d port_type: 0x%02x\n",
2185 DEVNAME(sc), letoh16(pfp->max_devices), pfp->port_type);
2186 DNPRINTF(MPI_D_MISC, "%s: protocol_flags: 0x%04x port_scsi_id: %d\n",
2187 DEVNAME(sc), letoh16(pfp->protocol_flags),
2188 letoh16(pfp->port_scsi_id));
2189 DNPRINTF(MPI_D_MISC, "%s: max_persistent_ids: %d "
2190 "max_posted_cmd_buffers: %d\n", DEVNAME(sc),
2191 letoh16(pfp->max_persistent_ids),
2192 letoh16(pfp->max_posted_cmd_buffers));
2193 DNPRINTF(MPI_D_MISC, "%s: max_lan_buckets: %d\n", DEVNAME(sc),
2194 letoh16(pfp->max_lan_buckets));
2195
2196 sc->sc_porttype = pfp->port_type;
2197 if (sc->sc_target == -1)
2198 sc->sc_target = lemtoh16(&pfp->port_scsi_id)((__uint16_t)(*(__uint16_t *)(&pfp->port_scsi_id)));
2199
2200 mpi_push_reply(sc, ccb->ccb_rcb);
2201 rv = 0;
2202err:
2203 scsi_io_put(&sc->sc_iopool, ccb);
2204
2205 return (rv);
2206}
2207
2208int
2209mpi_cfg_coalescing(struct mpi_softc *sc)
2210{
2211 struct mpi_cfg_hdr hdr;
2212 struct mpi_cfg_ioc_pg1 pg;
2213 u_int32_t flags;
2214
2215 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 1, 0, &hdr)mpi_req_cfg_header((sc), ((0x01)), (1), (0), (1<<1), (&
hdr))
!= 0) {
2216 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1 header\n",
2217 DEVNAME(sc));
2218 return (1);
2219 }
2220
2221 if (mpi_cfg_page(sc, 0, &hdr, 1, &pg, sizeof(pg))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (&
pg), (sizeof(pg)))
!= 0) {
2222 DNPRINTF(MPI_D_MISC, "%s: unable to fetch IOC page 1\n",
2223 DEVNAME(sc));
2224 return (1);
2225 }
2226
2227 DNPRINTF(MPI_D_MISC, "%s: IOC page 1\n", DEVNAME(sc));
2228 DNPRINTF(MPI_D_MISC, "%s: flags: 0x%08x\n", DEVNAME(sc),
2229 letoh32(pg.flags));
2230 DNPRINTF(MPI_D_MISC, "%s: coalescing_timeout: %d\n", DEVNAME(sc),
2231 letoh32(pg.coalescing_timeout));
2232 DNPRINTF(MPI_D_MISC, "%s: coalescing_depth: %d pci_slot_num: %d\n",
2233 DEVNAME(sc), pg.coalescing_depth, pg.pci_slot_num);
2234
2235 flags = lemtoh32(&pg.flags)((__uint32_t)(*(__uint32_t *)(&pg.flags)));
2236 if (!ISSET(flags, MPI_CFG_IOC_1_REPLY_COALESCING)((flags) & ((1<<0))))
2237 return (0);
2238
2239 CLR(pg.flags, htole32(MPI_CFG_IOC_1_REPLY_COALESCING))((pg.flags) &= ~(((__uint32_t)((1<<0)))));
2240 if (mpi_cfg_page(sc, 0, &hdr, 0, &pg, sizeof(pg))mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (0), (&
pg), (sizeof(pg)))
!= 0) {
2241 DNPRINTF(MPI_D_MISC, "%s: unable to clear coalescing\n",
2242 DEVNAME(sc));
2243 return (1);
2244 }
2245
2246 return (0);
2247}
2248
2249int
2250mpi_eventnotify(struct mpi_softc *sc)
2251{
2252 struct mpi_ccb *ccb;
2253 struct mpi_msg_event_request *enq;
2254
2255 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2256 if (ccb == NULL((void *)0)) {
2257 DNPRINTF(MPI_D_MISC, "%s: mpi_eventnotify ccb_get\n",
2258 DEVNAME(sc));
2259 return (1);
2260 }
2261
2262 sc->sc_evt_ccb = ccb;
2263 SIMPLEQ_INIT(&sc->sc_evt_ack_queue)do { (&sc->sc_evt_ack_queue)->sqh_first = ((void *)
0); (&sc->sc_evt_ack_queue)->sqh_last = &(&
sc->sc_evt_ack_queue)->sqh_first; } while (0)
;
2264 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_evt_ack_mtx), ((((0x3)) > 0x0 && ((0x3)) < 0x9
) ? 0x9 : ((0x3)))); } while (0)
;
2265 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
2266 mpi_eventack, sc);
2267
2268 ccb->ccb_done = mpi_eventnotify_done;
2269 enq = ccb->ccb_cmd;
2270
2271 enq->function = MPI_FUNCTION_EVENT_NOTIFICATION(0x07);
2272 enq->chain_offset = 0;
2273 enq->event_switch = MPI_EVENT_SWITCH_ON(0x01);
2274
2275 mpi_start(sc, ccb);
2276 return (0);
2277}
2278
2279void
2280mpi_eventnotify_done(struct mpi_ccb *ccb)
2281{
2282 struct mpi_softc *sc = ccb->ccb_sc;
2283 struct mpi_rcb *rcb = ccb->ccb_rcb;
2284 struct mpi_msg_event_reply *enp = rcb->rcb_reply;
2285
2286 DNPRINTF(MPI_D_EVT, "%s: mpi_eventnotify_done\n", DEVNAME(sc));
2287
2288 DNPRINTF(MPI_D_EVT, "%s: function: 0x%02x msg_length: %d "
2289 "data_length: %d\n", DEVNAME(sc), enp->function, enp->msg_length,
2290 letoh16(enp->data_length));
2291 DNPRINTF(MPI_D_EVT, "%s: ack_required: %d msg_flags 0x%02x\n",
2292 DEVNAME(sc), enp->ack_required, enp->msg_flags);
2293 DNPRINTF(MPI_D_EVT, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2294 letoh32(enp->msg_context));
2295 DNPRINTF(MPI_D_EVT, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2296 letoh16(enp->ioc_status));
2297 DNPRINTF(MPI_D_EVT, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2298 letoh32(enp->ioc_loginfo));
2299 DNPRINTF(MPI_D_EVT, "%s: event: 0x%08x\n", DEVNAME(sc),
2300 letoh32(enp->event));
2301 DNPRINTF(MPI_D_EVT, "%s: event_context: 0x%08x\n", DEVNAME(sc),
2302 letoh32(enp->event_context));
2303
2304 switch (lemtoh32(&enp->event)((__uint32_t)(*(__uint32_t *)(&enp->event)))) {
2305 /* ignore these */
2306 case MPI_EVENT_EVENT_CHANGE0x0a:
2307 case MPI_EVENT_SAS_PHY_LINK_STATUS0x12:
2308 break;
2309
2310 case MPI_EVENT_SAS_DEVICE_STATUS_CHANGE0x0f:
2311 if (sc->sc_scsibus == NULL((void *)0))
2312 break;
2313
2314 if (mpi_evt_sas(sc, rcb) != 0) {
2315 /* reply is freed later on */
2316 return;
2317 }
2318 break;
2319
2320 case MPI_EVENT_RESCAN0x06:
2321 if (sc->sc_scsibus != NULL((void *)0) &&
2322 sc->sc_porttype == MPI_PORTFACTS_PORTTYPE_FC0x10)
2323 task_add(systq, &sc->sc_evt_rescan);
2324 break;
2325
2326 default:
2327 DNPRINTF(MPI_D_EVT, "%s: unhandled event 0x%02x\n",
2328 DEVNAME(sc), lemtoh32(&enp->event));
2329 break;
2330 }
2331
2332 mpi_eventnotify_free(sc, rcb);
2333}
2334
2335void
2336mpi_eventnotify_free(struct mpi_softc *sc, struct mpi_rcb *rcb)
2337{
2338 struct mpi_msg_event_reply *enp = rcb->rcb_reply;
2339
2340 if (enp->ack_required) {
2341 mtx_enter(&sc->sc_evt_ack_mtx);
2342 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&sc->
sc_evt_ack_queue)->sqh_last = (rcb); (&sc->sc_evt_ack_queue
)->sqh_last = &(rcb)->rcb_link.sqe_next; } while (0
)
;
2343 mtx_leave(&sc->sc_evt_ack_mtx);
2344 scsi_ioh_add(&sc->sc_evt_ack_handler);
2345 } else
2346 mpi_push_reply(sc, rcb);
2347}
2348
2349int
2350mpi_evt_sas(struct mpi_softc *sc, struct mpi_rcb *rcb)
2351{
2352 struct mpi_evt_sas_change *ch;
2353 u_int8_t *data;
2354
2355 data = rcb->rcb_reply;
2356 data += sizeof(struct mpi_msg_event_reply);
2357 ch = (struct mpi_evt_sas_change *)data;
2358
2359 if (ch->bus != 0)
2360 return (0);
2361
2362 switch (ch->reason) {
2363 case MPI_EVT_SASCH_REASON_ADDED0x03:
2364 case MPI_EVT_SASCH_REASON_NO_PERSIST_ADDED0x06:
2365 KERNEL_LOCK()_kernel_lock();
2366 if (scsi_req_probe(sc->sc_scsibus, ch->target, -1) != 0) {
2367 printf("%s: unable to request attach of %d\n",
2368 DEVNAME(sc)((sc)->sc_dev.dv_xname), ch->target);
2369 }
2370 KERNEL_UNLOCK()_kernel_unlock();
2371 break;
2372
2373 case MPI_EVT_SASCH_REASON_NOT_RESPONDING0x04:
2374 KERNEL_LOCK()_kernel_lock();
2375 scsi_activate(sc->sc_scsibus, ch->target, -1, DVACT_DEACTIVATE1);
2376 KERNEL_UNLOCK()_kernel_unlock();
2377
2378 mtx_enter(&sc->sc_evt_scan_mtx);
2379 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_scan_queue, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&sc->
sc_evt_scan_queue)->sqh_last = (rcb); (&sc->sc_evt_scan_queue
)->sqh_last = &(rcb)->rcb_link.sqe_next; } while (0
)
;
2380 mtx_leave(&sc->sc_evt_scan_mtx);
2381 scsi_ioh_add(&sc->sc_evt_scan_handler);
2382
2383 /* we'll handle event ack later on */
2384 return (1);
2385
2386 case MPI_EVT_SASCH_REASON_SMART_DATA0x05:
2387 case MPI_EVT_SASCH_REASON_UNSUPPORTED0x07:
2388 case MPI_EVT_SASCH_REASON_INTERNAL_RESET0x08:
2389 break;
2390 default:
2391 printf("%s: unknown reason for SAS device status change: "
2392 "0x%02x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), ch->reason);
2393 break;
2394 }
2395
2396 return (0);
2397}
2398
2399void
2400mpi_evt_sas_detach(void *cookie, void *io)
2401{
2402 struct mpi_softc *sc = cookie;
2403 struct mpi_ccb *ccb = io;
2404 struct mpi_rcb *rcb, *next;
2405 struct mpi_msg_event_reply *enp;
2406 struct mpi_evt_sas_change *ch;
2407 struct mpi_msg_scsi_task_request *str;
2408
2409 DNPRINTF(MPI_D_EVT, "%s: event sas detach handler\n", DEVNAME(sc));
2410
2411 mtx_enter(&sc->sc_evt_scan_mtx);
2412 rcb = SIMPLEQ_FIRST(&sc->sc_evt_scan_queue)((&sc->sc_evt_scan_queue)->sqh_first);
2413 if (rcb != NULL((void *)0)) {
2414 next = SIMPLEQ_NEXT(rcb, rcb_link)((rcb)->rcb_link.sqe_next);
2415 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_scan_queue, rcb_link)do { if (((&sc->sc_evt_scan_queue)->sqh_first = (&
sc->sc_evt_scan_queue)->sqh_first->rcb_link.sqe_next
) == ((void *)0)) (&sc->sc_evt_scan_queue)->sqh_last
= &(&sc->sc_evt_scan_queue)->sqh_first; } while
(0)
;
2416 }
2417 mtx_leave(&sc->sc_evt_scan_mtx);
2418
2419 if (rcb == NULL((void *)0)) {
2420 scsi_io_put(&sc->sc_iopool, ccb);
2421 return;
2422 }
2423
2424 enp = rcb->rcb_reply;
2425 ch = (struct mpi_evt_sas_change *)(enp + 1);
2426
2427 ccb->ccb_done = mpi_evt_sas_detach_done;
2428 str = ccb->ccb_cmd;
2429
2430 str->target_id = ch->target;
2431 str->bus = 0;
2432 str->function = MPI_FUNCTION_SCSI_TASK_MGMT(0x01);
2433
2434 str->task_type = MPI_MSG_SCSI_TASK_TYPE_TARGET_RESET(0x03);
2435
2436 mpi_eventnotify_free(sc, rcb);
2437
2438 mpi_start(sc, ccb);
2439
2440 if (next != NULL((void *)0))
2441 scsi_ioh_add(&sc->sc_evt_scan_handler);
2442}
2443
2444void
2445mpi_evt_sas_detach_done(struct mpi_ccb *ccb)
2446{
2447 struct mpi_softc *sc = ccb->ccb_sc;
2448 struct mpi_msg_scsi_task_reply *r = ccb->ccb_rcb->rcb_reply;
2449
2450 KERNEL_LOCK()_kernel_lock();
2451 if (scsi_req_detach(sc->sc_scsibus, r->target_id, -1,
2452 DETACH_FORCE0x01) != 0) {
2453 printf("%s: unable to request detach of %d\n",
2454 DEVNAME(sc)((sc)->sc_dev.dv_xname), r->target_id);
2455 }
2456 KERNEL_UNLOCK()_kernel_unlock();
2457
2458 mpi_push_reply(sc, ccb->ccb_rcb);
2459 scsi_io_put(&sc->sc_iopool, ccb);
2460}
2461
2462void
2463mpi_fc_rescan(void *xsc)
2464{
2465 struct mpi_softc *sc = xsc;
2466 struct mpi_cfg_hdr hdr;
2467 struct mpi_cfg_fc_device_pg0 pg;
2468 struct scsi_link *link;
2469 u_int8_t devmap[256 / NBBY8];
2470 u_int32_t id = 0xffffff;
2471 int i;
2472
2473 memset(devmap, 0, sizeof(devmap))__builtin_memset((devmap), (0), (sizeof(devmap)));
2474
2475 do {
2476 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_FC_DEV(0x06), 0,
2477 id, 0, &hdr) != 0) {
2478 printf("%s: header get for rescan of 0x%08x failed\n",
2479 DEVNAME(sc)((sc)->sc_dev.dv_xname), id);
2480 return;
2481 }
2482
2483 memset(&pg, 0, sizeof(pg))__builtin_memset((&pg), (0), (sizeof(pg)));
2484 if (mpi_req_cfg_page(sc, id, 0, &hdr, 1, &pg, sizeof(pg)) != 0)
2485 break;
2486
2487 if (ISSET(pg.flags, MPI_CFG_FC_DEV_0_FLAGS_BUSADDR_VALID)((pg.flags) & ((1<<0))) &&
2488 pg.current_bus == 0)
2489 setbit(devmap, pg.current_target_id)((devmap)[(pg.current_target_id)>>3] |= 1<<((pg.current_target_id
)&(8 -1)))
;
2490
2491 id = lemtoh32(&pg.port_id)((__uint32_t)(*(__uint32_t *)(&pg.port_id)));
2492 } while (id <= 0xff0000);
2493
2494 for (i = 0; i < sc->sc_buswidth; i++) {
2495 link = scsi_get_link(sc->sc_scsibus, i, 0);
2496
2497 if (isset(devmap, i)((devmap)[(i)>>3] & (1<<((i)&(8 -1))))) {
2498 if (link == NULL((void *)0))
2499 scsi_probe_target(sc->sc_scsibus, i);
2500 } else {
2501 if (link != NULL((void *)0)) {
2502 scsi_activate(sc->sc_scsibus, i, -1,
2503 DVACT_DEACTIVATE1);
2504 scsi_detach_target(sc->sc_scsibus, i,
2505 DETACH_FORCE0x01);
2506 }
2507 }
2508 }
2509}
2510
2511void
2512mpi_eventack(void *cookie, void *io)
2513{
2514 struct mpi_softc *sc = cookie;
2515 struct mpi_ccb *ccb = io;
2516 struct mpi_rcb *rcb, *next;
2517 struct mpi_msg_event_reply *enp;
2518 struct mpi_msg_eventack_request *eaq;
2519
2520 DNPRINTF(MPI_D_EVT, "%s: event ack\n", DEVNAME(sc));
2521
2522 mtx_enter(&sc->sc_evt_ack_mtx);
2523 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue)((&sc->sc_evt_ack_queue)->sqh_first);
2524 if (rcb != NULL((void *)0)) {
2525 next = SIMPLEQ_NEXT(rcb, rcb_link)((rcb)->rcb_link.sqe_next);
2526 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link)do { if (((&sc->sc_evt_ack_queue)->sqh_first = (&
sc->sc_evt_ack_queue)->sqh_first->rcb_link.sqe_next)
== ((void *)0)) (&sc->sc_evt_ack_queue)->sqh_last =
&(&sc->sc_evt_ack_queue)->sqh_first; } while (
0)
;
2527 }
2528 mtx_leave(&sc->sc_evt_ack_mtx);
2529
2530 if (rcb == NULL((void *)0)) {
2531 scsi_io_put(&sc->sc_iopool, ccb);
2532 return;
2533 }
2534
2535 enp = rcb->rcb_reply;
2536
2537 ccb->ccb_done = mpi_eventack_done;
2538 eaq = ccb->ccb_cmd;
2539
2540 eaq->function = MPI_FUNCTION_EVENT_ACK(0x08);
2541
2542 eaq->event = enp->event;
2543 eaq->event_context = enp->event_context;
2544
2545 mpi_push_reply(sc, rcb);
2546 mpi_start(sc, ccb);
2547
2548 if (next != NULL((void *)0))
2549 scsi_ioh_add(&sc->sc_evt_ack_handler);
2550}
2551
2552void
2553mpi_eventack_done(struct mpi_ccb *ccb)
2554{
2555 struct mpi_softc *sc = ccb->ccb_sc;
2556
2557 DNPRINTF(MPI_D_EVT, "%s: event ack done\n", DEVNAME(sc));
2558
2559 mpi_push_reply(sc, ccb->ccb_rcb);
2560 scsi_io_put(&sc->sc_iopool, ccb);
2561}
2562
2563int
2564mpi_portenable(struct mpi_softc *sc)
2565{
2566 struct mpi_ccb *ccb;
2567 struct mpi_msg_portenable_request *peq;
2568 int rv = 0;
2569
2570 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable\n", DEVNAME(sc));
2571
2572 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2573 if (ccb == NULL((void *)0)) {
2574 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable ccb_get\n",
2575 DEVNAME(sc));
2576 return (1);
2577 }
2578
2579 ccb->ccb_done = mpi_empty_done;
2580 peq = ccb->ccb_cmd;
2581
2582 peq->function = MPI_FUNCTION_PORT_ENABLE(0x06);
2583 peq->port_number = 0;
2584
2585 if (mpi_poll(sc, ccb, 50000) != 0) {
2586 DNPRINTF(MPI_D_MISC, "%s: mpi_portenable poll\n", DEVNAME(sc));
2587 return (1);
2588 }
2589
2590 if (ccb->ccb_rcb == NULL((void *)0)) {
2591 DNPRINTF(MPI_D_MISC, "%s: empty portenable reply\n",
2592 DEVNAME(sc));
2593 rv = 1;
2594 } else
2595 mpi_push_reply(sc, ccb->ccb_rcb);
2596
2597 scsi_io_put(&sc->sc_iopool, ccb);
2598
2599 return (rv);
2600}
2601
2602int
2603mpi_fwupload(struct mpi_softc *sc)
2604{
2605 struct mpi_ccb *ccb;
2606 struct {
2607 struct mpi_msg_fwupload_request req;
2608 struct mpi_sge sge;
2609 } __packed__attribute__((__packed__)) *bundle;
2610 struct mpi_msg_fwupload_reply *upp;
2611 int rv = 0;
2612
2613 if (sc->sc_fw_len == 0)
2614 return (0);
2615
2616 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload\n", DEVNAME(sc));
2617
2618 sc->sc_fw = mpi_dmamem_alloc(sc, sc->sc_fw_len);
2619 if (sc->sc_fw == NULL((void *)0)) {
2620 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload unable to allocate %d\n",
2621 DEVNAME(sc), sc->sc_fw_len);
2622 return (1);
2623 }
2624
2625 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
2626 if (ccb == NULL((void *)0)) {
2627 DNPRINTF(MPI_D_MISC, "%s: mpi_fwupload ccb_get\n",
2628 DEVNAME(sc));
2629 goto err;
2630 }
2631
2632 ccb->ccb_done = mpi_empty_done;
2633 bundle = ccb->ccb_cmd;
2634
2635 bundle->req.function = MPI_FUNCTION_FW_UPLOAD(0x12);
2636
2637 bundle->req.image_type = MPI_FWUPLOAD_IMAGETYPE_IOC_FW(0x00);
2638
2639 bundle->req.tce.details_length = 12;
2640 htolem32(&bundle->req.tce.image_size, sc->sc_fw_len)(*(__uint32_t *)(&bundle->req.tce.image_size) = ((__uint32_t
)(sc->sc_fw_len)))
;
2641
2642 htolem32(&bundle->sge.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&bundle->sge.sg_hdr) = ((__uint32_t)(
(0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<
30) | (0x1<<24) | (u_int32_t)sc->sc_fw_len)))
2643 MPI_SGE_FL_SIZE_64 | MPI_SGE_FL_LAST | MPI_SGE_FL_EOB |(*(__uint32_t *)(&bundle->sge.sg_hdr) = ((__uint32_t)(
(0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<
30) | (0x1<<24) | (u_int32_t)sc->sc_fw_len)))
2644 MPI_SGE_FL_EOL | (u_int32_t)sc->sc_fw_len)(*(__uint32_t *)(&bundle->sge.sg_hdr) = ((__uint32_t)(
(0x1<<28) | (0x1<<25) | (0x1<<31) | (0x1<<
30) | (0x1<<24) | (u_int32_t)sc->sc_fw_len)))
;
2645 mpi_dvatosge(&bundle->sge, MPI_DMA_DVA(sc->sc_fw)((u_int64_t)(sc->sc_fw)->mdm_map->dm_segs[0].ds_addr
)
);
2646
2647 if (mpi_poll(sc, ccb, 50000) != 0) {
2648 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n", DEVNAME(sc));
2649 goto err;
2650 }
2651
2652 if (ccb->ccb_rcb == NULL((void *)0))
2653 panic("%s: unable to do fw upload", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2654 upp = ccb->ccb_rcb->rcb_reply;
2655
2656 if (lemtoh16(&upp->ioc_status)((__uint16_t)(*(__uint16_t *)(&upp->ioc_status))) != MPI_IOCSTATUS_SUCCESS(0x0000))
2657 rv = 1;
2658
2659 mpi_push_reply(sc, ccb->ccb_rcb);
2660 scsi_io_put(&sc->sc_iopool, ccb);
2661
2662 return (rv);
2663
2664err:
2665 mpi_dmamem_free(sc, sc->sc_fw);
2666 return (1);
2667}
2668
2669int
2670mpi_manufacturing(struct mpi_softc *sc)
2671{
2672 char board_name[33];
2673 struct mpi_cfg_hdr hdr;
2674 struct mpi_cfg_manufacturing_pg0 *pg;
2675 size_t pagelen;
2676 int rv = 1;
2677
2678 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_MANUFACTURING,mpi_req_cfg_header((sc), ((0x09)), (0), (0), (1<<1), (&
hdr))
2679 0, 0, &hdr)mpi_req_cfg_header((sc), ((0x09)), (0), (0), (1<<1), (&
hdr))
!= 0)
2680 return (1);
2681
2682 pagelen = hdr.page_length * 4; /* dwords to bytes */
2683 if (pagelen < sizeof(*pg))
2684 return (1);
2685
2686 pg = malloc(pagelen, M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004);
2687 if (pg == NULL((void *)0))
2688 return (1);
2689
2690 if (mpi_cfg_page(sc, 0, &hdr, 1, pg, pagelen)mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (pg
), (pagelen))
!= 0)
2691 goto out;
2692
2693 scsi_strvis(board_name, pg->board_name, sizeof(pg->board_name));
2694
2695 printf("%s: %s, firmware %d.%d.%d.%d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), board_name,
2696 sc->sc_fw_maj, sc->sc_fw_min, sc->sc_fw_unit, sc->sc_fw_dev);
2697
2698 rv = 0;
2699
2700out:
2701 free(pg, M_TEMP127, pagelen);
2702 return (rv);
2703}
2704
2705void
2706mpi_get_raid(struct mpi_softc *sc)
2707{
2708 struct mpi_cfg_hdr hdr;
2709 struct mpi_cfg_ioc_pg2 *vol_page;
2710 size_t pagelen;
2711 u_int32_t capabilities;
2712
2713 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid\n", DEVNAME(sc));
2714
2715 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_IOC, 2, 0, &hdr)mpi_req_cfg_header((sc), ((0x01)), (2), (0), (1<<1), (&
hdr))
!= 0) {
2716 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch header"
2717 "for IOC page 2\n", DEVNAME(sc));
2718 return;
2719 }
2720
2721 pagelen = hdr.page_length * 4; /* dwords to bytes */
2722 vol_page = malloc(pagelen, M_TEMP127, M_WAITOK0x0001|M_CANFAIL0x0004);
2723 if (vol_page == NULL((void *)0)) {
2724 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to allocate "
2725 "space for ioc config page 2\n", DEVNAME(sc));
2726 return;
2727 }
2728
2729 if (mpi_cfg_page(sc, 0, &hdr, 1, vol_page, pagelen)mpi_req_cfg_page((sc), (0), (1<<1), (&hdr), (1), (vol_page
), (pagelen))
!= 0) {
2730 DNPRINTF(MPI_D_RAID, "%s: mpi_get_raid unable to fetch IOC "
2731 "page 2\n", DEVNAME(sc));
2732 goto out;
2733 }
2734
2735 capabilities = lemtoh32(&vol_page->capabilities)((__uint32_t)(*(__uint32_t *)(&vol_page->capabilities)
))
;
2736
2737 DNPRINTF(MPI_D_RAID, "%s: capabilities: 0x08%x\n", DEVNAME(sc),
2738 letoh32(vol_page->capabilities));
2739 DNPRINTF(MPI_D_RAID, "%s: active_vols: %d max_vols: %d "
2740 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
2741 vol_page->active_vols, vol_page->max_vols,
2742 vol_page->active_physdisks, vol_page->max_physdisks);
2743
2744 /* don't walk list if there are no RAID capability */
2745 if (capabilities == 0xdeadbeef) {
2746 printf("%s: deadbeef in raid configuration\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2747 goto out;
2748 }
2749
2750 if (ISSET(capabilities, MPI_CFG_IOC_2_CAPABILITIES_RAID)((capabilities) & (( (1<<0) | (1<<1) | (1<<
2))))
)
2751 sc->sc_flags |= MPI_F_RAID(1<<1);
2752
2753out:
2754 free(vol_page, M_TEMP127, pagelen);
2755}
2756
2757int
2758mpi_req_cfg_header(struct mpi_softc *sc, u_int8_t type, u_int8_t number,
2759 u_int32_t address, int flags, void *p)
2760{
2761 struct mpi_ccb *ccb;
2762 struct mpi_msg_config_request *cq;
2763 struct mpi_msg_config_reply *cp;
2764 struct mpi_cfg_hdr *hdr = p;
2765 struct mpi_ecfg_hdr *ehdr = p;
2766 int etype = 0;
2767 int rv = 0;
2768
2769 DNPRINTF(MPI_D_MISC, "%s: mpi_req_cfg_header type: %#x number: %x "
2770 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2771 address, flags, MPI_PG_FMT);
2772
2773 ccb = scsi_io_get(&sc->sc_iopool,
2774 ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1))) ? SCSI_NOSLEEP0x00001 : 0);
2775 if (ccb == NULL((void *)0)) {
2776 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header ccb_get\n",
2777 DEVNAME(sc));
2778 return (1);
2779 }
2780
2781 if (ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0)))) {
2782 etype = type;
2783 type = MPI_CONFIG_REQ_PAGE_TYPE_EXTENDED(0x0F);
2784 }
2785
2786 cq = ccb->ccb_cmd;
2787
2788 cq->function = MPI_FUNCTION_CONFIG(0x04);
2789
2790 cq->action = MPI_CONFIG_REQ_ACTION_PAGE_HEADER(0x00);
2791
2792 cq->config_header.page_number = number;
2793 cq->config_header.page_type = type;
2794 cq->ext_page_type = etype;
2795 htolem32(&cq->page_address, address)(*(__uint32_t *)(&cq->page_address) = ((__uint32_t)(address
)))
;
2796 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24))))
2797 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL)(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24))))
;
2798
2799 ccb->ccb_done = mpi_empty_done;
2800 if (ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1)))) {
2801 if (mpi_poll(sc, ccb, 50000) != 0) {
2802 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2803 DEVNAME(sc));
2804 return (1);
2805 }
2806 } else
2807 mpi_wait(sc, ccb);
2808
2809 if (ccb->ccb_rcb == NULL((void *)0))
2810 panic("%s: unable to fetch config header", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2811 cp = ccb->ccb_rcb->rcb_reply;
2812
2813 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2814 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2815 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2816 "msg_flags: 0x%02x\n", DEVNAME(sc),
2817 letoh16(cp->ext_page_length), cp->ext_page_type,
2818 cp->msg_flags);
2819 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2820 letoh32(cp->msg_context));
2821 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2822 letoh16(cp->ioc_status));
2823 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2824 letoh32(cp->ioc_loginfo));
2825 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2826 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2827 cp->config_header.page_version,
2828 cp->config_header.page_length,
2829 cp->config_header.page_number,
2830 cp->config_header.page_type);
2831
2832 if (lemtoh16(&cp->ioc_status)((__uint16_t)(*(__uint16_t *)(&cp->ioc_status))) != MPI_IOCSTATUS_SUCCESS(0x0000))
2833 rv = 1;
2834 else if (ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0)))) {
2835 memset(ehdr, 0, sizeof(*ehdr))__builtin_memset((ehdr), (0), (sizeof(*ehdr)));
2836 ehdr->page_version = cp->config_header.page_version;
2837 ehdr->page_number = cp->config_header.page_number;
2838 ehdr->page_type = cp->config_header.page_type;
2839 ehdr->ext_page_length = cp->ext_page_length;
2840 ehdr->ext_page_type = cp->ext_page_type;
2841 } else
2842 *hdr = cp->config_header;
2843
2844 mpi_push_reply(sc, ccb->ccb_rcb);
2845 scsi_io_put(&sc->sc_iopool, ccb);
2846
2847 return (rv);
2848}
2849
2850int
2851mpi_req_cfg_page(struct mpi_softc *sc, u_int32_t address, int flags,
2852 void *p, int read, void *page, size_t len)
2853{
2854 struct mpi_ccb *ccb;
2855 struct mpi_msg_config_request *cq;
2856 struct mpi_msg_config_reply *cp;
2857 struct mpi_cfg_hdr *hdr = p;
2858 struct mpi_ecfg_hdr *ehdr = p;
2859 char *kva;
2860 int page_length;
2861 int rv = 0;
2862
2863 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page address: %d read: %d type: %x\n",
2864 DEVNAME(sc), address, read, hdr->page_type);
2865
2866 page_length = ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0))) ?
2867 lemtoh16(&ehdr->ext_page_length)((__uint16_t)(*(__uint16_t *)(&ehdr->ext_page_length))
)
: hdr->page_length;
2868
2869 if (len > MPI_REQUEST_SIZE512 - sizeof(struct mpi_msg_config_request) ||
2870 len < page_length * 4)
2871 return (1);
2872
2873 ccb = scsi_io_get(&sc->sc_iopool,
2874 ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1))) ? SCSI_NOSLEEP0x00001 : 0);
2875 if (ccb == NULL((void *)0)) {
2876 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_page ccb_get\n", DEVNAME(sc));
2877 return (1);
2878 }
2879
2880 cq = ccb->ccb_cmd;
2881
2882 cq->function = MPI_FUNCTION_CONFIG(0x04);
2883
2884 cq->action = (read ? MPI_CONFIG_REQ_ACTION_PAGE_READ_CURRENT(0x01) :
2885 MPI_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT(0x02));
2886
2887 if (ISSET(flags, MPI_PG_EXTENDED)((flags) & ((1<<0)))) {
2888 cq->config_header.page_version = ehdr->page_version;
2889 cq->config_header.page_number = ehdr->page_number;
2890 cq->config_header.page_type = ehdr->page_type;
2891 cq->ext_page_len = ehdr->ext_page_length;
2892 cq->ext_page_type = ehdr->ext_page_type;
2893 } else
2894 cq->config_header = *hdr;
2895 cq->config_header.page_type &= MPI_CONFIG_REQ_PAGE_TYPE_MASK(0x0f);
2896 htolem32(&cq->page_address, address)(*(__uint32_t *)(&cq->page_address) = ((__uint32_t)(address
)))
;
2897 htolem32(&cq->page_buffer.sg_hdr, MPI_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
2898 MPI_SGE_FL_LAST | MPI_SGE_FL_EOB | MPI_SGE_FL_EOL |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
2899 (page_length * 4) |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
2900 (read ? MPI_SGE_FL_DIR_IN : MPI_SGE_FL_DIR_OUT))(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (page_length * 4) | (read ? (0x0<<26) : (0x1<<
26)))))
;
2901
2902 /* bounce the page via the request space to avoid more bus_dma games */
2903 mpi_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2904 sizeof(struct mpi_msg_config_request));
2905
2906 kva = ccb->ccb_cmd;
2907 kva += sizeof(struct mpi_msg_config_request);
2908 if (!read)
2909 memcpy(kva, page, len)__builtin_memcpy((kva), (page), (len));
2910
2911 ccb->ccb_done = mpi_empty_done;
2912 if (ISSET(flags, MPI_PG_POLL)((flags) & ((1<<1)))) {
2913 if (mpi_poll(sc, ccb, 50000) != 0) {
2914 DNPRINTF(MPI_D_MISC, "%s: mpi_cfg_header poll\n",
2915 DEVNAME(sc));
2916 return (1);
2917 }
2918 } else
2919 mpi_wait(sc, ccb);
2920
2921 if (ccb->ccb_rcb == NULL((void *)0)) {
2922 scsi_io_put(&sc->sc_iopool, ccb);
2923 return (1);
2924 }
2925 cp = ccb->ccb_rcb->rcb_reply;
2926
2927 DNPRINTF(MPI_D_MISC, "%s: action: 0x%02x msg_length: %d function: "
2928 "0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length, cp->function);
2929 DNPRINTF(MPI_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2930 "msg_flags: 0x%02x\n", DEVNAME(sc),
2931 letoh16(cp->ext_page_length), cp->ext_page_type,
2932 cp->msg_flags);
2933 DNPRINTF(MPI_D_MISC, "%s: msg_context: 0x%08x\n", DEVNAME(sc),
2934 letoh32(cp->msg_context));
2935 DNPRINTF(MPI_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2936 letoh16(cp->ioc_status));
2937 DNPRINTF(MPI_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2938 letoh32(cp->ioc_loginfo));
2939 DNPRINTF(MPI_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2940 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2941 cp->config_header.page_version,
2942 cp->config_header.page_length,
2943 cp->config_header.page_number,
2944 cp->config_header.page_type);
2945
2946 if (lemtoh16(&cp->ioc_status)((__uint16_t)(*(__uint16_t *)(&cp->ioc_status))) != MPI_IOCSTATUS_SUCCESS(0x0000))
2947 rv = 1;
2948 else if (read)
2949 memcpy(page, kva, len)__builtin_memcpy((page), (kva), (len));
2950
2951 mpi_push_reply(sc, ccb->ccb_rcb);
2952 scsi_io_put(&sc->sc_iopool, ccb);
2953
2954 return (rv);
2955}
2956
2957int
2958mpi_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2959{
2960 struct mpi_softc *sc = link->bus->sb_adapter_softc;
2961
2962 DNPRINTF(MPI_D_IOCTL, "%s: mpi_scsi_ioctl\n", DEVNAME(sc));
2963
2964 switch (cmd) {
2965 case DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((117)))
:
2966 case DIOCSCACHE((unsigned long)0x80000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((118)))
:
2967 if (ISSET(link->flags, SDEV_VIRTUAL)((link->flags) & (0x0800))) {
2968 return (mpi_ioctl_cache(link, cmd,
2969 (struct dk_cache *)addr));
2970 }
2971 break;
2972
2973 default:
2974 if (sc->sc_ioctl)
2975 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2976
2977 break;
2978 }
2979
2980 return (ENOTTY25);
2981}
2982
2983int
2984mpi_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2985{
2986 struct mpi_softc *sc = link->bus->sb_adapter_softc;
2987 struct mpi_ccb *ccb;
2988 int len, rv;
2989 struct mpi_cfg_hdr hdr;
2990 struct mpi_cfg_raid_vol_pg0 *rpg0;
2991 int enabled;
2992 struct mpi_msg_raid_action_request *req;
2993 struct mpi_msg_raid_action_reply *rep;
2994 struct mpi_raid_settings settings;
2995
2996 rv = mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL(0x08), 0,
2997 link->target, MPI_PG_POLL(1<<1), &hdr);
2998 if (rv != 0)
2999 return (EIO5);
3000
3001 len = sizeof(*rpg0) + sc->sc_vol_page->max_physdisks *
3002 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3003 rpg0 = malloc(len, M_TEMP127, M_NOWAIT0x0002);
3004 if (rpg0 == NULL((void *)0))
3005 return (ENOMEM12);
3006
3007 if (mpi_req_cfg_page(sc, link->target, MPI_PG_POLL(1<<1), &hdr, 1,
3008 rpg0, len) != 0) {
3009 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3010 DEVNAME(sc));
3011 rv = EIO5;
3012 goto done;
3013 }
3014
3015 enabled = ISSET(lemtoh16(&rpg0->settings.volume_settings),((((__uint16_t)(*(__uint16_t *)(&rpg0->settings.volume_settings
)))) & ((1<<0)))
3016 MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN)((((__uint16_t)(*(__uint16_t *)(&rpg0->settings.volume_settings
)))) & ((1<<0)))
? 1 : 0;
3017
3018 if (cmd == DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((117)))
) {
3019 dc->wrcache = enabled;
3020 dc->rdcache = 0;
3021 goto done;
3022 } /* else DIOCSCACHE */
3023
3024 if (dc->rdcache) {
3025 rv = EOPNOTSUPP45;
3026 goto done;
3027 }
3028
3029 if (((dc->wrcache) ? 1 : 0) == enabled)
3030 goto done;
3031
3032 settings = rpg0->settings;
3033 if (dc->wrcache) {
3034 SET(settings.volume_settings,((settings.volume_settings) |= (((__uint16_t)((1<<0))))
)
3035 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN))((settings.volume_settings) |= (((__uint16_t)((1<<0))))
)
;
3036 } else {
3037 CLR(settings.volume_settings,((settings.volume_settings) &= ~(((__uint16_t)((1<<
0)))))
3038 htole16(MPI_CFG_RAID_VOL_0_SETTINGS_WRITE_CACHE_EN))((settings.volume_settings) &= ~(((__uint16_t)((1<<
0)))))
;
3039 }
3040
3041 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
3042 if (ccb == NULL((void *)0)) {
3043 rv = ENOMEM12;
3044 goto done;
3045 }
3046
3047 req = ccb->ccb_cmd;
3048 req->function = MPI_FUNCTION_RAID_ACTION(0x15);
3049 req->action = MPI_MSG_RAID_ACTION_CH_VOL_SETTINGS(0x08);
3050 req->vol_id = rpg0->volume_id;
3051 req->vol_bus = rpg0->volume_bus;
3052
3053 memcpy(&req->data_word, &settings, sizeof(req->data_word))__builtin_memcpy((&req->data_word), (&settings), (
sizeof(req->data_word)))
;
3054 ccb->ccb_done = mpi_empty_done;
3055 if (mpi_poll(sc, ccb, 50000) != 0) {
3056 rv = EIO5;
3057 goto done;
3058 }
3059
3060 rep = (struct mpi_msg_raid_action_reply *)ccb->ccb_rcb;
3061 if (rep == NULL((void *)0))
3062 panic("%s: raid volume settings change failed", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3063
3064 switch (lemtoh16(&rep->action_status)((__uint16_t)(*(__uint16_t *)(&rep->action_status)))) {
3065 case MPI_RAID_ACTION_STATUS_OK(0x0000):
3066 rv = 0;
3067 break;
3068 default:
3069 rv = EIO5;
3070 break;
3071 }
3072
3073 mpi_push_reply(sc, ccb->ccb_rcb);
3074 scsi_io_put(&sc->sc_iopool, ccb);
3075
3076done:
3077 free(rpg0, M_TEMP127, len);
3078 return (rv);
3079}
3080
3081#if NBIO1 > 0
3082int
3083mpi_bio_get_pg0_raid(struct mpi_softc *sc, int id)
3084{
3085 int len, rv = EINVAL22;
3086 u_int32_t address;
3087 struct mpi_cfg_hdr hdr;
3088 struct mpi_cfg_raid_vol_pg0 *rpg0;
3089
3090 /* get IOC page 2 */
3091 if (mpi_req_cfg_page(sc, 0, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,
3092 sc->sc_cfg_hdr.page_length * 4) != 0) {
3093 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid unable to "
3094 "fetch IOC page 2\n", DEVNAME(sc));
3095 goto done;
3096 }
3097
3098 /* XXX return something else than EINVAL to indicate within hs range */
3099 if (id > sc->sc_vol_page->active_vols) {
3100 DNPRINTF(MPI_D_IOCTL, "%s: mpi_bio_get_pg0_raid invalid vol "
3101 "id: %d\n", DEVNAME(sc), id);
3102 goto done;
3103 }
3104
3105 /* replace current buffer with new one */
3106 len = sizeof *rpg0 + sc->sc_vol_page->max_physdisks *
3107 sizeof(struct mpi_cfg_raid_vol_pg0_physdisk);
3108 rpg0 = malloc(len, M_DEVBUF2, M_WAITOK0x0001 | M_CANFAIL0x0004);
3109 if (rpg0 == NULL((void *)0)) {
3110 printf("%s: can't get memory for RAID page 0, "
3111 "bio disabled\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
3112 goto done;
3113 }
3114 if (sc->sc_rpg0)
3115 free(sc->sc_rpg0, M_DEVBUF2, 0);
3116 sc->sc_rpg0 = rpg0;
3117
3118 /* get raid vol page 0 */
3119 address = sc->sc_vol_list[id].vol_id |
3120 (sc->sc_vol_list[id].vol_bus << 8);
3121 if (mpi_req_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_VOL(0x08), 0,
3122 address, 0, &hdr) != 0)
3123 goto done;
3124 if (mpi_req_cfg_page(sc, address, 0, &hdr, 1, rpg0, len)) {
3125 DNPRINTF(MPI_D_RAID, "%s: can't get RAID vol cfg page 0\n",
3126 DEVNAME(sc));
3127 goto done;
3128 }
3129
3130 rv = 0;
3131done:
3132 return (rv);
3133}
3134
3135int
3136mpi_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3137{
3138 struct mpi_softc *sc = (struct mpi_softc *)dev;
3139 int error = 0;
3140
3141 DNPRINTF(MPI_D_IOCTL, "%s: mpi_ioctl ", DEVNAME(sc));
3142
3143 /* make sure we have bio enabled */
3144 if (sc->sc_ioctl != mpi_ioctl)
3145 return (EINVAL22);
3146
3147 rw_enter_write(&sc->sc_lock);
3148
3149 switch (cmd) {
3150 case BIOCINQ(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_inq) & 0x1fff) << 16) | ((('B')) <<
8) | ((32)))
:
3151 DNPRINTF(MPI_D_IOCTL, "inq\n");
3152 error = mpi_ioctl_inq(sc, (struct bioc_inq *)addr);
3153 break;
3154
3155 case BIOCVOL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_vol) & 0x1fff) << 16) | ((('B')) <<
8) | ((34)))
:
3156 DNPRINTF(MPI_D_IOCTL, "vol\n");
3157 error = mpi_ioctl_vol(sc, (struct bioc_vol *)addr);
3158 break;
3159
3160 case BIOCDISK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_disk) & 0x1fff) << 16) | ((('B')) <<
8) | ((33)))
:
3161 DNPRINTF(MPI_D_IOCTL, "disk\n");
3162 error = mpi_ioctl_disk(sc, (struct bioc_disk *)addr);
3163 break;
3164
3165 case BIOCALARM(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_alarm) & 0x1fff) << 16) | ((('B')) <<
8) | ((35)))
:
3166 DNPRINTF(MPI_D_IOCTL, "alarm\n");
3167 break;
3168
3169 case BIOCBLINK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_blink) & 0x1fff) << 16) | ((('B')) <<
8) | ((36)))
:
3170 DNPRINTF(MPI_D_IOCTL, "blink\n");
3171 break;
3172
3173 case BIOCSETSTATE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_setstate) & 0x1fff) << 16) | ((('B')) <<
8) | ((37)))
:
3174 DNPRINTF(MPI_D_IOCTL, "setstate\n");
3175 error = mpi_ioctl_setstate(sc, (struct bioc_setstate *)addr);
3176 break;
3177
3178 default:
3179 DNPRINTF(MPI_D_IOCTL, " invalid ioctl\n");
3180 error = ENOTTY25;
3181 }
3182
3183 rw_exit_write(&sc->sc_lock);
3184
3185 return (error);
3186}
3187
3188int
3189mpi_ioctl_inq(struct mpi_softc *sc, struct bioc_inq *bi)
3190{
3191 if (!(sc->sc_flags & MPI_F_RAID(1<<1))) {
3192 bi->bi_novol = 0;
3193 bi->bi_nodisk = 0;
3194 }
3195
3196 if (mpi_cfg_page(sc, 0, &sc->sc_cfg_hdr, 1, sc->sc_vol_page,mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr
), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length *
4))
3197 sc->sc_cfg_hdr.page_length * 4)mpi_req_cfg_page((sc), (0), (1<<1), (&sc->sc_cfg_hdr
), (1), (sc->sc_vol_page), (sc->sc_cfg_hdr.page_length *
4))
!= 0) {
3198 DNPRINTF(MPI_D_IOCTL, "%s: mpi_get_raid unable to fetch IOC "
3199 "page 2\n", DEVNAME(sc));
3200 return (EINVAL22);
3201 }
3202
3203 DNPRINTF(MPI_D_IOCTL, "%s: active_vols: %d max_vols: %d "
3204 "active_physdisks: %d max_physdisks: %d\n", DEVNAME(sc),
3205 sc->sc_vol_page->active_vols, sc->sc_vol_page->max_vols,
3206 sc->sc_vol_page->active_physdisks, sc->sc_vol_page->max_physdisks);
3207
3208 bi->bi_novol = sc->sc_vol_page->active_vols;
3209 bi->bi_nodisk = sc->sc_vol_page->active_physdisks;
3210 strlcpy(bi->bi_dev, DEVNAME(sc)((sc)->sc_dev.dv_xname), sizeof(bi->bi_dev));
3211
3212 return (0);
3213}
3214
3215int
3216mpi_ioctl_vol(struct mpi_softc *sc, struct bioc_vol *bv)
3217{
3218 int i, vol, id, rv = EINVAL22;
3219 struct device *dev;
3220 struct scsi_link *link;
3221 struct mpi_cfg_raid_vol_pg0 *rpg0;
3222 char *vendp;
3223
3224 id = bv->bv_volid;
3225 if (mpi_bio_get_pg0_raid(sc, id))
3226 goto done;
3227
3228 if (id > sc->sc_vol_page->active_vols)
3229 return (EINVAL22); /* XXX deal with hot spares */
3230
3231 rpg0 = sc->sc_rpg0;
3232 if (rpg0 == NULL((void *)0))
3233 goto done;
3234
3235 /* determine status */
3236 switch (rpg0->volume_state) {
3237 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL(0x00):
3238 bv->bv_status = BIOC_SVONLINE0x00;
3239 break;
3240 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED(0x01):
3241 bv->bv_status = BIOC_SVDEGRADED0x02;
3242 break;
3243 case MPI_CFG_RAID_VOL_0_STATE_FAILED(0x02):
3244 case MPI_CFG_RAID_VOL_0_STATE_MISSING(0x03):
3245 bv->bv_status = BIOC_SVOFFLINE0x01;
3246 break;
3247 default:
3248 bv->bv_status = BIOC_SVINVALID0xff;
3249 }
3250
3251 /* override status if scrubbing or something */
3252 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING(1<<2))
3253 bv->bv_status = BIOC_SVREBUILD0x05;
3254
3255 bv->bv_size = (uint64_t)lemtoh32(&rpg0->max_lba)((__uint32_t)(*(__uint32_t *)(&rpg0->max_lba))) * 512;
3256
3257 switch (sc->sc_vol_list[id].vol_type) {
3258 case MPI_CFG_RAID_TYPE_RAID_IS(0x00):
3259 bv->bv_level = 0;
3260 break;
3261 case MPI_CFG_RAID_TYPE_RAID_IME(0x01):
3262 case MPI_CFG_RAID_TYPE_RAID_IM(0x02):
3263 bv->bv_level = 1;
3264 break;
3265 case MPI_CFG_RAID_TYPE_RAID_5(0x03):
3266 bv->bv_level = 5;
3267 break;
3268 case MPI_CFG_RAID_TYPE_RAID_6(0x04):
3269 bv->bv_level = 6;
3270 break;
3271 case MPI_CFG_RAID_TYPE_RAID_10(0x05):
3272 bv->bv_level = 10;
3273 break;
3274 case MPI_CFG_RAID_TYPE_RAID_50(0x06):
3275 bv->bv_level = 50;
3276 break;
3277 default:
3278 bv->bv_level = -1;
3279 }
3280
3281 bv->bv_nodisk = rpg0->num_phys_disks;
3282
3283 for (i = 0, vol = -1; i < sc->sc_buswidth; i++) {
3284 link = scsi_get_link(sc->sc_scsibus, i, 0);
3285 if (link == NULL((void *)0))
3286 continue;
3287
3288 /* skip if not a virtual disk */
3289 if (!(link->flags & SDEV_VIRTUAL0x0800))
3290 continue;
3291
3292 vol++;
3293 /* are we it? */
3294 if (vol == bv->bv_volid) {
3295 dev = link->device_softc;
3296 vendp = link->inqdata.vendor;
3297 memcpy(bv->bv_vendor, vendp, sizeof bv->bv_vendor)__builtin_memcpy((bv->bv_vendor), (vendp), (sizeof bv->
bv_vendor))
;
3298 bv->bv_vendor[sizeof(bv->bv_vendor) - 1] = '\0';
3299 strlcpy(bv->bv_dev, dev->dv_xname, sizeof bv->bv_dev);
3300 break;
3301 }
3302 }
3303 rv = 0;
3304done:
3305 return (rv);
3306}
3307
3308int
3309mpi_ioctl_disk(struct mpi_softc *sc, struct bioc_disk *bd)
3310{
3311 int pdid, id, rv = EINVAL22;
3312 u_int32_t address;
3313 struct mpi_cfg_hdr hdr;
3314 struct mpi_cfg_raid_vol_pg0 *rpg0;
3315 struct mpi_cfg_raid_vol_pg0_physdisk *physdisk;
3316 struct mpi_cfg_raid_physdisk_pg0 pdpg0;
3317
3318 id = bd->bd_volid;
3319 if (mpi_bio_get_pg0_raid(sc, id))
3320 goto done;
3321
3322 if (id > sc->sc_vol_page->active_vols)
3323 return (EINVAL22); /* XXX deal with hot spares */
3324
3325 rpg0 = sc->sc_rpg0;
3326 if (rpg0 == NULL((void *)0))
3327 goto done;
3328
3329 pdid = bd->bd_diskid;
3330 if (pdid > rpg0->num_phys_disks)
3331 goto done;
3332 physdisk = (struct mpi_cfg_raid_vol_pg0_physdisk *)(rpg0 + 1);
3333 physdisk += pdid;
3334
3335 /* get raid phys disk page 0 */
3336 address = physdisk->phys_disk_num;
3337 if (mpi_cfg_header(sc, MPI_CONFIG_REQ_PAGE_TYPE_RAID_PD, 0, address,mpi_req_cfg_header((sc), ((0x0A)), (0), (address), (1<<
1), (&hdr))
3338 &hdr)mpi_req_cfg_header((sc), ((0x0A)), (0), (address), (1<<
1), (&hdr))
!= 0)
3339 goto done;
3340 if (mpi_cfg_page(sc, address, &hdr, 1, &pdpg0, sizeof pdpg0)mpi_req_cfg_page((sc), (address), (1<<1), (&hdr), (
1), (&pdpg0), (sizeof pdpg0))
) {
3341 bd->bd_status = BIOC_SDFAILED0x02;
3342 return (0);
3343 }
3344 bd->bd_channel = pdpg0.phys_disk_bus;
3345 bd->bd_target = pdpg0.phys_disk_id;
3346 bd->bd_lun = 0;
3347 bd->bd_size = (uint64_t)lemtoh32(&pdpg0.max_lba)((__uint32_t)(*(__uint32_t *)(&pdpg0.max_lba))) * 512;
3348 strlcpy(bd->bd_vendor, (char *)pdpg0.vendor_id, sizeof(bd->bd_vendor));
3349
3350 switch (pdpg0.phys_disk_state) {
3351 case MPI_CFG_RAID_PHYDISK_0_STATE_ONLINE(0x00):
3352 bd->bd_status = BIOC_SDONLINE0x00;
3353 break;
3354 case MPI_CFG_RAID_PHYDISK_0_STATE_MISSING(0x01):
3355 case MPI_CFG_RAID_PHYDISK_0_STATE_FAILED(0x03):
3356 bd->bd_status = BIOC_SDFAILED0x02;
3357 break;
3358 case MPI_CFG_RAID_PHYDISK_0_STATE_HOSTFAIL(0x06):
3359 case MPI_CFG_RAID_PHYDISK_0_STATE_OTHER(0xff):
3360 case MPI_CFG_RAID_PHYDISK_0_STATE_OFFLINE(0x05):
3361 bd->bd_status = BIOC_SDOFFLINE0x01;
3362 break;
3363 case MPI_CFG_RAID_PHYDISK_0_STATE_INIT(0x04):
3364 bd->bd_status = BIOC_SDSCRUB0x06;
3365 break;
3366 case MPI_CFG_RAID_PHYDISK_0_STATE_INCOMPAT(0x02):
3367 default:
3368 bd->bd_status = BIOC_SDINVALID0xff;
3369 break;
3370 }
3371
3372 /* XXX figure this out */
3373 /* bd_serial[32]; */
3374 /* bd_procdev[16]; */
3375
3376 rv = 0;
3377done:
3378 return (rv);
3379}
3380
3381int
3382mpi_ioctl_setstate(struct mpi_softc *sc, struct bioc_setstate *bs)
3383{
3384 return (ENOTTY25);
3385}
3386
3387#ifndef SMALL_KERNEL
3388int
3389mpi_create_sensors(struct mpi_softc *sc)
3390{
3391 struct device *dev;
3392 struct scsi_link *link;
3393 int i, vol, nsensors;
3394
3395 /* count volumes */
3396 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3397 link = scsi_get_link(sc->sc_scsibus, i, 0);
3398 if (link == NULL((void *)0))
3399 continue;
3400 /* skip if not a virtual disk */
3401 if (!(link->flags & SDEV_VIRTUAL0x0800))
3402 continue;
3403
3404 vol++;
3405 }
3406 if (vol == 0)
3407 return (0);
3408
3409 sc->sc_sensors = mallocarray(vol, sizeof(struct ksensor),
3410 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
3411 if (sc->sc_sensors == NULL((void *)0))
3412 return (1);
3413 nsensors = vol;
3414
3415 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc)((sc)->sc_dev.dv_xname),
3416 sizeof(sc->sc_sensordev.xname));
3417
3418 for (i = 0, vol= 0; i < sc->sc_buswidth; i++) {
3419 link = scsi_get_link(sc->sc_scsibus, i, 0);
3420 if (link == NULL((void *)0))
3421 continue;
3422 /* skip if not a virtual disk */
3423 if (!(link->flags & SDEV_VIRTUAL0x0800))
3424 continue;
3425
3426 dev = link->device_softc;
3427 strlcpy(sc->sc_sensors[vol].desc, dev->dv_xname,
3428 sizeof(sc->sc_sensors[vol].desc));
3429 sc->sc_sensors[vol].type = SENSOR_DRIVE;
3430 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3431 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[vol]);
3432
3433 vol++;
3434 }
3435
3436 if (sensor_task_register(sc, mpi_refresh_sensors, 10) == NULL((void *)0))
3437 goto bad;
3438
3439 sensordev_install(&sc->sc_sensordev);
3440
3441 return (0);
3442
3443bad:
3444 free(sc->sc_sensors, M_DEVBUF2, nsensors * sizeof(struct ksensor));
3445 return (1);
3446}
3447
3448void
3449mpi_refresh_sensors(void *arg)
3450{
3451 int i, vol;
3452 struct scsi_link *link;
3453 struct mpi_softc *sc = arg;
3454 struct mpi_cfg_raid_vol_pg0 *rpg0;
3455
3456 rw_enter_write(&sc->sc_lock);
3457
3458 for (i = 0, vol = 0; i < sc->sc_buswidth; i++) {
3459 link = scsi_get_link(sc->sc_scsibus, i, 0);
3460 if (link == NULL((void *)0))
3461 continue;
3462 /* skip if not a virtual disk */
3463 if (!(link->flags & SDEV_VIRTUAL0x0800))
3464 continue;
3465
3466 if (mpi_bio_get_pg0_raid(sc, vol))
3467 continue;
3468
3469 rpg0 = sc->sc_rpg0;
3470 if (rpg0 == NULL((void *)0))
3471 goto done;
3472
3473 /* determine status */
3474 switch (rpg0->volume_state) {
3475 case MPI_CFG_RAID_VOL_0_STATE_OPTIMAL(0x00):
3476 sc->sc_sensors[vol].value = SENSOR_DRIVE_ONLINE4;
3477 sc->sc_sensors[vol].status = SENSOR_S_OK;
3478 break;
3479 case MPI_CFG_RAID_VOL_0_STATE_DEGRADED(0x01):
3480 sc->sc_sensors[vol].value = SENSOR_DRIVE_PFAIL10;
3481 sc->sc_sensors[vol].status = SENSOR_S_WARN;
3482 break;
3483 case MPI_CFG_RAID_VOL_0_STATE_FAILED(0x02):
3484 case MPI_CFG_RAID_VOL_0_STATE_MISSING(0x03):
3485 sc->sc_sensors[vol].value = SENSOR_DRIVE_FAIL9;
3486 sc->sc_sensors[vol].status = SENSOR_S_CRIT;
3487 break;
3488 default:
3489 sc->sc_sensors[vol].value = 0; /* unknown */
3490 sc->sc_sensors[vol].status = SENSOR_S_UNKNOWN;
3491 }
3492
3493 /* override status if scrubbing or something */
3494 if (rpg0->volume_status & MPI_CFG_RAID_VOL_0_STATUS_RESYNCING(1<<2)) {
3495 sc->sc_sensors[vol].value = SENSOR_DRIVE_REBUILD7;
3496 sc->sc_sensors[vol].status = SENSOR_S_WARN;
3497 }
3498
3499 vol++;
3500 }
3501done:
3502 rw_exit_write(&sc->sc_lock);
3503}
3504#endif /* SMALL_KERNEL */
3505#endif /* NBIO > 0 */