Bug Summary

File: dev/pci/mpii.c
Warning: line 807, column 16
Access to field 'sg_flags' results in a dereference of an undefined pointer value (loaded from variable 'sge').

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name mpii.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/mpii.c
1/* $OpenBSD: mpii.c,v 1.141 2022/01/09 05:42:56 jsg Exp $ */
2/*
3 * Copyright (c) 2010, 2012 Mike Belopuhov
4 * Copyright (c) 2009 James Giannoules
5 * Copyright (c) 2005 - 2010 David Gwynne <dlg@openbsd.org>
6 * Copyright (c) 2005 - 2010 Marco Peereboom <marco@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include "bio.h"
22
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/device.h>
26#include <sys/ioctl.h>
27#include <sys/malloc.h>
28#include <sys/kernel.h>
29#include <sys/rwlock.h>
30#include <sys/sensors.h>
31#include <sys/dkio.h>
32#include <sys/tree.h>
33#include <sys/task.h>
34
35#include <machine/bus.h>
36
37#include <dev/pci/pcireg.h>
38#include <dev/pci/pcivar.h>
39#include <dev/pci/pcidevs.h>
40
41#include <scsi/scsi_all.h>
42#include <scsi/scsiconf.h>
43
44#include <dev/biovar.h>
45
46#include <dev/pci/mpiireg.h>
47
/* #define MPII_DEBUG */
#ifdef MPII_DEBUG
/* Debug printf helpers: DPRINTF prints whenever debugging is enabled,
 * DNPRINTF only when the given category bit is set in mpii_debug. */
#define DPRINTF(x...)		do { if (mpii_debug) printf(x); } while(0)
#define DNPRINTF(n,x...)	do { if (mpii_debug & (n)) printf(x); } while(0)
/* debug category bits */
#define	MPII_D_CMD		(0x0001)
#define	MPII_D_INTR		(0x0002)
#define	MPII_D_MISC		(0x0004)
#define	MPII_D_DMA		(0x0008)
#define	MPII_D_IOCTL		(0x0010)
#define	MPII_D_RW		(0x0020)
#define	MPII_D_MEM		(0x0040)
#define	MPII_D_CCB		(0x0080)
#define	MPII_D_PPR		(0x0100)
#define	MPII_D_RAID		(0x0200)
#define	MPII_D_EVT		(0x0400)
#define	MPII_D_CFG		(0x0800)
#define	MPII_D_MAP		(0x1000)

/* all categories enabled by default when MPII_DEBUG is defined */
u_int32_t  mpii_debug = 0
		| MPII_D_CMD
		| MPII_D_INTR
		| MPII_D_MISC
		| MPII_D_DMA
		| MPII_D_IOCTL
		| MPII_D_RW
		| MPII_D_MEM
		| MPII_D_CCB
		| MPII_D_PPR
		| MPII_D_RAID
		| MPII_D_EVT
		| MPII_D_CFG
		| MPII_D_MAP
	;
#else
#define DPRINTF(x...)
#define DNPRINTF(n,x...)
#endif
85
/* size of one request frame and number of request credits (object-like
 * constants; the extracted report had fused the expansion text into the
 * definitions, turning them into invalid function-like macros) */
#define MPII_REQUEST_SIZE	(512)
#define MPII_REQUEST_CREDIT	(128)
88
89struct mpii_dmamem {
90 bus_dmamap_t mdm_map;
91 bus_dma_segment_t mdm_seg;
92 size_t mdm_size;
93 caddr_t mdm_kva;
94};
95#define MPII_DMA_MAP(_mdm)((_mdm)->mdm_map) ((_mdm)->mdm_map)
96#define MPII_DMA_DVA(_mdm)((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
97#define MPII_DMA_KVA(_mdm)((void *)(_mdm)->mdm_kva) ((void *)(_mdm)->mdm_kva)
98
99struct mpii_softc;
100
101struct mpii_rcb {
102 SIMPLEQ_ENTRY(mpii_rcb)struct { struct mpii_rcb *sqe_next; } rcb_link;
103 void *rcb_reply;
104 u_int32_t rcb_reply_dva;
105};
106
107SIMPLEQ_HEAD(mpii_rcb_list, mpii_rcb)struct mpii_rcb_list { struct mpii_rcb *sqh_first; struct mpii_rcb
**sqh_last; }
;
108
109struct mpii_device {
110 int flags;
111#define MPII_DF_ATTACH(0x0001) (0x0001)
112#define MPII_DF_DETACH(0x0002) (0x0002)
113#define MPII_DF_HIDDEN(0x0004) (0x0004)
114#define MPII_DF_UNUSED(0x0008) (0x0008)
115#define MPII_DF_VOLUME(0x0010) (0x0010)
116#define MPII_DF_VOLUME_DISK(0x0020) (0x0020)
117#define MPII_DF_HOT_SPARE(0x0040) (0x0040)
118 short slot;
119 short percent;
120 u_int16_t dev_handle;
121 u_int16_t enclosure;
122 u_int16_t expander;
123 u_int8_t phy_num;
124 u_int8_t physical_port;
125};
126
127struct mpii_ccb {
128 struct mpii_softc *ccb_sc;
129
130 void * ccb_cookie;
131 bus_dmamap_t ccb_dmamap;
132
133 bus_addr_t ccb_offset;
134 void *ccb_cmd;
135 bus_addr_t ccb_cmd_dva;
136 u_int16_t ccb_dev_handle;
137 u_int16_t ccb_smid;
138
139 volatile enum {
140 MPII_CCB_FREE,
141 MPII_CCB_READY,
142 MPII_CCB_QUEUED,
143 MPII_CCB_TIMEOUT
144 } ccb_state;
145
146 void (*ccb_done)(struct mpii_ccb *);
147 struct mpii_rcb *ccb_rcb;
148
149 SIMPLEQ_ENTRY(mpii_ccb)struct { struct mpii_ccb *sqe_next; } ccb_link;
150};
151
152SIMPLEQ_HEAD(mpii_ccb_list, mpii_ccb)struct mpii_ccb_list { struct mpii_ccb *sqh_first; struct mpii_ccb
**sqh_last; }
;
153
154struct mpii_softc {
155 struct device sc_dev;
156
157 pci_chipset_tag_t sc_pc;
158 pcitag_t sc_tag;
159
160 void *sc_ih;
161
162 int sc_flags;
163#define MPII_F_RAID(1<<1) (1<<1)
164#define MPII_F_SAS3(1<<2) (1<<2)
165
166 struct scsibus_softc *sc_scsibus;
167 unsigned int sc_pending;
168
169 struct mpii_device **sc_devs;
170
171 bus_space_tag_t sc_iot;
172 bus_space_handle_t sc_ioh;
173 bus_size_t sc_ios;
174 bus_dma_tag_t sc_dmat;
175
176 struct mutex sc_req_mtx;
177 struct mutex sc_rep_mtx;
178
179 ushort sc_reply_size;
180 ushort sc_request_size;
181
182 ushort sc_max_cmds;
183 ushort sc_num_reply_frames;
184 u_int sc_reply_free_qdepth;
185 u_int sc_reply_post_qdepth;
186
187 ushort sc_chain_sge;
188 ushort sc_max_sgl;
189 int sc_max_chain;
190
191 u_int8_t sc_ioc_event_replay;
192
193 u_int8_t sc_porttype;
194 u_int8_t sc_max_volumes;
195 u_int16_t sc_max_devices;
196 u_int16_t sc_vd_count;
197 u_int16_t sc_vd_id_low;
198 u_int16_t sc_pd_id_start;
199 int sc_ioc_number;
200 u_int8_t sc_vf_id;
201
202 struct mpii_ccb *sc_ccbs;
203 struct mpii_ccb_list sc_ccb_free;
204 struct mutex sc_ccb_free_mtx;
205
206 struct mutex sc_ccb_mtx;
207 /*
208 * this protects the ccb state and list entry
209 * between mpii_scsi_cmd and scsidone.
210 */
211
212 struct mpii_ccb_list sc_ccb_tmos;
213 struct scsi_iohandler sc_ccb_tmo_handler;
214
215 struct scsi_iopool sc_iopool;
216
217 struct mpii_dmamem *sc_requests;
218
219 struct mpii_dmamem *sc_replies;
220 struct mpii_rcb *sc_rcbs;
221
222 struct mpii_dmamem *sc_reply_postq;
223 struct mpii_reply_descr *sc_reply_postq_kva;
224 u_int sc_reply_post_host_index;
225
226 struct mpii_dmamem *sc_reply_freeq;
227 u_int sc_reply_free_host_index;
228
229 struct mpii_rcb_list sc_evt_sas_queue;
230 struct mutex sc_evt_sas_mtx;
231 struct task sc_evt_sas_task;
232
233 struct mpii_rcb_list sc_evt_ack_queue;
234 struct mutex sc_evt_ack_mtx;
235 struct scsi_iohandler sc_evt_ack_handler;
236
237 /* scsi ioctl from sd device */
238 int (*sc_ioctl)(struct device *, u_long, caddr_t);
239
240 int sc_nsensors;
241 struct ksensor *sc_sensors;
242 struct ksensordev sc_sensordev;
243};
244
245int mpii_match(struct device *, void *, void *);
246void mpii_attach(struct device *, struct device *, void *);
247int mpii_detach(struct device *, int);
248
249int mpii_intr(void *);
250
251struct cfattach mpii_ca = {
252 sizeof(struct mpii_softc),
253 mpii_match,
254 mpii_attach,
255 mpii_detach
256};
257
258struct cfdriver mpii_cd = {
259 NULL((void *)0),
260 "mpii",
261 DV_DULL
262};
263
264void mpii_scsi_cmd(struct scsi_xfer *);
265void mpii_scsi_cmd_done(struct mpii_ccb *);
266int mpii_scsi_probe(struct scsi_link *);
267int mpii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
268
269struct scsi_adapter mpii_switch = {
270 mpii_scsi_cmd, NULL((void *)0), mpii_scsi_probe, NULL((void *)0), mpii_scsi_ioctl
271};
272
273struct mpii_dmamem *
274 mpii_dmamem_alloc(struct mpii_softc *, size_t);
275void mpii_dmamem_free(struct mpii_softc *,
276 struct mpii_dmamem *);
277int mpii_alloc_ccbs(struct mpii_softc *);
278void * mpii_get_ccb(void *);
279void mpii_put_ccb(void *, void *);
280int mpii_alloc_replies(struct mpii_softc *);
281int mpii_alloc_queues(struct mpii_softc *);
282void mpii_push_reply(struct mpii_softc *, struct mpii_rcb *);
283void mpii_push_replies(struct mpii_softc *);
284
285void mpii_scsi_cmd_tmo(void *);
286void mpii_scsi_cmd_tmo_handler(void *, void *);
287void mpii_scsi_cmd_tmo_done(struct mpii_ccb *);
288
289int mpii_insert_dev(struct mpii_softc *, struct mpii_device *);
290int mpii_remove_dev(struct mpii_softc *, struct mpii_device *);
291struct mpii_device *
292 mpii_find_dev(struct mpii_softc *, u_int16_t);
293
294void mpii_start(struct mpii_softc *, struct mpii_ccb *);
295int mpii_poll(struct mpii_softc *, struct mpii_ccb *);
296void mpii_poll_done(struct mpii_ccb *);
297struct mpii_rcb *
298 mpii_reply(struct mpii_softc *, struct mpii_reply_descr *);
299
300void mpii_wait(struct mpii_softc *, struct mpii_ccb *);
301void mpii_wait_done(struct mpii_ccb *);
302
303void mpii_init_queues(struct mpii_softc *);
304
305int mpii_load_xs(struct mpii_ccb *);
306int mpii_load_xs_sas3(struct mpii_ccb *);
307
308u_int32_t mpii_read(struct mpii_softc *, bus_size_t);
309void mpii_write(struct mpii_softc *, bus_size_t, u_int32_t);
310int mpii_wait_eq(struct mpii_softc *, bus_size_t, u_int32_t,
311 u_int32_t);
312int mpii_wait_ne(struct mpii_softc *, bus_size_t, u_int32_t,
313 u_int32_t);
314
315int mpii_init(struct mpii_softc *);
316int mpii_reset_soft(struct mpii_softc *);
317int mpii_reset_hard(struct mpii_softc *);
318
319int mpii_handshake_send(struct mpii_softc *, void *, size_t);
320int mpii_handshake_recv_dword(struct mpii_softc *,
321 u_int32_t *);
322int mpii_handshake_recv(struct mpii_softc *, void *, size_t);
323
324void mpii_empty_done(struct mpii_ccb *);
325
326int mpii_iocinit(struct mpii_softc *);
327int mpii_iocfacts(struct mpii_softc *);
328int mpii_portfacts(struct mpii_softc *);
329int mpii_portenable(struct mpii_softc *);
330int mpii_cfg_coalescing(struct mpii_softc *);
331int mpii_board_info(struct mpii_softc *);
332int mpii_target_map(struct mpii_softc *);
333
334int mpii_eventnotify(struct mpii_softc *);
335void mpii_eventnotify_done(struct mpii_ccb *);
336void mpii_eventack(void *, void *);
337void mpii_eventack_done(struct mpii_ccb *);
338void mpii_event_process(struct mpii_softc *, struct mpii_rcb *);
339void mpii_event_done(struct mpii_softc *, struct mpii_rcb *);
340void mpii_event_sas(void *);
341void mpii_event_raid(struct mpii_softc *,
342 struct mpii_msg_event_reply *);
343void mpii_event_discovery(struct mpii_softc *,
344 struct mpii_msg_event_reply *);
345
346void mpii_sas_remove_device(struct mpii_softc *, u_int16_t);
347
348int mpii_req_cfg_header(struct mpii_softc *, u_int8_t,
349 u_int8_t, u_int32_t, int, void *);
350int mpii_req_cfg_page(struct mpii_softc *, u_int32_t, int,
351 void *, int, void *, size_t);
352
353int mpii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
354
355#if NBIO1 > 0
356int mpii_ioctl(struct device *, u_long, caddr_t);
357int mpii_ioctl_inq(struct mpii_softc *, struct bioc_inq *);
358int mpii_ioctl_vol(struct mpii_softc *, struct bioc_vol *);
359int mpii_ioctl_disk(struct mpii_softc *, struct bioc_disk *);
360int mpii_bio_hs(struct mpii_softc *, struct bioc_disk *, int,
361 int, int *);
362int mpii_bio_disk(struct mpii_softc *, struct bioc_disk *,
363 u_int8_t);
364struct mpii_device *
365 mpii_find_vol(struct mpii_softc *, int);
366#ifndef SMALL_KERNEL
367 int mpii_bio_volstate(struct mpii_softc *, struct bioc_vol *);
368int mpii_create_sensors(struct mpii_softc *);
369void mpii_refresh_sensors(void *);
370#endif /* SMALL_KERNEL */
371#endif /* NBIO > 0 */
372
#define DEVNAME(s)		((s)->sc_dev.dv_xname)

#define dwordsof(s)		(sizeof(s) / sizeof(u_int32_t))

/* doorbell / interrupt-status register accessors */
#define mpii_read_db(s)		mpii_read((s), MPII_DOORBELL)
#define mpii_write_db(s, v)	mpii_write((s), MPII_DOORBELL, (v))
#define mpii_read_intr(s)	mpii_read((s), MPII_INTR_STATUS)
#define mpii_write_intr(s, v)	mpii_write((s), MPII_INTR_STATUS, (v))
#define mpii_reply_waiting(s)	((mpii_read_intr((s)) & MPII_INTR_STATUS_REPLY)\
				    == MPII_INTR_STATUS_REPLY)

/* reply free/post queue host index registers */
#define mpii_write_reply_free(s, v) \
	bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
	    MPII_REPLY_FREE_HOST_INDEX, (v))
#define mpii_write_reply_post(s, v) \
	bus_space_write_4((s)->sc_iot, (s)->sc_ioh, \
	    MPII_REPLY_POST_HOST_INDEX, (v))

/* wait for doorbell interrupt assertion / acknowledgement */
#define mpii_wait_db_int(s)	mpii_wait_ne((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_IOC2SYSDB, 0)
#define mpii_wait_db_ack(s)	mpii_wait_eq((s), MPII_INTR_STATUS, \
				    MPII_INTR_STATUS_SYS2IOCDB, 0)
395
396static inline void
397mpii_dvatosge(struct mpii_sge *sge, u_int64_t dva)
398{
399 htolem32(&sge->sg_addr_lo, dva)(*(__uint32_t *)(&sge->sg_addr_lo) = ((__uint32_t)(dva
)))
;
400 htolem32(&sge->sg_addr_hi, dva >> 32)(*(__uint32_t *)(&sge->sg_addr_hi) = ((__uint32_t)(dva
>> 32)))
;
401}
402
/* config page request flags (MPII_PG_FMT is a printb-style format string) */
#define MPII_PG_EXTENDED	(1<<0)
#define MPII_PG_POLL		(1<<1)
#define MPII_PG_FMT		"\020" "\002POLL" "\001EXTENDED"
406
407static const struct pci_matchid mpii_devices[] = {
408 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS20040x0070 },
409 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS20080x0072 },
410 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SSS62000x007e },
411 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2108_30x0074 },
412 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2108_40x0076 },
413 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2108_50x0077 },
414 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2116_10x0064 },
415 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2116_20x0065 },
416 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2208_10x0080 },
417 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2208_20x0081 },
418 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2208_30x0082 },
419 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2208_40x0083 },
420 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2208_50x0084 },
421 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2208_60x0085 },
422 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2308_10x0086 },
423 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2308_20x0087 },
424 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS2308_30x006e },
425 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS30040x0096 },
426 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS30080x0097 },
427 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS3108_10x0090 },
428 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS3108_20x0091 },
429 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS3108_30x0094 },
430 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS3108_40x0095 },
431 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS34080x00af },
432 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS34160x00ac },
433 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS35080x00ad },
434 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS3508_10x00ae },
435 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS35160x00aa },
436 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_SAS3516_10x00ab }
437};
438
439int
440mpii_match(struct device *parent, void *match, void *aux)
441{
442 return (pci_matchbyid(aux, mpii_devices, nitems(mpii_devices)(sizeof((mpii_devices)) / sizeof((mpii_devices)[0]))));
443}
444
445void
446mpii_attach(struct device *parent, struct device *self, void *aux)
447{
448 struct mpii_softc *sc = (struct mpii_softc *)self;
449 struct pci_attach_args *pa = aux;
450 pcireg_t memtype;
451 int r;
452 pci_intr_handle_t ih;
453 struct scsibus_attach_args saa;
454 struct mpii_ccb *ccb;
455
456 sc->sc_pc = pa->pa_pc;
457 sc->sc_tag = pa->pa_tag;
458 sc->sc_dmat = pa->pa_dmat;
459
460 mtx_init(&sc->sc_req_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_req_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ?
0x9 : ((0x6)))); } while (0)
;
461 mtx_init(&sc->sc_rep_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_rep_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ?
0x9 : ((0x6)))); } while (0)
;
462
463 /* find the appropriate memory base */
464 for (r = PCI_MAPREG_START0x10; r < PCI_MAPREG_END0x28; r += sizeof(memtype)) {
465 memtype = pci_mapreg_type(sc->sc_pc, sc->sc_tag, r);
466 if ((memtype & PCI_MAPREG_TYPE_MASK0x00000001) == PCI_MAPREG_TYPE_MEM0x00000000)
467 break;
468 }
469 if (r >= PCI_MAPREG_END0x28) {
470 printf(": unable to locate system interface registers\n");
471 return;
472 }
473
474 if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh,
475 NULL((void *)0), &sc->sc_ios, 0xFF) != 0) {
476 printf(": unable to map system interface registers\n");
477 return;
478 }
479
480 /* disable the expansion rom */
481 pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_ROM_REG0x30,
482 pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_ROM_REG0x30) &
483 ~PCI_ROM_ENABLE0x00000001);
484
485 /* disable interrupts */
486 mpii_write(sc, MPII_INTR_MASK(0x34),
487 MPII_INTR_MASK_RESET(1<<30) | MPII_INTR_MASK_REPLY(1<<3) |
488 MPII_INTR_MASK_DOORBELL(1<<0));
489
490 /* hook up the interrupt */
491 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
492 printf(": unable to map interrupt\n");
493 goto unmap;
494 }
495 printf(": %s\n", pci_intr_string(sc->sc_pc, ih));
496
497 if (mpii_iocfacts(sc) != 0) {
498 printf("%s: unable to get iocfacts\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
499 goto unmap;
500 }
501
502 if (mpii_init(sc) != 0) {
503 printf("%s: unable to initialize ioc\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
504 goto unmap;
505 }
506
507 if (mpii_alloc_ccbs(sc) != 0) {
508 /* error already printed */
509 goto unmap;
510 }
511
512 if (mpii_alloc_replies(sc) != 0) {
513 printf("%s: unable to allocated reply space\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
514 goto free_ccbs;
515 }
516
517 if (mpii_alloc_queues(sc) != 0) {
518 printf("%s: unable to allocate reply queues\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
519 goto free_replies;
520 }
521
522 if (mpii_iocinit(sc) != 0) {
523 printf("%s: unable to send iocinit\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
524 goto free_queues;
525 }
526
527 if (mpii_wait_eq(sc, MPII_DOORBELL(0x00), MPII_DOORBELL_STATE(0xf<<28),
528 MPII_DOORBELL_STATE_OPER(0x2<<28)) != 0) {
529 printf("%s: state: 0x%08x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
530 mpii_read_db(sc)mpii_read((sc), (0x00)) & MPII_DOORBELL_STATE(0xf<<28));
531 printf("%s: operational state timeout\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
532 goto free_queues;
533 }
534
535 mpii_push_replies(sc);
536 mpii_init_queues(sc);
537
538 if (mpii_board_info(sc) != 0) {
539 printf("%s: unable to get manufacturing page 0\n",
540 DEVNAME(sc)((sc)->sc_dev.dv_xname));
541 goto free_queues;
542 }
543
544 if (mpii_portfacts(sc) != 0) {
545 printf("%s: unable to get portfacts\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
546 goto free_queues;
547 }
548
549 if (mpii_target_map(sc) != 0) {
550 printf("%s: unable to setup target mappings\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
551 goto free_queues;
552 }
553
554 if (mpii_cfg_coalescing(sc) != 0) {
555 printf("%s: unable to configure coalescing\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
556 goto free_queues;
557 }
558
559 /* XXX bail on unsupported porttype? */
560 if ((sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL(0x30)) ||
561 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL(0x31)) ||
562 (sc->sc_porttype == MPII_PORTFACTS_PORTTYPE_TRI_MODE(0x40))) {
563 if (mpii_eventnotify(sc) != 0) {
564 printf("%s: unable to enable events\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
565 goto free_queues;
566 }
567 }
568
569 sc->sc_devs = mallocarray(sc->sc_max_devices,
570 sizeof(struct mpii_device *), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
571 if (sc->sc_devs == NULL((void *)0)) {
572 printf("%s: unable to allocate memory for mpii_device\n",
573 DEVNAME(sc)((sc)->sc_dev.dv_xname));
574 goto free_queues;
575 }
576
577 if (mpii_portenable(sc) != 0) {
578 printf("%s: unable to enable port\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
579 goto free_devs;
580 }
581
582 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO0x6,
583 mpii_intr, sc, sc->sc_dev.dv_xname);
584 if (sc->sc_ih == NULL((void *)0))
585 goto free_devs;
586
587 /* force autoconf to wait for the first sas discovery to complete */
588 sc->sc_pending = 1;
589 config_pending_incr();
590
591 saa.saa_adapter = &mpii_switch;
592 saa.saa_adapter_softc = sc;
593 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET0xffff;
594 saa.saa_adapter_buswidth = sc->sc_max_devices;
595 saa.saa_luns = 1;
596 saa.saa_openings = sc->sc_max_cmds - 1;
597 saa.saa_pool = &sc->sc_iopool;
598 saa.saa_quirks = saa.saa_flags = 0;
599 saa.saa_wwpn = saa.saa_wwnn = 0;
600
601 sc->sc_scsibus = (struct scsibus_softc *) config_found(&sc->sc_dev,config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
602 &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
;
603
604 /* enable interrupts */
605 mpii_write(sc, MPII_INTR_MASK(0x34), MPII_INTR_MASK_DOORBELL(1<<0)
606 | MPII_INTR_MASK_RESET(1<<30));
607
608#if NBIO1 > 0
609 if (ISSET(sc->sc_flags, MPII_F_RAID)((sc->sc_flags) & ((1<<1)))) {
610 if (bio_register(&sc->sc_dev, mpii_ioctl) != 0)
611 panic("%s: controller registration failed",
612 DEVNAME(sc)((sc)->sc_dev.dv_xname));
613 else
614 sc->sc_ioctl = mpii_ioctl;
615
616#ifndef SMALL_KERNEL
617 if (mpii_create_sensors(sc) != 0)
618 printf("%s: unable to create sensors\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
619#endif
620 }
621#endif
622
623 return;
624
625free_devs:
626 free(sc->sc_devs, M_DEVBUF2, 0);
627 sc->sc_devs = NULL((void *)0);
628
629free_queues:
630 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_freeq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_freeq)->mdm_map)), (0), (sc->sc_reply_free_qdepth
* 4), (0x02))
631 0, sc->sc_reply_free_qdepth * 4, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_freeq)->mdm_map)), (0), (sc->sc_reply_free_qdepth
* 4), (0x02))
;
632 mpii_dmamem_free(sc, sc->sc_reply_freeq);
633
634 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* 8), (0x02))
635 0, sc->sc_reply_post_qdepth * 8, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* 8), (0x02))
;
636 mpii_dmamem_free(sc, sc->sc_reply_postq);
637
638free_replies:
639 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), ((1 << 12)), (0x02))
640 0, PAGE_SIZE, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), ((1 << 12)), (0x02))
;
641 mpii_dmamem_free(sc, sc->sc_replies);
642
643free_ccbs:
644 while ((ccb = mpii_get_ccb(sc)) != NULL((void *)0))
645 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
646 mpii_dmamem_free(sc, sc->sc_requests);
647 free(sc->sc_ccbs, M_DEVBUF2, 0);
648
649unmap:
650 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
651 sc->sc_ios = 0;
652}
653
654int
655mpii_detach(struct device *self, int flags)
656{
657 struct mpii_softc *sc = (struct mpii_softc *)self;
658
659 if (sc->sc_ih != NULL((void *)0)) {
660 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
661 sc->sc_ih = NULL((void *)0);
662 }
663 if (sc->sc_ios != 0) {
664 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
665 sc->sc_ios = 0;
666 }
667
668 return (0);
669}
670
671int
672mpii_intr(void *arg)
673{
674 struct mpii_rcb_list evts = SIMPLEQ_HEAD_INITIALIZER(evts){ ((void *)0), &(evts).sqh_first };
675 struct mpii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs){ ((void *)0), &(ccbs).sqh_first };
676 struct mpii_softc *sc = arg;
677 struct mpii_reply_descr *postq = sc->sc_reply_postq_kva, *rdp;
678 struct mpii_ccb *ccb;
679 struct mpii_rcb *rcb;
680 int smid;
681 u_int idx;
682 int rv = 0;
683
684 mtx_enter(&sc->sc_rep_mtx);
685 bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x02 | 0x08))
686 MPII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x02 | 0x08))
687 0, sc->sc_reply_post_qdepth * sizeof(*rdp),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x02 | 0x08))
688 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x02 | 0x08))
;
689
690 idx = sc->sc_reply_post_host_index;
691 for (;;) {
692 rdp = &postq[idx];
693 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK(0x0f)) ==
694 MPII_REPLY_DESCR_UNUSED(0x0f))
695 break;
696 if (rdp->data == 0xffffffff) {
697 /*
698 * ioc is still writing to the reply post queue
699 * race condition - bail!
700 */
701 break;
702 }
703
704 smid = lemtoh16(&rdp->smid)((__uint16_t)(*(__uint16_t *)(&rdp->smid)));
705 rcb = mpii_reply(sc, rdp);
706
707 if (smid) {
708 ccb = &sc->sc_ccbs[smid - 1];
709 ccb->ccb_state = MPII_CCB_READY;
710 ccb->ccb_rcb = rcb;
711 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link)do { (ccb)->ccb_link.sqe_next = ((void *)0); *(&ccbs)->
sqh_last = (ccb); (&ccbs)->sqh_last = &(ccb)->ccb_link
.sqe_next; } while (0)
;
712 } else
713 SIMPLEQ_INSERT_TAIL(&evts, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&evts)->
sqh_last = (rcb); (&evts)->sqh_last = &(rcb)->rcb_link
.sqe_next; } while (0)
;
714
715 if (++idx >= sc->sc_reply_post_qdepth)
716 idx = 0;
717
718 rv = 1;
719 }
720
721 bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x01 | 0x04))
722 MPII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x01 | 0x04))
723 0, sc->sc_reply_post_qdepth * sizeof(*rdp),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x01 | 0x04))
724 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (sc->sc_reply_post_qdepth
* sizeof(*rdp)), (0x01 | 0x04))
;
725
726 if (rv)
727 mpii_write_reply_post(sc, sc->sc_reply_post_host_index = idx)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x6c)), (
(sc->sc_reply_post_host_index = idx))))
;
728
729 mtx_leave(&sc->sc_rep_mtx);
730
731 if (rv == 0)
732 return (0);
733
734 while ((ccb = SIMPLEQ_FIRST(&ccbs)((&ccbs)->sqh_first)) != NULL((void *)0)) {
735 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link)do { if (((&ccbs)->sqh_first = (&ccbs)->sqh_first
->ccb_link.sqe_next) == ((void *)0)) (&ccbs)->sqh_last
= &(&ccbs)->sqh_first; } while (0)
;
736 ccb->ccb_done(ccb);
737 }
738 while ((rcb = SIMPLEQ_FIRST(&evts)((&evts)->sqh_first)) != NULL((void *)0)) {
739 SIMPLEQ_REMOVE_HEAD(&evts, rcb_link)do { if (((&evts)->sqh_first = (&evts)->sqh_first
->rcb_link.sqe_next) == ((void *)0)) (&evts)->sqh_last
= &(&evts)->sqh_first; } while (0)
;
740 mpii_event_process(sc, rcb);
741 }
742
743 return (1);
744}
745
746int
747mpii_load_xs_sas3(struct mpii_ccb *ccb)
748{
749 struct mpii_softc *sc = ccb->ccb_sc;
750 struct scsi_xfer *xs = ccb->ccb_cookie;
751 struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
752 struct mpii_ieee_sge *csge, *nsge, *sge;
11
'sge' declared without an initial value
753 bus_dmamap_t dmap = ccb->ccb_dmamap;
754 int i, error;
755
756 /* Request frame structure is described in the mpii_iocfacts */
757 nsge = (struct mpii_ieee_sge *)(io + 1);
758
759 /* zero length transfer still requires an SGE */
760 if (xs->datalen == 0) {
12
Assuming field 'datalen' is not equal to 0
13
Taking false branch
761 nsge->sg_flags = MPII_IEEE_SGE_END_OF_LIST(0x40);
762 return (0);
763 }
764
765 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), ((xs->flags
& 0x00001) ? 0x0001 : 0x0000))
14
Assuming the condition is false
15
'?' condition is false
766 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), ((xs->flags
& 0x00001) ? 0x0001 : 0x0000))
;
767 if (error) {
16
Assuming 'error' is 0
17
Taking false branch
768 printf("%s: error %d loading dmamap\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
769 return (1);
770 }
771
772 csge = NULL((void *)0);
773 if (dmap->dm_nsegs > sc->sc_chain_sge) {
18
Assuming field 'dm_nsegs' is <= field 'sc_chain_sge'
19
Taking false branch
774 csge = nsge + sc->sc_chain_sge;
775
776 /* offset to the chain sge from the beginning */
777 io->chain_offset = ((caddr_t)csge - (caddr_t)io) / sizeof(*sge);
778 }
779
780 for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
20
Assuming 'i' is >= field 'dm_nsegs'
21
Loop condition is false. Execution continues on line 807
781 if (nsge == csge) {
782 nsge++;
783
784 /* address of the next sge */
785 htolem64(&csge->sg_addr, ccb->ccb_cmd_dva +(*(__uint64_t *)(&csge->sg_addr) = ((__uint64_t)(ccb->
ccb_cmd_dva + ((caddr_t)nsge - (caddr_t)io))))
786 ((caddr_t)nsge - (caddr_t)io))(*(__uint64_t *)(&csge->sg_addr) = ((__uint64_t)(ccb->
ccb_cmd_dva + ((caddr_t)nsge - (caddr_t)io))))
;
787 htolem32(&csge->sg_len, (dmap->dm_nsegs - i) *(*(__uint32_t *)(&csge->sg_len) = ((__uint32_t)((dmap->
dm_nsegs - i) * sizeof(*sge))))
788 sizeof(*sge))(*(__uint32_t *)(&csge->sg_len) = ((__uint32_t)((dmap->
dm_nsegs - i) * sizeof(*sge))))
;
789 csge->sg_next_chain_offset = 0;
790 csge->sg_flags = MPII_IEEE_SGE_CHAIN_ELEMENT(0x80) |
791 MPII_IEEE_SGE_ADDR_SYSTEM(0x00);
792
793 if ((dmap->dm_nsegs - i) > sc->sc_max_chain) {
794 csge->sg_next_chain_offset = sc->sc_max_chain;
795 csge += sc->sc_max_chain;
796 }
797 }
798
799 sge = nsge;
800 sge->sg_flags = MPII_IEEE_SGE_ADDR_SYSTEM(0x00);
801 sge->sg_next_chain_offset = 0;
802 htolem32(&sge->sg_len, dmap->dm_segs[i].ds_len)(*(__uint32_t *)(&sge->sg_len) = ((__uint32_t)(dmap->
dm_segs[i].ds_len)))
;
803 htolem64(&sge->sg_addr, dmap->dm_segs[i].ds_addr)(*(__uint64_t *)(&sge->sg_addr) = ((__uint64_t)(dmap->
dm_segs[i].ds_addr)))
;
804 }
805
806 /* terminate list */
807 sge->sg_flags |= MPII_IEEE_SGE_END_OF_LIST(0x40);
22
Access to field 'sg_flags' results in a dereference of an undefined pointer value (loaded from variable 'sge')
808
809 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
810 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
811 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
;
812
813 return (0);
814}
815
816int
817mpii_load_xs(struct mpii_ccb *ccb)
818{
819 struct mpii_softc *sc = ccb->ccb_sc;
820 struct scsi_xfer *xs = ccb->ccb_cookie;
821 struct mpii_msg_scsi_io *io = ccb->ccb_cmd;
822 struct mpii_sge *csge, *nsge, *sge;
823 bus_dmamap_t dmap = ccb->ccb_dmamap;
824 u_int32_t flags;
825 u_int16_t len;
826 int i, error;
827
828 /* Request frame structure is described in the mpii_iocfacts */
829 nsge = (struct mpii_sge *)(io + 1);
830 csge = nsge + sc->sc_chain_sge;
831
832 /* zero length transfer still requires an SGE */
833 if (xs->datalen == 0) {
834 nsge->sg_hdr = htole32(MPII_SGE_FL_TYPE_SIMPLE |((__uint32_t)((0x1<<28) | (0x1<<31) | (0x1<<
30) | (0x1<<24)))
835 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL)((__uint32_t)((0x1<<28) | (0x1<<31) | (0x1<<
30) | (0x1<<24)))
;
836 return (0);
837 }
838
839 error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data, xs->datalen, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), ((xs->flags
& 0x00001) ? 0x0001 : 0x0000))
840 (xs->flags & SCSI_NOSLEEP) ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (xs->data), (xs->datalen), (((void *)0)), ((xs->flags
& 0x00001) ? 0x0001 : 0x0000))
;
841 if (error) {
842 printf("%s: error %d loading dmamap\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
843 return (1);
844 }
845
846 /* safe default starting flags */
847 flags = MPII_SGE_FL_TYPE_SIMPLE(0x1<<28) | MPII_SGE_FL_SIZE_64(0x1<<25);
848 if (xs->flags & SCSI_DATA_OUT0x01000)
849 flags |= MPII_SGE_FL_DIR_OUT(0x1<<26);
850
851 for (i = 0; i < dmap->dm_nsegs; i++, nsge++) {
852 if (nsge == csge) {
853 nsge++;
854 /* offset to the chain sge from the beginning */
855 io->chain_offset = ((caddr_t)csge - (caddr_t)io) / 4;
856 /* length of the sgl segment we're pointing to */
857 len = (dmap->dm_nsegs - i) * sizeof(*sge);
858 htolem32(&csge->sg_hdr, MPII_SGE_FL_TYPE_CHAIN |(*(__uint32_t *)(&csge->sg_hdr) = ((__uint32_t)((0x3<<
28) | (0x1<<25) | len)))
859 MPII_SGE_FL_SIZE_64 | len)(*(__uint32_t *)(&csge->sg_hdr) = ((__uint32_t)((0x3<<
28) | (0x1<<25) | len)))
;
860 /* address of the next sge */
861 mpii_dvatosge(csge, ccb->ccb_cmd_dva +
862 ((caddr_t)nsge - (caddr_t)io));
863 }
864
865 sge = nsge;
866 htolem32(&sge->sg_hdr, flags | dmap->dm_segs[i].ds_len)(*(__uint32_t *)(&sge->sg_hdr) = ((__uint32_t)(flags |
dmap->dm_segs[i].ds_len)))
;
867 mpii_dvatosge(sge, dmap->dm_segs[i].ds_addr);
868 }
869
870 /* terminate list */
871 sge->sg_hdr |= htole32(MPII_SGE_FL_LAST | MPII_SGE_FL_EOB |((__uint32_t)((0x1<<31) | (0x1<<30) | (0x1<<
24)))
872 MPII_SGE_FL_EOL)((__uint32_t)((0x1<<31) | (0x1<<30) | (0x1<<
24)))
;
873
874 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
875 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
876 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x01 : 0x04))
;
877
878 return (0);
879}
880
881int
882mpii_scsi_probe(struct scsi_link *link)
883{
884 struct mpii_softc *sc = link->bus->sb_adapter_softc;
885 struct mpii_cfg_sas_dev_pg0 pg0;
886 struct mpii_ecfg_hdr ehdr;
887 struct mpii_device *dev;
888 uint32_t address;
889 int flags;
890
891 if ((sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_PHYSICAL(0x30)) &&
892 (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_SAS_VIRTUAL(0x31)) &&
893 (sc->sc_porttype != MPII_PORTFACTS_PORTTYPE_TRI_MODE(0x40)))
894 return (ENXIO6);
895
896 dev = sc->sc_devs[link->target];
897 if (dev == NULL((void *)0))
898 return (1);
899
900 flags = dev->flags;
901 if (ISSET(flags, MPII_DF_HIDDEN)((flags) & ((0x0004))) || ISSET(flags, MPII_DF_UNUSED)((flags) & ((0x0008))))
902 return (1);
903
904 if (ISSET(flags, MPII_DF_VOLUME)((flags) & ((0x0010)))) {
905 struct mpii_cfg_hdr hdr;
906 struct mpii_cfg_raid_vol_pg1 vpg;
907 size_t pagelen;
908
909 address = MPII_CFG_RAID_VOL_ADDR_HANDLE(1<<28) | dev->dev_handle;
910
911 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL(0x08),
912 1, address, MPII_PG_POLL(1<<1), &hdr) != 0)
913 return (EINVAL22);
914
915 memset(&vpg, 0, sizeof(vpg))__builtin_memset((&vpg), (0), (sizeof(vpg)));
916 /* avoid stack trash on future page growth */
917 pagelen = min(sizeof(vpg), hdr.page_length * 4);
918
919 if (mpii_req_cfg_page(sc, address, MPII_PG_POLL(1<<1), &hdr, 1,
920 &vpg, pagelen) != 0)
921 return (EINVAL22);
922
923 link->port_wwn = letoh64(vpg.wwid)((__uint64_t)(vpg.wwid));
924 /*
925 * WWIDs generated by LSI firmware are not IEEE NAA compliant
926 * and historical practise in OBP on sparc64 is to set the top
927 * nibble to 3 to indicate that this is a RAID volume.
928 */
929 link->port_wwn &= 0x0fffffffffffffff;
930 link->port_wwn |= 0x3000000000000000;
931
932 return (0);
933 }
934
935 memset(&ehdr, 0, sizeof(ehdr))__builtin_memset((&ehdr), (0), (sizeof(ehdr)));
936 ehdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED(0x0f);
937 ehdr.page_number = 0;
938 ehdr.page_version = 0;
939 ehdr.ext_page_type = MPII_CONFIG_REQ_EXTPAGE_TYPE_SAS_DEVICE(0x12);
940 ehdr.ext_page_length = htole16(sizeof(pg0) / 4)((__uint16_t)(sizeof(pg0) / 4)); /* dwords */
941
942 address = MPII_PGAD_SAS_DEVICE_FORM_HANDLE(0x20000000) | (uint32_t)dev->dev_handle;
943 if (mpii_req_cfg_page(sc, address, MPII_PG_EXTENDED(1<<0),
944 &ehdr, 1, &pg0, sizeof(pg0)) != 0) {
945 printf("%s: unable to fetch SAS device page 0 for target %u\n",
946 DEVNAME(sc)((sc)->sc_dev.dv_xname), link->target);
947
948 return (0); /* the handle should still work */
949 }
950
951 link->port_wwn = letoh64(pg0.sas_addr)((__uint64_t)(pg0.sas_addr));
952 link->node_wwn = letoh64(pg0.device_name)((__uint64_t)(pg0.device_name));
953
954 if (ISSET(lemtoh32(&pg0.device_info),((((__uint32_t)(*(__uint32_t *)(&pg0.device_info)))) &
((1<<13)))
955 MPII_CFG_SAS_DEV_0_DEVINFO_ATAPI_DEVICE)((((__uint32_t)(*(__uint32_t *)(&pg0.device_info)))) &
((1<<13)))
) {
956 link->flags |= SDEV_ATAPI0x0200;
957 }
958
959 return (0);
960}
961
962u_int32_t
963mpii_read(struct mpii_softc *sc, bus_size_t r)
964{
965 u_int32_t rv;
966
967 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
968 BUS_SPACE_BARRIER_READ0x01);
969 rv = bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)((sc->sc_iot)->read_4((sc->sc_ioh), (r)));
970
971 DNPRINTF(MPII_D_RW, "%s: mpii_read %#lx %#x\n", DEVNAME(sc), r, rv);
972
973 return (rv);
974}
975
976void
977mpii_write(struct mpii_softc *sc, bus_size_t r, u_int32_t v)
978{
979 DNPRINTF(MPII_D_RW, "%s: mpii_write %#lx %#x\n", DEVNAME(sc), r, v);
980
981 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v)((sc->sc_iot)->write_4((sc->sc_ioh), (r), (v)));
982 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
983 BUS_SPACE_BARRIER_WRITE0x02);
984}
985
986
987int
988mpii_wait_eq(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
989 u_int32_t target)
990{
991 int i;
992
993 DNPRINTF(MPII_D_RW, "%s: mpii_wait_eq %#lx %#x %#x\n", DEVNAME(sc), r,
994 mask, target);
995
996 for (i = 0; i < 15000; i++) {
997 if ((mpii_read(sc, r) & mask) == target)
998 return (0);
999 delay(1000)(*delay_func)(1000);
1000 }
1001
1002 return (1);
1003}
1004
1005int
1006mpii_wait_ne(struct mpii_softc *sc, bus_size_t r, u_int32_t mask,
1007 u_int32_t target)
1008{
1009 int i;
1010
1011 DNPRINTF(MPII_D_RW, "%s: mpii_wait_ne %#lx %#x %#x\n", DEVNAME(sc), r,
1012 mask, target);
1013
1014 for (i = 0; i < 15000; i++) {
1015 if ((mpii_read(sc, r) & mask) != target)
1016 return (0);
1017 delay(1000)(*delay_func)(1000);
1018 }
1019
1020 return (1);
1021}
1022
1023int
1024mpii_init(struct mpii_softc *sc)
1025{
1026 u_int32_t db;
1027 int i;
1028
1029 /* spin until the ioc leaves the reset state */
1030 if (mpii_wait_ne(sc, MPII_DOORBELL(0x00), MPII_DOORBELL_STATE(0xf<<28),
1031 MPII_DOORBELL_STATE_RESET(0x0<<28)) != 0) {
1032 DNPRINTF(MPII_D_MISC, "%s: mpii_init timeout waiting to leave "
1033 "reset state\n", DEVNAME(sc));
1034 return (1);
1035 }
1036
1037 /* check current ownership */
1038 db = mpii_read_db(sc)mpii_read((sc), (0x00));
1039 if ((db & MPII_DOORBELL_WHOINIT(0x7<<24)) == MPII_DOORBELL_WHOINIT_PCIPEER(0x3<<24)) {
1040 DNPRINTF(MPII_D_MISC, "%s: mpii_init initialised by pci peer\n",
1041 DEVNAME(sc));
1042 return (0);
1043 }
1044
1045 for (i = 0; i < 5; i++) {
1046 switch (db & MPII_DOORBELL_STATE(0xf<<28)) {
1047 case MPII_DOORBELL_STATE_READY(0x1<<28):
1048 DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is ready\n",
1049 DEVNAME(sc));
1050 return (0);
1051
1052 case MPII_DOORBELL_STATE_OPER(0x2<<28):
1053 DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is oper\n",
1054 DEVNAME(sc));
1055 if (sc->sc_ioc_event_replay)
1056 mpii_reset_soft(sc);
1057 else
1058 mpii_reset_hard(sc);
1059 break;
1060
1061 case MPII_DOORBELL_STATE_FAULT(0x4<<28):
1062 DNPRINTF(MPII_D_MISC, "%s: mpii_init ioc is being "
1063 "reset hard\n" , DEVNAME(sc));
1064 mpii_reset_hard(sc);
1065 break;
1066
1067 case MPII_DOORBELL_STATE_RESET(0x0<<28):
1068 DNPRINTF(MPII_D_MISC, "%s: mpii_init waiting to come "
1069 "out of reset\n", DEVNAME(sc));
1070 if (mpii_wait_ne(sc, MPII_DOORBELL(0x00), MPII_DOORBELL_STATE(0xf<<28),
1071 MPII_DOORBELL_STATE_RESET(0x0<<28)) != 0)
1072 return (1);
1073 break;
1074 }
1075 db = mpii_read_db(sc)mpii_read((sc), (0x00));
1076 }
1077
1078 return (1);
1079}
1080
1081int
1082mpii_reset_soft(struct mpii_softc *sc)
1083{
1084 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_soft\n", DEVNAME(sc));
1085
1086 if (mpii_read_db(sc)mpii_read((sc), (0x00)) & MPII_DOORBELL_INUSE(0x1<<27)) {
1087 return (1);
1088 }
1089
1090 mpii_write_db(sc,mpii_write((sc), (0x00), (((((0x40)) << (24)) & (0xff
<< (24)))))
1091 MPII_DOORBELL_FUNCTION(MPII_FUNCTION_IOC_MESSAGE_UNIT_RESET))mpii_write((sc), (0x00), (((((0x40)) << (24)) & (0xff
<< (24)))))
;
1092
1093 /* XXX LSI waits 15 sec */
1094 if (mpii_wait_db_ack(sc)mpii_wait_eq((sc), (0x30), (1<<31), 0) != 0)
1095 return (1);
1096
1097 /* XXX LSI waits 15 sec */
1098 if (mpii_wait_eq(sc, MPII_DOORBELL(0x00), MPII_DOORBELL_STATE(0xf<<28),
1099 MPII_DOORBELL_STATE_READY(0x1<<28)) != 0)
1100 return (1);
1101
1102 /* XXX wait for Sys2IOCDB bit to clear in HIS?? */
1103
1104 return (0);
1105}
1106
1107int
1108mpii_reset_hard(struct mpii_softc *sc)
1109{
1110 u_int16_t i;
1111
1112 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard\n", DEVNAME(sc));
1113
1114 mpii_write_intr(sc, 0)mpii_write((sc), (0x30), (0));
1115
1116 /* enable diagnostic register */
1117 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_FLUSH(0x00));
1118 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_1(0x0f));
1119 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_2(0x04));
1120 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_3(0x0b));
1121 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_4(0x02));
1122 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_5(0x07));
1123 mpii_write(sc, MPII_WRITESEQ(0x04), MPII_WRITESEQ_6(0x0d));
1124
1125 delay(100)(*delay_func)(100);
1126
1127 if ((mpii_read(sc, MPII_HOSTDIAG(0x08)) & MPII_HOSTDIAG_DWRE(1<<7)) == 0) {
1128 DNPRINTF(MPII_D_MISC, "%s: mpii_reset_hard failure to enable "
1129 "diagnostic read/write\n", DEVNAME(sc));
1130 return(1);
1131 }
1132
1133 /* reset ioc */
1134 mpii_write(sc, MPII_HOSTDIAG(0x08), MPII_HOSTDIAG_RESET_ADAPTER(1<<2));
1135
1136 /* 240 milliseconds */
1137 delay(240000)(*delay_func)(240000);
1138
1139
1140 /* XXX this whole function should be more robust */
1141
1142 /* XXX read the host diagnostic reg until reset adapter bit clears ? */
1143 for (i = 0; i < 30000; i++) {
1144 if ((mpii_read(sc, MPII_HOSTDIAG(0x08)) &
1145 MPII_HOSTDIAG_RESET_ADAPTER(1<<2)) == 0)
1146 break;
1147 delay(10000)(*delay_func)(10000);
1148 }
1149
1150 /* disable diagnostic register */
1151 mpii_write(sc, MPII_WRITESEQ(0x04), 0xff);
1152
1153 /* XXX what else? */
1154
1155 DNPRINTF(MPII_D_MISC, "%s: done with mpii_reset_hard\n", DEVNAME(sc));
1156
1157 return(0);
1158}
1159
1160int
1161mpii_handshake_send(struct mpii_softc *sc, void *buf, size_t dwords)
1162{
1163 u_int32_t *query = buf;
1164 int i;
1165
1166 /* make sure the doorbell is not in use. */
1167 if (mpii_read_db(sc)mpii_read((sc), (0x00)) & MPII_DOORBELL_INUSE(0x1<<27))
1168 return (1);
1169
1170 /* clear pending doorbell interrupts */
1171 if (mpii_read_intr(sc)mpii_read((sc), (0x30)) & MPII_INTR_STATUS_IOC2SYSDB(1<<0))
1172 mpii_write_intr(sc, 0)mpii_write((sc), (0x30), (0));
1173
1174 /*
1175 * first write the doorbell with the handshake function and the
1176 * dword count.
1177 */
1178 mpii_write_db(sc, MPII_DOORBELL_FUNCTION(MPII_FUNCTION_HANDSHAKE) |mpii_write((sc), (0x00), (((((0x42)) << (24)) & (0xff
<< (24))) | (((dwords) << 16) & (0xff <<
16))))
1179 MPII_DOORBELL_DWORDS(dwords))mpii_write((sc), (0x00), (((((0x42)) << (24)) & (0xff
<< (24))) | (((dwords) << 16) & (0xff <<
16))))
;
1180
1181 /*
1182 * the doorbell used bit will be set because a doorbell function has
1183 * started. wait for the interrupt and then ack it.
1184 */
1185 if (mpii_wait_db_int(sc)mpii_wait_ne((sc), (0x30), (1<<0), 0) != 0)
1186 return (1);
1187 mpii_write_intr(sc, 0)mpii_write((sc), (0x30), (0));
1188
1189 /* poll for the acknowledgement. */
1190 if (mpii_wait_db_ack(sc)mpii_wait_eq((sc), (0x30), (1<<31), 0) != 0)
1191 return (1);
1192
1193 /* write the query through the doorbell. */
1194 for (i = 0; i < dwords; i++) {
1195 mpii_write_db(sc, htole32(query[i]))mpii_write((sc), (0x00), (((__uint32_t)(query[i]))));
1196 if (mpii_wait_db_ack(sc)mpii_wait_eq((sc), (0x30), (1<<31), 0) != 0)
1197 return (1);
1198 }
1199
1200 return (0);
1201}
1202
1203int
1204mpii_handshake_recv_dword(struct mpii_softc *sc, u_int32_t *dword)
1205{
1206 u_int16_t *words = (u_int16_t *)dword;
1207 int i;
1208
1209 for (i = 0; i < 2; i++) {
1210 if (mpii_wait_db_int(sc)mpii_wait_ne((sc), (0x30), (1<<0), 0) != 0)
1211 return (1);
1212 words[i] = letoh16(mpii_read_db(sc) & MPII_DOORBELL_DATA_MASK)((__uint16_t)(mpii_read((sc), (0x00)) & (0xffff)));
1213 mpii_write_intr(sc, 0)mpii_write((sc), (0x30), (0));
1214 }
1215
1216 return (0);
1217}
1218
1219int
1220mpii_handshake_recv(struct mpii_softc *sc, void *buf, size_t dwords)
1221{
1222 struct mpii_msg_reply *reply = buf;
1223 u_int32_t *dbuf = buf, dummy;
1224 int i;
1225
1226 /* get the first dword so we can read the length out of the header. */
1227 if (mpii_handshake_recv_dword(sc, &dbuf[0]) != 0)
1228 return (1);
1229
1230 DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dwords: %lu reply: %d\n",
1231 DEVNAME(sc), dwords, reply->msg_length);
1232
1233 /*
1234 * the total length, in dwords, is in the message length field of the
1235 * reply header.
1236 */
1237 for (i = 1; i < MIN(dwords, reply->msg_length)(((dwords)<(reply->msg_length))?(dwords):(reply->msg_length
))
; i++) {
1238 if (mpii_handshake_recv_dword(sc, &dbuf[i]) != 0)
1239 return (1);
1240 }
1241
1242 /* if there's extra stuff to come off the ioc, discard it */
1243 while (i++ < reply->msg_length) {
1244 if (mpii_handshake_recv_dword(sc, &dummy) != 0)
1245 return (1);
1246 DNPRINTF(MPII_D_CMD, "%s: mpii_handshake_recv dummy read: "
1247 "0x%08x\n", DEVNAME(sc), dummy);
1248 }
1249
1250 /* wait for the doorbell used bit to be reset and clear the intr */
1251 if (mpii_wait_db_int(sc)mpii_wait_ne((sc), (0x30), (1<<0), 0) != 0)
1252 return (1);
1253
1254 if (mpii_wait_eq(sc, MPII_DOORBELL(0x00), MPII_DOORBELL_INUSE(0x1<<27), 0) != 0)
1255 return (1);
1256
1257 mpii_write_intr(sc, 0)mpii_write((sc), (0x30), (0));
1258
1259 return (0);
1260}
1261
/* No-op ccb completion callback for polled commands. */
void
mpii_empty_done(struct mpii_ccb *ccb)
{
	/* nothing to do */
}
1267
1268int
1269mpii_iocfacts(struct mpii_softc *sc)
1270{
1271 struct mpii_msg_iocfacts_request ifq;
1272 struct mpii_msg_iocfacts_reply ifp;
1273 int irs;
1274 int sge_size;
1275 u_int qdepth;
1276
1277 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts\n", DEVNAME(sc));
1278
1279 memset(&ifq, 0, sizeof(ifq))__builtin_memset((&ifq), (0), (sizeof(ifq)));
1280 memset(&ifp, 0, sizeof(ifp))__builtin_memset((&ifp), (0), (sizeof(ifp)));
1281
1282 ifq.function = MPII_FUNCTION_IOC_FACTS(0x03);
1283
1284 if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)(sizeof(ifq) / sizeof(u_int32_t))) != 0) {
1285 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts send failed\n",
1286 DEVNAME(sc));
1287 return (1);
1288 }
1289
1290 if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)(sizeof(ifp) / sizeof(u_int32_t))) != 0) {
1291 DNPRINTF(MPII_D_MISC, "%s: mpii_iocfacts recv failed\n",
1292 DEVNAME(sc));
1293 return (1);
1294 }
1295
1296 sc->sc_ioc_number = ifp.ioc_number;
1297 sc->sc_vf_id = ifp.vf_id;
1298
1299 sc->sc_max_volumes = ifp.max_volumes;
1300 sc->sc_max_devices = ifp.max_volumes + lemtoh16(&ifp.max_targets)((__uint16_t)(*(__uint16_t *)(&ifp.max_targets)));
1301
1302 if (ISSET(lemtoh32(&ifp.ioc_capabilities),((((__uint32_t)(*(__uint32_t *)(&ifp.ioc_capabilities))))
& ((1<<12)))
1303 MPII_IOCFACTS_CAPABILITY_INTEGRATED_RAID)((((__uint32_t)(*(__uint32_t *)(&ifp.ioc_capabilities))))
& ((1<<12)))
)
1304 SET(sc->sc_flags, MPII_F_RAID)((sc->sc_flags) |= ((1<<1)));
1305 if (ISSET(lemtoh32(&ifp.ioc_capabilities),((((__uint32_t)(*(__uint32_t *)(&ifp.ioc_capabilities))))
& ((1<<13)))
1306 MPII_IOCFACTS_CAPABILITY_EVENT_REPLAY)((((__uint32_t)(*(__uint32_t *)(&ifp.ioc_capabilities))))
& ((1<<13)))
)
1307 sc->sc_ioc_event_replay = 1;
1308
1309 sc->sc_max_cmds = MIN(lemtoh16(&ifp.request_credit),(((((__uint16_t)(*(__uint16_t *)(&ifp.request_credit))))<
((128)))?(((__uint16_t)(*(__uint16_t *)(&ifp.request_credit
)))):((128)))
1310 MPII_REQUEST_CREDIT)(((((__uint16_t)(*(__uint16_t *)(&ifp.request_credit))))<
((128)))?(((__uint16_t)(*(__uint16_t *)(&ifp.request_credit
)))):((128)))
;
1311
1312 /* SAS3 and 3.5 controllers have different sgl layouts */
1313 if (ifp.msg_version_maj == 2 && ((ifp.msg_version_min == 5)
1314 || (ifp.msg_version_min == 6)))
1315 SET(sc->sc_flags, MPII_F_SAS3)((sc->sc_flags) |= ((1<<2)));
1316
1317 /*
1318 * The host driver must ensure that there is at least one
1319 * unused entry in the Reply Free Queue. One way to ensure
1320 * that this requirement is met is to never allocate a number
1321 * of reply frames that is a multiple of 16.
1322 */
1323 sc->sc_num_reply_frames = sc->sc_max_cmds + 32;
1324 if (!(sc->sc_num_reply_frames % 16))
1325 sc->sc_num_reply_frames--;
1326
1327 /* must be multiple of 16 */
1328 sc->sc_reply_post_qdepth = sc->sc_max_cmds +
1329 sc->sc_num_reply_frames;
1330 sc->sc_reply_post_qdepth += 16 - (sc->sc_reply_post_qdepth % 16);
1331
1332 qdepth = lemtoh16(&ifp.max_reply_descriptor_post_queue_depth)((__uint16_t)(*(__uint16_t *)(&ifp.max_reply_descriptor_post_queue_depth
)))
;
1333 if (sc->sc_reply_post_qdepth > qdepth) {
1334 sc->sc_reply_post_qdepth = qdepth;
1335 if (sc->sc_reply_post_qdepth < 16) {
1336 printf("%s: RDPQ is too shallow\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1337 return (1);
1338 }
1339 sc->sc_max_cmds = sc->sc_reply_post_qdepth / 2 - 4;
1340 sc->sc_num_reply_frames = sc->sc_max_cmds + 4;
1341 }
1342
1343 sc->sc_reply_free_qdepth = sc->sc_num_reply_frames +
1344 16 - (sc->sc_num_reply_frames % 16);
1345
1346 /*
1347 * Our request frame for an I/O operation looks like this:
1348 *
1349 * +-------------------+ -.
1350 * | mpii_msg_scsi_io | |
1351 * +-------------------| |
1352 * | mpii_sge | |
1353 * + - - - - - - - - - + |
1354 * | ... | > ioc_request_frame_size
1355 * + - - - - - - - - - + |
1356 * | mpii_sge (tail) | |
1357 * + - - - - - - - - - + |
1358 * | mpii_sge (csge) | | --.
1359 * + - - - - - - - - - + -' | chain sge points to the next sge
1360 * | mpii_sge |<-----'
1361 * + - - - - - - - - - +
1362 * | ... |
1363 * + - - - - - - - - - +
1364 * | mpii_sge (tail) |
1365 * +-------------------+
1366 * | |
1367 * ~~~~~~~~~~~~~~~~~~~~~
1368 * | |
1369 * +-------------------+ <- sc_request_size - sizeof(scsi_sense_data)
1370 * | scsi_sense_data |
1371 * +-------------------+
1372 *
1373 * If the controller gives us a maximum chain size, there can be
1374 * multiple chain sges, each of which points to the sge following it.
1375 * Otherwise, there will only be one chain sge.
1376 */
1377
1378 /* both sizes are in 32-bit words */
1379 sc->sc_reply_size = ifp.reply_frame_size * 4;
1380 irs = lemtoh16(&ifp.ioc_request_frame_size)((__uint16_t)(*(__uint16_t *)(&ifp.ioc_request_frame_size
)))
* 4;
1381 sc->sc_request_size = MPII_REQUEST_SIZE(512);
1382 /* make sure we have enough space for scsi sense data */
1383 if (irs > sc->sc_request_size) {
1384 sc->sc_request_size = irs + sizeof(struct scsi_sense_data);
1385 sc->sc_request_size += 16 - (sc->sc_request_size % 16);
1386 }
1387
1388 if (ISSET(sc->sc_flags, MPII_F_SAS3)((sc->sc_flags) & ((1<<2)))) {
1389 sge_size = sizeof(struct mpii_ieee_sge);
1390 } else {
1391 sge_size = sizeof(struct mpii_sge);
1392 }
1393
1394 /* offset to the chain sge */
1395 sc->sc_chain_sge = (irs - sizeof(struct mpii_msg_scsi_io)) /
1396 sge_size - 1;
1397
1398 sc->sc_max_chain = lemtoh16(&ifp.ioc_max_chain_seg_size)((__uint16_t)(*(__uint16_t *)(&ifp.ioc_max_chain_seg_size
)))
;
1399
1400 /*
1401 * A number of simple scatter-gather elements we can fit into the
1402 * request buffer after the I/O command minus the chain element(s).
1403 */
1404 sc->sc_max_sgl = (sc->sc_request_size -
1405 sizeof(struct mpii_msg_scsi_io) - sizeof(struct scsi_sense_data)) /
1406 sge_size - 1;
1407 if (sc->sc_max_chain > 0) {
1408 sc->sc_max_sgl -= (sc->sc_max_sgl - sc->sc_chain_sge) /
1409 sc->sc_max_chain;
1410 }
1411
1412 return (0);
1413}
1414
1415int
1416mpii_iocinit(struct mpii_softc *sc)
1417{
1418 struct mpii_msg_iocinit_request iiq;
1419 struct mpii_msg_iocinit_reply iip;
1420
1421 DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit\n", DEVNAME(sc));
1422
1423 memset(&iiq, 0, sizeof(iiq))__builtin_memset((&iiq), (0), (sizeof(iiq)));
1424 memset(&iip, 0, sizeof(iip))__builtin_memset((&iip), (0), (sizeof(iip)));
1425
1426 iiq.function = MPII_FUNCTION_IOC_INIT(0x02);
1427 iiq.whoinit = MPII_WHOINIT_HOST_DRIVER(0x04);
1428
1429 /* XXX JPG do something about vf_id */
1430 iiq.vf_id = 0;
1431
1432 iiq.msg_version_maj = 0x02;
1433 iiq.msg_version_min = 0x00;
1434
1435 /* XXX JPG ensure compliance with some level and hard-code? */
1436 iiq.hdr_version_unit = 0x00;
1437 iiq.hdr_version_dev = 0x00;
1438
1439 htolem16(&iiq.system_request_frame_size, sc->sc_request_size / 4)(*(__uint16_t *)(&iiq.system_request_frame_size) = ((__uint16_t
)(sc->sc_request_size / 4)))
;
1440
1441 htolem16(&iiq.reply_descriptor_post_queue_depth,(*(__uint16_t *)(&iiq.reply_descriptor_post_queue_depth) =
((__uint16_t)(sc->sc_reply_post_qdepth)))
1442 sc->sc_reply_post_qdepth)(*(__uint16_t *)(&iiq.reply_descriptor_post_queue_depth) =
((__uint16_t)(sc->sc_reply_post_qdepth)))
;
1443
1444 htolem16(&iiq.reply_free_queue_depth, sc->sc_reply_free_qdepth)(*(__uint16_t *)(&iiq.reply_free_queue_depth) = ((__uint16_t
)(sc->sc_reply_free_qdepth)))
;
1445
1446 htolem32(&iiq.sense_buffer_address_high,(*(__uint32_t *)(&iiq.sense_buffer_address_high) = ((__uint32_t
)(((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0]
.ds_addr) >> 32)))
1447 MPII_DMA_DVA(sc->sc_requests) >> 32)(*(__uint32_t *)(&iiq.sense_buffer_address_high) = ((__uint32_t
)(((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0]
.ds_addr) >> 32)))
;
1448
1449 htolem32(&iiq.system_reply_address_high,(*(__uint32_t *)(&iiq.system_reply_address_high) = ((__uint32_t
)(((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].
ds_addr) >> 32)))
1450 MPII_DMA_DVA(sc->sc_replies) >> 32)(*(__uint32_t *)(&iiq.system_reply_address_high) = ((__uint32_t
)(((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].
ds_addr) >> 32)))
;
1451
1452 htolem32(&iiq.system_request_frame_base_address_lo,(*(__uint32_t *)(&iiq.system_request_frame_base_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr))))
1453 MPII_DMA_DVA(sc->sc_requests))(*(__uint32_t *)(&iiq.system_request_frame_base_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr))))
;
1454 htolem32(&iiq.system_request_frame_base_address_hi,(*(__uint32_t *)(&iiq.system_request_frame_base_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
1455 MPII_DMA_DVA(sc->sc_requests) >> 32)(*(__uint32_t *)(&iiq.system_request_frame_base_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
;
1456
1457 htolem32(&iiq.reply_descriptor_post_queue_address_lo,(*(__uint32_t *)(&iiq.reply_descriptor_post_queue_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr))))
1458 MPII_DMA_DVA(sc->sc_reply_postq))(*(__uint32_t *)(&iiq.reply_descriptor_post_queue_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr))))
;
1459 htolem32(&iiq.reply_descriptor_post_queue_address_hi,(*(__uint32_t *)(&iiq.reply_descriptor_post_queue_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
1460 MPII_DMA_DVA(sc->sc_reply_postq) >> 32)(*(__uint32_t *)(&iiq.reply_descriptor_post_queue_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
;
1461
1462 htolem32(&iiq.reply_free_queue_address_lo,(*(__uint32_t *)(&iiq.reply_free_queue_address_lo) = ((__uint32_t
)(((u_int64_t)(sc->sc_reply_freeq)->mdm_map->dm_segs
[0].ds_addr))))
1463 MPII_DMA_DVA(sc->sc_reply_freeq))(*(__uint32_t *)(&iiq.reply_free_queue_address_lo) = ((__uint32_t
)(((u_int64_t)(sc->sc_reply_freeq)->mdm_map->dm_segs
[0].ds_addr))))
;
1464 htolem32(&iiq.reply_free_queue_address_hi,(*(__uint32_t *)(&iiq.reply_free_queue_address_hi) = ((__uint32_t
)(((u_int64_t)(sc->sc_reply_freeq)->mdm_map->dm_segs
[0].ds_addr) >> 32)))
1465 MPII_DMA_DVA(sc->sc_reply_freeq) >> 32)(*(__uint32_t *)(&iiq.reply_free_queue_address_hi) = ((__uint32_t
)(((u_int64_t)(sc->sc_reply_freeq)->mdm_map->dm_segs
[0].ds_addr) >> 32)))
;
1466
1467 if (mpii_handshake_send(sc, &iiq, dwordsof(iiq)(sizeof(iiq) / sizeof(u_int32_t))) != 0) {
1468 DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit send failed\n",
1469 DEVNAME(sc));
1470 return (1);
1471 }
1472
1473 if (mpii_handshake_recv(sc, &iip, dwordsof(iip)(sizeof(iip) / sizeof(u_int32_t))) != 0) {
1474 DNPRINTF(MPII_D_MISC, "%s: mpii_iocinit recv failed\n",
1475 DEVNAME(sc));
1476 return (1);
1477 }
1478
1479 DNPRINTF(MPII_D_MISC, "%s: function: 0x%02x msg_length: %d "
1480 "whoinit: 0x%02x\n", DEVNAME(sc), iip.function,
1481 iip.msg_length, iip.whoinit);
1482 DNPRINTF(MPII_D_MISC, "%s: msg_flags: 0x%02x\n", DEVNAME(sc),
1483 iip.msg_flags);
1484 DNPRINTF(MPII_D_MISC, "%s: vf_id: 0x%02x vp_id: 0x%02x\n", DEVNAME(sc),
1485 iip.vf_id, iip.vp_id);
1486 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
1487 lemtoh16(&iip.ioc_status));
1488 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
1489 lemtoh32(&iip.ioc_loginfo));
1490
1491 if (lemtoh16(&iip.ioc_status)((__uint16_t)(*(__uint16_t *)(&iip.ioc_status))) != MPII_IOCSTATUS_SUCCESS(0x0000) ||
1492 lemtoh32(&iip.ioc_loginfo)((__uint32_t)(*(__uint32_t *)(&iip.ioc_loginfo))))
1493 return (1);
1494
1495 return (0);
1496}
1497
1498void
1499mpii_push_reply(struct mpii_softc *sc, struct mpii_rcb *rcb)
1500{
1501 u_int32_t *rfp;
1502 u_int idx;
1503
1504 if (rcb == NULL((void *)0))
1505 return;
1506
1507 idx = sc->sc_reply_free_host_index;
1508
1509 rfp = MPII_DMA_KVA(sc->sc_reply_freeq)((void *)(sc->sc_reply_freeq)->mdm_kva);
1510 htolem32(&rfp[idx], rcb->rcb_reply_dva)(*(__uint32_t *)(&rfp[idx]) = ((__uint32_t)(rcb->rcb_reply_dva
)))
;
1511
1512 if (++idx >= sc->sc_reply_free_qdepth)
1513 idx = 0;
1514
1515 mpii_write_reply_free(sc, sc->sc_reply_free_host_index = idx)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x48)), (
(sc->sc_reply_free_host_index = idx))))
;
1516}
1517
/*
 * mpii_portfacts: send a PORT FACTS request to the IOC by polling and
 * record the reported port type in sc_porttype.  Returns 0 on success,
 * 1 on any failure.  The ccb is always returned to the iopool via the
 * err: label.
 */
1518int
1519mpii_portfacts(struct mpii_softc *sc)
1520{
1521 struct mpii_msg_portfacts_request *pfq;
1522 struct mpii_msg_portfacts_reply *pfp;
1523 struct mpii_ccb *ccb;
1524 int rv = 1;
1525
1526 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts\n", DEVNAME(sc));
1527
1528 ccb = scsi_io_get(&sc->sc_iopool, 0);
1529 if (ccb == NULL((void *)0)) {
1530 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts mpii_get_ccb fail\n",
1531 DEVNAME(sc));
1532 return (rv);
1533 }
1534
1535 ccb->ccb_done = mpii_empty_done;
1536 pfq = ccb->ccb_cmd;
1537
1538 memset(pfq, 0, sizeof(*pfq))__builtin_memset((pfq), (0), (sizeof(*pfq)));
1539
1540 pfq->function = MPII_FUNCTION_PORT_FACTS(0x05);
1541 pfq->chain_offset = 0;
1542 pfq->msg_flags = 0;
1543 pfq->port_number = 0;
1544 pfq->vp_id = 0;
1545 pfq->vf_id = 0;
1546
1547 if (mpii_poll(sc, ccb) != 0) {
1548 DNPRINTF(MPII_D_MISC, "%s: mpii_portfacts poll\n",
1549 DEVNAME(sc));
1550 goto err;
1551 }
1552
1553 if (ccb->ccb_rcb == NULL((void *)0)) {
1554 DNPRINTF(MPII_D_MISC, "%s: empty portfacts reply\n",
1555 DEVNAME(sc));
1556 goto err;
1557 }
1558
1559 pfp = ccb->ccb_rcb->rcb_reply;
1560 sc->sc_porttype = pfp->port_type;
1561
1562 mpii_push_reply(sc, ccb->ccb_rcb);
1563 rv = 0;
1564err:
1565 scsi_io_put(&sc->sc_iopool, ccb);
1566
1567 return (rv);
1568}
1569
/*
 * mpii_eventack: iopool handler that dequeues one pending event reply
 * from sc_evt_ack_queue and sends an EVENT ACK for it using the
 * supplied ccb.  The reply frame is pushed back to the IOC before the
 * ack is started.  If more replies remain queued ("next"), the handler
 * re-arms itself to get another ccb.
 *
 * Note: "next" is only read on the rcb != NULL path, where it has been
 * assigned under sc_evt_ack_mtx, so it is never used uninitialized.
 */
1570void
1571mpii_eventack(void *cookie, void *io)
1572{
1573 struct mpii_softc *sc = cookie;
1574 struct mpii_ccb *ccb = io;
1575 struct mpii_rcb *rcb, *next;
1576 struct mpii_msg_event_reply *enp;
1577 struct mpii_msg_eventack_request *eaq;
1578
1579 mtx_enter(&sc->sc_evt_ack_mtx);
1580 rcb = SIMPLEQ_FIRST(&sc->sc_evt_ack_queue)((&sc->sc_evt_ack_queue)->sqh_first);
1581 if (rcb != NULL((void *)0)) {
1582 next = SIMPLEQ_NEXT(rcb, rcb_link)((rcb)->rcb_link.sqe_next);
1583 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_ack_queue, rcb_link)do { if (((&sc->sc_evt_ack_queue)->sqh_first = (&
sc->sc_evt_ack_queue)->sqh_first->rcb_link.sqe_next)
== ((void *)0)) (&sc->sc_evt_ack_queue)->sqh_last =
&(&sc->sc_evt_ack_queue)->sqh_first; } while (
0)
;
1584 }
1585 mtx_leave(&sc->sc_evt_ack_mtx);
1586
1587 if (rcb == NULL((void *)0)) {
1588 scsi_io_put(&sc->sc_iopool, ccb);
1589 return;
1590 }
1591
1592 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1593
1594 ccb->ccb_done = mpii_eventack_done;
1595 eaq = ccb->ccb_cmd;
1596
1597 eaq->function = MPII_FUNCTION_EVENT_ACK(0x08);
1598
1599 eaq->event = enp->event;
1600 eaq->event_context = enp->event_context;
1601
1602 mpii_push_reply(sc, rcb);
1603
1604 mpii_start(sc, ccb);
1605
1606 if (next != NULL((void *)0))
1607 scsi_ioh_add(&sc->sc_evt_ack_handler);
1608}
1609
1610void
1611mpii_eventack_done(struct mpii_ccb *ccb)
1612{
1613 struct mpii_softc *sc = ccb->ccb_sc;
1614
1615 DNPRINTF(MPII_D_EVT, "%s: event ack done\n", DEVNAME(sc));
1616
1617 mpii_push_reply(sc, ccb->ccb_rcb);
1618 scsi_io_put(&sc->sc_iopool, ccb);
1619}
1620
/*
 * mpii_portenable: issue a PORT ENABLE request to the IOC and poll for
 * completion.  Returns 0 on success, 1 on any failure.
 *
 * NOTE(review): on the poll-failure and empty-reply paths the ccb
 * obtained from sc_iopool is not returned with scsi_io_put(), so it
 * appears to leak.  This may be deliberate (after a failed poll the
 * hardware could still own the request frame, making reuse unsafe) --
 * confirm before changing.
 */
1621int
1622mpii_portenable(struct mpii_softc *sc)
1623{
1624 struct mpii_msg_portenable_request *peq;
1625 struct mpii_ccb *ccb;
1626
1627 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable\n", DEVNAME(sc));
1628
1629 ccb = scsi_io_get(&sc->sc_iopool, 0);
1630 if (ccb == NULL((void *)0)) {
1631 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable ccb_get\n",
1632 DEVNAME(sc));
1633 return (1);
1634 }
1635
1636 ccb->ccb_done = mpii_empty_done;
1637 peq = ccb->ccb_cmd;
1638
1639 peq->function = MPII_FUNCTION_PORT_ENABLE(0x06);
1640 peq->vf_id = sc->sc_vf_id;
1641
1642 if (mpii_poll(sc, ccb) != 0) {
1643 DNPRINTF(MPII_D_MISC, "%s: mpii_portenable poll\n",
1644 DEVNAME(sc));
1645 return (1);
1646 }
1647
1648 if (ccb->ccb_rcb == NULL((void *)0)) {
1649 DNPRINTF(MPII_D_MISC, "%s: empty portenable reply\n",
1650 DEVNAME(sc));
1651 return (1);
1652 }
1653
1654 mpii_push_reply(sc, ccb->ccb_rcb);
1655 scsi_io_put(&sc->sc_iopool, ccb);
1656
1657 return (0);
1658}
1659
1660int
1661mpii_cfg_coalescing(struct mpii_softc *sc)
1662{
1663 struct mpii_cfg_hdr hdr;
1664 struct mpii_cfg_ioc_pg1 ipg;
1665
1666 hdr.page_version = 0;
1667 hdr.page_length = sizeof(ipg) / 4;
1668 hdr.page_number = 1;
1669 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC(0x01);
1670 memset(&ipg, 0, sizeof(ipg))__builtin_memset((&ipg), (0), (sizeof(ipg)));
1671 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL(1<<1), &hdr, 1, &ipg,
1672 sizeof(ipg)) != 0) {
1673 DNPRINTF(MPII_D_MISC, "%s: unable to fetch IOC page 1\n"
1674 "page 1\n", DEVNAME(sc));
1675 return (1);
1676 }
1677
1678 if (!ISSET(lemtoh32(&ipg.flags), MPII_CFG_IOC_1_REPLY_COALESCING)((((__uint32_t)(*(__uint32_t *)(&ipg.flags)))) & ((1<<
0)))
)
1679 return (0);
1680
1681 /* Disable coalescing */
1682 CLR(ipg.flags, htole32(MPII_CFG_IOC_1_REPLY_COALESCING))((ipg.flags) &= ~(((__uint32_t)((1<<0)))));
1683 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL(1<<1), &hdr, 0, &ipg,
1684 sizeof(ipg)) != 0) {
1685 DNPRINTF(MPII_D_MISC, "%s: unable to clear coalescing\n",
1686 DEVNAME(sc));
1687 return (1);
1688 }
1689
1690 return (0);
1691}
1692
/*
 * MPII_EVENT_MASKALL: set all four 32-bit words of the event
 * notification mask, i.e. disable reporting of every event.
 */
1693#define MPII_EVENT_MASKALL(enq)do { enq->event_masks[0] = 0xffffffff; enq->event_masks
[1] = 0xffffffff; enq->event_masks[2] = 0xffffffff; enq->
event_masks[3] = 0xffffffff; } while (0)
do { \
1694 enq->event_masks[0] = 0xffffffff; \
1695 enq->event_masks[1] = 0xffffffff; \
1696 enq->event_masks[2] = 0xffffffff; \
1697 enq->event_masks[3] = 0xffffffff; \
1698 } while (0)
1699
/*
 * MPII_EVENT_UNMASK: clear the bit for event "evt" in the mask words,
 * i.e. enable reporting of that event.  The mask is kept in
 * little-endian form, hence the htole32() on the cleared bit.
 */
1700#define MPII_EVENT_UNMASK(enq, evt)do { enq->event_masks[evt / 32] &= ((__uint32_t)(~(1 <<
(evt % 32)))); } while (0)
do { \
1701 enq->event_masks[evt / 32] &= \
1702 htole32(~(1 << (evt % 32)))((__uint32_t)(~(1 << (evt % 32)))); \
1703 } while (0)
1704
/*
 * mpii_eventnotify: initialize the SAS and ack event queues, their
 * mutexes, the event task and the ack iopool handler, then submit an
 * EVENT NOTIFICATION request that unmasks the SAS and IR events this
 * driver handles.  The request completes asynchronously via
 * mpii_eventnotify_done.  Returns 0 on success, 1 if no ccb is
 * available.
 */
1705int
1706mpii_eventnotify(struct mpii_softc *sc)
1707{
1708 struct mpii_msg_event_request *enq;
1709 struct mpii_ccb *ccb;
1710
1711 ccb = scsi_io_get(&sc->sc_iopool, 0);
1712 if (ccb == NULL((void *)0)) {
1713 DNPRINTF(MPII_D_MISC, "%s: mpii_eventnotify ccb_get\n",
1714 DEVNAME(sc));
1715 return (1);
1716 }
1717
1718 SIMPLEQ_INIT(&sc->sc_evt_sas_queue)do { (&sc->sc_evt_sas_queue)->sqh_first = ((void *)
0); (&sc->sc_evt_sas_queue)->sqh_last = &(&
sc->sc_evt_sas_queue)->sqh_first; } while (0)
;
1719 mtx_init(&sc->sc_evt_sas_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_evt_sas_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6)))); } while (0)
;
1720 task_set(&sc->sc_evt_sas_task, mpii_event_sas, sc);
1721
1722 SIMPLEQ_INIT(&sc->sc_evt_ack_queue)do { (&sc->sc_evt_ack_queue)->sqh_first = ((void *)
0); (&sc->sc_evt_ack_queue)->sqh_last = &(&
sc->sc_evt_ack_queue)->sqh_first; } while (0)
;
1723 mtx_init(&sc->sc_evt_ack_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_evt_ack_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6)))); } while (0)
;
1724 scsi_ioh_set(&sc->sc_evt_ack_handler, &sc->sc_iopool,
1725 mpii_eventack, sc);
1726
1727 ccb->ccb_done = mpii_eventnotify_done;
1728 enq = ccb->ccb_cmd;
1729
1730 enq->function = MPII_FUNCTION_EVENT_NOTIFICATION(0x07);
1731
1732 /*
1733 * Enable reporting of the following events:
1734 *
1735 * MPII_EVENT_SAS_DISCOVERY
1736 * MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST
1737 * MPII_EVENT_SAS_DEVICE_STATUS_CHANGE
1738 * MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE
1739 * MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST
1740 * MPII_EVENT_IR_VOLUME
1741 * MPII_EVENT_IR_PHYSICAL_DISK
1742 * MPII_EVENT_IR_OPERATION_STATUS
1743 */
1744
1745 MPII_EVENT_MASKALL(enq)do { enq->event_masks[0] = 0xffffffff; enq->event_masks
[1] = 0xffffffff; enq->event_masks[2] = 0xffffffff; enq->
event_masks[3] = 0xffffffff; } while (0)
;
1746 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DISCOVERY)do { enq->event_masks[(0x16) / 32] &= ((__uint32_t)(~(
1 << ((0x16) % 32)))); } while (0)
;
1747 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST)do { enq->event_masks[(0x1c) / 32] &= ((__uint32_t)(~(
1 << ((0x1c) % 32)))); } while (0)
;
1748 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_DEVICE_STATUS_CHANGE)do { enq->event_masks[(0x0f) / 32] &= ((__uint32_t)(~(
1 << ((0x0f) % 32)))); } while (0)
;
1749 MPII_EVENT_UNMASK(enq, MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE)do { enq->event_masks[(0x1d) / 32] &= ((__uint32_t)(~(
1 << ((0x1d) % 32)))); } while (0)
;
1750 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST)do { enq->event_masks[(0x20) / 32] &= ((__uint32_t)(~(
1 << ((0x20) % 32)))); } while (0)
;
1751 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_VOLUME)do { enq->event_masks[(0x1e) / 32] &= ((__uint32_t)(~(
1 << ((0x1e) % 32)))); } while (0)
;
1752 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_PHYSICAL_DISK)do { enq->event_masks[(0x1f) / 32] &= ((__uint32_t)(~(
1 << ((0x1f) % 32)))); } while (0)
;
1753 MPII_EVENT_UNMASK(enq, MPII_EVENT_IR_OPERATION_STATUS)do { enq->event_masks[(0x14) / 32] &= ((__uint32_t)(~(
1 << ((0x14) % 32)))); } while (0)
;
1754
1755 mpii_start(sc, ccb);
1756
1757 return (0);
1758}
1759
1760void
1761mpii_eventnotify_done(struct mpii_ccb *ccb)
1762{
1763 struct mpii_softc *sc = ccb->ccb_sc;
1764 struct mpii_rcb *rcb = ccb->ccb_rcb;
1765
1766 DNPRINTF(MPII_D_EVT, "%s: mpii_eventnotify_done\n", DEVNAME(sc));
1767
1768 scsi_io_put(&sc->sc_iopool, ccb);
1769 mpii_event_process(sc, rcb);
1770}
1771
/*
 * mpii_event_raid: handle an IR CONFIGURATION CHANGE LIST event.  The
 * change list follows the event reply header in the same frame; each
 * element describes a volume, volume disk, or hot spare transition.
 * Volumes are added to / removed from the driver's device list;
 * underlying physical disks get their MPII_DF_* flags adjusted so they
 * are hidden from the SCSI midlayer.  Foreign configurations and empty
 * lists are ignored.
 */
1772void
1773mpii_event_raid(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
1774{
1775 struct mpii_evt_ir_cfg_change_list *ccl;
1776 struct mpii_evt_ir_cfg_element *ce;
1777 struct mpii_device *dev;
1778 u_int16_t type;
1779 int i;
1780
1781 ccl = (struct mpii_evt_ir_cfg_change_list *)(enp + 1);
1782 if (ccl->num_elements == 0)
1783 return;
1784
1785 if (ISSET(lemtoh32(&ccl->flags), MPII_EVT_IR_CFG_CHANGE_LIST_FOREIGN)((((__uint32_t)(*(__uint32_t *)(&ccl->flags)))) & (
(0x1)))
) {
1786 /* bail on foreign configurations */
1787 return;
1788 }
1789
1790 ce = (struct mpii_evt_ir_cfg_element *)(ccl + 1);
1791
1792 for (i = 0; i < ccl->num_elements; i++, ce++) {
1793 type = (lemtoh16(&ce->element_flags)((__uint16_t)(*(__uint16_t *)(&ce->element_flags))) &
1794 MPII_EVT_IR_CFG_ELEMENT_TYPE_MASK(0xf));
1795
1796 switch (type) {
1797 case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME(0x0):
1798 switch (ce->reason_code) {
1799 case MPII_EVT_IR_CFG_ELEMENT_RC_ADDED(0x01):
1800 case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_CREATED(0x06):
1801 if (mpii_find_dev(sc,
1802 lemtoh16(&ce->vol_dev_handle)((__uint16_t)(*(__uint16_t *)(&ce->vol_dev_handle))))) {
1803 printf("%s: device %#x is already "
1804 "configured\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1805 lemtoh16(&ce->vol_dev_handle)((__uint16_t)(*(__uint16_t *)(&ce->vol_dev_handle))));
1806 break;
1807 }
1808 dev = malloc(sizeof(*dev), M_DEVBUF2,
1809 M_NOWAIT0x0002 | M_ZERO0x0008);
1810 if (!dev) {
1811 printf("%s: failed to allocate a "
1812 "device structure\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1813 break;
1814 }
1815 SET(dev->flags, MPII_DF_VOLUME)((dev->flags) |= ((0x0010)));
1816 dev->slot = sc->sc_vd_id_low;
1817 dev->dev_handle = lemtoh16(&ce->vol_dev_handle)((__uint16_t)(*(__uint16_t *)(&ce->vol_dev_handle)));
1818 if (mpii_insert_dev(sc, dev)) {
1819 free(dev, M_DEVBUF2, sizeof *dev);
1820 break;
1821 }
1822 sc->sc_vd_count++;
1823 break;
1824 case MPII_EVT_IR_CFG_ELEMENT_RC_REMOVED(0x02):
1825 case MPII_EVT_IR_CFG_ELEMENT_RC_VOLUME_DELETED(0x07):
1826 if (!(dev = mpii_find_dev(sc,
1827 lemtoh16(&ce->vol_dev_handle)((__uint16_t)(*(__uint16_t *)(&ce->vol_dev_handle))))))
1828 break;
1829 mpii_remove_dev(sc, dev);
1830 sc->sc_vd_count--;
1831 break;
1832 }
1833 break;
1834 case MPII_EVT_IR_CFG_ELEMENT_TYPE_VOLUME_DISK(0x1):
1835 if (ce->reason_code ==
1836 MPII_EVT_IR_CFG_ELEMENT_RC_PD_CREATED(0x08) ||
1837 ce->reason_code ==
1838 MPII_EVT_IR_CFG_ELEMENT_RC_HIDE(0x04)) {
1839 /* there should be an underlying sas drive */
1840 if (!(dev = mpii_find_dev(sc,
1841 lemtoh16(&ce->phys_disk_dev_handle)((__uint16_t)(*(__uint16_t *)(&ce->phys_disk_dev_handle
)))
)))
1842 break;
1843 /* promoted from a hot spare? */
1844 CLR(dev->flags, MPII_DF_HOT_SPARE)((dev->flags) &= ~((0x0040)));
1845 SET(dev->flags, MPII_DF_VOLUME_DISK |((dev->flags) |= ((0x0020) | (0x0004)))
1846 MPII_DF_HIDDEN)((dev->flags) |= ((0x0020) | (0x0004)));
1847 }
1848 break;
1849 case MPII_EVT_IR_CFG_ELEMENT_TYPE_HOT_SPARE(0x2):
1850 if (ce->reason_code ==
1851 MPII_EVT_IR_CFG_ELEMENT_RC_HIDE(0x04)) {
1852 /* there should be an underlying sas drive */
1853 if (!(dev = mpii_find_dev(sc,
1854 lemtoh16(&ce->phys_disk_dev_handle)((__uint16_t)(*(__uint16_t *)(&ce->phys_disk_dev_handle
)))
)))
1855 break;
1856 SET(dev->flags, MPII_DF_HOT_SPARE |((dev->flags) |= ((0x0040) | (0x0004)))
1857 MPII_DF_HIDDEN)((dev->flags) |= ((0x0040) | (0x0004)));
1858 }
1859 break;
1860 }
1861 }
1862}
1863
/*
 * mpii_event_sas: task handler for SAS events queued by
 * mpii_event_process.  Dequeues one rcb from sc_evt_sas_queue under
 * sc_evt_sas_mtx and re-queues the task if more remain.  SAS DISCOVERY
 * events are forwarded to mpii_event_discovery; a SAS TOPOLOGY CHANGE
 * LIST is walked entry by entry, attaching new devices (and probing
 * the scsibus target) or detaching missing ones.  Any other event type
 * here is a driver bug, hence the panic.  The rcb is returned to the
 * IOC via mpii_event_done.
 *
 * Note: "next" is only read after the rcb != NULL check, where it was
 * assigned under the mutex.
 */
1864void
1865mpii_event_sas(void *xsc)
1866{
1867 struct mpii_softc *sc = xsc;
1868 struct mpii_rcb *rcb, *next;
1869 struct mpii_msg_event_reply *enp;
1870 struct mpii_evt_sas_tcl *tcl;
1871 struct mpii_evt_phy_entry *pe;
1872 struct mpii_device *dev;
1873 int i;
1874 u_int16_t handle;
1875
1876 mtx_enter(&sc->sc_evt_sas_mtx);
1877 rcb = SIMPLEQ_FIRST(&sc->sc_evt_sas_queue)((&sc->sc_evt_sas_queue)->sqh_first);
1878 if (rcb != NULL((void *)0)) {
1879 next = SIMPLEQ_NEXT(rcb, rcb_link)((rcb)->rcb_link.sqe_next);
1880 SIMPLEQ_REMOVE_HEAD(&sc->sc_evt_sas_queue, rcb_link)do { if (((&sc->sc_evt_sas_queue)->sqh_first = (&
sc->sc_evt_sas_queue)->sqh_first->rcb_link.sqe_next)
== ((void *)0)) (&sc->sc_evt_sas_queue)->sqh_last =
&(&sc->sc_evt_sas_queue)->sqh_first; } while (
0)
;
1881 }
1882 mtx_leave(&sc->sc_evt_sas_mtx);
1883
1884 if (rcb == NULL((void *)0))
1885 return;
1886 if (next != NULL((void *)0))
1887 task_add(systq, &sc->sc_evt_sas_task);
1888
1889 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1890 switch (lemtoh16(&enp->event)((__uint16_t)(*(__uint16_t *)(&enp->event)))) {
1891 case MPII_EVENT_SAS_DISCOVERY(0x16):
1892 mpii_event_discovery(sc, enp);
1893 goto done;
1894 case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST(0x1c):
1895 /* handle below */
1896 break;
1897 default:
1898 panic("%s: unexpected event %#x in sas event queue",
1899 DEVNAME(sc)((sc)->sc_dev.dv_xname), lemtoh16(&enp->event)((__uint16_t)(*(__uint16_t *)(&enp->event))));
1900 /* NOTREACHED */
1901 }
1902
1903 tcl = (struct mpii_evt_sas_tcl *)(enp + 1);
1904 pe = (struct mpii_evt_phy_entry *)(tcl + 1);
1905
1906 for (i = 0; i < tcl->num_entries; i++, pe++) {
1907 switch (pe->phy_status & MPII_EVENT_SAS_TOPO_PS_RC_MASK(0x0f)) {
1908 case MPII_EVENT_SAS_TOPO_PS_RC_ADDED(0x01):
1909 handle = lemtoh16(&pe->dev_handle)((__uint16_t)(*(__uint16_t *)(&pe->dev_handle)));
1910 if (mpii_find_dev(sc, handle)) {
1911 printf("%s: device %#x is already "
1912 "configured\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), handle);
1913 break;
1914 }
1915
1916 dev = malloc(sizeof(*dev), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
1917 dev->slot = sc->sc_pd_id_start + tcl->start_phy_num + i;
1918 dev->dev_handle = handle;
1919 dev->phy_num = tcl->start_phy_num + i;
1920 if (tcl->enclosure_handle)
1921 dev->physical_port = tcl->physical_port;
1922 dev->enclosure = lemtoh16(&tcl->enclosure_handle)((__uint16_t)(*(__uint16_t *)(&tcl->enclosure_handle))
)
;
1923 dev->expander = lemtoh16(&tcl->expander_handle)((__uint16_t)(*(__uint16_t *)(&tcl->expander_handle)));
1924
1925 if (mpii_insert_dev(sc, dev)) {
1926 free(dev, M_DEVBUF2, sizeof *dev);
1927 break;
1928 }
1929
1930 if (sc->sc_scsibus != NULL((void *)0))
1931 scsi_probe_target(sc->sc_scsibus, dev->slot);
1932 break;
1933
1934 case MPII_EVENT_SAS_TOPO_PS_RC_MISSING(0x02):
1935 dev = mpii_find_dev(sc, lemtoh16(&pe->dev_handle)((__uint16_t)(*(__uint16_t *)(&pe->dev_handle))));
1936 if (dev == NULL((void *)0))
1937 break;
1938
1939 mpii_remove_dev(sc, dev);
1940 mpii_sas_remove_device(sc, dev->dev_handle);
1941 if (sc->sc_scsibus != NULL((void *)0) &&
1942 !ISSET(dev->flags, MPII_DF_HIDDEN)((dev->flags) & ((0x0004)))) {
1943 scsi_activate(sc->sc_scsibus, dev->slot, -1,
1944 DVACT_DEACTIVATE1);
1945 scsi_detach_target(sc->sc_scsibus, dev->slot,
1946 DETACH_FORCE0x01);
1947 }
1948
1949 free(dev, M_DEVBUF2, sizeof *dev);
1950 break;
1951 }
1952 }
1953
1954done:
1955 mpii_event_done(sc, rcb);
1956}
1957
1958void
1959mpii_event_discovery(struct mpii_softc *sc, struct mpii_msg_event_reply *enp)
1960{
1961 struct mpii_evt_sas_discovery *esd =
1962 (struct mpii_evt_sas_discovery *)(enp + 1);
1963
1964 if (sc->sc_pending == 0)
1965 return;
1966
1967 switch (esd->reason_code) {
1968 case MPII_EVENT_SAS_DISC_REASON_CODE_STARTED(0x01):
1969 ++sc->sc_pending;
1970 break;
1971 case MPII_EVENT_SAS_DISC_REASON_CODE_COMPLETED(0x02):
1972 if (--sc->sc_pending == 1) {
1973 sc->sc_pending = 0;
1974 config_pending_decr();
1975 }
1976 break;
1977 }
1978}
1979
/*
 * mpii_event_process: central dispatcher for EVENT NOTIFICATION
 * replies.  SAS discovery/topology events are deferred to the
 * sc_evt_sas_task (the rcb is handed off, so this function returns
 * without mpii_event_done for them).  IR volume and operation-status
 * events update the matching mpii_device in place; IR configuration
 * changes go to mpii_event_raid.  All non-deferred paths finish with
 * mpii_event_done, which acks or recycles the reply frame.
 */
1980void
1981mpii_event_process(struct mpii_softc *sc, struct mpii_rcb *rcb)
1982{
1983 struct mpii_msg_event_reply *enp;
1984
1985 enp = (struct mpii_msg_event_reply *)rcb->rcb_reply;
1986
1987 DNPRINTF(MPII_D_EVT, "%s: mpii_event_process: %#x\n", DEVNAME(sc),
1988 lemtoh16(&enp->event));
1989
1990 switch (lemtoh16(&enp->event)((__uint16_t)(*(__uint16_t *)(&enp->event)))) {
1991 case MPII_EVENT_EVENT_CHANGE(0x0a):
1992 /* should be properly ignored */
1993 break;
1994 case MPII_EVENT_SAS_DISCOVERY(0x16):
1995 case MPII_EVENT_SAS_TOPOLOGY_CHANGE_LIST(0x1c):
1996 mtx_enter(&sc->sc_evt_sas_mtx);
1997 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_sas_queue, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&sc->
sc_evt_sas_queue)->sqh_last = (rcb); (&sc->sc_evt_sas_queue
)->sqh_last = &(rcb)->rcb_link.sqe_next; } while (0
)
;
1998 mtx_leave(&sc->sc_evt_sas_mtx);
1999 task_add(systq, &sc->sc_evt_sas_task);
2000 return;
2001 case MPII_EVENT_SAS_DEVICE_STATUS_CHANGE(0x0f):
2002 break;
2003 case MPII_EVENT_SAS_ENCL_DEVICE_STATUS_CHANGE(0x1d):
2004 break;
2005 case MPII_EVENT_IR_VOLUME(0x1e): {
2006 struct mpii_evt_ir_volume *evd =
2007 (struct mpii_evt_ir_volume *)(enp + 1);
2008 struct mpii_device *dev;
2009#if NBIO1 > 0
2010 const char *vol_states[] = {
2011 BIOC_SVINVALID_S"Invalid",
2012 BIOC_SVOFFLINE_S"Offline",
2013 BIOC_SVBUILDING_S"Building",
2014 BIOC_SVONLINE_S"Online",
2015 BIOC_SVDEGRADED_S"Degraded",
2016 BIOC_SVONLINE_S"Online",
2017 };
2018#endif
2019
2020 if (cold)
2021 break;
2022 KERNEL_LOCK()_kernel_lock();
2023 dev = mpii_find_dev(sc, lemtoh16(&evd->vol_dev_handle)((__uint16_t)(*(__uint16_t *)(&evd->vol_dev_handle))));
2024 KERNEL_UNLOCK()_kernel_unlock();
2025 if (dev == NULL((void *)0))
2026 break;
2027#if NBIO1 > 0
2028 if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATE_CHANGED(0x03))
2029 printf("%s: volume %d state changed from %s to %s\n",
2030 DEVNAME(sc)((sc)->sc_dev.dv_xname), dev->slot - sc->sc_vd_id_low,
2031 vol_states[evd->prev_value],
2032 vol_states[evd->new_value]);
2033#endif
2034 if (evd->reason_code == MPII_EVENT_IR_VOL_RC_STATUS_CHANGED(0x02) &&
2035 ISSET(evd->new_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC)((evd->new_value) & ((1<<16))) &&
2036 !ISSET(evd->prev_value, MPII_CFG_RAID_VOL_0_STATUS_RESYNC)((evd->prev_value) & ((1<<16))))
2037 printf("%s: started resync on a volume %d\n",
2038 DEVNAME(sc)((sc)->sc_dev.dv_xname), dev->slot - sc->sc_vd_id_low);
2039 }
2040 break;
2041 case MPII_EVENT_IR_PHYSICAL_DISK(0x1f):
2042 break;
2043 case MPII_EVENT_IR_CONFIGURATION_CHANGE_LIST(0x20):
2044 mpii_event_raid(sc, enp);
2045 break;
2046 case MPII_EVENT_IR_OPERATION_STATUS(0x14): {
2047 struct mpii_evt_ir_status *evs =
2048 (struct mpii_evt_ir_status *)(enp + 1);
2049 struct mpii_device *dev;
2050
2051 KERNEL_LOCK()_kernel_lock();
2052 dev = mpii_find_dev(sc, lemtoh16(&evs->vol_dev_handle)((__uint16_t)(*(__uint16_t *)(&evs->vol_dev_handle))));
2053 KERNEL_UNLOCK()_kernel_unlock();
2054 if (dev != NULL((void *)0) &&
2055 evs->operation == MPII_EVENT_IR_RAIDOP_RESYNC(0x00))
2056 dev->percent = evs->percent;
2057 break;
2058 }
2059 default:
2060 DNPRINTF(MPII_D_EVT, "%s: unhandled event 0x%02x\n",
2061 DEVNAME(sc), lemtoh16(&enp->event));
2062 }
2063
2064 mpii_event_done(sc, rcb);
2065}
2066
2067void
2068mpii_event_done(struct mpii_softc *sc, struct mpii_rcb *rcb)
2069{
2070 struct mpii_msg_event_reply *enp = rcb->rcb_reply;
2071
2072 if (enp->ack_required) {
2073 mtx_enter(&sc->sc_evt_ack_mtx);
2074 SIMPLEQ_INSERT_TAIL(&sc->sc_evt_ack_queue, rcb, rcb_link)do { (rcb)->rcb_link.sqe_next = ((void *)0); *(&sc->
sc_evt_ack_queue)->sqh_last = (rcb); (&sc->sc_evt_ack_queue
)->sqh_last = &(rcb)->rcb_link.sqe_next; } while (0
)
;
2075 mtx_leave(&sc->sc_evt_ack_mtx);
2076 scsi_ioh_add(&sc->sc_evt_ack_handler);
2077 } else
2078 mpii_push_reply(sc, rcb);
2079}
2080
/*
 * mpii_sas_remove_device: tear down a departed SAS device.  First a
 * SCSI TASK MGMT target reset is issued for the handle, then the same
 * ccb is reset and reused for a SAS IO UNIT CONTROL "remove device"
 * operation.  Both requests are waited on synchronously and their
 * reply frames (if any) are returned to the IOC.
 */
2081void
2082mpii_sas_remove_device(struct mpii_softc *sc, u_int16_t handle)
2083{
2084 struct mpii_msg_scsi_task_request *stq;
2085 struct mpii_msg_sas_oper_request *soq;
2086 struct mpii_ccb *ccb;
2087
2088 ccb = scsi_io_get(&sc->sc_iopool, 0);
2089 if (ccb == NULL((void *)0))
2090 return;
2091
2092 stq = ccb->ccb_cmd;
2093 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT(0x01);
2094 stq->task_type = MPII_SCSI_TASK_TARGET_RESET(0x03);
2095 htolem16(&stq->dev_handle, handle)(*(__uint16_t *)(&stq->dev_handle) = ((__uint16_t)(handle
)))
;
2096
2097 ccb->ccb_done = mpii_empty_done;
2098 mpii_wait(sc, ccb);
2099
2100 if (ccb->ccb_rcb != NULL((void *)0))
2101 mpii_push_reply(sc, ccb->ccb_rcb);
2102
2103 /* reuse a ccb */
2104 ccb->ccb_state = MPII_CCB_READY;
2105 ccb->ccb_rcb = NULL((void *)0);
2106
2107 soq = ccb->ccb_cmd;
2108 memset(soq, 0, sizeof(*soq))__builtin_memset((soq), (0), (sizeof(*soq)));
2109 soq->function = MPII_FUNCTION_SAS_IO_UNIT_CONTROL(0x1b);
2110 soq->operation = MPII_SAS_OP_REMOVE_DEVICE(0x0d);
2111 htolem16(&soq->dev_handle, handle)(*(__uint16_t *)(&soq->dev_handle) = ((__uint16_t)(handle
)))
;
2112
2113 ccb->ccb_done = mpii_empty_done;
2114 mpii_wait(sc, ccb);
2115 if (ccb->ccb_rcb != NULL((void *)0))
2116 mpii_push_reply(sc, ccb->ccb_rcb);
2117
2118 scsi_io_put(&sc->sc_iopool, ccb);
2119}
2120
/*
 * mpii_board_info: fetch IOC FACTS via the doorbell handshake and
 * manufacturing page 0 via a polled config request, then print the
 * board name, firmware and MPI versions at attach time.
 *
 * NOTE(review): error returns are inconsistent -- handshake failures
 * return 1 while the config-page failure returns EINVAL (22).  Callers
 * presumably only test for non-zero; confirm before unifying.
 */
2121int
2122mpii_board_info(struct mpii_softc *sc)
2123{
2124 struct mpii_msg_iocfacts_request ifq;
2125 struct mpii_msg_iocfacts_reply ifp;
2126 struct mpii_cfg_manufacturing_pg0 mpg;
2127 struct mpii_cfg_hdr hdr;
2128
2129 memset(&ifq, 0, sizeof(ifq))__builtin_memset((&ifq), (0), (sizeof(ifq)));
2130 memset(&ifp, 0, sizeof(ifp))__builtin_memset((&ifp), (0), (sizeof(ifp)));
2131
2132 ifq.function = MPII_FUNCTION_IOC_FACTS(0x03);
2133
2134 if (mpii_handshake_send(sc, &ifq, dwordsof(ifq)(sizeof(ifq) / sizeof(u_int32_t))) != 0) {
2135 DNPRINTF(MPII_D_MISC, "%s: failed to request ioc facts\n",
2136 DEVNAME(sc));
2137 return (1);
2138 }
2139
2140 if (mpii_handshake_recv(sc, &ifp, dwordsof(ifp)(sizeof(ifp) / sizeof(u_int32_t))) != 0) {
2141 DNPRINTF(MPII_D_MISC, "%s: failed to receive ioc facts\n",
2142 DEVNAME(sc));
2143 return (1);
2144 }
2145
2146 hdr.page_version = 0;
2147 hdr.page_length = sizeof(mpg) / 4;
2148 hdr.page_number = 0;
2149 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_MANUFACTURING(0x09);
2150 memset(&mpg, 0, sizeof(mpg))__builtin_memset((&mpg), (0), (sizeof(mpg)));
2151 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL(1<<1), &hdr, 1, &mpg,
2152 sizeof(mpg)) != 0) {
2153 printf("%s: unable to fetch manufacturing page 0\n",
2154 DEVNAME(sc)((sc)->sc_dev.dv_xname));
2155 return (EINVAL22);
2156 }
2157
2158 printf("%s: %s, firmware %u.%u.%u.%u%s, MPI %u.%u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
2159 mpg.board_name, ifp.fw_version_maj, ifp.fw_version_min,
2160 ifp.fw_version_unit, ifp.fw_version_dev,
2161 ISSET(sc->sc_flags, MPII_F_RAID)((sc->sc_flags) & ((1<<1))) ? " IR" : "",
2162 ifp.msg_version_maj, ifp.msg_version_min);
2163
2164 return (0);
2165}
2166
/*
 * mpii_target_map: read IOC page 8 and derive the SCSI target layout:
 * whether target id 0 is reserved (pad), where RAID volume ids start
 * (sc_vd_id_low) according to the volume mapping mode, and where
 * physical device ids start (sc_pd_id_start).  Returns 0 on success,
 * EINVAL if the config page cannot be fetched.
 */
2167int
2168mpii_target_map(struct mpii_softc *sc)
2169{
2170 struct mpii_cfg_hdr hdr;
2171 struct mpii_cfg_ioc_pg8 ipg;
2172 int flags, pad = 0;
2173
2174 hdr.page_version = 0;
2175 hdr.page_length = sizeof(ipg) / 4;
2176 hdr.page_number = 8;
2177 hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_IOC(0x01);
2178 memset(&ipg, 0, sizeof(ipg))__builtin_memset((&ipg), (0), (sizeof(ipg)));
2179 if (mpii_req_cfg_page(sc, 0, MPII_PG_POLL(1<<1), &hdr, 1, &ipg,
2180 sizeof(ipg)) != 0) {
2181 printf("%s: unable to fetch ioc page 8\n",
2182 DEVNAME(sc)((sc)->sc_dev.dv_xname));
2183 return (EINVAL22);
2184 }
2185
2186 if (lemtoh16(&ipg.flags)((__uint16_t)(*(__uint16_t *)(&ipg.flags))) & MPII_IOC_PG8_FLAGS_RESERVED_TARGETID_0(1<<4))
2187 pad = 1;
2188
2189 flags = lemtoh16(&ipg.ir_volume_mapping_flags)((__uint16_t)(*(__uint16_t *)(&ipg.ir_volume_mapping_flags
)))
&
2190 MPII_IOC_PG8_IRFLAGS_VOLUME_MAPPING_MODE_MASK(0x00000003);
2191 if (ISSET(sc->sc_flags, MPII_F_RAID)((sc->sc_flags) & ((1<<1)))) {
2192 if (flags == MPII_IOC_PG8_IRFLAGS_LOW_VOLUME_MAPPING(0<<0)) {
2193 sc->sc_vd_id_low += pad;
2194 pad = sc->sc_max_volumes; /* for sc_pd_id_start */
2195 } else
2196 sc->sc_vd_id_low = sc->sc_max_devices -
2197 sc->sc_max_volumes;
2198 }
2199
2200 sc->sc_pd_id_start += pad;
2201
2202 return (0);
2203}
2204
/*
 * mpii_req_cfg_header: issue a CONFIG PAGE_HEADER action to discover a
 * configuration page's header.  "p" points at either a mpii_cfg_hdr or
 * (with MPII_PG_EXTENDED in flags) a mpii_ecfg_hdr, which is filled in
 * on success.  With MPII_PG_POLL the request is polled (no sleeping),
 * otherwise the caller sleeps in mpii_wait.  Returns 0 on success, 1
 * on failure.
 *
 * NOTE(review): on the poll-failure path the ccb is not handed back
 * with scsi_io_put(), which looks like a leak; it may be intentional
 * because the hardware could still own the frame -- confirm.
 */
2205int
2206mpii_req_cfg_header(struct mpii_softc *sc, u_int8_t type, u_int8_t number,
2207 u_int32_t address, int flags, void *p)
2208{
2209 struct mpii_msg_config_request *cq;
2210 struct mpii_msg_config_reply *cp;
2211 struct mpii_ccb *ccb;
2212 struct mpii_cfg_hdr *hdr = p;
2213 struct mpii_ecfg_hdr *ehdr = p;
2214 int etype = 0;
2215 int rv = 0;
2216
2217 DNPRINTF(MPII_D_MISC, "%s: mpii_req_cfg_header type: %#x number: %x "
2218 "address: 0x%08x flags: 0x%b\n", DEVNAME(sc), type, number,
2219 address, flags, MPII_PG_FMT);
2220
2221 ccb = scsi_io_get(&sc->sc_iopool,
2222 ISSET(flags, MPII_PG_POLL)((flags) & ((1<<1))) ? SCSI_NOSLEEP0x00001 : 0);
2223 if (ccb == NULL((void *)0)) {
2224 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header ccb_get\n",
2225 DEVNAME(sc));
2226 return (1);
2227 }
2228
2229 if (ISSET(flags, MPII_PG_EXTENDED)((flags) & ((1<<0)))) {
2230 etype = type;
2231 type = MPII_CONFIG_REQ_PAGE_TYPE_EXTENDED(0x0f);
2232 }
2233
2234 cq = ccb->ccb_cmd;
2235
2236 cq->function = MPII_FUNCTION_CONFIG(0x04);
2237
2238 cq->action = MPII_CONFIG_REQ_ACTION_PAGE_HEADER(0x00);
2239
2240 cq->config_header.page_number = number;
2241 cq->config_header.page_type = type;
2242 cq->ext_page_type = etype;
2243 htolem32(&cq->page_address, address)(*(__uint32_t *)(&cq->page_address) = ((__uint32_t)(address
)))
;
2244 htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24))))
2245 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL)(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24))))
;
2246
2247 ccb->ccb_done = mpii_empty_done;
2248 if (ISSET(flags, MPII_PG_POLL)((flags) & ((1<<1)))) {
2249 if (mpii_poll(sc, ccb) != 0) {
2250 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2251 DEVNAME(sc));
2252 return (1);
2253 }
2254 } else
2255 mpii_wait(sc, ccb);
2256
2257 if (ccb->ccb_rcb == NULL((void *)0)) {
2258 scsi_io_put(&sc->sc_iopool, ccb);
2259 return (1);
2260 }
2261 cp = ccb->ccb_rcb->rcb_reply;
2262
2263 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x sgl_flags: 0x%02x "
2264 "msg_length: %d function: 0x%02x\n", DEVNAME(sc), cp->action,
2265 cp->sgl_flags, cp->msg_length, cp->function);
2266 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2267 "msg_flags: 0x%02x\n", DEVNAME(sc),
2268 lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2269 cp->msg_flags);
2270 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2271 cp->vp_id, cp->vf_id);
2272 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2273 lemtoh16(&cp->ioc_status));
2274 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2275 lemtoh32(&cp->ioc_loginfo));
2276 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2277 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2278 cp->config_header.page_version,
2279 cp->config_header.page_length,
2280 cp->config_header.page_number,
2281 cp->config_header.page_type);
2282
2283 if (lemtoh16(&cp->ioc_status)((__uint16_t)(*(__uint16_t *)(&cp->ioc_status))) != MPII_IOCSTATUS_SUCCESS(0x0000))
2284 rv = 1;
2285 else if (ISSET(flags, MPII_PG_EXTENDED)((flags) & ((1<<0)))) {
2286 memset(ehdr, 0, sizeof(*ehdr))__builtin_memset((ehdr), (0), (sizeof(*ehdr)));
2287 ehdr->page_version = cp->config_header.page_version;
2288 ehdr->page_number = cp->config_header.page_number;
2289 ehdr->page_type = cp->config_header.page_type;
2290 ehdr->ext_page_length = cp->ext_page_length;
2291 ehdr->ext_page_type = cp->ext_page_type;
2292 } else
2293 *hdr = cp->config_header;
2294
2295 mpii_push_reply(sc, ccb->ccb_rcb);
2296 scsi_io_put(&sc->sc_iopool, ccb);
2297
2298 return (rv);
2299}
2300
/*
 * mpii_req_cfg_page: read or write a configuration page whose header
 * was previously fetched with mpii_req_cfg_header (the header, plain
 * or extended per MPII_PG_EXTENDED, is passed via "p").  The page data
 * is bounced through the tail of the request frame itself so no extra
 * DMA mapping is needed; "len" must fit in the request frame and cover
 * the page length reported by the header.  Returns 0 on success, 1 on
 * failure.
 *
 * NOTE(review): as in mpii_req_cfg_header, the poll-failure path
 * returns without scsi_io_put(), apparently leaking the ccb -- confirm
 * whether this is intentional before changing.
 */
2301int
2302mpii_req_cfg_page(struct mpii_softc *sc, u_int32_t address, int flags,
2303 void *p, int read, void *page, size_t len)
2304{
2305 struct mpii_msg_config_request *cq;
2306 struct mpii_msg_config_reply *cp;
2307 struct mpii_ccb *ccb;
2308 struct mpii_cfg_hdr *hdr = p;
2309 struct mpii_ecfg_hdr *ehdr = p;
2310 caddr_t kva;
2311 int page_length;
2312 int rv = 0;
2313
2314 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page address: %d read: %d "
2315 "type: %x\n", DEVNAME(sc), address, read, hdr->page_type);
2316
2317 page_length = ISSET(flags, MPII_PG_EXTENDED)((flags) & ((1<<0))) ?
2318 lemtoh16(&ehdr->ext_page_length)((__uint16_t)(*(__uint16_t *)(&ehdr->ext_page_length))
)
: hdr->page_length;
2319
2320 if (len > sc->sc_request_size - sizeof(*cq) || len < page_length * 4)
2321 return (1);
2322
2323 ccb = scsi_io_get(&sc->sc_iopool,
2324 ISSET(flags, MPII_PG_POLL)((flags) & ((1<<1))) ? SCSI_NOSLEEP0x00001 : 0);
2325 if (ccb == NULL((void *)0)) {
2326 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_page ccb_get\n",
2327 DEVNAME(sc));
2328 return (1);
2329 }
2330
2331 cq = ccb->ccb_cmd;
2332
2333 cq->function = MPII_FUNCTION_CONFIG(0x04);
2334
2335 cq->action = (read ? MPII_CONFIG_REQ_ACTION_PAGE_READ_CURRENT(0x01) :
2336 MPII_CONFIG_REQ_ACTION_PAGE_WRITE_CURRENT(0x02));
2337
2338 if (ISSET(flags, MPII_PG_EXTENDED)((flags) & ((1<<0)))) {
2339 cq->config_header.page_version = ehdr->page_version;
2340 cq->config_header.page_number = ehdr->page_number;
2341 cq->config_header.page_type = ehdr->page_type;
2342 cq->ext_page_len = ehdr->ext_page_length;
2343 cq->ext_page_type = ehdr->ext_page_type;
2344 } else
2345 cq->config_header = *hdr;
2346 cq->config_header.page_type &= MPII_CONFIG_REQ_PAGE_TYPE_MASK(0x0f);
2347 htolem32(&cq->page_address, address)(*(__uint32_t *)(&cq->page_address) = ((__uint32_t)(address
)))
;
2348 htolem32(&cq->page_buffer.sg_hdr, MPII_SGE_FL_TYPE_SIMPLE |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (0x1<<25) | (page_length * 4) | (read ? (0x0<<
26) : (0x1<<26)))))
2349 MPII_SGE_FL_LAST | MPII_SGE_FL_EOB | MPII_SGE_FL_EOL |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (0x1<<25) | (page_length * 4) | (read ? (0x0<<
26) : (0x1<<26)))))
2350 MPII_SGE_FL_SIZE_64 | (page_length * 4) |(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (0x1<<25) | (page_length * 4) | (read ? (0x0<<
26) : (0x1<<26)))))
2351 (read ? MPII_SGE_FL_DIR_IN : MPII_SGE_FL_DIR_OUT))(*(__uint32_t *)(&cq->page_buffer.sg_hdr) = ((__uint32_t
)((0x1<<28) | (0x1<<31) | (0x1<<30) | (0x1<<
24) | (0x1<<25) | (page_length * 4) | (read ? (0x0<<
26) : (0x1<<26)))))
;
2352
2353 /* bounce the page via the request space to avoid more bus_dma games */
2354 mpii_dvatosge(&cq->page_buffer, ccb->ccb_cmd_dva +
2355 sizeof(struct mpii_msg_config_request));
2356
2357 kva = ccb->ccb_cmd;
2358 kva += sizeof(struct mpii_msg_config_request);
2359
2360 if (!read)
2361 memcpy(kva, page, len)__builtin_memcpy((kva), (page), (len));
2362
2363 ccb->ccb_done = mpii_empty_done;
2364 if (ISSET(flags, MPII_PG_POLL)((flags) & ((1<<1)))) {
2365 if (mpii_poll(sc, ccb) != 0) {
2366 DNPRINTF(MPII_D_MISC, "%s: mpii_cfg_header poll\n",
2367 DEVNAME(sc));
2368 return (1);
2369 }
2370 } else
2371 mpii_wait(sc, ccb);
2372
2373 if (ccb->ccb_rcb == NULL((void *)0)) {
2374 scsi_io_put(&sc->sc_iopool, ccb);
2375 return (1);
2376 }
2377 cp = ccb->ccb_rcb->rcb_reply;
2378
2379 DNPRINTF(MPII_D_MISC, "%s: action: 0x%02x msg_length: %d "
2380 "function: 0x%02x\n", DEVNAME(sc), cp->action, cp->msg_length,
2381 cp->function);
2382 DNPRINTF(MPII_D_MISC, "%s: ext_page_length: %d ext_page_type: 0x%02x "
2383 "msg_flags: 0x%02x\n", DEVNAME(sc),
2384 lemtoh16(&cp->ext_page_length), cp->ext_page_type,
2385 cp->msg_flags);
2386 DNPRINTF(MPII_D_MISC, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
2387 cp->vp_id, cp->vf_id);
2388 DNPRINTF(MPII_D_MISC, "%s: ioc_status: 0x%04x\n", DEVNAME(sc),
2389 lemtoh16(&cp->ioc_status));
2390 DNPRINTF(MPII_D_MISC, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
2391 lemtoh32(&cp->ioc_loginfo));
2392 DNPRINTF(MPII_D_MISC, "%s: page_version: 0x%02x page_length: %d "
2393 "page_number: 0x%02x page_type: 0x%02x\n", DEVNAME(sc),
2394 cp->config_header.page_version,
2395 cp->config_header.page_length,
2396 cp->config_header.page_number,
2397 cp->config_header.page_type);
2398
2399 if (lemtoh16(&cp->ioc_status)((__uint16_t)(*(__uint16_t *)(&cp->ioc_status))) != MPII_IOCSTATUS_SUCCESS(0x0000))
2400 rv = 1;
2401 else if (read)
2402 memcpy(page, kva, len)__builtin_memcpy((page), (kva), (len));
2403
2404 mpii_push_reply(sc, ccb->ccb_rcb);
2405 scsi_io_put(&sc->sc_iopool, ccb);
2406
2407 return (rv);
2408}
2409
2410struct mpii_rcb *
2411mpii_reply(struct mpii_softc *sc, struct mpii_reply_descr *rdp)
2412{
2413 struct mpii_rcb *rcb = NULL((void *)0);
2414 u_int32_t rfid;
2415
2416 DNPRINTF(MPII_D_INTR, "%s: mpii_reply\n", DEVNAME(sc));
2417
2418 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK(0x0f)) ==
2419 MPII_REPLY_DESCR_ADDRESS_REPLY(0x01)) {
2420 rfid = (lemtoh32(&rdp->frame_addr)((__uint32_t)(*(__uint32_t *)(&rdp->frame_addr))) -
2421 (u_int32_t)MPII_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
) /
2422 sc->sc_reply_size;
2423
2424 bus_dmamap_sync(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (sc->sc_reply_size * rfid), (sc
->sc_reply_size), (0x02))
2425 MPII_DMA_MAP(sc->sc_replies), sc->sc_reply_size * rfid,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (sc->sc_reply_size * rfid), (sc
->sc_reply_size), (0x02))
2426 sc->sc_reply_size, BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (sc->sc_reply_size * rfid), (sc
->sc_reply_size), (0x02))
;
2427
2428 rcb = &sc->sc_rcbs[rfid];
2429 }
2430
2431 memset(rdp, 0xff, sizeof(*rdp))__builtin_memset((rdp), (0xff), (sizeof(*rdp)));
2432
2433 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (8 * sc->sc_reply_post_host_index
), (8), (0x02 | 0x08))
2434 8 * sc->sc_reply_post_host_index, 8,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (8 * sc->sc_reply_post_host_index
), (8), (0x02 | 0x08))
2435 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (8 * sc->sc_reply_post_host_index
), (8), (0x02 | 0x08))
;
2436
2437 return (rcb);
2438}
2439
2440struct mpii_dmamem *
2441mpii_dmamem_alloc(struct mpii_softc *sc, size_t size)
2442{
2443 struct mpii_dmamem *mdm;
2444 int nsegs;
2445
2446 mdm = malloc(sizeof(*mdm), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
2447 if (mdm == NULL((void *)0))
2448 return (NULL((void *)0));
2449
2450 mdm->mdm_size = size;
2451
2452 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&mdm->mdm_map
))
2453 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&mdm->mdm_map
))
!= 0)
2454 goto mdmfree;
2455
2456 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &mdm->mdm_seg,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&mdm->mdm_seg), (1), (&
nsegs), (0x0001 | 0x1000))
2457 1, &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&mdm->mdm_seg), (1), (&
nsegs), (0x0001 | 0x1000))
!= 0)
2458 goto destroy;
2459
2460 if (bus_dmamem_map(sc->sc_dmat, &mdm->mdm_seg, nsegs, size,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&mdm
->mdm_seg), (nsegs), (size), (&mdm->mdm_kva), (0x0001
))
2461 &mdm->mdm_kva, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&mdm
->mdm_seg), (nsegs), (size), (&mdm->mdm_kva), (0x0001
))
!= 0)
2462 goto free;
2463
2464 if (bus_dmamap_load(sc->sc_dmat, mdm->mdm_map, mdm->mdm_kva, size,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (mdm->
mdm_map), (mdm->mdm_kva), (size), (((void *)0)), (0x0001))
2465 NULL, BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (mdm->
mdm_map), (mdm->mdm_kva), (size), (((void *)0)), (0x0001))
!= 0)
2466 goto unmap;
2467
2468 return (mdm);
2469
2470unmap:
2471 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (mdm->
mdm_kva), (size))
;
2472free:
2473 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
mdm->mdm_seg), (1))
;
2474destroy:
2475 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (mdm
->mdm_map))
;
2476mdmfree:
2477 free(mdm, M_DEVBUF2, sizeof *mdm);
2478
2479 return (NULL((void *)0));
2480}
2481
2482void
2483mpii_dmamem_free(struct mpii_softc *sc, struct mpii_dmamem *mdm)
2484{
2485 DNPRINTF(MPII_D_MEM, "%s: mpii_dmamem_free %p\n", DEVNAME(sc), mdm);
2486
2487 bus_dmamap_unload(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (mdm
->mdm_map))
;
2488 bus_dmamem_unmap(sc->sc_dmat, mdm->mdm_kva, mdm->mdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (mdm->
mdm_kva), (mdm->mdm_size))
;
2489 bus_dmamem_free(sc->sc_dmat, &mdm->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
mdm->mdm_seg), (1))
;
2490 bus_dmamap_destroy(sc->sc_dmat, mdm->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (mdm
->mdm_map))
;
2491 free(mdm, M_DEVBUF2, sizeof *mdm);
2492}
2493
2494int
2495mpii_insert_dev(struct mpii_softc *sc, struct mpii_device *dev)
2496{
2497 int slot; /* initial hint */
2498
2499 if (dev == NULL((void *)0) || dev->slot < 0)
2500 return (1);
2501 slot = dev->slot;
2502
2503 while (slot < sc->sc_max_devices && sc->sc_devs[slot] != NULL((void *)0))
2504 slot++;
2505
2506 if (slot >= sc->sc_max_devices)
2507 return (1);
2508
2509 dev->slot = slot;
2510 sc->sc_devs[slot] = dev;
2511
2512 return (0);
2513}
2514
2515int
2516mpii_remove_dev(struct mpii_softc *sc, struct mpii_device *dev)
2517{
2518 int i;
2519
2520 if (dev == NULL((void *)0))
2521 return (1);
2522
2523 for (i = 0; i < sc->sc_max_devices; i++) {
2524 if (sc->sc_devs[i] == NULL((void *)0))
2525 continue;
2526
2527 if (sc->sc_devs[i]->dev_handle == dev->dev_handle) {
2528 sc->sc_devs[i] = NULL((void *)0);
2529 return (0);
2530 }
2531 }
2532
2533 return (1);
2534}
2535
2536struct mpii_device *
2537mpii_find_dev(struct mpii_softc *sc, u_int16_t handle)
2538{
2539 int i;
2540
2541 for (i = 0; i < sc->sc_max_devices; i++) {
2542 if (sc->sc_devs[i] == NULL((void *)0))
2543 continue;
2544
2545 if (sc->sc_devs[i]->dev_handle == handle)
2546 return (sc->sc_devs[i]);
2547 }
2548
2549 return (NULL((void *)0));
2550}
2551
2552int
2553mpii_alloc_ccbs(struct mpii_softc *sc)
2554{
2555 struct mpii_ccb *ccb;
2556 u_int8_t *cmd;
2557 int i;
2558
2559 SIMPLEQ_INIT(&sc->sc_ccb_free)do { (&sc->sc_ccb_free)->sqh_first = ((void *)0); (
&sc->sc_ccb_free)->sqh_last = &(&sc->sc_ccb_free
)->sqh_first; } while (0)
;
2560 SIMPLEQ_INIT(&sc->sc_ccb_tmos)do { (&sc->sc_ccb_tmos)->sqh_first = ((void *)0); (
&sc->sc_ccb_tmos)->sqh_last = &(&sc->sc_ccb_tmos
)->sqh_first; } while (0)
;
2561 mtx_init(&sc->sc_ccb_free_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_ccb_free_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6)))); } while (0)
;
2562 mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_ccb_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ?
0x9 : ((0x6)))); } while (0)
;
2563 scsi_ioh_set(&sc->sc_ccb_tmo_handler, &sc->sc_iopool,
2564 mpii_scsi_cmd_tmo_handler, sc);
2565
2566 sc->sc_ccbs = mallocarray((sc->sc_max_cmds-1), sizeof(*ccb),
2567 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
2568 if (sc->sc_ccbs == NULL((void *)0)) {
2569 printf("%s: unable to allocate ccbs\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2570 return (1);
2571 }
2572
2573 sc->sc_requests = mpii_dmamem_alloc(sc,
2574 sc->sc_request_size * sc->sc_max_cmds);
2575 if (sc->sc_requests == NULL((void *)0)) {
2576 printf("%s: unable to allocate ccb dmamem\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2577 goto free_ccbs;
2578 }
2579 cmd = MPII_DMA_KVA(sc->sc_requests)((void *)(sc->sc_requests)->mdm_kva);
2580
2581 /*
2582 * we have sc->sc_max_cmds system request message
2583 * frames, but smid zero cannot be used. so we then
2584 * have (sc->sc_max_cmds - 1) number of ccbs
2585 */
2586 for (i = 1; i < sc->sc_max_cmds; i++) {
2587 ccb = &sc->sc_ccbs[i - 1];
2588
2589 if (bus_dmamap_create(sc->sc_dmat, MAXPHYS, sc->sc_max_sgl,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002 | 0x2000), (&ccb->ccb_dmamap))
2590 MAXPHYS, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002 | 0x2000), (&ccb->ccb_dmamap))
2591 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW | BUS_DMA_64BIT,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002 | 0x2000), (&ccb->ccb_dmamap))
2592 &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002 | 0x2000), (&ccb->ccb_dmamap))
!= 0) {
2593 printf("%s: unable to create dma map\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
2594 goto free_maps;
2595 }
2596
2597 ccb->ccb_sc = sc;
2598 htolem16(&ccb->ccb_smid, i)(*(__uint16_t *)(&ccb->ccb_smid) = ((__uint16_t)(i)));
2599 ccb->ccb_offset = sc->sc_request_size * i;
2600
2601 ccb->ccb_cmd = &cmd[ccb->ccb_offset];
2602 ccb->ccb_cmd_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_requests)((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0].ds_addr
)
+
2603 ccb->ccb_offset;
2604
2605 DNPRINTF(MPII_D_CCB, "%s: mpii_alloc_ccbs(%d) ccb: %p map: %p "
2606 "sc: %p smid: %#x offs: %#lx cmd: %p dva: %#lx\n",
2607 DEVNAME(sc), i, ccb, ccb->ccb_dmamap, ccb->ccb_sc,
2608 ccb->ccb_smid, ccb->ccb_offset, ccb->ccb_cmd,
2609 ccb->ccb_cmd_dva);
2610
2611 mpii_put_ccb(sc, ccb);
2612 }
2613
2614 scsi_iopool_init(&sc->sc_iopool, sc, mpii_get_ccb, mpii_put_ccb);
2615
2616 return (0);
2617
2618free_maps:
2619 while ((ccb = mpii_get_ccb(sc)) != NULL((void *)0))
2620 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
2621
2622 mpii_dmamem_free(sc, sc->sc_requests);
2623free_ccbs:
2624 free(sc->sc_ccbs, M_DEVBUF2, (sc->sc_max_cmds-1) * sizeof(*ccb));
2625
2626 return (1);
2627}
2628
2629void
2630mpii_put_ccb(void *cookie, void *io)
2631{
2632 struct mpii_softc *sc = cookie;
2633 struct mpii_ccb *ccb = io;
2634
2635 DNPRINTF(MPII_D_CCB, "%s: mpii_put_ccb %p\n", DEVNAME(sc), ccb);
2636
2637 ccb->ccb_state = MPII_CCB_FREE;
2638 ccb->ccb_cookie = NULL((void *)0);
2639 ccb->ccb_done = NULL((void *)0);
2640 ccb->ccb_rcb = NULL((void *)0);
2641 memset(ccb->ccb_cmd, 0, sc->sc_request_size)__builtin_memset((ccb->ccb_cmd), (0), (sc->sc_request_size
))
;
2642
2643 KERNEL_UNLOCK()_kernel_unlock();
2644 mtx_enter(&sc->sc_ccb_free_mtx);
2645 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_free, ccb, ccb_link)do { if (((ccb)->ccb_link.sqe_next = (&sc->sc_ccb_free
)->sqh_first) == ((void *)0)) (&sc->sc_ccb_free)->
sqh_last = &(ccb)->ccb_link.sqe_next; (&sc->sc_ccb_free
)->sqh_first = (ccb); } while (0)
;
2646 mtx_leave(&sc->sc_ccb_free_mtx);
2647 KERNEL_LOCK()_kernel_lock();
2648}
2649
2650void *
2651mpii_get_ccb(void *cookie)
2652{
2653 struct mpii_softc *sc = cookie;
2654 struct mpii_ccb *ccb;
2655
2656 KERNEL_UNLOCK()_kernel_unlock();
2657
2658 mtx_enter(&sc->sc_ccb_free_mtx);
2659 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_free)((&sc->sc_ccb_free)->sqh_first);
2660 if (ccb != NULL((void *)0)) {
2661 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_free, ccb_link)do { if (((&sc->sc_ccb_free)->sqh_first = (&sc->
sc_ccb_free)->sqh_first->ccb_link.sqe_next) == ((void *
)0)) (&sc->sc_ccb_free)->sqh_last = &(&sc->
sc_ccb_free)->sqh_first; } while (0)
;
2662 ccb->ccb_state = MPII_CCB_READY;
2663 }
2664 mtx_leave(&sc->sc_ccb_free_mtx);
2665
2666 KERNEL_LOCK()_kernel_lock();
2667
2668 DNPRINTF(MPII_D_CCB, "%s: mpii_get_ccb %p\n", DEVNAME(sc), ccb);
2669
2670 return (ccb);
2671}
2672
2673int
2674mpii_alloc_replies(struct mpii_softc *sc)
2675{
2676 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_replies\n", DEVNAME(sc));
2677
2678 sc->sc_rcbs = mallocarray(sc->sc_num_reply_frames,
2679 sizeof(struct mpii_rcb), M_DEVBUF2, M_NOWAIT0x0002);
2680 if (sc->sc_rcbs == NULL((void *)0))
2681 return (1);
2682
2683 sc->sc_replies = mpii_dmamem_alloc(sc, sc->sc_reply_size *
2684 sc->sc_num_reply_frames);
2685 if (sc->sc_replies == NULL((void *)0)) {
2686 free(sc->sc_rcbs, M_DEVBUF2,
2687 sc->sc_num_reply_frames * sizeof(struct mpii_rcb));
2688 return (1);
2689 }
2690
2691 return (0);
2692}
2693
2694void
2695mpii_push_replies(struct mpii_softc *sc)
2696{
2697 struct mpii_rcb *rcb;
2698 caddr_t kva = MPII_DMA_KVA(sc->sc_replies)((void *)(sc->sc_replies)->mdm_kva);
2699 int i;
2700
2701 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_replies),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_reply_size * sc->
sc_num_reply_frames), (0x01))
2702 0, sc->sc_reply_size * sc->sc_num_reply_frames,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_reply_size * sc->
sc_num_reply_frames), (0x01))
2703 BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_replies)->mdm_map)), (0), (sc->sc_reply_size * sc->
sc_num_reply_frames), (0x01))
;
2704
2705 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2706 rcb = &sc->sc_rcbs[i];
2707
2708 rcb->rcb_reply = kva + sc->sc_reply_size * i;
2709 rcb->rcb_reply_dva = (u_int32_t)MPII_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
+
2710 sc->sc_reply_size * i;
2711 mpii_push_reply(sc, rcb);
2712 }
2713}
2714
2715void
2716mpii_start(struct mpii_softc *sc, struct mpii_ccb *ccb)
2717{
2718 struct mpii_request_header *rhp;
2719 struct mpii_request_descr descr;
2720 u_long *rdp = (u_long *)&descr;
2721
2722 DNPRINTF(MPII_D_RW, "%s: mpii_start %#lx\n", DEVNAME(sc),
2723 ccb->ccb_cmd_dva);
2724
2725 bus_dmamap_sync(sc->sc_dmat, MPII_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (sc->sc_request_size
), (0x01 | 0x04))
2726 ccb->ccb_offset, sc->sc_request_size,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (sc->sc_request_size
), (0x01 | 0x04))
2727 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_offset), (sc->sc_request_size
), (0x01 | 0x04))
;
2728
2729 ccb->ccb_state = MPII_CCB_QUEUED;
2730
2731 rhp = ccb->ccb_cmd;
2732
2733 memset(&descr, 0, sizeof(descr))__builtin_memset((&descr), (0), (sizeof(descr)));
2734
2735 switch (rhp->function) {
2736 case MPII_FUNCTION_SCSI_IO_REQUEST(0x00):
2737 descr.request_flags = MPII_REQ_DESCR_SCSI_IO(0x00);
2738 descr.dev_handle = htole16(ccb->ccb_dev_handle)((__uint16_t)(ccb->ccb_dev_handle));
2739 break;
2740 case MPII_FUNCTION_SCSI_TASK_MGMT(0x01):
2741 descr.request_flags = MPII_REQ_DESCR_HIGH_PRIORITY(0x06);
2742 break;
2743 default:
2744 descr.request_flags = MPII_REQ_DESCR_DEFAULT(0x08);
2745 }
2746
2747 descr.vf_id = sc->sc_vf_id;
2748 descr.smid = ccb->ccb_smid;
2749
2750 DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_LOW (0x%08x) write "
2751 "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_LOW, *rdp);
2752
2753 DNPRINTF(MPII_D_RW, "%s: MPII_REQ_DESCR_POST_HIGH (0x%08x) write "
2754 "0x%08lx\n", DEVNAME(sc), MPII_REQ_DESCR_POST_HIGH, *(rdp+1));
2755
2756#if defined(__LP64__1)
2757 bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh,((sc->sc_iot)->write_8((sc->sc_ioh), ((0xc0)), (*rdp
)))
2758 MPII_REQ_DESCR_POST_LOW, *rdp)((sc->sc_iot)->write_8((sc->sc_ioh), ((0xc0)), (*rdp
)))
;
2759#else
2760 mtx_enter(&sc->sc_req_mtx);
2761 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,((sc->sc_iot)->write_4((sc->sc_ioh), ((0xc0)), (rdp[
0])))
2762 MPII_REQ_DESCR_POST_LOW, rdp[0])((sc->sc_iot)->write_4((sc->sc_ioh), ((0xc0)), (rdp[
0])))
;
2763 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
2764 MPII_REQ_DESCR_POST_LOW(0xc0), 8, BUS_SPACE_BARRIER_WRITE0x02);
2765
2766 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh,((sc->sc_iot)->write_4((sc->sc_ioh), ((0xc4)), (rdp[
1])))
2767 MPII_REQ_DESCR_POST_HIGH, rdp[1])((sc->sc_iot)->write_4((sc->sc_ioh), ((0xc4)), (rdp[
1])))
;
2768 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
2769 MPII_REQ_DESCR_POST_LOW(0xc0), 8, BUS_SPACE_BARRIER_WRITE0x02);
2770 mtx_leave(&sc->sc_req_mtx);
2771#endif
2772}
2773
2774int
2775mpii_poll(struct mpii_softc *sc, struct mpii_ccb *ccb)
2776{
2777 void (*done)(struct mpii_ccb *);
2778 void *cookie;
2779 int rv = 1;
2780
2781 DNPRINTF(MPII_D_INTR, "%s: mpii_poll\n", DEVNAME(sc));
2782
2783 done = ccb->ccb_done;
2784 cookie = ccb->ccb_cookie;
2785
2786 ccb->ccb_done = mpii_poll_done;
2787 ccb->ccb_cookie = &rv;
2788
2789 mpii_start(sc, ccb);
2790
2791 while (rv == 1) {
2792 /* avoid excessive polling */
2793 if (mpii_reply_waiting(sc)((mpii_read(((sc)), (0x30)) & (1<<3)) == (1<<
3))
)
2794 mpii_intr(sc);
2795 else
2796 delay(10)(*delay_func)(10);
2797 }
2798
2799 ccb->ccb_cookie = cookie;
2800 done(ccb);
2801
2802 return (0);
2803}
2804
2805void
2806mpii_poll_done(struct mpii_ccb *ccb)
2807{
2808 int *rv = ccb->ccb_cookie;
2809
2810 *rv = 0;
2811}
2812
2813int
2814mpii_alloc_queues(struct mpii_softc *sc)
2815{
2816 u_int32_t *rfp;
2817 int i;
2818
2819 DNPRINTF(MPII_D_MISC, "%s: mpii_alloc_queues\n", DEVNAME(sc));
2820
2821 sc->sc_reply_freeq = mpii_dmamem_alloc(sc,
2822 sc->sc_reply_free_qdepth * sizeof(*rfp));
2823 if (sc->sc_reply_freeq == NULL((void *)0))
2824 return (1);
2825 rfp = MPII_DMA_KVA(sc->sc_reply_freeq)((void *)(sc->sc_reply_freeq)->mdm_kva);
2826 for (i = 0; i < sc->sc_num_reply_frames; i++) {
2827 rfp[i] = (u_int32_t)MPII_DMA_DVA(sc->sc_replies)((u_int64_t)(sc->sc_replies)->mdm_map->dm_segs[0].ds_addr
)
+
2828 sc->sc_reply_size * i;
2829 }
2830
2831 sc->sc_reply_postq = mpii_dmamem_alloc(sc,
2832 sc->sc_reply_post_qdepth * sizeof(struct mpii_reply_descr));
2833 if (sc->sc_reply_postq == NULL((void *)0))
2834 goto free_reply_freeq;
2835 sc->sc_reply_postq_kva = MPII_DMA_KVA(sc->sc_reply_postq)((void *)(sc->sc_reply_postq)->mdm_kva);
2836 memset(sc->sc_reply_postq_kva, 0xff, sc->sc_reply_post_qdepth *__builtin_memset((sc->sc_reply_postq_kva), (0xff), (sc->
sc_reply_post_qdepth * sizeof(struct mpii_reply_descr)))
2837 sizeof(struct mpii_reply_descr))__builtin_memset((sc->sc_reply_postq_kva), (0xff), (sc->
sc_reply_post_qdepth * sizeof(struct mpii_reply_descr)))
;
2838
2839 return (0);
2840
2841free_reply_freeq:
2842 mpii_dmamem_free(sc, sc->sc_reply_freeq);
2843 return (1);
2844}
2845
2846void
2847mpii_init_queues(struct mpii_softc *sc)
2848{
2849 DNPRINTF(MPII_D_MISC, "%s: mpii_init_queues\n", DEVNAME(sc));
2850
2851 sc->sc_reply_free_host_index = sc->sc_reply_free_qdepth - 1;
2852 sc->sc_reply_post_host_index = 0;
2853 mpii_write_reply_free(sc, sc->sc_reply_free_host_index)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x48)), (
(sc->sc_reply_free_host_index))))
;
2854 mpii_write_reply_post(sc, sc->sc_reply_post_host_index)(((sc)->sc_iot)->write_4(((sc)->sc_ioh), ((0x6c)), (
(sc->sc_reply_post_host_index))))
;
2855}
2856
2857void
2858mpii_wait(struct mpii_softc *sc, struct mpii_ccb *ccb)
2859{
2860 struct mutex mtx = MUTEX_INITIALIZER(IPL_BIO){ ((void *)0), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6))), 0x0 }
;
2861 void (*done)(struct mpii_ccb *);
2862 void *cookie;
2863
2864 done = ccb->ccb_done;
2865 cookie = ccb->ccb_cookie;
2866
2867 ccb->ccb_done = mpii_wait_done;
2868 ccb->ccb_cookie = &mtx;
2869
2870 /* XXX this will wait forever for the ccb to complete */
2871
2872 mpii_start(sc, ccb);
2873
2874 mtx_enter(&mtx);
2875 while (ccb->ccb_cookie != NULL((void *)0))
2876 msleep_nsec(ccb, &mtx, PRIBIO16, "mpiiwait", INFSLP0xffffffffffffffffULL);
2877 mtx_leave(&mtx);
2878
2879 ccb->ccb_cookie = cookie;
2880 done(ccb);
2881}
2882
2883void
2884mpii_wait_done(struct mpii_ccb *ccb)
2885{
2886 struct mutex *mtx = ccb->ccb_cookie;
2887
2888 mtx_enter(mtx);
2889 ccb->ccb_cookie = NULL((void *)0);
2890 mtx_leave(mtx);
2891
2892 wakeup_one(ccb)wakeup_n((ccb), 1);
2893}
2894
2895void
2896mpii_scsi_cmd(struct scsi_xfer *xs)
2897{
2898 struct scsi_link *link = xs->sc_link;
2899 struct mpii_softc *sc = link->bus->sb_adapter_softc;
2900 struct mpii_ccb *ccb = xs->io;
2901 struct mpii_msg_scsi_io *io;
2902 struct mpii_device *dev;
2903 int ret;
2904
2905 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd\n", DEVNAME(sc));
2906
2907 if (xs->cmdlen > MPII_CDB_LEN(32)) {
1
Assuming field 'cmdlen' is <= MPII_CDB_LEN
2
Taking false branch
2908 DNPRINTF(MPII_D_CMD, "%s: CDB too big %d\n",
2909 DEVNAME(sc), xs->cmdlen);
2910 memset(&xs->sense, 0, sizeof(xs->sense))__builtin_memset((&xs->sense), (0), (sizeof(xs->sense
)))
;
2911 xs->sense.error_code = SSD_ERRCODE_VALID0x80 | 0x70;
2912 xs->sense.flags = SKEY_ILLEGAL_REQUEST0x05;
2913 xs->sense.add_sense_code = 0x20;
2914 xs->error = XS_SENSE1;
2915 scsi_done(xs);
2916 return;
2917 }
2918
2919 if ((dev = sc->sc_devs[link->target]) == NULL((void *)0)) {
3
Assuming the condition is false
4
Taking false branch
2920 /* device no longer exists */
2921 xs->error = XS_SELTIMEOUT3;
2922 scsi_done(xs);
2923 return;
2924 }
2925
2926 KERNEL_UNLOCK()_kernel_unlock();
2927
2928 DNPRINTF(MPII_D_CMD, "%s: ccb_smid: %d xs->flags: 0x%x\n",
2929 DEVNAME(sc), ccb->ccb_smid, xs->flags);
2930
2931 ccb->ccb_cookie = xs;
2932 ccb->ccb_done = mpii_scsi_cmd_done;
2933 ccb->ccb_dev_handle = dev->dev_handle;
2934
2935 io = ccb->ccb_cmd;
2936 memset(io, 0, sizeof(*io))__builtin_memset((io), (0), (sizeof(*io)));
2937 io->function = MPII_FUNCTION_SCSI_IO_REQUEST(0x00);
2938 io->sense_buffer_length = sizeof(xs->sense);
2939 io->sgl_offset0 = sizeof(struct mpii_msg_scsi_io) / 4;
2940 htolem16(&io->io_flags, xs->cmdlen)(*(__uint16_t *)(&io->io_flags) = ((__uint16_t)(xs->
cmdlen)))
;
2941 htolem16(&io->dev_handle, ccb->ccb_dev_handle)(*(__uint16_t *)(&io->dev_handle) = ((__uint16_t)(ccb->
ccb_dev_handle)))
;
2942 htobem16(&io->lun[0], link->lun)(*(__uint16_t *)(&io->lun[0]) = (__uint16_t)(__builtin_constant_p
(link->lun) ? (__uint16_t)(((__uint16_t)(link->lun) &
0xffU) << 8 | ((__uint16_t)(link->lun) & 0xff00U
) >> 8) : __swap16md(link->lun)))
;
5
'?' condition is false
2943
2944 switch (xs->flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
6
Control jumps to the 'default' case at line 2951
2945 case SCSI_DATA_IN0x00800:
2946 io->direction = MPII_SCSIIO_DIR_READ(0x2);
2947 break;
2948 case SCSI_DATA_OUT0x01000:
2949 io->direction = MPII_SCSIIO_DIR_WRITE(0x1);
2950 break;
2951 default:
2952 io->direction = MPII_SCSIIO_DIR_NONE(0x0);
2953 break;
7
Execution continues on line 2956
2954 }
2955
2956 io->tagging = MPII_SCSIIO_ATTR_SIMPLE_Q(0x0);
2957
2958 memcpy(io->cdb, &xs->cmd, xs->cmdlen)__builtin_memcpy((io->cdb), (&xs->cmd), (xs->cmdlen
))
;
2959
2960 htolem32(&io->data_length, xs->datalen)(*(__uint32_t *)(&io->data_length) = ((__uint32_t)(xs->
datalen)))
;
2961
2962 /* sense data is at the end of a request */
2963 htolem32(&io->sense_buffer_low_address, ccb->ccb_cmd_dva +(*(__uint32_t *)(&io->sense_buffer_low_address) = ((__uint32_t
)(ccb->ccb_cmd_dva + sc->sc_request_size - sizeof(struct
scsi_sense_data))))
2964 sc->sc_request_size - sizeof(struct scsi_sense_data))(*(__uint32_t *)(&io->sense_buffer_low_address) = ((__uint32_t
)(ccb->ccb_cmd_dva + sc->sc_request_size - sizeof(struct
scsi_sense_data))))
;
2965
2966 if (ISSET(sc->sc_flags, MPII_F_SAS3)((sc->sc_flags) & ((1<<2))))
8
Assuming the condition is true
9
Taking true branch
2967 ret = mpii_load_xs_sas3(ccb);
10
Calling 'mpii_load_xs_sas3'
2968 else
2969 ret = mpii_load_xs(ccb);
2970
2971 if (ret != 0) {
2972 xs->error = XS_DRIVER_STUFFUP2;
2973 goto done;
2974 }
2975
2976 timeout_set(&xs->stimeout, mpii_scsi_cmd_tmo, ccb);
2977 if (xs->flags & SCSI_POLL0x00002) {
2978 if (mpii_poll(sc, ccb) != 0) {
2979 xs->error = XS_DRIVER_STUFFUP2;
2980 goto done;
2981 }
2982 } else {
2983 timeout_add_msec(&xs->stimeout, xs->timeout);
2984 mpii_start(sc, ccb);
2985 }
2986
2987 KERNEL_LOCK()_kernel_lock();
2988 return;
2989
2990done:
2991 KERNEL_LOCK()_kernel_lock();
2992 scsi_done(xs);
2993}
2994
2995void
2996mpii_scsi_cmd_tmo(void *xccb)
2997{
2998 struct mpii_ccb *ccb = xccb;
2999 struct mpii_softc *sc = ccb->ccb_sc;
3000
3001 printf("%s: mpii_scsi_cmd_tmo (0x%08x)\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
3002 mpii_read_db(sc)mpii_read((sc), (0x00)));
3003
3004 mtx_enter(&sc->sc_ccb_mtx);
3005 if (ccb->ccb_state == MPII_CCB_QUEUED) {
3006 ccb->ccb_state = MPII_CCB_TIMEOUT;
3007 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_tmos, ccb, ccb_link)do { if (((ccb)->ccb_link.sqe_next = (&sc->sc_ccb_tmos
)->sqh_first) == ((void *)0)) (&sc->sc_ccb_tmos)->
sqh_last = &(ccb)->ccb_link.sqe_next; (&sc->sc_ccb_tmos
)->sqh_first = (ccb); } while (0)
;
3008 }
3009 mtx_leave(&sc->sc_ccb_mtx);
3010
3011 scsi_ioh_add(&sc->sc_ccb_tmo_handler);
3012}
3013
3014void
3015mpii_scsi_cmd_tmo_handler(void *cookie, void *io)
3016{
3017 struct mpii_softc *sc = cookie;
3018 struct mpii_ccb *tccb = io;
3019 struct mpii_ccb *ccb;
3020 struct mpii_msg_scsi_task_request *stq;
3021
3022 mtx_enter(&sc->sc_ccb_mtx);
3023 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_tmos)((&sc->sc_ccb_tmos)->sqh_first);
3024 if (ccb != NULL((void *)0)) {
3025 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link)do { if (((&sc->sc_ccb_tmos)->sqh_first = (&sc->
sc_ccb_tmos)->sqh_first->ccb_link.sqe_next) == ((void *
)0)) (&sc->sc_ccb_tmos)->sqh_last = &(&sc->
sc_ccb_tmos)->sqh_first; } while (0)
;
3026 ccb->ccb_state = MPII_CCB_QUEUED;
3027 }
3028 /* should remove any other ccbs for the same dev handle */
3029 mtx_leave(&sc->sc_ccb_mtx);
3030
3031 if (ccb == NULL((void *)0)) {
3032 scsi_io_put(&sc->sc_iopool, tccb);
3033 return;
3034 }
3035
3036 stq = tccb->ccb_cmd;
3037 stq->function = MPII_FUNCTION_SCSI_TASK_MGMT(0x01);
3038 stq->task_type = MPII_SCSI_TASK_TARGET_RESET(0x03);
3039 htolem16(&stq->dev_handle, ccb->ccb_dev_handle)(*(__uint16_t *)(&stq->dev_handle) = ((__uint16_t)(ccb
->ccb_dev_handle)))
;
3040
3041 tccb->ccb_done = mpii_scsi_cmd_tmo_done;
3042 mpii_start(sc, tccb);
3043}
3044
3045void
3046mpii_scsi_cmd_tmo_done(struct mpii_ccb *tccb)
3047{
3048 mpii_scsi_cmd_tmo_handler(tccb->ccb_sc, tccb);
3049}
3050
3051void
3052mpii_scsi_cmd_done(struct mpii_ccb *ccb)
3053{
3054 struct mpii_ccb *tccb;
3055 struct mpii_msg_scsi_io_error *sie;
3056 struct mpii_softc *sc = ccb->ccb_sc;
3057 struct scsi_xfer *xs = ccb->ccb_cookie;
3058 struct scsi_sense_data *sense;
3059 bus_dmamap_t dmap = ccb->ccb_dmamap;
3060
3061 timeout_del(&xs->stimeout);
3062 mtx_enter(&sc->sc_ccb_mtx);
3063 if (ccb->ccb_state == MPII_CCB_TIMEOUT) {
3064 /* ENOSIMPLEQ_REMOVE :( */
3065 if (ccb == SIMPLEQ_FIRST(&sc->sc_ccb_tmos)((&sc->sc_ccb_tmos)->sqh_first))
3066 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_tmos, ccb_link)do { if (((&sc->sc_ccb_tmos)->sqh_first = (&sc->
sc_ccb_tmos)->sqh_first->ccb_link.sqe_next) == ((void *
)0)) (&sc->sc_ccb_tmos)->sqh_last = &(&sc->
sc_ccb_tmos)->sqh_first; } while (0)
;
3067 else {
3068 SIMPLEQ_FOREACH(tccb, &sc->sc_ccb_tmos, ccb_link)for((tccb) = ((&sc->sc_ccb_tmos)->sqh_first); (tccb
) != ((void *)0); (tccb) = ((tccb)->ccb_link.sqe_next))
{
3069 if (SIMPLEQ_NEXT(tccb, ccb_link)((tccb)->ccb_link.sqe_next) == ccb) {
3070 SIMPLEQ_REMOVE_AFTER(&sc->sc_ccb_tmos,do { if (((tccb)->ccb_link.sqe_next = (tccb)->ccb_link.
sqe_next->ccb_link.sqe_next) == ((void *)0)) (&sc->
sc_ccb_tmos)->sqh_last = &(tccb)->ccb_link.sqe_next
; } while (0)
3071 tccb, ccb_link)do { if (((tccb)->ccb_link.sqe_next = (tccb)->ccb_link.
sqe_next->ccb_link.sqe_next) == ((void *)0)) (&sc->
sc_ccb_tmos)->sqh_last = &(tccb)->ccb_link.sqe_next
; } while (0)
;
3072 break;
3073 }
3074 }
3075 }
3076 }
3077
3078 ccb->ccb_state = MPII_CCB_READY;
3079 mtx_leave(&sc->sc_ccb_mtx);
3080
3081 if (xs->datalen != 0) {
3082 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x02 : 0x08))
3083 (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_POSTREAD :(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x02 : 0x08))
3084 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), ((xs->flags & 0x00800) ?
0x02 : 0x08))
;
3085
3086 bus_dmamap_unload(sc->sc_dmat, dmap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (dmap
))
;
3087 }
3088
3089 xs->error = XS_NOERROR0;
3090 xs->resid = 0;
3091
3092 if (ccb->ccb_rcb == NULL((void *)0)) {
3093 /* no scsi error, we're ok so drop out early */
3094 xs->status = SCSI_OK0x00;
3095 goto done;
3096 }
3097
3098 sie = ccb->ccb_rcb->rcb_reply;
3099
3100 DNPRINTF(MPII_D_CMD, "%s: mpii_scsi_cmd_done xs cmd: 0x%02x len: %d "
3101 "flags 0x%x\n", DEVNAME(sc), xs->cmd.opcode, xs->datalen,
3102 xs->flags);
3103 DNPRINTF(MPII_D_CMD, "%s: dev_handle: %d msg_length: %d "
3104 "function: 0x%02x\n", DEVNAME(sc), lemtoh16(&sie->dev_handle),
3105 sie->msg_length, sie->function);
3106 DNPRINTF(MPII_D_CMD, "%s: vp_id: 0x%02x vf_id: 0x%02x\n", DEVNAME(sc),
3107 sie->vp_id, sie->vf_id);
3108 DNPRINTF(MPII_D_CMD, "%s: scsi_status: 0x%02x scsi_state: 0x%02x "
3109 "ioc_status: 0x%04x\n", DEVNAME(sc), sie->scsi_status,
3110 sie->scsi_state, lemtoh16(&sie->ioc_status));
3111 DNPRINTF(MPII_D_CMD, "%s: ioc_loginfo: 0x%08x\n", DEVNAME(sc),
3112 lemtoh32(&sie->ioc_loginfo));
3113 DNPRINTF(MPII_D_CMD, "%s: transfer_count: %d\n", DEVNAME(sc),
3114 lemtoh32(&sie->transfer_count));
3115 DNPRINTF(MPII_D_CMD, "%s: sense_count: %d\n", DEVNAME(sc),
3116 lemtoh32(&sie->sense_count));
3117 DNPRINTF(MPII_D_CMD, "%s: response_info: 0x%08x\n", DEVNAME(sc),
3118 lemtoh32(&sie->response_info));
3119 DNPRINTF(MPII_D_CMD, "%s: task_tag: 0x%04x\n", DEVNAME(sc),
3120 lemtoh16(&sie->task_tag));
3121 DNPRINTF(MPII_D_CMD, "%s: bidirectional_transfer_count: 0x%08x\n",
3122 DEVNAME(sc), lemtoh32(&sie->bidirectional_transfer_count));
3123
3124 if (sie->scsi_state & MPII_SCSIIO_STATE_NO_SCSI_STATUS(1<<2))
3125 xs->status = SCSI_TERMINATED0x22;
3126 else
3127 xs->status = sie->scsi_status;
3128 xs->resid = 0;
3129
3130 switch (lemtoh16(&sie->ioc_status)((__uint16_t)(*(__uint16_t *)(&sie->ioc_status))) & MPII_IOCSTATUS_MASK(0x7fff)) {
3131 case MPII_IOCSTATUS_SCSI_DATA_UNDERRUN(0x0045):
3132 xs->resid = xs->datalen - lemtoh32(&sie->transfer_count)((__uint32_t)(*(__uint32_t *)(&sie->transfer_count)));
3133 /* FALLTHROUGH */
3134
3135 case MPII_IOCSTATUS_SUCCESS(0x0000):
3136 case MPII_IOCSTATUS_SCSI_RECOVERED_ERROR(0x0040):
3137 switch (xs->status) {
3138 case SCSI_OK0x00:
3139 xs->error = XS_NOERROR0;
3140 break;
3141
3142 case SCSI_CHECK0x02:
3143 xs->error = XS_SENSE1;
3144 break;
3145
3146 case SCSI_BUSY0x08:
3147 case SCSI_QUEUE_FULL0x28:
3148 xs->error = XS_BUSY5;
3149 break;
3150
3151 default:
3152 xs->error = XS_DRIVER_STUFFUP2;
3153 }
3154 break;
3155
3156 case MPII_IOCSTATUS_BUSY(0x0002):
3157 case MPII_IOCSTATUS_INSUFFICIENT_RESOURCES(0x0006):
3158 xs->error = XS_BUSY5;
3159 break;
3160
3161 case MPII_IOCSTATUS_SCSI_IOC_TERMINATED(0x004b):
3162 case MPII_IOCSTATUS_SCSI_TASK_TERMINATED(0x0048):
3163 xs->error = XS_RESET8;
3164 break;
3165
3166 case MPII_IOCSTATUS_SCSI_INVALID_DEVHANDLE(0x0042):
3167 case MPII_IOCSTATUS_SCSI_DEVICE_NOT_THERE(0x0043):
3168 xs->error = XS_SELTIMEOUT3;
3169 break;
3170
3171 default:
3172 xs->error = XS_DRIVER_STUFFUP2;
3173 break;
3174 }
3175
3176 sense = (struct scsi_sense_data *)((caddr_t)ccb->ccb_cmd +
3177 sc->sc_request_size - sizeof(*sense));
3178 if (sie->scsi_state & MPII_SCSIIO_STATE_AUTOSENSE_VALID(1<<0))
3179 memcpy(&xs->sense, sense, sizeof(xs->sense))__builtin_memcpy((&xs->sense), (sense), (sizeof(xs->
sense)))
;
3180
3181 DNPRINTF(MPII_D_CMD, "%s: xs err: %d status: %#x\n", DEVNAME(sc),
3182 xs->error, xs->status);
3183
3184 mpii_push_reply(sc, ccb->ccb_rcb);
3185done:
3186 KERNEL_LOCK()_kernel_lock();
3187 scsi_done(xs);
3188 KERNEL_UNLOCK()_kernel_unlock();
3189}
3190
3191int
3192mpii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
3193{
3194 struct mpii_softc *sc = link->bus->sb_adapter_softc;
3195 struct mpii_device *dev = sc->sc_devs[link->target];
3196
3197 DNPRINTF(MPII_D_IOCTL, "%s: mpii_scsi_ioctl\n", DEVNAME(sc));
3198
3199 switch (cmd) {
3200 case DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((117)))
:
3201 case DIOCSCACHE((unsigned long)0x80000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((118)))
:
3202 if (dev != NULL((void *)0) && ISSET(dev->flags, MPII_DF_VOLUME)((dev->flags) & ((0x0010)))) {
3203 return (mpii_ioctl_cache(link, cmd,
3204 (struct dk_cache *)addr));
3205 }
3206 break;
3207
3208 default:
3209 if (sc->sc_ioctl)
3210 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
3211
3212 break;
3213 }
3214
3215 return (ENOTTY25);
3216}
3217
3218int
3219mpii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
3220{
3221 struct mpii_softc *sc = link->bus->sb_adapter_softc;
3222 struct mpii_device *dev = sc->sc_devs[link->target];
3223 struct mpii_cfg_raid_vol_pg0 *vpg;
3224 struct mpii_msg_raid_action_request *req;
3225 struct mpii_msg_raid_action_reply *rep;
3226 struct mpii_cfg_hdr hdr;
3227 struct mpii_ccb *ccb;
3228 u_int32_t addr = MPII_CFG_RAID_VOL_ADDR_HANDLE(1<<28) | dev->dev_handle;
3229 size_t pagelen;
3230 int rv = 0;
3231 int enabled;
3232
3233 if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL(0x08), 0,
3234 addr, MPII_PG_POLL(1<<1), &hdr) != 0)
3235 return (EINVAL22);
3236
3237 pagelen = hdr.page_length * 4;
3238 vpg = malloc(pagelen, M_TEMP127, M_WAITOK0x0001 | M_CANFAIL0x0004 | M_ZERO0x0008);
3239 if (vpg == NULL((void *)0))
3240 return (ENOMEM12);
3241
3242 if (mpii_req_cfg_page(sc, addr, MPII_PG_POLL(1<<1), &hdr, 1,
3243 vpg, pagelen) != 0) {
3244 rv = EINVAL22;
3245 goto done;
3246 }
3247
3248 enabled = ((lemtoh16(&vpg->volume_settings)((__uint16_t)(*(__uint16_t *)(&vpg->volume_settings))) &
3249 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_MASK(0x3<<0)) ==
3250 MPII_CFG_RAID_VOL_0_SETTINGS_CACHE_ENABLED(0x2<<0)) ? 1 : 0;
3251
3252 if (cmd == DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((117)))
) {
3253 dc->wrcache = enabled;
3254 dc->rdcache = 0;
3255 goto done;
3256 } /* else DIOCSCACHE */
3257
3258 if (dc->rdcache) {
3259 rv = EOPNOTSUPP45;
3260 goto done;
3261 }
3262
3263 if (((dc->wrcache) ? 1 : 0) == enabled)
3264 goto done;
3265
3266 ccb = scsi_io_get(&sc->sc_iopool, SCSI_POLL0x00002);
3267 if (ccb == NULL((void *)0)) {
3268 rv = ENOMEM12;
3269 goto done;
3270 }
3271
3272 ccb->ccb_done = mpii_empty_done;
3273
3274 req = ccb->ccb_cmd;
3275 memset(req, 0, sizeof(*req))__builtin_memset((req), (0), (sizeof(*req)));
3276 req->function = MPII_FUNCTION_RAID_ACTION(0x15);
3277 req->action = MPII_RAID_ACTION_CHANGE_VOL_WRITE_CACHE(0x17);
3278 htolem16(&req->vol_dev_handle, dev->dev_handle)(*(__uint16_t *)(&req->vol_dev_handle) = ((__uint16_t)
(dev->dev_handle)))
;
3279 htolem32(&req->action_data, dc->wrcache ?(*(__uint32_t *)(&req->action_data) = ((__uint32_t)(dc
->wrcache ? (0x02) : (0x01))))
3280 MPII_RAID_VOL_WRITE_CACHE_ENABLE :(*(__uint32_t *)(&req->action_data) = ((__uint32_t)(dc
->wrcache ? (0x02) : (0x01))))
3281 MPII_RAID_VOL_WRITE_CACHE_DISABLE)(*(__uint32_t *)(&req->action_data) = ((__uint32_t)(dc
->wrcache ? (0x02) : (0x01))))
;
3282
3283 if (mpii_poll(sc, ccb) != 0) {
3284 rv = EIO5;
3285 goto done;
3286 }
3287
3288 if (ccb->ccb_rcb != NULL((void *)0)) {
3289 rep = ccb->ccb_rcb->rcb_reply;
3290 if ((rep->ioc_status != MPII_IOCSTATUS_SUCCESS(0x0000)) ||
3291 ((rep->action_data[0] &
3292 MPII_RAID_VOL_WRITE_CACHE_MASK(0x03)) !=
3293 (dc->wrcache ? MPII_RAID_VOL_WRITE_CACHE_ENABLE(0x02) :
3294 MPII_RAID_VOL_WRITE_CACHE_DISABLE(0x01))))
3295 rv = EINVAL22;
3296 mpii_push_reply(sc, ccb->ccb_rcb);
3297 }
3298
3299 scsi_io_put(&sc->sc_iopool, ccb);
3300
3301done:
3302 free(vpg, M_TEMP127, pagelen);
3303 return (rv);
3304}
3305
3306#if NBIO1 > 0
3307int
3308mpii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
3309{
3310 struct mpii_softc *sc = (struct mpii_softc *)dev;
3311 int error = 0;
3312
3313 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl ", DEVNAME(sc));
3314
3315 switch (cmd) {
3316 case BIOCINQ(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_inq) & 0x1fff) << 16) | ((('B')) <<
8) | ((32)))
:
3317 DNPRINTF(MPII_D_IOCTL, "inq\n");
3318 error = mpii_ioctl_inq(sc, (struct bioc_inq *)addr);
3319 break;
3320 case BIOCVOL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_vol) & 0x1fff) << 16) | ((('B')) <<
8) | ((34)))
:
3321 DNPRINTF(MPII_D_IOCTL, "vol\n");
3322 error = mpii_ioctl_vol(sc, (struct bioc_vol *)addr);
3323 break;
3324 case BIOCDISK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_disk) & 0x1fff) << 16) | ((('B')) <<
8) | ((33)))
:
3325 DNPRINTF(MPII_D_IOCTL, "disk\n");
3326 error = mpii_ioctl_disk(sc, (struct bioc_disk *)addr);
3327 break;
3328 default:
3329 DNPRINTF(MPII_D_IOCTL, " invalid ioctl\n");
3330 error = ENOTTY25;
3331 }
3332
3333 return (error);
3334}
3335
3336int
3337mpii_ioctl_inq(struct mpii_softc *sc, struct bioc_inq *bi)
3338{
3339 int i;
3340
3341 DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_inq\n", DEVNAME(sc));
3342
3343 strlcpy(bi->bi_dev, DEVNAME(sc)((sc)->sc_dev.dv_xname), sizeof(bi->bi_dev));
3344 for (i = 0; i < sc->sc_max_devices; i++)
3345 if (sc->sc_devs[i] &&
3346 ISSET(sc->sc_devs[i]->flags, MPII_DF_VOLUME)((sc->sc_devs[i]->flags) & ((0x0010))))
3347 bi->bi_novol++;
3348 return (0);
3349}
3350
/*
 * BIOCVOL: fill in a bioc_vol for the volume selected by bv->bv_volid.
 * Fetches RAID volume page 0 from the firmware and translates the
 * volume state/type into bio(4) status and RAID level values.
 * Returns 0 on success or an errno.
 */
int
mpii_ioctl_vol(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	struct scsi_link *lnk;
	struct device *scdev;
	size_t pagelen;
	u_int16_t volh;
	int rv, hcnt = 0;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_vol %d\n",
	    DEVNAME(sc), bv->bv_volid);

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	/* First get the header so we know how large page 0 is. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Translate the firmware volume state into a bio(4) status. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/*
		 * A degraded volume with the resync flag set is
		 * rebuilding; report the progress cached in the
		 * device entry.
		 */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC)) {
			bv->bv_status = BIOC_SVREBUILD;
			bv->bv_percent = dev->percent;
		} else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	/* Map the firmware volume type to a numeric RAID level. */
	switch (vpg->volume_type) {
	case MPII_CFG_RAID_VOL_0_TYPE_RAID0:
		bv->bv_level = 0;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1:
		bv->bv_level = 1;
		break;
	case MPII_CFG_RAID_VOL_0_TYPE_RAID1E:
	case MPII_CFG_RAID_VOL_0_TYPE_RAID10:
		bv->bv_level = 10;
		break;
	default:
		bv->bv_level = -1;
	}

	/* Count the hot spares assigned to this volume's pool. */
	if ((rv = mpii_bio_hs(sc, NULL, 0, vpg->hot_spare_pool, &hcnt)) != 0) {
		free(vpg, M_TEMP, pagelen);
		return (rv);
	}

	/* Total disks reported includes the hot spares. */
	bv->bv_nodisk = vpg->num_phys_disks + hcnt;

	bv->bv_size = letoh64(vpg->max_lba) * lemtoh16(&vpg->block_size);

	/* Attach the scsibus device name (e.g. sdN) if it is known. */
	lnk = scsi_get_link(sc->sc_scsibus, dev->slot, 0);
	if (lnk != NULL) {
		scdev = lnk->device_softc;
		strlcpy(bv->bv_dev, scdev->dv_xname, sizeof(bv->bv_dev));
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3451
/*
 * BIOCDISK: fill in a bioc_disk for disk bd->bd_diskid of volume
 * bd->bd_volid.  Disk ids beyond the volume's physical disk count
 * refer to the volume's hot spares and are handed to mpii_bio_hs();
 * otherwise the physical disk number is looked up in the trailing
 * per-disk array of RAID volume page 0 and resolved by
 * mpii_bio_disk().  Returns 0 on success or an errno.
 */
int
mpii_ioctl_disk(struct mpii_softc *sc, struct bioc_disk *bd)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_raid_vol_pg0_physdisk *pd;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	size_t pagelen;
	u_int16_t volh;
	u_int8_t dn;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_ioctl_disk %d/%d\n",
	    DEVNAME(sc), bd->bd_volid, bd->bd_diskid);

	if ((dev = mpii_find_vol(sc, bd->bd_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0, &hdr) != 0) {
		printf("%s: unable to fetch header for raid volume page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (vpg == NULL) {
		printf("%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, 0,
	    &hdr, 1, vpg, pagelen) != 0) {
		printf("%s: unable to fetch raid volume page 0\n",
		    DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/*
	 * Disk ids past the member disks address the hot spares; copy
	 * out what mpii_bio_hs() needs before freeing the page.
	 */
	if (bd->bd_diskid >= vpg->num_phys_disks) {
		int nvdsk = vpg->num_phys_disks;
		int hsmap = vpg->hot_spare_pool;

		free(vpg, M_TEMP, pagelen);
		return (mpii_bio_hs(sc, bd, nvdsk, hsmap, NULL));
	}

	/* The per-disk records follow the fixed part of page 0. */
	pd = (struct mpii_cfg_raid_vol_pg0_physdisk *)(vpg + 1) +
	    bd->bd_diskid;
	dn = pd->phys_disk_num;

	free(vpg, M_TEMP, pagelen);
	return (mpii_bio_disk(sc, bd, dn));
}
3508
/*
 * Walk the active RAID configuration's element list looking at hot
 * spares in pool `hsmap'.  Serves two callers:
 *  - bd == NULL: count the pool's hot spares into *hscnt (BIOCVOL).
 *  - bd != NULL: resolve bd->bd_diskid (offset by `nvdsk' member
 *    disks) to a physical disk and fill bd via mpii_bio_disk()
 *    (BIOCDISK).
 * Returns 0 on success or an errno.
 */
int
mpii_bio_hs(struct mpii_softc *sc, struct bioc_disk *bd, int nvdsk,
     int hsmap, int *hscnt)
{
	struct mpii_cfg_raid_config_pg0 *cpg;
	struct mpii_raid_config_element *el;
	struct mpii_ecfg_hdr ehdr;
	size_t pagelen;
	int i, nhs = 0;

	if (bd)
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs %d\n", DEVNAME(sc),
		    bd->bd_diskid - nvdsk);
	else
		DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_hs\n", DEVNAME(sc));

	/* RAID config page 0 is an extended page. */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_CONFIG,
	    0, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG, MPII_PG_EXTENDED,
	    &ehdr) != 0) {
		printf("%s: unable to fetch header for raid config page 0\n",
		    DEVNAME(sc));
		return (EINVAL);
	}

	/* ext_page_length is in 32-bit dwords. */
	pagelen = lemtoh16(&ehdr.ext_page_length) * 4;
	cpg = malloc(pagelen, M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (cpg == NULL) {
		printf("%s: unable to allocate space for raid config page 0\n",
		    DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_CONFIG_ACTIVE_CONFIG,
	    MPII_PG_EXTENDED, &ehdr, 1, cpg, pagelen) != 0) {
		printf("%s: unable to fetch raid config page 0\n",
		    DEVNAME(sc));
		free(cpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* The element array follows the fixed part of the page. */
	el = (struct mpii_raid_config_element *)(cpg + 1);
	for (i = 0; i < cpg->num_elements; i++, el++) {
		if (ISSET(lemtoh16(&el->element_flags),
		    MPII_RAID_CONFIG_ELEMENT_FLAG_HSP_PHYS_DISK) &&
		    el->hot_spare_pool == hsmap) {
			/*
			 * diskid comparison is based on the idea that all
			 * disks are counted by the bio(4) in sequence, thus
			 * subtracting the number of disks in the volume
			 * from the diskid yields us a "relative" hotspare
			 * number, which is good enough for us.
			 */
			if (bd != NULL && bd->bd_diskid == nhs + nvdsk) {
				u_int8_t dn = el->phys_disk_num;

				free(cpg, M_TEMP, pagelen);
				return (mpii_bio_disk(sc, bd, dn));
			}
			nhs++;
		}
	}

	if (hscnt)
		*hscnt = nhs;

	free(cpg, M_TEMP, pagelen);
	return (0);
}
3577
/*
 * Fill in a bioc_disk from RAID physical disk page 0 for physical
 * disk number `dn': bio status, size, vendor/product and serial
 * strings.  Returns 0 on success (including the "device not there"
 * case, reported as BIOC_SDINVALID) or an errno.
 */
int
mpii_bio_disk(struct mpii_softc *sc, struct bioc_disk *bd, u_int8_t dn)
{
	struct mpii_cfg_raid_physdisk_pg0 *ppg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev;
	int len;

	DNPRINTF(MPII_D_IOCTL, "%s: mpii_bio_disk %d\n", DEVNAME(sc),
	    bd->bd_diskid);

	ppg = malloc(sizeof(*ppg), M_TEMP, M_WAITOK | M_CANFAIL | M_ZERO);
	if (ppg == NULL) {
		printf("%s: unable to allocate space for raid physical disk "
		    "page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	/*
	 * The page size is known at compile time here, so build the
	 * config header by hand instead of asking the firmware for it.
	 */
	hdr.page_version = 0;
	hdr.page_length = sizeof(*ppg) / 4;
	hdr.page_number = 0;
	hdr.page_type = MPII_CONFIG_REQ_PAGE_TYPE_RAID_PD;

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_PHYS_DISK_ADDR_NUMBER | dn, 0,
	    &hdr, 1, ppg, sizeof(*ppg)) != 0) {
		printf("%s: unable to fetch raid drive page 0\n",
		    DEVNAME(sc));
		free(ppg, M_TEMP, sizeof(*ppg));
		return (EINVAL);
	}

	bd->bd_target = ppg->phys_disk_num;

	/* No device entry for the handle means the disk is gone. */
	if ((dev = mpii_find_dev(sc, lemtoh16(&ppg->dev_handle))) == NULL) {
		bd->bd_status = BIOC_SDINVALID;
		free(ppg, M_TEMP, sizeof(*ppg));
		return (0);
	}

	/* Translate the firmware disk state into a bio(4) status. */
	switch (ppg->phys_disk_state) {
	case MPII_CFG_RAID_PHYDISK_0_STATE_ONLINE:
	case MPII_CFG_RAID_PHYDISK_0_STATE_OPTIMAL:
		bd->bd_status = BIOC_SDONLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_OFFLINE:
		/* Distinguish a failed disk from an administratively
		 * offlined one via the offline reason code. */
		if (ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILED ||
		    ppg->offline_reason ==
		    MPII_CFG_RAID_PHYDISK_0_OFFLINE_FAILEDREQ)
			bd->bd_status = BIOC_SDFAILED;
		else
			bd->bd_status = BIOC_SDOFFLINE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_DEGRADED:
		bd->bd_status = BIOC_SDFAILED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_REBUILDING:
		bd->bd_status = BIOC_SDREBUILD;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_HOTSPARE:
		bd->bd_status = BIOC_SDHOTSPARE;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCONFIGURED:
		bd->bd_status = BIOC_SDUNUSED;
		break;
	case MPII_CFG_RAID_PHYDISK_0_STATE_NOTCOMPATIBLE:
	default:
		bd->bd_status = BIOC_SDINVALID;
		break;
	}

	bd->bd_size = letoh64(ppg->dev_max_lba) * lemtoh16(&ppg->block_size);

	/* Present "vendor product" as a single space-separated string. */
	scsi_strvis(bd->bd_vendor, ppg->vendor_id, sizeof(ppg->vendor_id));
	len = strlen(bd->bd_vendor);
	bd->bd_vendor[len] = ' ';
	scsi_strvis(&bd->bd_vendor[len + 1], ppg->product_id,
	    sizeof(ppg->product_id));
	scsi_strvis(bd->bd_serial, ppg->serial, sizeof(ppg->serial));

	free(ppg, M_TEMP, sizeof(*ppg));
	return (0);
}
3661
3662struct mpii_device *
3663mpii_find_vol(struct mpii_softc *sc, int volid)
3664{
3665 struct mpii_device *dev = NULL((void *)0);
3666
3667 if (sc->sc_vd_id_low + volid >= sc->sc_max_devices)
3668 return (NULL((void *)0));
3669 dev = sc->sc_devs[sc->sc_vd_id_low + volid];
3670 if (dev && ISSET(dev->flags, MPII_DF_VOLUME)((dev->flags) & ((0x0010))))
3671 return (dev);
3672 return (NULL((void *)0));
3673}
3674
3675#ifndef SMALL_KERNEL
/*
 * Non-sleeping, lightweight version of mpii_ioctl_vol(): reports only
 * the volume status, for use from the sensor refresh task.
 */
int
mpii_bio_volstate(struct mpii_softc *sc, struct bioc_vol *bv)
{
	struct mpii_cfg_raid_vol_pg0 *vpg;
	struct mpii_cfg_hdr hdr;
	struct mpii_device *dev = NULL;
	size_t pagelen;
	u_int16_t volh;

	if ((dev = mpii_find_vol(sc, bv->bv_volid)) == NULL)
		return (ENODEV);
	volh = dev->dev_handle;

	/*
	 * Unlike mpii_ioctl_vol(), use MPII_PG_POLL and M_NOWAIT
	 * throughout so this can run from the sensor task without
	 * sleeping.
	 */
	if (mpii_req_cfg_header(sc, MPII_CONFIG_REQ_PAGE_TYPE_RAID_VOL, 0,
	    MPII_CFG_RAID_VOL_ADDR_HANDLE | volh, MPII_PG_POLL, &hdr) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch header for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (EINVAL);
	}

	/* page_length is in 32-bit dwords. */
	pagelen = hdr.page_length * 4;
	vpg = malloc(pagelen, M_TEMP, M_NOWAIT | M_ZERO);
	if (vpg == NULL) {
		DNPRINTF(MPII_D_MISC, "%s: unable to allocate space for raid "
		    "volume page 0\n", DEVNAME(sc));
		return (ENOMEM);
	}

	if (mpii_req_cfg_page(sc, MPII_CFG_RAID_VOL_ADDR_HANDLE | volh,
	    MPII_PG_POLL, &hdr, 1, vpg, pagelen) != 0) {
		DNPRINTF(MPII_D_MISC, "%s: unable to fetch raid volume "
		    "page 0\n", DEVNAME(sc));
		free(vpg, M_TEMP, pagelen);
		return (EINVAL);
	}

	/* Translate the firmware volume state into a bio(4) status. */
	switch (vpg->volume_state) {
	case MPII_CFG_RAID_VOL_0_STATE_ONLINE:
	case MPII_CFG_RAID_VOL_0_STATE_OPTIMAL:
		bv->bv_status = BIOC_SVONLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_DEGRADED:
		/* Degraded plus the resync flag means rebuilding. */
		if (ISSET(lemtoh32(&vpg->volume_status),
		    MPII_CFG_RAID_VOL_0_STATUS_RESYNC))
			bv->bv_status = BIOC_SVREBUILD;
		else
			bv->bv_status = BIOC_SVDEGRADED;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_FAILED:
		bv->bv_status = BIOC_SVOFFLINE;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_INITIALIZING:
		bv->bv_status = BIOC_SVBUILDING;
		break;
	case MPII_CFG_RAID_VOL_0_STATE_MISSING:
	default:
		bv->bv_status = BIOC_SVINVALID;
		break;
	}

	free(vpg, M_TEMP, pagelen);
	return (0);
}
3742
3743int
3744mpii_create_sensors(struct mpii_softc *sc)
3745{
3746 struct scsibus_softc *ssc = sc->sc_scsibus;
3747 struct device *dev;
3748 struct scsi_link *link;
3749 int i;
3750
3751 sc->sc_sensors = mallocarray(sc->sc_vd_count, sizeof(struct ksensor),
3752 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
3753 if (sc->sc_sensors == NULL((void *)0))
3754 return (1);
3755 sc->sc_nsensors = sc->sc_vd_count;
3756
3757 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc)((sc)->sc_dev.dv_xname),
3758 sizeof(sc->sc_sensordev.xname));
3759
3760 for (i = 0; i < sc->sc_vd_count; i++) {
3761 link = scsi_get_link(ssc, i + sc->sc_vd_id_low, 0);
3762 if (link == NULL((void *)0))
3763 goto bad;
3764
3765 dev = link->device_softc;
3766
3767 sc->sc_sensors[i].type = SENSOR_DRIVE;
3768 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3769
3770 strlcpy(sc->sc_sensors[i].desc, dev->dv_xname,
3771 sizeof(sc->sc_sensors[i].desc));
3772
3773 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
3774 }
3775
3776 if (sensor_task_register(sc, mpii_refresh_sensors, 10) == NULL((void *)0))
3777 goto bad;
3778
3779 sensordev_install(&sc->sc_sensordev);
3780
3781 return (0);
3782
3783bad:
3784 free(sc->sc_sensors, M_DEVBUF2, 0);
3785
3786 return (1);
3787}
3788
3789void
3790mpii_refresh_sensors(void *arg)
3791{
3792 struct mpii_softc *sc = arg;
3793 struct bioc_vol bv;
3794 int i;
3795
3796 for (i = 0; i < sc->sc_nsensors; i++) {
3797 memset(&bv, 0, sizeof(bv))__builtin_memset((&bv), (0), (sizeof(bv)));
3798 bv.bv_volid = i;
3799 if (mpii_bio_volstate(sc, &bv))
3800 return;
3801 switch(bv.bv_status) {
3802 case BIOC_SVOFFLINE0x01:
3803 sc->sc_sensors[i].value = SENSOR_DRIVE_FAIL9;
3804 sc->sc_sensors[i].status = SENSOR_S_CRIT;
3805 break;
3806 case BIOC_SVDEGRADED0x02:
3807 sc->sc_sensors[i].value = SENSOR_DRIVE_PFAIL10;
3808 sc->sc_sensors[i].status = SENSOR_S_WARN;
3809 break;
3810 case BIOC_SVREBUILD0x05:
3811 sc->sc_sensors[i].value = SENSOR_DRIVE_REBUILD7;
3812 sc->sc_sensors[i].status = SENSOR_S_WARN;
3813 break;
3814 case BIOC_SVONLINE0x00:
3815 sc->sc_sensors[i].value = SENSOR_DRIVE_ONLINE4;
3816 sc->sc_sensors[i].status = SENSOR_S_OK;
3817 break;
3818 case BIOC_SVINVALID0xff:
3819 /* FALLTHROUGH */
3820 default:
3821 sc->sc_sensors[i].value = 0; /* unknown */
3822 sc->sc_sensors[i].status = SENSOR_S_UNKNOWN;
3823 }
3824 }
3825}
3826#endif /* SMALL_KERNEL */
3827#endif /* NBIO > 0 */