Bug Summary

File: dev/pci/mfii.c
Warning: line 1829, column 4
Null pointer passed as the 1st argument to a memory copy function.

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name mfii.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/dev/pci/mfii.c
1/* $OpenBSD: mfii.c,v 1.83 2020/12/15 03:05:31 dlg Exp $ */
2
3/*
4 * Copyright (c) 2012 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include "bio.h"
20
21#include <sys/param.h>
22#include <sys/systm.h>
23#include <sys/malloc.h>
24#include <sys/device.h>
25#include <sys/dkio.h>
26#include <sys/pool.h>
27#include <sys/task.h>
28#include <sys/atomic.h>
29#include <sys/sensors.h>
30#include <sys/rwlock.h>
31#include <sys/syslog.h>
32#include <sys/smr.h>
33
34#include <dev/biovar.h>
35#include <dev/pci/pcidevs.h>
36#include <dev/pci/pcivar.h>
37
38#include <machine/bus.h>
39
40#include <scsi/scsi_all.h>
41#include <scsi/scsi_disk.h>
42#include <scsi/scsiconf.h>
43
44#include <dev/ic/mfireg.h>
45#include <dev/pci/mpiireg.h>
46
47#define MFII_BAR0x14 0x14
48#define MFII_BAR_350x10 0x10
49#define MFII_PCI_MEMSIZE0x2000 0x2000 /* 8k */
50
51#define MFII_OSTS_INTR_VALID0x00000009 0x00000009
52#define MFII_RPI0x6c 0x6c /* reply post host index */
53#define MFII_OSP20xb4 0xb4 /* outbound scratch pad 2 */
54#define MFII_OSP30xb8 0xb8 /* outbound scratch pad 3 */
55
56#define MFII_REQ_TYPE_SCSI(0x00) MPII_REQ_DESCR_SCSI_IO(0x00)
57#define MFII_REQ_TYPE_LDIO(0x7 << 1) (0x7 << 1)
58#define MFII_REQ_TYPE_MFA(0x1 << 1) (0x1 << 1)
59#define MFII_REQ_TYPE_NO_LOCK(0x2 << 1) (0x2 << 1)
60#define MFII_REQ_TYPE_HI_PRI(0x6 << 1) (0x6 << 1)
61
62#define MFII_REQ_MFA(_a)((__uint64_t)((_a) | (0x1 << 1))) htole64((_a) | MFII_REQ_TYPE_MFA)((__uint64_t)((_a) | (0x1 << 1)))
63
64#define MFII_FUNCTION_PASSTHRU_IO(0xf0) (0xf0)
65#define MFII_FUNCTION_LDIO_REQUEST(0xf1) (0xf1)
66
67#define MFII_MAX_CHAIN_UNIT0x00400000 0x00400000
68#define MFII_MAX_CHAIN_MASK0x000003E0 0x000003E0
69#define MFII_MAX_CHAIN_SHIFT5 5
70
71#define MFII_256K_IO128 128
72#define MFII_1MB_IO(128 * 4) (MFII_256K_IO128 * 4)
73
74#define MFII_CHAIN_FRAME_MIN1024 1024
75
76struct mfii_request_descr {
77 u_int8_t flags;
78 u_int8_t msix_index;
79 u_int16_t smid;
80
81 u_int16_t lmid;
82 u_int16_t dev_handle;
83} __packed__attribute__((__packed__));
84
85#define MFII_RAID_CTX_IO_TYPE_SYSPD(0x1 << 4) (0x1 << 4)
86#define MFII_RAID_CTX_TYPE_CUDA(0x2 << 4) (0x2 << 4)
87
88struct mfii_raid_context {
89 u_int8_t type_nseg;
90 u_int8_t _reserved1;
91 u_int16_t timeout_value;
92
93 u_int16_t reg_lock_flags;
94#define MFII_RAID_CTX_RL_FLAGS_SEQNO_EN(0x08) (0x08)
95#define MFII_RAID_CTX_RL_FLAGS_CPU0(0x00) (0x00)
96#define MFII_RAID_CTX_RL_FLAGS_CPU1(0x10) (0x10)
97#define MFII_RAID_CTX_RL_FLAGS_CUDA(0x80) (0x80)
98
99#define MFII_RAID_CTX_ROUTING_FLAGS_SQN(1 << 4) (1 << 4)
100#define MFII_RAID_CTX_ROUTING_FLAGS_CPU00 0
101 u_int16_t virtual_disk_target_id;
102
103 u_int64_t reg_lock_row_lba;
104
105 u_int32_t reg_lock_length;
106
107 u_int16_t next_lm_id;
108 u_int8_t ex_status;
109 u_int8_t status;
110
111 u_int8_t raid_flags;
112 u_int8_t num_sge;
113 u_int16_t config_seq_num;
114
115 u_int8_t span_arm;
116 u_int8_t _reserved3[3];
117} __packed__attribute__((__packed__));
118
119struct mfii_sge {
120 u_int64_t sg_addr;
121 u_int32_t sg_len;
122 u_int16_t _reserved;
123 u_int8_t sg_next_chain_offset;
124 u_int8_t sg_flags;
125} __packed__attribute__((__packed__));
126
127#define MFII_SGE_ADDR_MASK(0x03) (0x03)
128#define MFII_SGE_ADDR_SYSTEM(0x00) (0x00)
129#define MFII_SGE_ADDR_IOCDDR(0x01) (0x01)
130#define MFII_SGE_ADDR_IOCPLB(0x02) (0x02)
131#define MFII_SGE_ADDR_IOCPLBNTA(0x03) (0x03)
132#define MFII_SGE_END_OF_LIST(0x40) (0x40)
133#define MFII_SGE_CHAIN_ELEMENT(0x80) (0x80)
134
135#define MFII_REQUEST_SIZE256 256
136
137#define MR_DCMD_LD_MAP_GET_INFO0x0300e101 0x0300e101
138
139#define MFII_MAX_ROW32 32
140#define MFII_MAX_ARRAY128 128
141
142struct mfii_array_map {
143 uint16_t mam_pd[MFII_MAX_ROW32];
144} __packed__attribute__((__packed__));
145
146struct mfii_dev_handle {
147 uint16_t mdh_cur_handle;
148 uint8_t mdh_valid;
149 uint8_t mdh_reserved;
150 uint16_t mdh_handle[2];
151} __packed__attribute__((__packed__));
152
153struct mfii_ld_map {
154 uint32_t mlm_total_size;
155 uint32_t mlm_reserved1[5];
156 uint32_t mlm_num_lds;
157 uint32_t mlm_reserved2;
158 uint8_t mlm_tgtid_to_ld[2 * MFI_MAX_LD64];
159 uint8_t mlm_pd_timeout;
160 uint8_t mlm_reserved3[7];
161 struct mfii_array_map mlm_am[MFII_MAX_ARRAY128];
162 struct mfii_dev_handle mlm_dev_handle[MFI_MAX_PD256];
163} __packed__attribute__((__packed__));
164
165struct mfii_task_mgmt {
166 union {
167 uint8_t request[128];
168 struct mpii_msg_scsi_task_request
169 mpii_request;
170 } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8)));
171
172 union {
173 uint8_t reply[128];
174 uint32_t flags;
175#define MFII_TASK_MGMT_FLAGS_LD(1 << 0) (1 << 0)
176#define MFII_TASK_MGMT_FLAGS_PD(1 << 1) (1 << 1)
177 struct mpii_msg_scsi_task_reply
178 mpii_reply;
179 } __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8)));
180} __packed__attribute__((__packed__)) __aligned(8)__attribute__((__aligned__(8)));
181
182struct mfii_dmamem {
183 bus_dmamap_t mdm_map;
184 bus_dma_segment_t mdm_seg;
185 size_t mdm_size;
186 caddr_t mdm_kva;
187};
188#define MFII_DMA_MAP(_mdm)((_mdm)->mdm_map) ((_mdm)->mdm_map)
189#define MFII_DMA_LEN(_mdm)((_mdm)->mdm_size) ((_mdm)->mdm_size)
190#define MFII_DMA_DVA(_mdm)((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr) ((u_int64_t)(_mdm)->mdm_map->dm_segs[0].ds_addr)
191#define MFII_DMA_KVA(_mdm)((void *)(_mdm)->mdm_kva) ((void *)(_mdm)->mdm_kva)
192
193struct mfii_softc;
194
195struct mfii_ccb {
196 void *ccb_request;
197 u_int64_t ccb_request_dva;
198 bus_addr_t ccb_request_offset;
199
200 void *ccb_mfi;
201 u_int64_t ccb_mfi_dva;
202 bus_addr_t ccb_mfi_offset;
203
204 struct mfi_sense *ccb_sense;
205 u_int64_t ccb_sense_dva;
206 bus_addr_t ccb_sense_offset;
207
208 struct mfii_sge *ccb_sgl;
209 u_int64_t ccb_sgl_dva;
210 bus_addr_t ccb_sgl_offset;
211 u_int ccb_sgl_len;
212
213 struct mfii_request_descr ccb_req;
214
215 bus_dmamap_t ccb_dmamap;
216
217 /* data for sgl */
218 void *ccb_data;
219 size_t ccb_len;
220
221 int ccb_direction;
222#define MFII_DATA_NONE0 0
223#define MFII_DATA_IN1 1
224#define MFII_DATA_OUT2 2
225
226 void *ccb_cookie;
227 void (*ccb_done)(struct mfii_softc *,
228 struct mfii_ccb *);
229
230 u_int32_t ccb_flags;
231#define MFI_CCB_F_ERR(1<<0) (1<<0)
232 u_int ccb_smid;
233 u_int ccb_refcnt;
234 SIMPLEQ_ENTRY(mfii_ccb)struct { struct mfii_ccb *sqe_next; } ccb_link;
235};
236SIMPLEQ_HEAD(mfii_ccb_list, mfii_ccb)struct mfii_ccb_list { struct mfii_ccb *sqh_first; struct mfii_ccb
**sqh_last; }
;
237
238struct mfii_pd_dev_handles {
239 struct smr_entry pd_smr;
240 uint16_t pd_handles[MFI_MAX_PD256];
241};
242
243struct mfii_pd_softc {
244 struct scsibus_softc *pd_scsibus;
245 struct mfii_pd_dev_handles *pd_dev_handles;
246 uint8_t pd_timeout;
247};
248
249struct mfii_iop {
250 int bar;
251 int num_sge_loc;
252#define MFII_IOP_NUM_SGE_LOC_ORIG0 0
253#define MFII_IOP_NUM_SGE_LOC_351 1
254 u_int16_t ldio_ctx_reg_lock_flags;
255 u_int8_t ldio_req_type;
256 u_int8_t ldio_ctx_type_nseg;
257 u_int8_t sge_flag_chain;
258 u_int8_t sge_flag_eol;
259};
260
261struct mfii_softc {
262 struct device sc_dev;
263 const struct mfii_iop *sc_iop;
264
265 pci_chipset_tag_t sc_pc;
266 pcitag_t sc_tag;
267
268 bus_space_tag_t sc_iot;
269 bus_space_handle_t sc_ioh;
270 bus_size_t sc_ios;
271 bus_dma_tag_t sc_dmat;
272
273 void *sc_ih;
274
275 struct mutex sc_ccb_mtx;
276 struct mutex sc_post_mtx;
277
278 u_int sc_max_fw_cmds;
279 u_int sc_max_cmds;
280 u_int sc_max_sgl;
281
282 u_int sc_reply_postq_depth;
283 u_int sc_reply_postq_index;
284 struct mutex sc_reply_postq_mtx;
285 struct mfii_dmamem *sc_reply_postq;
286
287 struct mfii_dmamem *sc_requests;
288 struct mfii_dmamem *sc_mfi;
289 struct mfii_dmamem *sc_sense;
290 struct mfii_dmamem *sc_sgl;
291
292 struct mfii_ccb *sc_ccb;
293 struct mfii_ccb_list sc_ccb_freeq;
294
295 struct mfii_ccb *sc_aen_ccb;
296 struct task sc_aen_task;
297
298 struct mutex sc_abort_mtx;
299 struct mfii_ccb_list sc_abort_list;
300 struct task sc_abort_task;
301
302 struct scsibus_softc *sc_scsibus;
303 struct mfii_pd_softc *sc_pd;
304 struct scsi_iopool sc_iopool;
305
306 /* save some useful information for logical drives that is missing
307 * in sc_ld_list
308 */
309 struct {
310 char ld_dev[16]; /* device name sd? */
311 } sc_ld[MFI_MAX_LD64];
312 int sc_target_lds[MFI_MAX_LD64];
313
314 /* scsi ioctl from sd device */
315 int (*sc_ioctl)(struct device *, u_long, caddr_t);
316
317 /* bio */
318 struct mfi_conf *sc_cfg;
319 struct mfi_ctrl_info sc_info;
320 struct mfi_ld_list sc_ld_list;
321 struct mfi_ld_details *sc_ld_details; /* array to all logical disks */
322 int sc_no_pd; /* used physical disks */
323 int sc_ld_sz; /* sizeof sc_ld_details */
324
325 /* mgmt lock */
326 struct rwlock sc_lock;
327
328 /* sensors */
329 struct ksensordev sc_sensordev;
330 struct ksensor *sc_bbu;
331 struct ksensor *sc_bbu_status;
332 struct ksensor *sc_sensors;
333};
334
335#ifdef MFII_DEBUG
336#define DPRINTF(x...) do { if (mfii_debug) printf(x); } while(0)
337#define DNPRINTF(n,x...) do { if (mfii_debug & n) printf(x); } while(0)
338#define MFII_D_CMD 0x0001
339#define MFII_D_INTR 0x0002
340#define MFII_D_MISC 0x0004
341#define MFII_D_DMA 0x0008
342#define MFII_D_IOCTL 0x0010
343#define MFII_D_RW 0x0020
344#define MFII_D_MEM 0x0040
345#define MFII_D_CCB 0x0080
346uint32_t mfii_debug = 0
347/* | MFII_D_CMD */
348/* | MFII_D_INTR */
349 | MFII_D_MISC
350/* | MFII_D_DMA */
351/* | MFII_D_IOCTL */
352/* | MFII_D_RW */
353/* | MFII_D_MEM */
354/* | MFII_D_CCB */
355 ;
356#else
357#define DPRINTF(x...)
358#define DNPRINTF(n,x...)
359#endif
360
361int mfii_match(struct device *, void *, void *);
362void mfii_attach(struct device *, struct device *, void *);
363int mfii_detach(struct device *, int);
364int mfii_activate(struct device *, int);
365
366struct cfattach mfii_ca = {
367 sizeof(struct mfii_softc),
368 mfii_match,
369 mfii_attach,
370 mfii_detach,
371 mfii_activate,
372};
373
374struct cfdriver mfii_cd = {
375 NULL((void *)0),
376 "mfii",
377 DV_DULL
378};
379
380void mfii_scsi_cmd(struct scsi_xfer *);
381void mfii_scsi_cmd_done(struct mfii_softc *, struct mfii_ccb *);
382int mfii_scsi_ioctl(struct scsi_link *, u_long, caddr_t, int);
383int mfii_ioctl_cache(struct scsi_link *, u_long, struct dk_cache *);
384
385struct scsi_adapter mfii_switch = {
386 mfii_scsi_cmd, NULL((void *)0), NULL((void *)0), NULL((void *)0), mfii_scsi_ioctl
387};
388
389void mfii_pd_scsi_cmd(struct scsi_xfer *);
390int mfii_pd_scsi_probe(struct scsi_link *);
391
392struct scsi_adapter mfii_pd_switch = {
393 mfii_pd_scsi_cmd, NULL((void *)0), mfii_pd_scsi_probe, NULL((void *)0), NULL((void *)0),
394};
395
396#define DEVNAME(_sc)((_sc)->sc_dev.dv_xname) ((_sc)->sc_dev.dv_xname)
397
398u_int32_t mfii_read(struct mfii_softc *, bus_size_t);
399void mfii_write(struct mfii_softc *, bus_size_t, u_int32_t);
400
401struct mfii_dmamem * mfii_dmamem_alloc(struct mfii_softc *, size_t);
402void mfii_dmamem_free(struct mfii_softc *,
403 struct mfii_dmamem *);
404
405void * mfii_get_ccb(void *);
406void mfii_put_ccb(void *, void *);
407int mfii_init_ccb(struct mfii_softc *);
408void mfii_scrub_ccb(struct mfii_ccb *);
409
410int mfii_transition_firmware(struct mfii_softc *);
411int mfii_initialise_firmware(struct mfii_softc *);
412int mfii_get_info(struct mfii_softc *);
413int mfii_syspd(struct mfii_softc *);
414
415void mfii_start(struct mfii_softc *, struct mfii_ccb *);
416void mfii_done(struct mfii_softc *, struct mfii_ccb *);
417int mfii_poll(struct mfii_softc *, struct mfii_ccb *);
418void mfii_poll_done(struct mfii_softc *, struct mfii_ccb *);
419int mfii_exec(struct mfii_softc *, struct mfii_ccb *);
420void mfii_exec_done(struct mfii_softc *, struct mfii_ccb *);
421int mfii_my_intr(struct mfii_softc *);
422int mfii_intr(void *);
423void mfii_postq(struct mfii_softc *);
424
425int mfii_load_ccb(struct mfii_softc *, struct mfii_ccb *,
426 void *, int);
427int mfii_load_mfa(struct mfii_softc *, struct mfii_ccb *,
428 void *, int);
429
430int mfii_mfa_poll(struct mfii_softc *, struct mfii_ccb *);
431
432int mfii_mgmt(struct mfii_softc *, uint32_t,
433 const union mfi_mbox *, void *, size_t, int);
434int mfii_do_mgmt(struct mfii_softc *, struct mfii_ccb *,
435 uint32_t, const union mfi_mbox *, void *, size_t,
436 int);
437void mfii_empty_done(struct mfii_softc *, struct mfii_ccb *);
438
439int mfii_scsi_cmd_io(struct mfii_softc *,
440 struct scsi_xfer *);
441int mfii_scsi_cmd_cdb(struct mfii_softc *,
442 struct scsi_xfer *);
443int mfii_pd_scsi_cmd_cdb(struct mfii_softc *,
444 struct scsi_xfer *);
445void mfii_scsi_cmd_tmo(void *);
446
447int mfii_dev_handles_update(struct mfii_softc *sc);
448void mfii_dev_handles_smr(void *pd_arg);
449
450void mfii_abort_task(void *);
451void mfii_abort(struct mfii_softc *, struct mfii_ccb *,
452 uint16_t, uint16_t, uint8_t, uint32_t);
453void mfii_scsi_cmd_abort_done(struct mfii_softc *,
454 struct mfii_ccb *);
455
456int mfii_aen_register(struct mfii_softc *);
457void mfii_aen_start(struct mfii_softc *, struct mfii_ccb *,
458 struct mfii_dmamem *, uint32_t);
459void mfii_aen_done(struct mfii_softc *, struct mfii_ccb *);
460void mfii_aen(void *);
461void mfii_aen_unregister(struct mfii_softc *);
462
463void mfii_aen_pd_insert(struct mfii_softc *,
464 const struct mfi_evtarg_pd_address *);
465void mfii_aen_pd_remove(struct mfii_softc *,
466 const struct mfi_evtarg_pd_address *);
467void mfii_aen_pd_state_change(struct mfii_softc *,
468 const struct mfi_evtarg_pd_state *);
469void mfii_aen_ld_update(struct mfii_softc *);
470
471#if NBIO1 > 0
472int mfii_ioctl(struct device *, u_long, caddr_t);
473int mfii_bio_getitall(struct mfii_softc *);
474int mfii_ioctl_inq(struct mfii_softc *, struct bioc_inq *);
475int mfii_ioctl_vol(struct mfii_softc *, struct bioc_vol *);
476int mfii_ioctl_disk(struct mfii_softc *, struct bioc_disk *);
477int mfii_ioctl_alarm(struct mfii_softc *, struct bioc_alarm *);
478int mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *);
479int mfii_ioctl_setstate(struct mfii_softc *,
480 struct bioc_setstate *);
481int mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *);
482int mfii_bio_hs(struct mfii_softc *, int, int, void *);
483
484#ifndef SMALL_KERNEL
485static const char *mfi_bbu_indicators[] = {
486 "pack missing",
487 "voltage low",
488 "temp high",
489 "charge active",
490 "discharge active",
491 "learn cycle req'd",
492 "learn cycle active",
493 "learn cycle failed",
494 "learn cycle timeout",
495 "I2C errors",
496 "replace pack",
497 "low capacity",
498 "periodic learn req'd"
499};
500
501void mfii_init_ld_sensor(struct mfii_softc *, int);
502void mfii_refresh_ld_sensor(struct mfii_softc *, int);
503int mfii_create_sensors(struct mfii_softc *);
504void mfii_refresh_sensors(void *);
505void mfii_bbu(struct mfii_softc *);
506#endif /* SMALL_KERNEL */
507#endif /* NBIO > 0 */
508
509/*
510 * mfii boards support asynchronous (and non-polled) completion of
511 * dcmds by proxying them through a passthru mpii command that points
512 * at a dcmd frame. since the passthru command is submitted like
513 * the scsi commands using an SMID in the request descriptor,
514 * ccb_request memory * must contain the passthru command because
515 * that is what the SMID refers to. this means ccb_request cannot
516 * contain the dcmd. rather than allocating separate dma memory to
517 * hold the dcmd, we reuse the sense memory buffer for it.
518 */
519
520void mfii_dcmd_start(struct mfii_softc *,
521 struct mfii_ccb *);
522
523static inline void
524mfii_dcmd_scrub(struct mfii_ccb *ccb)
525{
526 memset(ccb->ccb_sense, 0, sizeof(*ccb->ccb_sense))__builtin_memset((ccb->ccb_sense), (0), (sizeof(*ccb->ccb_sense
)))
;
527}
528
529static inline struct mfi_dcmd_frame *
530mfii_dcmd_frame(struct mfii_ccb *ccb)
531{
532 CTASSERT(sizeof(struct mfi_dcmd_frame) <= sizeof(*ccb->ccb_sense))extern char _ctassert[(sizeof(struct mfi_dcmd_frame) <= sizeof
(*ccb->ccb_sense)) ? 1 : -1 ] __attribute__((__unused__))
;
533 return ((struct mfi_dcmd_frame *)ccb->ccb_sense);
534}
535
536static inline void
537mfii_dcmd_sync(struct mfii_softc *sc, struct mfii_ccb *ccb, int flags)
538{
539 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sense),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sense)->mdm_map)), (ccb->ccb_sense_offset), (sizeof(
*ccb->ccb_sense)), (flags))
540 ccb->ccb_sense_offset, sizeof(*ccb->ccb_sense), flags)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sense)->mdm_map)), (ccb->ccb_sense_offset), (sizeof(
*ccb->ccb_sense)), (flags))
;
541}
542
543#define mfii_fw_state(_sc)mfii_read((_sc), 0xb0) mfii_read((_sc), MFI_OSP0xb0)
544
545const struct mfii_iop mfii_iop_thunderbolt = {
546 MFII_BAR0x14,
547 MFII_IOP_NUM_SGE_LOC_ORIG0,
548 0,
549 MFII_REQ_TYPE_LDIO(0x7 << 1),
550 0,
551 MFII_SGE_CHAIN_ELEMENT(0x80) | MFII_SGE_ADDR_IOCPLBNTA(0x03),
552 0
553};
554
555/*
556 * a lot of these values depend on us not implementing fastpath yet.
557 */
558const struct mfii_iop mfii_iop_25 = {
559 MFII_BAR0x14,
560 MFII_IOP_NUM_SGE_LOC_ORIG0,
561 MFII_RAID_CTX_RL_FLAGS_CPU0(0x00), /* | MFII_RAID_CTX_RL_FLAGS_SEQNO_EN */
562 MFII_REQ_TYPE_NO_LOCK(0x2 << 1),
563 MFII_RAID_CTX_TYPE_CUDA(0x2 << 4) | 0x1,
564 MFII_SGE_CHAIN_ELEMENT(0x80),
565 MFII_SGE_END_OF_LIST(0x40)
566};
567
568const struct mfii_iop mfii_iop_35 = {
569 MFII_BAR_350x10,
570 MFII_IOP_NUM_SGE_LOC_351,
571 MFII_RAID_CTX_ROUTING_FLAGS_CPU00, /* | MFII_RAID_CTX_ROUTING_FLAGS_SQN */
572 MFII_REQ_TYPE_NO_LOCK(0x2 << 1),
573 MFII_RAID_CTX_TYPE_CUDA(0x2 << 4) | 0x1,
574 MFII_SGE_CHAIN_ELEMENT(0x80),
575 MFII_SGE_END_OF_LIST(0x40)
576};
577
578struct mfii_device {
579 pcireg_t mpd_vendor;
580 pcireg_t mpd_product;
581 const struct mfii_iop *mpd_iop;
582};
583
584const struct mfii_device mfii_devices[] = {
585 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_22080x005b,
586 &mfii_iop_thunderbolt },
587 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_30080x005f,
588 &mfii_iop_25 },
589 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_31080x005d,
590 &mfii_iop_25 },
591 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_34040x001c,
592 &mfii_iop_35 },
593 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_35040x001b,
594 &mfii_iop_35 },
595 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_34080x0017,
596 &mfii_iop_35 },
597 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_35080x0016,
598 &mfii_iop_35 },
599 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_34160x0015,
600 &mfii_iop_35 },
601 { PCI_VENDOR_SYMBIOS0x1000, PCI_PRODUCT_SYMBIOS_MEGARAID_35160x0014,
602 &mfii_iop_35 }
603};
604
605const struct mfii_iop *mfii_find_iop(struct pci_attach_args *);
606
607const struct mfii_iop *
608mfii_find_iop(struct pci_attach_args *pa)
609{
610 const struct mfii_device *mpd;
611 int i;
612
613 for (i = 0; i < nitems(mfii_devices)(sizeof((mfii_devices)) / sizeof((mfii_devices)[0])); i++) {
614 mpd = &mfii_devices[i];
615
616 if (mpd->mpd_vendor == PCI_VENDOR(pa->pa_id)(((pa->pa_id) >> 0) & 0xffff) &&
617 mpd->mpd_product == PCI_PRODUCT(pa->pa_id)(((pa->pa_id) >> 16) & 0xffff))
618 return (mpd->mpd_iop);
619 }
620
621 return (NULL((void *)0));
622}
623
624int
625mfii_match(struct device *parent, void *match, void *aux)
626{
627 return ((mfii_find_iop(aux) != NULL((void *)0)) ? 1 : 0);
628}
629
630void
631mfii_attach(struct device *parent, struct device *self, void *aux)
632{
633 struct mfii_softc *sc = (struct mfii_softc *)self;
634 struct pci_attach_args *pa = aux;
635 pcireg_t memtype;
636 pci_intr_handle_t ih;
637 struct scsibus_attach_args saa;
638 u_int32_t status, scpad2, scpad3;
639 int chain_frame_sz, nsge_in_io, nsge_in_chain, i;
640
641 /* init sc */
642 sc->sc_iop = mfii_find_iop(aux);
643 sc->sc_dmat = pa->pa_dmat;
644 SIMPLEQ_INIT(&sc->sc_ccb_freeq)do { (&sc->sc_ccb_freeq)->sqh_first = ((void *)0); (
&sc->sc_ccb_freeq)->sqh_last = &(&sc->sc_ccb_freeq
)->sqh_first; } while (0)
;
645 mtx_init(&sc->sc_ccb_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_ccb_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9) ?
0x9 : ((0x6)))); } while (0)
;
646 mtx_init(&sc->sc_post_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_post_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9)
? 0x9 : ((0x6)))); } while (0)
;
647 mtx_init(&sc->sc_reply_postq_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_reply_postq_mtx), ((((0x6)) > 0x0 && ((0x6)) <
0x9) ? 0x9 : ((0x6)))); } while (0)
;
648 scsi_iopool_init(&sc->sc_iopool, sc, mfii_get_ccb, mfii_put_ccb);
649
650 rw_init(&sc->sc_lock, "mfii_lock")_rw_init_flags(&sc->sc_lock, "mfii_lock", 0, ((void *)
0))
;
651
652 sc->sc_aen_ccb = NULL((void *)0);
653 task_set(&sc->sc_aen_task, mfii_aen, sc);
654
655 mtx_init(&sc->sc_abort_mtx, IPL_BIO)do { (void)(((void *)0)); (void)(0); __mtx_init((&sc->
sc_abort_mtx), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6)))); } while (0)
;
656 SIMPLEQ_INIT(&sc->sc_abort_list)do { (&sc->sc_abort_list)->sqh_first = ((void *)0);
(&sc->sc_abort_list)->sqh_last = &(&sc->
sc_abort_list)->sqh_first; } while (0)
;
657 task_set(&sc->sc_abort_task, mfii_abort_task, sc);
658
659 /* wire up the bus shizz */
660 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, sc->sc_iop->bar);
661 if (pci_mapreg_map(pa, sc->sc_iop->bar, memtype, 0,
662 &sc->sc_iot, &sc->sc_ioh, NULL((void *)0), &sc->sc_ios, MFII_PCI_MEMSIZE0x2000)) {
663 printf(": unable to map registers\n");
664 return;
665 }
666
667 /* disable interrupts */
668 mfii_write(sc, MFI_OMSK0x34, 0xffffffff);
669
670 if (pci_intr_map_msi(pa, &ih) != 0 && pci_intr_map(pa, &ih) != 0) {
671 printf(": unable to map interrupt\n");
672 goto pci_unmap;
673 }
674 printf(": %s\n", pci_intr_string(pa->pa_pc, ih));
675
676 /* lets get started */
677 if (mfii_transition_firmware(sc))
678 goto pci_unmap;
679
680 /* determine max_cmds (refer to the Linux megaraid_sas driver) */
681 scpad3 = mfii_read(sc, MFII_OSP30xb8);
682 status = mfii_fw_state(sc)mfii_read((sc), 0xb0);
683 sc->sc_max_fw_cmds = scpad3 & MFI_STATE_MAXCMD_MASK0x0000ffff;
684 if (sc->sc_max_fw_cmds == 0)
685 sc->sc_max_fw_cmds = status & MFI_STATE_MAXCMD_MASK0x0000ffff;
686 /*
687 * reduce max_cmds by 1 to ensure that the reply queue depth does not
688 * exceed FW supplied max_fw_cmds.
689 */
690 sc->sc_max_cmds = min(sc->sc_max_fw_cmds, 1024) - 1;
691
692 /* determine max_sgl (refer to the Linux megaraid_sas driver) */
693 scpad2 = mfii_read(sc, MFII_OSP20xb4);
694 chain_frame_sz =
695 ((scpad2 & MFII_MAX_CHAIN_MASK0x000003E0) >> MFII_MAX_CHAIN_SHIFT5) *
696 ((scpad2 & MFII_MAX_CHAIN_UNIT0x00400000) ? MFII_1MB_IO(128 * 4) : MFII_256K_IO128);
697 if (chain_frame_sz < MFII_CHAIN_FRAME_MIN1024)
698 chain_frame_sz = MFII_CHAIN_FRAME_MIN1024;
699
700 nsge_in_io = (MFII_REQUEST_SIZE256 -
701 sizeof(struct mpii_msg_scsi_io) -
702 sizeof(struct mfii_raid_context)) / sizeof(struct mfii_sge);
703 nsge_in_chain = chain_frame_sz / sizeof(struct mfii_sge);
704
705 /* round down to nearest power of two */
706 sc->sc_max_sgl = 1;
707 while ((sc->sc_max_sgl << 1) <= (nsge_in_io + nsge_in_chain))
708 sc->sc_max_sgl <<= 1;
709
710 DNPRINTF(MFII_D_MISC, "%s: OSP 0x%08x, OSP2 0x%08x, OSP3 0x%08x\n",
711 DEVNAME(sc), status, scpad2, scpad3);
712 DNPRINTF(MFII_D_MISC, "%s: max_fw_cmds %d, max_cmds %d\n",
713 DEVNAME(sc), sc->sc_max_fw_cmds, sc->sc_max_cmds);
714 DNPRINTF(MFII_D_MISC, "%s: nsge_in_io %d, nsge_in_chain %d, "
715 "max_sgl %d\n", DEVNAME(sc), nsge_in_io, nsge_in_chain,
716 sc->sc_max_sgl);
717
718 /* sense memory */
719 CTASSERT(sizeof(struct mfi_sense) == MFI_SENSE_SIZE)extern char _ctassert[(sizeof(struct mfi_sense) == 128) ? 1 :
-1 ] __attribute__((__unused__))
;
720 sc->sc_sense = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_SENSE_SIZE128);
721 if (sc->sc_sense == NULL((void *)0)) {
722 printf("%s: unable to allocate sense memory\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
723 goto pci_unmap;
724 }
725
726 /* reply post queue */
727 sc->sc_reply_postq_depth = roundup(sc->sc_max_fw_cmds, 16)((((sc->sc_max_fw_cmds)+((16)-1))/(16))*(16));
728
729 sc->sc_reply_postq = mfii_dmamem_alloc(sc,
730 sc->sc_reply_postq_depth * sizeof(struct mpii_reply_descr));
731 if (sc->sc_reply_postq == NULL((void *)0))
732 goto free_sense;
733
734 memset(MFII_DMA_KVA(sc->sc_reply_postq), 0xff,__builtin_memset((((void *)(sc->sc_reply_postq)->mdm_kva
)), (0xff), (((sc->sc_reply_postq)->mdm_size)))
735 MFII_DMA_LEN(sc->sc_reply_postq))__builtin_memset((((void *)(sc->sc_reply_postq)->mdm_kva
)), (0xff), (((sc->sc_reply_postq)->mdm_size)))
;
736
737 /* MPII request frame array */
738 sc->sc_requests = mfii_dmamem_alloc(sc,
739 MFII_REQUEST_SIZE256 * (sc->sc_max_cmds + 1));
740 if (sc->sc_requests == NULL((void *)0))
741 goto free_reply_postq;
742
743 /* MFI command frame array */
744 sc->sc_mfi = mfii_dmamem_alloc(sc, sc->sc_max_cmds * MFI_FRAME_SIZE64);
745 if (sc->sc_mfi == NULL((void *)0))
746 goto free_requests;
747
748 /* MPII SGL array */
749 sc->sc_sgl = mfii_dmamem_alloc(sc, sc->sc_max_cmds *
750 sizeof(struct mfii_sge) * sc->sc_max_sgl);
751 if (sc->sc_sgl == NULL((void *)0))
752 goto free_mfi;
753
754 if (mfii_init_ccb(sc) != 0) {
755 printf("%s: could not init ccb list\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
756 goto free_sgl;
757 }
758
759 /* kickstart firmware with all addresses and pointers */
760 if (mfii_initialise_firmware(sc) != 0) {
761 printf("%s: could not initialize firmware\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
762 goto free_sgl;
763 }
764
765 if (mfii_get_info(sc) != 0) {
766 printf("%s: could not retrieve controller information\n",
767 DEVNAME(sc)((sc)->sc_dev.dv_xname));
768 goto free_sgl;
769 }
770
771 printf("%s: \"%s\", firmware %s", DEVNAME(sc)((sc)->sc_dev.dv_xname),
772 sc->sc_info.mci_product_name, sc->sc_info.mci_package_version);
773 if (letoh16(sc->sc_info.mci_memory_size)((__uint16_t)(sc->sc_info.mci_memory_size)) > 0)
774 printf(", %uMB cache", letoh16(sc->sc_info.mci_memory_size)((__uint16_t)(sc->sc_info.mci_memory_size)));
775 printf("\n");
776
777 sc->sc_ih = pci_intr_establish(sc->sc_pc, ih, IPL_BIO0x6,
778 mfii_intr, sc, DEVNAME(sc)((sc)->sc_dev.dv_xname));
779 if (sc->sc_ih == NULL((void *)0))
780 goto free_sgl;
781
782 saa.saa_adapter_softc = sc;
783 saa.saa_adapter = &mfii_switch;
784 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET0xffff;
785 saa.saa_adapter_buswidth = sc->sc_info.mci_max_lds;
786 saa.saa_luns = 8;
787 saa.saa_openings = sc->sc_max_cmds;
788 saa.saa_pool = &sc->sc_iopool;
789 saa.saa_quirks = saa.saa_flags = 0;
790 saa.saa_wwpn = saa.saa_wwnn = 0;
791
792 sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
793 scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
;
794
795 mfii_syspd(sc);
796
797 if (mfii_aen_register(sc) != 0) {
798 /* error printed by mfii_aen_register */
799 goto intr_disestablish;
800 }
801
802 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST0x03010000, NULL((void *)0), &sc->sc_ld_list,
803 sizeof(sc->sc_ld_list), SCSI_DATA_IN0x00800) != 0) {
804 printf("%s: getting list of logical disks failed\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
805 goto intr_disestablish;
806 }
807 memset(sc->sc_target_lds, -1, sizeof(sc->sc_target_lds))__builtin_memset((sc->sc_target_lds), (-1), (sizeof(sc->
sc_target_lds)))
;
808 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
809 int target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
810 sc->sc_target_lds[target] = i;
811 }
812
813 /* enable interrupts */
814 mfii_write(sc, MFI_OSTS0x30, 0xffffffff);
815 mfii_write(sc, MFI_OMSK0x34, ~MFII_OSTS_INTR_VALID0x00000009);
816
817#if NBIO1 > 0
818 if (bio_register(&sc->sc_dev, mfii_ioctl) != 0)
819 panic("%s: controller registration failed", DEVNAME(sc)((sc)->sc_dev.dv_xname));
820 else
821 sc->sc_ioctl = mfii_ioctl;
822
823#ifndef SMALL_KERNEL
824 if (mfii_create_sensors(sc) != 0)
825 printf("%s: unable to create sensors\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
826#endif
827#endif /* NBIO > 0 */
828
829 return;
830intr_disestablish:
831 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
832free_sgl:
833 mfii_dmamem_free(sc, sc->sc_sgl);
834free_mfi:
835 mfii_dmamem_free(sc, sc->sc_mfi);
836free_requests:
837 mfii_dmamem_free(sc, sc->sc_requests);
838free_reply_postq:
839 mfii_dmamem_free(sc, sc->sc_reply_postq);
840free_sense:
841 mfii_dmamem_free(sc, sc->sc_sense);
842pci_unmap:
843 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
844}
845
846static inline uint16_t
847mfii_dev_handle(struct mfii_softc *sc, uint16_t target)
848{
849 struct mfii_pd_dev_handles *handles;
850 uint16_t handle;
851
852 smr_read_enter();
853 handles = SMR_PTR_GET(&sc->sc_pd->pd_dev_handles)({ typeof(*&sc->sc_pd->pd_dev_handles) __tmp = *(volatile
typeof(*&sc->sc_pd->pd_dev_handles) *)&(*&
sc->sc_pd->pd_dev_handles); membar_datadep_consumer(); __tmp
; })
;
854 handle = handles->pd_handles[target];
855 smr_read_leave();
856
857 return (handle);
858}
859
860void
861mfii_dev_handles_smr(void *pd_arg)
862{
863 struct mfii_pd_dev_handles *handles = pd_arg;
864
865 free(handles, M_DEVBUF2, sizeof(*handles));
866}
867
868int
869mfii_dev_handles_update(struct mfii_softc *sc)
870{
871 struct mfii_ld_map *lm;
872 struct mfii_pd_dev_handles *handles, *old_handles;
873 int i;
874 int rv = 0;
875
876 lm = malloc(sizeof(*lm), M_TEMP127, M_WAITOK0x0001|M_ZERO0x0008);
877
878 rv = mfii_mgmt(sc, MR_DCMD_LD_MAP_GET_INFO0x0300e101, NULL((void *)0), lm, sizeof(*lm),
879 SCSI_DATA_IN0x00800|SCSI_NOSLEEP0x00001);
880
881 if (rv != 0) {
882 rv = EIO5;
883 goto free_lm;
884 }
885
886 handles = malloc(sizeof(*handles), M_DEVBUF2, M_WAITOK0x0001);
887 smr_init(&handles->pd_smr);
888 for (i = 0; i < MFI_MAX_PD256; i++)
889 handles->pd_handles[i] = lm->mlm_dev_handle[i].mdh_cur_handle;
890
891 /* commit the updated info */
892 sc->sc_pd->pd_timeout = lm->mlm_pd_timeout;
893 old_handles = SMR_PTR_GET_LOCKED(&sc->sc_pd->pd_dev_handles)(*(&sc->sc_pd->pd_dev_handles));
894 SMR_PTR_SET_LOCKED(&sc->sc_pd->pd_dev_handles, handles)do { do { __asm volatile("" ::: "memory"); } while (0); ({ typeof
(*&sc->sc_pd->pd_dev_handles) __tmp = (handles); *(
volatile typeof(*&sc->sc_pd->pd_dev_handles) *)&
(*&sc->sc_pd->pd_dev_handles) = __tmp; __tmp; }); }
while (0)
;
895
896 if (old_handles != NULL((void *)0))
897 smr_call(&old_handles->pd_smr, mfii_dev_handles_smr, old_handles)smr_call_impl(&old_handles->pd_smr, mfii_dev_handles_smr
, old_handles, 0)
;
898
899free_lm:
900 free(lm, M_TEMP127, sizeof(*lm));
901
902 return (rv);
903}
904
905int
906mfii_syspd(struct mfii_softc *sc)
907{
908 struct scsibus_attach_args saa;
909
910 sc->sc_pd = malloc(sizeof(*sc->sc_pd), M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
911 if (sc->sc_pd == NULL((void *)0))
912 return (1);
913
914 if (mfii_dev_handles_update(sc) != 0)
915 goto free_pdsc;
916
917 saa.saa_adapter = &mfii_pd_switch;
918 saa.saa_adapter_softc = sc;
919 saa.saa_adapter_buswidth = MFI_MAX_PD256;
920 saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET0xffff;
921 saa.saa_luns = 8;
922 saa.saa_openings = sc->sc_max_cmds - 1;
923 saa.saa_pool = &sc->sc_iopool;
924 saa.saa_quirks = saa.saa_flags = 0;
925 saa.saa_wwpn = saa.saa_wwnn = 0;
926
927 sc->sc_pd->pd_scsibus = (struct scsibus_softc *)
928 config_found(&sc->sc_dev, &saa, scsiprint)config_found_sm((&sc->sc_dev), (&saa), (scsiprint)
, ((void *)0))
;
929
930 return (0);
931
932free_pdsc:
933 free(sc->sc_pd, M_DEVBUF2, sizeof(*sc->sc_pd));
934 return (1);
935}
936
937int
938mfii_detach(struct device *self, int flags)
939{
940 struct mfii_softc *sc = (struct mfii_softc *)self;
941
942 if (sc->sc_ih == NULL((void *)0))
943 return (0);
944
945#ifndef SMALL_KERNEL
946 if (sc->sc_sensors) {
947 sensordev_deinstall(&sc->sc_sensordev);
948 free(sc->sc_sensors, M_DEVBUF2,
949 MFI_MAX_LD64 * sizeof(struct ksensor));
950 }
951
952 if (sc->sc_bbu) {
953 free(sc->sc_bbu, M_DEVBUF2, 4 * sizeof(*sc->sc_bbu));
954 }
955
956 if (sc->sc_bbu_status) {
957 free(sc->sc_bbu_status, M_DEVBUF2,
958 sizeof(*sc->sc_bbu_status) * sizeof(mfi_bbu_indicators));
959 }
960#endif /* SMALL_KERNEL */
961
962 mfii_aen_unregister(sc);
963 pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
964 mfii_dmamem_free(sc, sc->sc_sgl);
965 mfii_dmamem_free(sc, sc->sc_mfi);
966 mfii_dmamem_free(sc, sc->sc_requests);
967 mfii_dmamem_free(sc, sc->sc_reply_postq);
968 mfii_dmamem_free(sc, sc->sc_sense);
969 bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios);
970
971 return (0);
972}
973
static void
mfii_flush_cache(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* Cache flush on powerdown is currently compiled out. */
#if 0
	union mfi_mbox mbox = {
		.b[0] = MR_FLUSH_CTRL_CACHE | MR_FLUSH_DISK_CACHE,
	};
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_CACHE_FLUSH, &mbox,
	    NULL, 0, SCSI_NOSLEEP);
	if (rv != 0) {
		printf("%s: unable to flush cache\n", DEVNAME(sc));
		return;
	}
#endif
}
992
static void
mfii_shutdown(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* Controller shutdown DCMD is currently compiled out. */
#if 0
	int rv;

	mfii_scrub_ccb(ccb);
	rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_SHUTDOWN, NULL,
	    NULL, 0, SCSI_POLL);
	if (rv != 0) {
		printf("%s: unable to shutdown controller\n", DEVNAME(sc));
		return;
	}
#endif
}
1008
1009static void
1010mfii_powerdown(struct mfii_softc *sc)
1011{
1012 struct mfii_ccb *ccb;
1013
1014 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
1015 if (ccb == NULL((void *)0)) {
1016 printf("%s: unable to allocate ccb for shutdown\n",
1017 DEVNAME(sc)((sc)->sc_dev.dv_xname));
1018 return;
1019 }
1020
1021 mfii_flush_cache(sc, ccb);
1022 mfii_shutdown(sc, ccb);
1023 scsi_io_put(&sc->sc_iopool, ccb);
1024}
1025
1026int
1027mfii_activate(struct device *self, int act)
1028{
1029 struct mfii_softc *sc = (struct mfii_softc *)self;
1030 int rv;
1031
1032 switch (act) {
1033 case DVACT_POWERDOWN6:
1034 rv = config_activate_children(&sc->sc_dev, act);
1035 mfii_powerdown(sc);
1036 break;
1037 default:
1038 rv = config_activate_children(&sc->sc_dev, act);
1039 break;
1040 }
1041
1042 return (rv);
1043}
1044
1045u_int32_t
1046mfii_read(struct mfii_softc *sc, bus_size_t r)
1047{
1048 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1049 BUS_SPACE_BARRIER_READ0x01);
1050 return (bus_space_read_4(sc->sc_iot, sc->sc_ioh, r)((sc->sc_iot)->read_4((sc->sc_ioh), (r))));
1051}
1052
1053void
1054mfii_write(struct mfii_softc *sc, bus_size_t r, u_int32_t v)
1055{
1056 bus_space_write_4(sc->sc_iot, sc->sc_ioh, r, v)((sc->sc_iot)->write_4((sc->sc_ioh), (r), (v)));
1057 bus_space_barrier(sc->sc_iot, sc->sc_ioh, r, 4,
1058 BUS_SPACE_BARRIER_WRITE0x02);
1059}
1060
1061struct mfii_dmamem *
1062mfii_dmamem_alloc(struct mfii_softc *sc, size_t size)
1063{
1064 struct mfii_dmamem *m;
1065 int nsegs;
1066
1067 m = malloc(sizeof(*m), M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
1068 if (m == NULL((void *)0))
1069 return (NULL((void *)0));
1070
1071 m->mdm_size = size;
1072
1073 if (bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&m->mdm_map))
1074 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &m->mdm_map)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), (size
), (1), (size), (0), (0x0001 | 0x0002), (&m->mdm_map))
!= 0)
1075 goto mdmfree;
1076
1077 if (bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &m->mdm_seg, 1,(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&m->mdm_seg), (1), (&nsegs
), (0x0001 | 0x1000))
1078 &nsegs, BUS_DMA_NOWAIT | BUS_DMA_ZERO)(*(sc->sc_dmat)->_dmamem_alloc)((sc->sc_dmat), (size
), ((1 << 12)), (0), (&m->mdm_seg), (1), (&nsegs
), (0x0001 | 0x1000))
!= 0)
1079 goto destroy;
1080
1081 if (bus_dmamem_map(sc->sc_dmat, &m->mdm_seg, nsegs, size, &m->mdm_kva,(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&m
->mdm_seg), (nsegs), (size), (&m->mdm_kva), (0x0001
))
1082 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamem_map)((sc->sc_dmat), (&m
->mdm_seg), (nsegs), (size), (&m->mdm_kva), (0x0001
))
!= 0)
1083 goto free;
1084
1085 if (bus_dmamap_load(sc->sc_dmat, m->mdm_map, m->mdm_kva, size, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (m->
mdm_map), (m->mdm_kva), (size), (((void *)0)), (0x0001))
1086 BUS_DMA_NOWAIT)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (m->
mdm_map), (m->mdm_kva), (size), (((void *)0)), (0x0001))
!= 0)
1087 goto unmap;
1088
1089 return (m);
1090
1091unmap:
1092 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (m->
mdm_kva), (m->mdm_size))
;
1093free:
1094 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
m->mdm_seg), (1))
;
1095destroy:
1096 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (m->
mdm_map))
;
1097mdmfree:
1098 free(m, M_DEVBUF2, sizeof *m);
1099
1100 return (NULL((void *)0));
1101}
1102
1103void
1104mfii_dmamem_free(struct mfii_softc *sc, struct mfii_dmamem *m)
1105{
1106 bus_dmamap_unload(sc->sc_dmat, m->mdm_map)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (m->
mdm_map))
;
1107 bus_dmamem_unmap(sc->sc_dmat, m->mdm_kva, m->mdm_size)(*(sc->sc_dmat)->_dmamem_unmap)((sc->sc_dmat), (m->
mdm_kva), (m->mdm_size))
;
1108 bus_dmamem_free(sc->sc_dmat, &m->mdm_seg, 1)(*(sc->sc_dmat)->_dmamem_free)((sc->sc_dmat), (&
m->mdm_seg), (1))
;
1109 bus_dmamap_destroy(sc->sc_dmat, m->mdm_map)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (m->
mdm_map))
;
1110 free(m, M_DEVBUF2, sizeof *m);
1111}
1112
1113void
1114mfii_dcmd_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1115{
1116 struct mpii_msg_scsi_io *io = ccb->ccb_request;
1117 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1118 struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1119
1120 io->function = MFII_FUNCTION_PASSTHRU_IO(0xf0);
1121 io->sgl_offset0 = (uint32_t *)sge - (uint32_t *)io;
1122 io->chain_offset = io->sgl_offset0 / 4;
1123
1124 htolem64(&sge->sg_addr, ccb->ccb_sense_dva)(*(__uint64_t *)(&sge->sg_addr) = ((__uint64_t)(ccb->
ccb_sense_dva)))
;
1125 htolem32(&sge->sg_len, sizeof(*ccb->ccb_sense))(*(__uint32_t *)(&sge->sg_len) = ((__uint32_t)(sizeof(
*ccb->ccb_sense))))
;
1126 sge->sg_flags = MFII_SGE_CHAIN_ELEMENT(0x80) | MFII_SGE_ADDR_IOCPLBNTA(0x03);
1127
1128 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI(0x00);
1129 ccb->ccb_req.smid = letoh16(ccb->ccb_smid)((__uint16_t)(ccb->ccb_smid));
1130
1131 mfii_start(sc, ccb);
1132}
1133
1134int
1135mfii_aen_register(struct mfii_softc *sc)
1136{
1137 struct mfi_evt_log_info mel;
1138 struct mfii_ccb *ccb;
1139 struct mfii_dmamem *mdm;
1140 int rv;
1141
1142 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
1143 if (ccb == NULL((void *)0)) {
1144 printf("%s: unable to allocate ccb for aen\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1145 return (ENOMEM12);
1146 }
1147
1148 memset(&mel, 0, sizeof(mel))__builtin_memset((&mel), (0), (sizeof(mel)));
1149 mfii_scrub_ccb(ccb);
1150
1151 rv = mfii_do_mgmt(sc, ccb, MR_DCMD_CTRL_EVENT_GET_INFO0x01040100, NULL((void *)0),
1152 &mel, sizeof(mel), SCSI_DATA_IN0x00800|SCSI_NOSLEEP0x00001);
1153 if (rv != 0) {
1154 scsi_io_put(&sc->sc_iopool, ccb);
1155 printf("%s: unable to get event info\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1156 return (EIO5);
1157 }
1158
1159 mdm = mfii_dmamem_alloc(sc, sizeof(struct mfi_evt_detail));
1160 if (mdm == NULL((void *)0)) {
1161 scsi_io_put(&sc->sc_iopool, ccb);
1162 printf("%s: unable to allocate event data\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1163 return (ENOMEM12);
1164 }
1165
1166 /* replay all the events from boot */
1167 mfii_aen_start(sc, ccb, mdm, lemtoh32(&mel.mel_boot_seq_num)((__uint32_t)(*(__uint32_t *)(&mel.mel_boot_seq_num))));
1168
1169 return (0);
1170}
1171
1172void
1173mfii_aen_start(struct mfii_softc *sc, struct mfii_ccb *ccb,
1174 struct mfii_dmamem *mdm, uint32_t seq)
1175{
1176 struct mfi_dcmd_frame *dcmd = mfii_dcmd_frame(ccb);
1177 struct mfi_frame_header *hdr = &dcmd->mdf_header;
1178 union mfi_sgl *sgl = &dcmd->mdf_sgl;
1179 union mfi_evt_class_locale mec;
1180
1181 mfii_scrub_ccb(ccb);
1182 mfii_dcmd_scrub(ccb);
1183 memset(MFII_DMA_KVA(mdm), 0, MFII_DMA_LEN(mdm))__builtin_memset((((void *)(mdm)->mdm_kva)), (0), (((mdm)->
mdm_size)))
;
1184
1185 ccb->ccb_cookie = mdm;
1186 ccb->ccb_done = mfii_aen_done;
1187 sc->sc_aen_ccb = ccb;
1188
1189 mec.mec_members.class = MFI_EVT_CLASS_DEBUG;
1190 mec.mec_members.reserved = 0;
1191 mec.mec_members.locale = htole16(MFI_EVT_LOCALE_ALL)((__uint16_t)(MFI_EVT_LOCALE_ALL));
1192
1193 hdr->mfh_cmd = MFI_CMD_DCMD0x05;
1194 hdr->mfh_sg_count = 1;
1195 hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ | MFI_FRAME_SGL64)((__uint16_t)(0x0010 | 0x0002));
1196 htolem32(&hdr->mfh_data_len, MFII_DMA_LEN(mdm))(*(__uint32_t *)(&hdr->mfh_data_len) = ((__uint32_t)((
(mdm)->mdm_size))))
;
1197 dcmd->mdf_opcode = htole32(MR_DCMD_CTRL_EVENT_WAIT)((__uint32_t)(0x01040500));
1198 htolem32(&dcmd->mdf_mbox.w[0], seq)(*(__uint32_t *)(&dcmd->mdf_mbox.w[0]) = ((__uint32_t)
(seq)))
;
1199 htolem32(&dcmd->mdf_mbox.w[1], mec.mec_word)(*(__uint32_t *)(&dcmd->mdf_mbox.w[1]) = ((__uint32_t)
(mec.mec_word)))
;
1200 htolem64(&sgl->sg64[0].addr, MFII_DMA_DVA(mdm))(*(__uint64_t *)(&sgl->sg64[0].addr) = ((__uint64_t)((
(u_int64_t)(mdm)->mdm_map->dm_segs[0].ds_addr))))
;
1201 htolem32(&sgl->sg64[0].len, MFII_DMA_LEN(mdm))(*(__uint32_t *)(&sgl->sg64[0].len) = ((__uint32_t)(((
mdm)->mdm_size))))
;
1202
1203 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((mdm
)->mdm_map)), (0), (((mdm)->mdm_size)), (0x01))
1204 0, MFII_DMA_LEN(mdm), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((mdm
)->mdm_map)), (0), (((mdm)->mdm_size)), (0x01))
;
1205
1206 mfii_dcmd_sync(sc, ccb, BUS_DMASYNC_PREREAD0x01|BUS_DMASYNC_PREWRITE0x04);
1207 mfii_dcmd_start(sc, ccb);
1208}
1209
1210void
1211mfii_aen_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1212{
1213 KASSERT(sc->sc_aen_ccb == ccb)((sc->sc_aen_ccb == ccb) ? (void)0 : __assert("diagnostic "
, "/usr/src/sys/dev/pci/mfii.c", 1213, "sc->sc_aen_ccb == ccb"
))
;
1214
1215 /* defer to a thread with KERNEL_LOCK so we can run autoconf */
1216 task_add(systq, &sc->sc_aen_task);
1217}
1218
1219void
1220mfii_aen(void *arg)
1221{
1222 struct mfii_softc *sc = arg;
1223 struct mfii_ccb *ccb = sc->sc_aen_ccb;
1224 struct mfii_dmamem *mdm = ccb->ccb_cookie;
1225 const struct mfi_evt_detail *med = MFII_DMA_KVA(mdm)((void *)(mdm)->mdm_kva);
1226 uint32_t code;
1227
1228 mfii_dcmd_sync(sc, ccb,
1229 BUS_DMASYNC_POSTREAD0x02|BUS_DMASYNC_POSTWRITE0x08);
1230 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(mdm),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((mdm
)->mdm_map)), (0), (((mdm)->mdm_size)), (0x02))
1231 0, MFII_DMA_LEN(mdm), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((mdm
)->mdm_map)), (0), (((mdm)->mdm_size)), (0x02))
;
1232
1233 code = lemtoh32(&med->med_code)((__uint32_t)(*(__uint32_t *)(&med->med_code)));
1234
1235#if 0
1236 log(LOG_DEBUG7, "%s (seq %u, code %08x) %s\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1237 lemtoh32(&med->med_seq_num)((__uint32_t)(*(__uint32_t *)(&med->med_seq_num))), code, med->med_description);
1238#endif
1239
1240 switch (code) {
1241 case MFI_EVT_PD_INSERTED_EXT0x00f7:
1242 if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS0x1d)
1243 break;
1244
1245 mfii_aen_pd_insert(sc, &med->args.pd_address);
1246 break;
1247 case MFI_EVT_PD_REMOVED_EXT0x00f8:
1248 if (med->med_arg_type != MFI_EVT_ARGS_PD_ADDRESS0x1d)
1249 break;
1250
1251 mfii_aen_pd_remove(sc, &med->args.pd_address);
1252 break;
1253
1254 case MFI_EVT_PD_STATE_CHANGE0x0072:
1255 if (med->med_arg_type != MFI_EVT_ARGS_PD_STATE0x0f)
1256 break;
1257
1258 mfii_aen_pd_state_change(sc, &med->args.pd_state);
1259 break;
1260
1261 case MFI_EVT_LD_CREATED0x008a:
1262 case MFI_EVT_LD_DELETED0x008b:
1263 mfii_aen_ld_update(sc);
1264 break;
1265
1266 default:
1267 break;
1268 }
1269
1270 mfii_aen_start(sc, ccb, mdm, lemtoh32(&med->med_seq_num)((__uint32_t)(*(__uint32_t *)(&med->med_seq_num))) + 1);
1271}
1272
1273void
1274mfii_aen_pd_insert(struct mfii_softc *sc,
1275 const struct mfi_evtarg_pd_address *pd)
1276{
1277#if 0
1278 printf("%s: pd inserted ext\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1279 printf("%s: device_id %04x encl_id: %04x type %x\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1280 lemtoh16(&pd->device_id)((__uint16_t)(*(__uint16_t *)(&pd->device_id))), lemtoh16(&pd->encl_id)((__uint16_t)(*(__uint16_t *)(&pd->encl_id))),
1281 pd->scsi_dev_type);
1282 printf("%s: connected %02x addrs %016llx %016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1283 pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0])((__uint64_t)(*(__uint64_t *)(&pd->sas_addr[0]))),
1284 lemtoh64(&pd->sas_addr[1])((__uint64_t)(*(__uint64_t *)(&pd->sas_addr[1]))));
1285#endif
1286
1287 if (mfii_dev_handles_update(sc) != 0) /* refresh map */
1288 return;
1289
1290 scsi_probe_target(sc->sc_pd->pd_scsibus, lemtoh16(&pd->device_id)((__uint16_t)(*(__uint16_t *)(&pd->device_id))));
1291}
1292
1293void
1294mfii_aen_pd_remove(struct mfii_softc *sc,
1295 const struct mfi_evtarg_pd_address *pd)
1296{
1297#if 0
1298 printf("%s: pd removed ext\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1299 printf("%s: device_id %04x encl_id: %04x type %u\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1300 lemtoh16(&pd->device_id)((__uint16_t)(*(__uint16_t *)(&pd->device_id))), lemtoh16(&pd->encl_id)((__uint16_t)(*(__uint16_t *)(&pd->encl_id))),
1301 pd->scsi_dev_type);
1302 printf("%s: connected %02x addrs %016llx %016llx\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1303 pd->connected.port_bitmap, lemtoh64(&pd->sas_addr[0])((__uint64_t)(*(__uint64_t *)(&pd->sas_addr[0]))),
1304 lemtoh64(&pd->sas_addr[1])((__uint64_t)(*(__uint64_t *)(&pd->sas_addr[1]))));
1305#endif
1306 uint16_t target = lemtoh16(&pd->device_id)((__uint16_t)(*(__uint16_t *)(&pd->device_id)));
1307
1308 scsi_activate(sc->sc_pd->pd_scsibus, target, -1, DVACT_DEACTIVATE1);
1309
1310 /* the firmware will abort outstanding commands for us */
1311
1312 scsi_detach_target(sc->sc_pd->pd_scsibus, target, DETACH_FORCE0x01);
1313}
1314
1315void
1316mfii_aen_pd_state_change(struct mfii_softc *sc,
1317 const struct mfi_evtarg_pd_state *state)
1318{
1319 uint16_t target = lemtoh16(&state->pd.mep_device_id)((__uint16_t)(*(__uint16_t *)(&state->pd.mep_device_id
)))
;
1320
1321 if (state->prev_state == htole32(MFI_PD_SYSTEM)((__uint32_t)(0x40)) &&
1322 state->new_state != htole32(MFI_PD_SYSTEM)((__uint32_t)(0x40))) {
1323 /* it's been pulled or configured for raid */
1324
1325 scsi_activate(sc->sc_pd->pd_scsibus, target, -1,
1326 DVACT_DEACTIVATE1);
1327 /* outstanding commands will simply complete or get aborted */
1328 scsi_detach_target(sc->sc_pd->pd_scsibus, target,
1329 DETACH_FORCE0x01);
1330
1331 } else if (state->prev_state == htole32(MFI_PD_UNCONFIG_GOOD)((__uint32_t)(0x00)) &&
1332 state->new_state == htole32(MFI_PD_SYSTEM)((__uint32_t)(0x40))) {
1333 /* the firmware is handing the disk over */
1334
1335 scsi_probe_target(sc->sc_pd->pd_scsibus, target);
1336 }
1337}
1338
1339void
1340mfii_aen_ld_update(struct mfii_softc *sc)
1341{
1342 int i, state, target, old, nld;
1343 int newlds[MFI_MAX_LD64];
1344
1345 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST0x03010000, NULL((void *)0), &sc->sc_ld_list,
1346 sizeof(sc->sc_ld_list), SCSI_DATA_IN0x00800) != 0) {
1347 DNPRINTF(MFII_D_MISC, "%s: getting list of logical disks failed\n",
1348 DEVNAME(sc));
1349 return;
1350 }
1351
1352 memset(newlds, -1, sizeof(newlds))__builtin_memset((newlds), (-1), (sizeof(newlds)));
1353
1354 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
1355 state = sc->sc_ld_list.mll_list[i].mll_state;
1356 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
1357 DNPRINTF(MFII_D_MISC, "%s: target %d: state %d\n",
1358 DEVNAME(sc), target, state);
1359 newlds[target] = i;
1360 }
1361
1362 for (i = 0; i < MFI_MAX_LD64; i++) {
1363 old = sc->sc_target_lds[i];
1364 nld = newlds[i];
1365
1366 if (old == -1 && nld != -1) {
1367 DNPRINTF(MFII_D_MISC, "%s: attaching target %d\n",
1368 DEVNAME(sc), i);
1369
1370 scsi_probe_target(sc->sc_scsibus, i);
1371
1372#ifndef SMALL_KERNEL
1373 mfii_init_ld_sensor(sc, nld);
1374 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[i]);
1375#endif
1376 } else if (nld == -1 && old != -1) {
1377 DNPRINTF(MFII_D_MISC, "%s: detaching target %d\n",
1378 DEVNAME(sc), i);
1379
1380 scsi_activate(sc->sc_scsibus, i, -1,
1381 DVACT_DEACTIVATE1);
1382 scsi_detach_target(sc->sc_scsibus, i,
1383 DETACH_FORCE0x01);
1384#ifndef SMALL_KERNEL
1385 sensor_detach(&sc->sc_sensordev, &sc->sc_sensors[i]);
1386#endif
1387 }
1388 }
1389
1390 memcpy(sc->sc_target_lds, newlds, sizeof(sc->sc_target_lds))__builtin_memcpy((sc->sc_target_lds), (newlds), (sizeof(sc
->sc_target_lds)))
;
1391}
1392
void
mfii_aen_unregister(struct mfii_softc *sc)
{
	/* XXX the aen ccb and its dma buffer are never reclaimed */
}
1398
1399int
1400mfii_transition_firmware(struct mfii_softc *sc)
1401{
1402 int32_t fw_state, cur_state;
1403 int max_wait, i;
1404
1405 fw_state = mfii_fw_state(sc)mfii_read((sc), 0xb0) & MFI_STATE_MASK0xf0000000;
1406
1407 while (fw_state != MFI_STATE_READY0xb0000000) {
1408 cur_state = fw_state;
1409 switch (fw_state) {
1410 case MFI_STATE_FAULT0xf0000000:
1411 printf("%s: firmware fault\n", DEVNAME(sc)((sc)->sc_dev.dv_xname));
1412 return (1);
1413 case MFI_STATE_WAIT_HANDSHAKE0x60000000:
1414 mfii_write(sc, MFI_SKINNY_IDB0x00,
1415 MFI_INIT_CLEAR_HANDSHAKE0x00000008);
1416 max_wait = 2;
1417 break;
1418 case MFI_STATE_OPERATIONAL0xc0000000:
1419 mfii_write(sc, MFI_SKINNY_IDB0x00, MFI_INIT_READY0x00000002);
1420 max_wait = 10;
1421 break;
1422 case MFI_STATE_UNDEFINED0x00000000:
1423 case MFI_STATE_BB_INIT0x10000000:
1424 max_wait = 2;
1425 break;
1426 case MFI_STATE_FW_INIT0x40000000:
1427 case MFI_STATE_DEVICE_SCAN0x80000000:
1428 case MFI_STATE_FLUSH_CACHE0xa0000000:
1429 max_wait = 20;
1430 break;
1431 default:
1432 printf("%s: unknown firmware state %d\n",
1433 DEVNAME(sc)((sc)->sc_dev.dv_xname), fw_state);
1434 return (1);
1435 }
1436 for (i = 0; i < (max_wait * 10); i++) {
1437 fw_state = mfii_fw_state(sc)mfii_read((sc), 0xb0) & MFI_STATE_MASK0xf0000000;
1438 if (fw_state == cur_state)
1439 DELAY(100000)(*delay_func)(100000);
1440 else
1441 break;
1442 }
1443 if (fw_state == cur_state) {
1444 printf("%s: firmware stuck in state %#x\n",
1445 DEVNAME(sc)((sc)->sc_dev.dv_xname), fw_state);
1446 return (1);
1447 }
1448 }
1449
1450 return (0);
1451}
1452
1453int
1454mfii_get_info(struct mfii_softc *sc)
1455{
1456 int i, rv;
1457
1458 rv = mfii_mgmt(sc, MR_DCMD_CTRL_GET_INFO0x01010000, NULL((void *)0), &sc->sc_info,
1459 sizeof(sc->sc_info), SCSI_DATA_IN0x00800|SCSI_NOSLEEP0x00001);
1460
1461 if (rv != 0)
1462 return (rv);
1463
1464 for (i = 0; i < sc->sc_info.mci_image_component_count; i++) {
1465 DPRINTF("%s: active FW %s Version %s date %s time %s\n",
1466 DEVNAME(sc),
1467 sc->sc_info.mci_image_component[i].mic_name,
1468 sc->sc_info.mci_image_component[i].mic_version,
1469 sc->sc_info.mci_image_component[i].mic_build_date,
1470 sc->sc_info.mci_image_component[i].mic_build_time);
1471 }
1472
1473 for (i = 0; i < sc->sc_info.mci_pending_image_component_count; i++) {
1474 DPRINTF("%s: pending FW %s Version %s date %s time %s\n",
1475 DEVNAME(sc),
1476 sc->sc_info.mci_pending_image_component[i].mic_name,
1477 sc->sc_info.mci_pending_image_component[i].mic_version,
1478 sc->sc_info.mci_pending_image_component[i].mic_build_date,
1479 sc->sc_info.mci_pending_image_component[i].mic_build_time);
1480 }
1481
1482 DPRINTF("%s: max_arms %d max_spans %d max_arrs %d max_lds %d name %s\n",
1483 DEVNAME(sc),
1484 sc->sc_info.mci_max_arms,
1485 sc->sc_info.mci_max_spans,
1486 sc->sc_info.mci_max_arrays,
1487 sc->sc_info.mci_max_lds,
1488 sc->sc_info.mci_product_name);
1489
1490 DPRINTF("%s: serial %s present %#x fw time %d max_cmds %d max_sg %d\n",
1491 DEVNAME(sc),
1492 sc->sc_info.mci_serial_number,
1493 sc->sc_info.mci_hw_present,
1494 sc->sc_info.mci_current_fw_time,
1495 sc->sc_info.mci_max_cmds,
1496 sc->sc_info.mci_max_sg_elements);
1497
1498 DPRINTF("%s: max_rq %d lds_pres %d lds_deg %d lds_off %d pd_pres %d\n",
1499 DEVNAME(sc),
1500 sc->sc_info.mci_max_request_size,
1501 sc->sc_info.mci_lds_present,
1502 sc->sc_info.mci_lds_degraded,
1503 sc->sc_info.mci_lds_offline,
1504 sc->sc_info.mci_pd_present);
1505
1506 DPRINTF("%s: pd_dsk_prs %d pd_dsk_pred_fail %d pd_dsk_fail %d\n",
1507 DEVNAME(sc),
1508 sc->sc_info.mci_pd_disks_present,
1509 sc->sc_info.mci_pd_disks_pred_failure,
1510 sc->sc_info.mci_pd_disks_failed);
1511
1512 DPRINTF("%s: nvram %d mem %d flash %d\n",
1513 DEVNAME(sc),
1514 sc->sc_info.mci_nvram_size,
1515 sc->sc_info.mci_memory_size,
1516 sc->sc_info.mci_flash_size);
1517
1518 DPRINTF("%s: ram_cor %d ram_uncor %d clus_all %d clus_act %d\n",
1519 DEVNAME(sc),
1520 sc->sc_info.mci_ram_correctable_errors,
1521 sc->sc_info.mci_ram_uncorrectable_errors,
1522 sc->sc_info.mci_cluster_allowed,
1523 sc->sc_info.mci_cluster_active);
1524
1525 DPRINTF("%s: max_strps_io %d raid_lvl %#x adapt_ops %#x ld_ops %#x\n",
1526 DEVNAME(sc),
1527 sc->sc_info.mci_max_strips_per_io,
1528 sc->sc_info.mci_raid_levels,
1529 sc->sc_info.mci_adapter_ops,
1530 sc->sc_info.mci_ld_ops);
1531
1532 DPRINTF("%s: strp_sz_min %d strp_sz_max %d pd_ops %#x pd_mix %#x\n",
1533 DEVNAME(sc),
1534 sc->sc_info.mci_stripe_sz_ops.min,
1535 sc->sc_info.mci_stripe_sz_ops.max,
1536 sc->sc_info.mci_pd_ops,
1537 sc->sc_info.mci_pd_mix_support);
1538
1539 DPRINTF("%s: ecc_bucket %d pckg_prop %s\n",
1540 DEVNAME(sc),
1541 sc->sc_info.mci_ecc_bucket_count,
1542 sc->sc_info.mci_package_version);
1543
1544 DPRINTF("%s: sq_nm %d prd_fail_poll %d intr_thrtl %d intr_thrtl_to %d\n",
1545 DEVNAME(sc),
1546 sc->sc_info.mci_properties.mcp_seq_num,
1547 sc->sc_info.mci_properties.mcp_pred_fail_poll_interval,
1548 sc->sc_info.mci_properties.mcp_intr_throttle_cnt,
1549 sc->sc_info.mci_properties.mcp_intr_throttle_timeout);
1550
1551 DPRINTF("%s: rbld_rate %d patr_rd_rate %d bgi_rate %d cc_rate %d\n",
1552 DEVNAME(sc),
1553 sc->sc_info.mci_properties.mcp_rebuild_rate,
1554 sc->sc_info.mci_properties.mcp_patrol_read_rate,
1555 sc->sc_info.mci_properties.mcp_bgi_rate,
1556 sc->sc_info.mci_properties.mcp_cc_rate);
1557
1558 DPRINTF("%s: rc_rate %d ch_flsh %d spin_cnt %d spin_dly %d clus_en %d\n",
1559 DEVNAME(sc),
1560 sc->sc_info.mci_properties.mcp_recon_rate,
1561 sc->sc_info.mci_properties.mcp_cache_flush_interval,
1562 sc->sc_info.mci_properties.mcp_spinup_drv_cnt,
1563 sc->sc_info.mci_properties.mcp_spinup_delay,
1564 sc->sc_info.mci_properties.mcp_cluster_enable);
1565
1566 DPRINTF("%s: coerc %d alarm %d dis_auto_rbld %d dis_bat_wrn %d ecc %d\n",
1567 DEVNAME(sc),
1568 sc->sc_info.mci_properties.mcp_coercion_mode,
1569 sc->sc_info.mci_properties.mcp_alarm_enable,
1570 sc->sc_info.mci_properties.mcp_disable_auto_rebuild,
1571 sc->sc_info.mci_properties.mcp_disable_battery_warn,
1572 sc->sc_info.mci_properties.mcp_ecc_bucket_size);
1573
1574 DPRINTF("%s: ecc_leak %d rest_hs %d exp_encl_dev %d\n",
1575 DEVNAME(sc),
1576 sc->sc_info.mci_properties.mcp_ecc_bucket_leak_rate,
1577 sc->sc_info.mci_properties.mcp_restore_hotspare_on_insertion,
1578 sc->sc_info.mci_properties.mcp_expose_encl_devices);
1579
1580 DPRINTF("%s: vendor %#x device %#x subvendor %#x subdevice %#x\n",
1581 DEVNAME(sc),
1582 sc->sc_info.mci_pci.mip_vendor,
1583 sc->sc_info.mci_pci.mip_device,
1584 sc->sc_info.mci_pci.mip_subvendor,
1585 sc->sc_info.mci_pci.mip_subdevice);
1586
1587 DPRINTF("%s: type %#x port_count %d port_addr ",
1588 DEVNAME(sc),
1589 sc->sc_info.mci_host.mih_type,
1590 sc->sc_info.mci_host.mih_port_count);
1591
1592 for (i = 0; i < 8; i++)
1593 DPRINTF("%.0llx ", sc->sc_info.mci_host.mih_port_addr[i]);
1594 DPRINTF("\n");
1595
1596 DPRINTF("%s: type %.x port_count %d port_addr ",
1597 DEVNAME(sc),
1598 sc->sc_info.mci_device.mid_type,
1599 sc->sc_info.mci_device.mid_port_count);
1600
1601 for (i = 0; i < 8; i++)
1602 DPRINTF("%.0llx ", sc->sc_info.mci_device.mid_port_addr[i]);
1603 DPRINTF("\n");
1604
1605 return (0);
1606}
1607
1608int
1609mfii_mfa_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1610{
1611 struct mfi_frame_header *hdr = ccb->ccb_request;
1612 u_int64_t r;
1613 int to = 0, rv = 0;
1614
1615#ifdef DIAGNOSTIC1
1616 if (ccb->ccb_cookie != NULL((void *)0) || ccb->ccb_done != NULL((void *)0))
1617 panic("mfii_mfa_poll called with cookie or done set");
1618#endif
1619
1620 hdr->mfh_context = ccb->ccb_smid;
1621 hdr->mfh_cmd_status = MFI_STAT_INVALID_STATUS;
1622 hdr->mfh_flags |= htole16(MFI_FRAME_DONT_POST_IN_REPLY_QUEUE)((__uint16_t)(0x0001));
1623
1624 r = MFII_REQ_MFA(ccb->ccb_request_dva)((__uint64_t)((ccb->ccb_request_dva) | (0x1 << 1)));
1625 memcpy(&ccb->ccb_req, &r, sizeof(ccb->ccb_req))__builtin_memcpy((&ccb->ccb_req), (&r), (sizeof(ccb
->ccb_req)))
;
1626
1627 mfii_start(sc, ccb);
1628
1629 for (;;) {
1630 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x02 | 0x08))
1631 ccb->ccb_request_offset, MFII_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x02 | 0x08))
1632 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x02 | 0x08))
;
1633
1634 if (hdr->mfh_cmd_status != MFI_STAT_INVALID_STATUS)
1635 break;
1636
1637 if (to++ > 5000) { /* XXX 5 seconds busywait sucks */
1638 printf("%s: timeout on ccb %d\n", DEVNAME(sc)((sc)->sc_dev.dv_xname),
1639 ccb->ccb_smid);
1640 ccb->ccb_flags |= MFI_CCB_F_ERR(1<<0);
1641 rv = 1;
1642 break;
1643 }
1644
1645 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x01 | 0x04))
1646 ccb->ccb_request_offset, MFII_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x01 | 0x04))
1647 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x01 | 0x04))
;
1648
1649 delay(1000)(*delay_func)(1000);
1650 }
1651
1652 if (ccb->ccb_len > 0) {
1653 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
1654 0, ccb->ccb_dmamap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
1655 (ccb->ccb_direction == MFII_DATA_IN) ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
1656 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
;
1657
1658 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
1659 }
1660
1661 return (rv);
1662}
1663
1664int
1665mfii_poll(struct mfii_softc *sc, struct mfii_ccb *ccb)
1666{
1667 void (*done)(struct mfii_softc *, struct mfii_ccb *);
1668 void *cookie;
1669 int rv = 1;
1670
1671 done = ccb->ccb_done;
1672 cookie = ccb->ccb_cookie;
1673
1674 ccb->ccb_done = mfii_poll_done;
1675 ccb->ccb_cookie = &rv;
1676
1677 mfii_start(sc, ccb);
1678
1679 do {
1680 delay(10)(*delay_func)(10);
1681 mfii_postq(sc);
1682 } while (rv == 1);
1683
1684 ccb->ccb_cookie = cookie;
1685 done(sc, ccb);
1686
1687 return (0);
1688}
1689
1690void
1691mfii_poll_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1692{
1693 int *rv = ccb->ccb_cookie;
1694
1695 *rv = 0;
1696}
1697
1698int
1699mfii_exec(struct mfii_softc *sc, struct mfii_ccb *ccb)
1700{
1701 struct mutex m = MUTEX_INITIALIZER(IPL_BIO){ ((void *)0), ((((0x6)) > 0x0 && ((0x6)) < 0x9
) ? 0x9 : ((0x6))), 0x0 }
;
1702
1703#ifdef DIAGNOSTIC1
1704 if (ccb->ccb_cookie != NULL((void *)0) || ccb->ccb_done != NULL((void *)0))
1705 panic("mfii_exec called with cookie or done set");
1706#endif
1707
1708 ccb->ccb_cookie = &m;
1709 ccb->ccb_done = mfii_exec_done;
1710
1711 mfii_start(sc, ccb);
1712
1713 mtx_enter(&m);
1714 while (ccb->ccb_cookie != NULL((void *)0))
1715 msleep_nsec(ccb, &m, PRIBIO16, "mfiiexec", INFSLP0xffffffffffffffffULL);
1716 mtx_leave(&m);
1717
1718 return (0);
1719}
1720
1721void
1722mfii_exec_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1723{
1724 struct mutex *m = ccb->ccb_cookie;
1725
1726 mtx_enter(m);
1727 ccb->ccb_cookie = NULL((void *)0);
1728 wakeup_one(ccb)wakeup_n((ccb), 1);
1729 mtx_leave(m);
1730}
1731
1732int
1733mfii_mgmt(struct mfii_softc *sc, uint32_t opc, const union mfi_mbox *mbox,
1734 void *buf, size_t len, int flags)
1735{
1736 struct mfii_ccb *ccb;
1737 int rv;
1738
1739 ccb = scsi_io_get(&sc->sc_iopool, flags);
1740 if (ccb == NULL((void *)0))
6
Assuming 'ccb' is not equal to NULL
7
Taking false branch
1741 return (ENOMEM12);
1742
1743 mfii_scrub_ccb(ccb);
1744 rv = mfii_do_mgmt(sc, ccb, opc, mbox, buf, len, flags);
8
Passing 'pl' via 5th parameter 'buf'
9
Calling 'mfii_do_mgmt'
1745 scsi_io_put(&sc->sc_iopool, ccb);
1746
1747 return (rv);
1748}
1749
1750int
1751mfii_do_mgmt(struct mfii_softc *sc, struct mfii_ccb *ccb, uint32_t opc,
1752 const union mfi_mbox *mbox, void *buf, size_t len, int flags)
1753{
1754 struct mpii_msg_scsi_io *io = ccb->ccb_request;
1755 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
1756 struct mfii_sge *sge = (struct mfii_sge *)(ctx + 1);
1757 struct mfi_dcmd_frame *dcmd = ccb->ccb_mfi;
1758 struct mfi_frame_header *hdr = &dcmd->mdf_header;
1759 u_int8_t *dma_buf = NULL((void *)0);
1760 int rv = EIO5;
1761
1762 if (cold)
10
Assuming 'cold' is 0
11
Taking false branch
1763 flags |= SCSI_NOSLEEP0x00001;
1764
1765 if (buf != NULL((void *)0)) {
12
Assuming 'buf' is equal to NULL
13
Taking false branch
1766 dma_buf = dma_alloc(len, PR_WAITOK0x0001);
1767 if (dma_buf == NULL((void *)0))
1768 return (ENOMEM12);
1769 }
1770
1771 ccb->ccb_data = dma_buf;
1772 ccb->ccb_len = len;
1773 switch (flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
14
Control jumps to 'case 2048:' at line 1774
1774 case SCSI_DATA_IN0x00800:
1775 ccb->ccb_direction = MFII_DATA_IN1;
1776 hdr->mfh_flags = htole16(MFI_FRAME_DIR_READ)((__uint16_t)(0x0010));
1777 break;
15
Execution continues on line 1789
1778 case SCSI_DATA_OUT0x01000:
1779 ccb->ccb_direction = MFII_DATA_OUT2;
1780 hdr->mfh_flags = htole16(MFI_FRAME_DIR_WRITE)((__uint16_t)(0x0008));
1781 memcpy(dma_buf, buf, len)__builtin_memcpy((dma_buf), (buf), (len));
1782 break;
1783 case 0:
1784 ccb->ccb_direction = MFII_DATA_NONE0;
1785 hdr->mfh_flags = htole16(MFI_FRAME_DIR_NONE)((__uint16_t)(0x0000));
1786 break;
1787 }
1788
1789 if (mfii_load_mfa(sc, ccb, &dcmd->mdf_sgl,
16
Assuming the condition is false
17
Taking false branch
1790 ISSET(flags, SCSI_NOSLEEP)((flags) & (0x00001))) != 0) {
1791 rv = ENOMEM12;
1792 goto done;
1793 }
1794
1795 hdr->mfh_cmd = MFI_CMD_DCMD0x05;
1796 hdr->mfh_context = ccb->ccb_smid;
1797 hdr->mfh_data_len = htole32(len)((__uint32_t)(len));
1798 hdr->mfh_sg_count = len
17.1
'len' is 6152
? ccb->ccb_dmamap->dm_nsegs : 0;
18
'?' condition is true
1799
1800 dcmd->mdf_opcode = opc;
1801 /* handle special opcodes */
1802 if (mbox
18.1
'mbox' is equal to NULL
!= NULL((void *)0))
19
Taking false branch
1803 memcpy(&dcmd->mdf_mbox, mbox, sizeof(dcmd->mdf_mbox))__builtin_memcpy((&dcmd->mdf_mbox), (mbox), (sizeof(dcmd
->mdf_mbox)))
;
1804
1805 io->function = MFII_FUNCTION_PASSTHRU_IO(0xf0);
1806
1807 if (len
19.1
'len' is 6152
) {
20
Taking true branch
1808 io->sgl_offset0 = ((u_int8_t *)sge - (u_int8_t *)io) / 4;
1809 io->chain_offset = ((u_int8_t *)sge - (u_int8_t *)io) / 16;
1810 htolem64(&sge->sg_addr, ccb->ccb_mfi_dva)(*(__uint64_t *)(&sge->sg_addr) = ((__uint64_t)(ccb->
ccb_mfi_dva)))
;
1811 htolem32(&sge->sg_len, MFI_FRAME_SIZE)(*(__uint32_t *)(&sge->sg_len) = ((__uint32_t)(64)));
1812 sge->sg_flags =
1813 MFII_SGE_CHAIN_ELEMENT(0x80) | MFII_SGE_ADDR_IOCPLBNTA(0x03);
1814 }
1815
1816 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI(0x00);
1817 ccb->ccb_req.smid = letoh16(ccb->ccb_smid)((__uint16_t)(ccb->ccb_smid));
1818
1819 if (ISSET(flags, SCSI_NOSLEEP)((flags) & (0x00001))) {
21
Taking false branch
1820 ccb->ccb_done = mfii_empty_done;
1821 mfii_poll(sc, ccb);
1822 } else
1823 mfii_exec(sc, ccb);
1824
1825 if (hdr->mfh_cmd_status == MFI_STAT_OK) {
22
Assuming field 'mfh_cmd_status' is equal to MFI_STAT_OK
23
Taking true branch
1826 rv = 0;
1827
1828 if (ccb->ccb_direction == MFII_DATA_IN1)
24
Assuming field 'ccb_direction' is equal to MFII_DATA_IN
25
Taking true branch
1829 memcpy(buf, dma_buf, len)__builtin_memcpy((buf), (dma_buf), (len));
26
Null pointer passed as 1st argument to memory copy function
1830 }
1831
1832done:
1833 if (buf != NULL((void *)0))
1834 dma_free(dma_buf, len);
1835
1836 return (rv);
1837}
1838
void
mfii_empty_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
{
	/* no-op completion hook used for polled management commands */
}
1844
1845int
1846mfii_load_mfa(struct mfii_softc *sc, struct mfii_ccb *ccb,
1847 void *sglp, int nosleep)
1848{
1849 union mfi_sgl *sgl = sglp;
1850 bus_dmamap_t dmap = ccb->ccb_dmamap;
1851 int error;
1852 int i;
1853
1854 if (ccb->ccb_len == 0)
1855 return (0);
1856
1857 error = bus_dmamap_load(sc->sc_dmat, dmap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (ccb->ccb_data), (ccb->ccb_len), (((void *)0)), (nosleep
? 0x0001 : 0x0000))
1858 ccb->ccb_data, ccb->ccb_len, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (ccb->ccb_data), (ccb->ccb_len), (((void *)0)), (nosleep
? 0x0001 : 0x0000))
1859 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (ccb->ccb_data), (ccb->ccb_len), (((void *)0)), (nosleep
? 0x0001 : 0x0000))
;
1860 if (error) {
1861 printf("%s: error %d loading dmamap\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
1862 return (1);
1863 }
1864
1865 for (i = 0; i < dmap->dm_nsegs; i++) {
1866 sgl->sg32[i].addr = htole32(dmap->dm_segs[i].ds_addr)((__uint32_t)(dmap->dm_segs[i].ds_addr));
1867 sgl->sg32[i].len = htole32(dmap->dm_segs[i].ds_len)((__uint32_t)(dmap->dm_segs[i].ds_len));
1868 }
1869
1870 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), (ccb->ccb_direction == 2 ? 0x04
: 0x01))
1871 ccb->ccb_direction == MFII_DATA_OUT ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), (ccb->ccb_direction == 2 ? 0x04
: 0x01))
1872 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), (ccb->ccb_direction == 2 ? 0x04
: 0x01))
;
1873
1874 return (0);
1875}
1876
1877void
1878mfii_start(struct mfii_softc *sc, struct mfii_ccb *ccb)
1879{
1880 u_long *r = (u_long *)&ccb->ccb_req;
1881
1882 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x01 | 0x04))
1883 ccb->ccb_request_offset, MFII_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x01 | 0x04))
1884 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x01 | 0x04))
;
1885
1886#if defined(__LP64__1)
1887 bus_space_write_raw_8(sc->sc_iot, sc->sc_ioh, MFI_IQPL, *r)((sc->sc_iot)->write_8((sc->sc_ioh), (0x000000c0), (
*r)))
;
1888#else
1889 mtx_enter(&sc->sc_post_mtx);
1890 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPL, r[0])((sc->sc_iot)->write_4((sc->sc_ioh), (0x000000c0), (
r[0])))
;
1891 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1892 MFI_IQPL0x000000c0, 8, BUS_SPACE_BARRIER_WRITE0x02);
1893
1894 bus_space_write_raw_4(sc->sc_iot, sc->sc_ioh, MFI_IQPH, r[1])((sc->sc_iot)->write_4((sc->sc_ioh), (0x000000c4), (
r[1])))
;
1895 bus_space_barrier(sc->sc_iot, sc->sc_ioh,
1896 MFI_IQPH0x000000c4, 8, BUS_SPACE_BARRIER_WRITE0x02);
1897 mtx_leave(&sc->sc_post_mtx);
1898#endif
1899}
1900
1901void
1902mfii_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
1903{
1904 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_requests),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x02 | 0x08))
1905 ccb->ccb_request_offset, MFII_REQUEST_SIZE,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x02 | 0x08))
1906 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_requests)->mdm_map)), (ccb->ccb_request_offset), (256
), (0x02 | 0x08))
;
1907
1908 if (ccb->ccb_sgl_len > 0) {
1909 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sgl)->mdm_map)), (ccb->ccb_sgl_offset), (ccb->ccb_sgl_len
), (0x08))
1910 ccb->ccb_sgl_offset, ccb->ccb_sgl_len,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sgl)->mdm_map)), (ccb->ccb_sgl_offset), (ccb->ccb_sgl_len
), (0x08))
1911 BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sgl)->mdm_map)), (ccb->ccb_sgl_offset), (ccb->ccb_sgl_len
), (0x08))
;
1912 }
1913
1914 if (ccb->ccb_len > 0) {
1915 bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
1916 0, ccb->ccb_dmamap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
1917 (ccb->ccb_direction == MFII_DATA_IN) ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
1918 BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (ccb->
ccb_dmamap), (0), (ccb->ccb_dmamap->dm_mapsize), ((ccb->
ccb_direction == 1) ? 0x02 : 0x08))
;
1919
1920 bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_unload)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
1921 }
1922
1923 ccb->ccb_done(sc, ccb);
1924}
1925
1926int
1927mfii_initialise_firmware(struct mfii_softc *sc)
1928{
1929 struct mpii_msg_iocinit_request *iiq;
1930 struct mfii_dmamem *m;
1931 struct mfii_ccb *ccb;
1932 struct mfi_init_frame *init;
1933 int rv;
1934
1935 m = mfii_dmamem_alloc(sc, sizeof(*iiq));
1936 if (m == NULL((void *)0))
1937 return (1);
1938
1939 iiq = MFII_DMA_KVA(m)((void *)(m)->mdm_kva);
1940 memset(iiq, 0, sizeof(*iiq))__builtin_memset((iiq), (0), (sizeof(*iiq)));
1941
1942 iiq->function = MPII_FUNCTION_IOC_INIT(0x02);
1943 iiq->whoinit = MPII_WHOINIT_HOST_DRIVER(0x04);
1944
1945 iiq->msg_version_maj = 0x02;
1946 iiq->msg_version_min = 0x00;
1947 iiq->hdr_version_unit = 0x10;
1948 iiq->hdr_version_dev = 0x0;
1949
1950 iiq->system_request_frame_size = htole16(MFII_REQUEST_SIZE / 4)((__uint16_t)(256 / 4));
1951
1952 iiq->reply_descriptor_post_queue_depth =
1953 htole16(sc->sc_reply_postq_depth)((__uint16_t)(sc->sc_reply_postq_depth));
1954 iiq->reply_free_queue_depth = htole16(0)((__uint16_t)(0));
1955
1956 htolem32(&iiq->sense_buffer_address_high,(*(__uint32_t *)(&iiq->sense_buffer_address_high) = ((
__uint32_t)(((u_int64_t)(sc->sc_sense)->mdm_map->dm_segs
[0].ds_addr) >> 32)))
1957 MFII_DMA_DVA(sc->sc_sense) >> 32)(*(__uint32_t *)(&iiq->sense_buffer_address_high) = ((
__uint32_t)(((u_int64_t)(sc->sc_sense)->mdm_map->dm_segs
[0].ds_addr) >> 32)))
;
1958
1959 htolem32(&iiq->reply_descriptor_post_queue_address_lo,(*(__uint32_t *)(&iiq->reply_descriptor_post_queue_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr))))
1960 MFII_DMA_DVA(sc->sc_reply_postq))(*(__uint32_t *)(&iiq->reply_descriptor_post_queue_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr))))
;
1961 htolem32(&iiq->reply_descriptor_post_queue_address_hi,(*(__uint32_t *)(&iiq->reply_descriptor_post_queue_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
1962 MFII_DMA_DVA(sc->sc_reply_postq) >> 32)(*(__uint32_t *)(&iiq->reply_descriptor_post_queue_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_reply_postq)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
;
1963
1964 htolem32(&iiq->system_request_frame_base_address_lo,(*(__uint32_t *)(&iiq->system_request_frame_base_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr))))
1965 MFII_DMA_DVA(sc->sc_requests))(*(__uint32_t *)(&iiq->system_request_frame_base_address_lo
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr))))
;
1966 htolem32(&iiq->system_request_frame_base_address_hi,(*(__uint32_t *)(&iiq->system_request_frame_base_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
1967 MFII_DMA_DVA(sc->sc_requests) >> 32)(*(__uint32_t *)(&iiq->system_request_frame_base_address_hi
) = ((__uint32_t)(((u_int64_t)(sc->sc_requests)->mdm_map
->dm_segs[0].ds_addr) >> 32)))
;
1968
1969 iiq->timestamp = htole64(getuptime())((__uint64_t)(getuptime()));
1970
1971 ccb = scsi_io_get(&sc->sc_iopool, SCSI_NOSLEEP0x00001);
1972 if (ccb == NULL((void *)0)) {
1973 /* shouldn't ever run out of ccbs during attach */
1974 return (1);
1975 }
1976 mfii_scrub_ccb(ccb);
1977 init = ccb->ccb_request;
1978
1979 init->mif_header.mfh_cmd = MFI_CMD_INIT0x00;
1980 init->mif_header.mfh_data_len = htole32(sizeof(*iiq))((__uint32_t)(sizeof(*iiq)));
1981 init->mif_qinfo_new_addr = htole64(MFII_DMA_DVA(m))((__uint64_t)(((u_int64_t)(m)->mdm_map->dm_segs[0].ds_addr
)))
;
1982
1983 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x01))
1984 0, MFII_DMA_LEN(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x01))
1985 BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x01))
;
1986
1987 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((m)->
mdm_map)), (0), (sizeof(*iiq)), (0x01))
1988 0, sizeof(*iiq), BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((m)->
mdm_map)), (0), (sizeof(*iiq)), (0x01))
;
1989
1990 rv = mfii_mfa_poll(sc, ccb);
1991
1992 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(m),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((m)->
mdm_map)), (0), (sizeof(*iiq)), (0x02))
1993 0, sizeof(*iiq), BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((m)->
mdm_map)), (0), (sizeof(*iiq)), (0x02))
;
1994
1995 scsi_io_put(&sc->sc_iopool, ccb);
1996 mfii_dmamem_free(sc, m);
1997
1998 return (rv);
1999}
2000
2001int
2002mfii_my_intr(struct mfii_softc *sc)
2003{
2004 u_int32_t status;
2005
2006 status = mfii_read(sc, MFI_OSTS0x30);
2007 if (ISSET(status, 0x1)((status) & (0x1))) {
2008 mfii_write(sc, MFI_OSTS0x30, status);
2009 return (1);
2010 }
2011
2012 return (ISSET(status, MFII_OSTS_INTR_VALID)((status) & (0x00000009)) ? 1 : 0);
2013}
2014
int
mfii_intr(void *arg)
{
	struct mfii_softc *sc = arg;

	/* not ours? let other handlers on the line have a look */
	if (!mfii_my_intr(sc))
		return (0);

	mfii_postq(sc);

	return (1);
}
2027
2028void
2029mfii_postq(struct mfii_softc *sc)
2030{
2031 struct mfii_ccb_list ccbs = SIMPLEQ_HEAD_INITIALIZER(ccbs){ ((void *)0), &(ccbs).sqh_first };
2032 struct mpii_reply_descr *postq = MFII_DMA_KVA(sc->sc_reply_postq)((void *)(sc->sc_reply_postq)->mdm_kva);
2033 struct mpii_reply_descr *rdp;
2034 struct mfii_ccb *ccb;
2035 int rpi = 0;
2036
2037 mtx_enter(&sc->sc_reply_postq_mtx);
2038
2039 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x02))
2040 0, MFII_DMA_LEN(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x02))
2041 BUS_DMASYNC_POSTREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x02))
;
2042
2043 for (;;) {
2044 rdp = &postq[sc->sc_reply_postq_index];
2045 if ((rdp->reply_flags & MPII_REPLY_DESCR_TYPE_MASK(0x0f)) ==
2046 MPII_REPLY_DESCR_UNUSED(0x0f))
2047 break;
2048 if (rdp->data == 0xffffffff) {
2049 /*
2050 * ioc is still writing to the reply post queue
2051 * race condition - bail!
2052 */
2053 break;
2054 }
2055
2056 ccb = &sc->sc_ccb[letoh16(rdp->smid)((__uint16_t)(rdp->smid)) - 1];
2057 SIMPLEQ_INSERT_TAIL(&ccbs, ccb, ccb_link)do { (ccb)->ccb_link.sqe_next = ((void *)0); *(&ccbs)->
sqh_last = (ccb); (&ccbs)->sqh_last = &(ccb)->ccb_link
.sqe_next; } while (0)
;
2058 memset(rdp, 0xff, sizeof(*rdp))__builtin_memset((rdp), (0xff), (sizeof(*rdp)));
2059
2060 sc->sc_reply_postq_index++;
2061 sc->sc_reply_postq_index %= sc->sc_reply_postq_depth;
2062 rpi = 1;
2063 }
2064
2065 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x01))
2066 0, MFII_DMA_LEN(sc->sc_reply_postq),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x01))
2067 BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_reply_postq)->mdm_map)), (0), (((sc->sc_reply_postq)
->mdm_size)), (0x01))
;
2068
2069 if (rpi)
2070 mfii_write(sc, MFII_RPI0x6c, sc->sc_reply_postq_index);
2071
2072 mtx_leave(&sc->sc_reply_postq_mtx);
2073
2074 while ((ccb = SIMPLEQ_FIRST(&ccbs)((&ccbs)->sqh_first)) != NULL((void *)0)) {
2075 SIMPLEQ_REMOVE_HEAD(&ccbs, ccb_link)do { if (((&ccbs)->sqh_first = (&ccbs)->sqh_first
->ccb_link.sqe_next) == ((void *)0)) (&ccbs)->sqh_last
= &(&ccbs)->sqh_first; } while (0)
;
2076 mfii_done(sc, ccb);
2077 }
2078}
2079
2080void
2081mfii_scsi_cmd(struct scsi_xfer *xs)
2082{
2083 struct scsi_link *link = xs->sc_link;
2084 struct mfii_softc *sc = link->bus->sb_adapter_softc;
2085 struct mfii_ccb *ccb = xs->io;
2086
2087 mfii_scrub_ccb(ccb);
2088 ccb->ccb_cookie = xs;
2089 ccb->ccb_done = mfii_scsi_cmd_done;
2090 ccb->ccb_data = xs->data;
2091 ccb->ccb_len = xs->datalen;
2092
2093 timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);
2094
2095 switch (xs->cmd.opcode) {
2096 case READ_COMMAND0x08:
2097 case READ_100x28:
2098 case READ_120xa8:
2099 case READ_160x88:
2100 case WRITE_COMMAND0x0a:
2101 case WRITE_100x2a:
2102 case WRITE_120xaa:
2103 case WRITE_160x8a:
2104 if (mfii_scsi_cmd_io(sc, xs) != 0)
2105 goto stuffup;
2106
2107 break;
2108
2109 default:
2110 if (mfii_scsi_cmd_cdb(sc, xs) != 0)
2111 goto stuffup;
2112 break;
2113 }
2114
2115 xs->error = XS_NOERROR0;
2116 xs->resid = 0;
2117
2118 if (ISSET(xs->flags, SCSI_POLL)((xs->flags) & (0x00002))) {
2119 if (mfii_poll(sc, ccb) != 0)
2120 goto stuffup;
2121 return;
2122 }
2123
2124 ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
2125 timeout_add_msec(&xs->stimeout, xs->timeout);
2126 mfii_start(sc, ccb);
2127
2128 return;
2129
2130stuffup:
2131 xs->error = XS_DRIVER_STUFFUP2;
2132 scsi_done(xs);
2133}
2134
2135void
2136mfii_scsi_cmd_done(struct mfii_softc *sc, struct mfii_ccb *ccb)
2137{
2138 struct scsi_xfer *xs = ccb->ccb_cookie;
2139 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2140 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2141 u_int refs = 1;
2142
2143 if (timeout_del(&xs->stimeout))
2144 refs = 2;
2145
2146 switch (ctx->status) {
2147 case MFI_STAT_OK:
2148 break;
2149
2150 case MFI_STAT_SCSI_DONE_WITH_ERROR:
2151 xs->error = XS_SENSE1;
2152 memset(&xs->sense, 0, sizeof(xs->sense))__builtin_memset((&xs->sense), (0), (sizeof(xs->sense
)))
;
2153 memcpy(&xs->sense, ccb->ccb_sense, sizeof(xs->sense))__builtin_memcpy((&xs->sense), (ccb->ccb_sense), (sizeof
(xs->sense)))
;
2154 break;
2155
2156 case MFI_STAT_LD_OFFLINE:
2157 case MFI_STAT_DEVICE_NOT_FOUND:
2158 xs->error = XS_SELTIMEOUT3;
2159 break;
2160
2161 default:
2162 xs->error = XS_DRIVER_STUFFUP2;
2163 break;
2164 }
2165
2166 if (atomic_sub_int_nv(&ccb->ccb_refcnt, refs)_atomic_sub_int_nv(&ccb->ccb_refcnt, refs) == 0)
2167 scsi_done(xs);
2168}
2169
2170int
2171mfii_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag)
2172{
2173 struct mfii_softc *sc = link->bus->sb_adapter_softc;
2174
2175 DNPRINTF(MFII_D_IOCTL, "%s: mfii_scsi_ioctl\n", DEVNAME(sc));
2176
2177 switch (cmd) {
2178 case DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((117)))
:
2179 case DIOCSCACHE((unsigned long)0x80000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((118)))
:
2180 return (mfii_ioctl_cache(link, cmd, (struct dk_cache *)addr));
2181 break;
2182
2183 default:
2184 if (sc->sc_ioctl)
2185 return (sc->sc_ioctl(&sc->sc_dev, cmd, addr));
2186 break;
2187 }
2188
2189 return (ENOTTY25);
2190}
2191
2192int
2193mfii_ioctl_cache(struct scsi_link *link, u_long cmd, struct dk_cache *dc)
2194{
2195 struct mfii_softc *sc = link->bus->sb_adapter_softc;
2196 int rv, wrenable, rdenable;
2197 struct mfi_ld_prop ldp;
2198 union mfi_mbox mbox;
2199
2200 if (mfii_get_info(sc)) {
2201 rv = EIO5;
2202 goto done;
2203 }
2204
2205 if (sc->sc_target_lds[link->target] == -1) {
2206 rv = EIO5;
2207 goto done;
2208 }
2209
2210 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
2211 mbox.b[0] = link->target;
2212 rv = mfii_mgmt(sc, MR_DCMD_LD_GET_PROPERTIES0x03030000, &mbox, &ldp, sizeof(ldp),
2213 SCSI_DATA_IN0x00800);
2214 if (rv != 0)
2215 goto done;
2216
2217 if (sc->sc_info.mci_memory_size > 0) {
2218 wrenable = ISSET(ldp.mlp_cur_cache_policy,((ldp.mlp_cur_cache_policy) & (0x20))
2219 MR_LD_CACHE_ALLOW_WRITE_CACHE)((ldp.mlp_cur_cache_policy) & (0x20))? 1 : 0;
2220 rdenable = ISSET(ldp.mlp_cur_cache_policy,((ldp.mlp_cur_cache_policy) & (0x40))
2221 MR_LD_CACHE_ALLOW_READ_CACHE)((ldp.mlp_cur_cache_policy) & (0x40))? 1 : 0;
2222 } else {
2223 wrenable = ISSET(ldp.mlp_diskcache_policy,((ldp.mlp_diskcache_policy) & (0x01))
2224 MR_LD_DISK_CACHE_ENABLE)((ldp.mlp_diskcache_policy) & (0x01))? 1 : 0;
2225 rdenable = 0;
2226 }
2227
2228 if (cmd == DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff
) << 16) | ((('d')) << 8) | ((117)))
) {
2229 dc->wrcache = wrenable;
2230 dc->rdcache = rdenable;
2231 goto done;
2232 } /* else DIOCSCACHE */
2233
2234 if (((dc->wrcache) ? 1 : 0) == wrenable &&
2235 ((dc->rdcache) ? 1 : 0) == rdenable)
2236 goto done;
2237
2238 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
2239 mbox.b[0] = ldp.mlp_ld.mld_target;
2240 mbox.b[1] = ldp.mlp_ld.mld_res;
2241 mbox.s[1] = ldp.mlp_ld.mld_seq;
2242
2243 if (sc->sc_info.mci_memory_size > 0) {
2244 if (dc->rdcache)
2245 SET(ldp.mlp_cur_cache_policy,((ldp.mlp_cur_cache_policy) |= (0x40))
2246 MR_LD_CACHE_ALLOW_READ_CACHE)((ldp.mlp_cur_cache_policy) |= (0x40));
2247 else
2248 CLR(ldp.mlp_cur_cache_policy,((ldp.mlp_cur_cache_policy) &= ~(0x40))
2249 MR_LD_CACHE_ALLOW_READ_CACHE)((ldp.mlp_cur_cache_policy) &= ~(0x40));
2250 if (dc->wrcache)
2251 SET(ldp.mlp_cur_cache_policy,((ldp.mlp_cur_cache_policy) |= (0x20))
2252 MR_LD_CACHE_ALLOW_WRITE_CACHE)((ldp.mlp_cur_cache_policy) |= (0x20));
2253 else
2254 CLR(ldp.mlp_cur_cache_policy,((ldp.mlp_cur_cache_policy) &= ~(0x20))
2255 MR_LD_CACHE_ALLOW_WRITE_CACHE)((ldp.mlp_cur_cache_policy) &= ~(0x20));
2256 } else {
2257 if (dc->rdcache) {
2258 rv = EOPNOTSUPP45;
2259 goto done;
2260 }
2261 if (dc->wrcache)
2262 ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_ENABLE0x01;
2263 else
2264 ldp.mlp_diskcache_policy = MR_LD_DISK_CACHE_DISABLE0x02;
2265 }
2266
2267 rv = mfii_mgmt(sc, MR_DCMD_LD_SET_PROPERTIES0x03040000, &mbox, &ldp, sizeof(ldp),
2268 SCSI_DATA_OUT0x01000);
2269done:
2270 return (rv);
2271}
2272
2273int
2274mfii_scsi_cmd_io(struct mfii_softc *sc, struct scsi_xfer *xs)
2275{
2276 struct scsi_link *link = xs->sc_link;
2277 struct mfii_ccb *ccb = xs->io;
2278 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2279 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2280 int segs;
2281
2282 io->dev_handle = htole16(link->target)((__uint16_t)(link->target));
2283 io->function = MFII_FUNCTION_LDIO_REQUEST(0xf1);
2284 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva)((__uint32_t)(ccb->ccb_sense_dva));
2285 io->sgl_flags = htole16(0x02)((__uint16_t)(0x02)); /* XXX */
2286 io->sense_buffer_length = sizeof(xs->sense);
2287 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2288 io->data_length = htole32(xs->datalen)((__uint32_t)(xs->datalen));
2289 io->io_flags = htole16(xs->cmdlen)((__uint16_t)(xs->cmdlen));
2290 switch (xs->flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
2291 case SCSI_DATA_IN0x00800:
2292 ccb->ccb_direction = MFII_DATA_IN1;
2293 io->direction = MPII_SCSIIO_DIR_READ(0x2);
2294 break;
2295 case SCSI_DATA_OUT0x01000:
2296 ccb->ccb_direction = MFII_DATA_OUT2;
2297 io->direction = MPII_SCSIIO_DIR_WRITE(0x1);
2298 break;
2299 default:
2300 ccb->ccb_direction = MFII_DATA_NONE0;
2301 io->direction = MPII_SCSIIO_DIR_NONE(0x0);
2302 break;
2303 }
2304 memcpy(io->cdb, &xs->cmd, xs->cmdlen)__builtin_memcpy((io->cdb), (&xs->cmd), (xs->cmdlen
))
;
2305
2306 ctx->type_nseg = sc->sc_iop->ldio_ctx_type_nseg;
2307 ctx->timeout_value = htole16(0x14)((__uint16_t)(0x14)); /* XXX */
2308 ctx->reg_lock_flags = htole16(sc->sc_iop->ldio_ctx_reg_lock_flags)((__uint16_t)(sc->sc_iop->ldio_ctx_reg_lock_flags));
2309 ctx->virtual_disk_target_id = htole16(link->target)((__uint16_t)(link->target));
2310
2311 if (mfii_load_ccb(sc, ccb, ctx + 1,
2312 ISSET(xs->flags, SCSI_NOSLEEP)((xs->flags) & (0x00001))) != 0)
2313 return (1);
2314
2315 segs = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
2316 switch (sc->sc_iop->num_sge_loc) {
2317 case MFII_IOP_NUM_SGE_LOC_ORIG0:
2318 ctx->num_sge = segs;
2319 break;
2320 case MFII_IOP_NUM_SGE_LOC_351:
2321 /* 12 bit field, but we're only using the lower 8 */
2322 ctx->span_arm = segs;
2323 break;
2324 }
2325
2326 ccb->ccb_req.flags = sc->sc_iop->ldio_req_type;
2327 ccb->ccb_req.smid = letoh16(ccb->ccb_smid)((__uint16_t)(ccb->ccb_smid));
2328
2329 return (0);
2330}
2331
2332int
2333mfii_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
2334{
2335 struct scsi_link *link = xs->sc_link;
2336 struct mfii_ccb *ccb = xs->io;
2337 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2338 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2339
2340 io->dev_handle = htole16(link->target)((__uint16_t)(link->target));
2341 io->function = MFII_FUNCTION_LDIO_REQUEST(0xf1);
2342 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva)((__uint32_t)(ccb->ccb_sense_dva));
2343 io->sgl_flags = htole16(0x02)((__uint16_t)(0x02)); /* XXX */
2344 io->sense_buffer_length = sizeof(xs->sense);
2345 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2346 io->data_length = htole32(xs->datalen)((__uint32_t)(xs->datalen));
2347 io->io_flags = htole16(xs->cmdlen)((__uint16_t)(xs->cmdlen));
2348 io->lun[0] = htobe16(link->lun)(__uint16_t)(__builtin_constant_p(link->lun) ? (__uint16_t
)(((__uint16_t)(link->lun) & 0xffU) << 8 | ((__uint16_t
)(link->lun) & 0xff00U) >> 8) : __swap16md(link->
lun))
;
2349 switch (xs->flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
2350 case SCSI_DATA_IN0x00800:
2351 ccb->ccb_direction = MFII_DATA_IN1;
2352 io->direction = MPII_SCSIIO_DIR_READ(0x2);
2353 break;
2354 case SCSI_DATA_OUT0x01000:
2355 ccb->ccb_direction = MFII_DATA_OUT2;
2356 io->direction = MPII_SCSIIO_DIR_WRITE(0x1);
2357 break;
2358 default:
2359 ccb->ccb_direction = MFII_DATA_NONE0;
2360 io->direction = MPII_SCSIIO_DIR_NONE(0x0);
2361 break;
2362 }
2363 memcpy(io->cdb, &xs->cmd, xs->cmdlen)__builtin_memcpy((io->cdb), (&xs->cmd), (xs->cmdlen
))
;
2364
2365 ctx->virtual_disk_target_id = htole16(link->target)((__uint16_t)(link->target));
2366
2367 if (mfii_load_ccb(sc, ccb, ctx + 1,
2368 ISSET(xs->flags, SCSI_NOSLEEP)((xs->flags) & (0x00001))) != 0)
2369 return (1);
2370
2371 ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
2372
2373 ccb->ccb_req.flags = MFII_REQ_TYPE_SCSI(0x00);
2374 ccb->ccb_req.smid = letoh16(ccb->ccb_smid)((__uint16_t)(ccb->ccb_smid));
2375
2376 return (0);
2377}
2378
2379void
2380mfii_pd_scsi_cmd(struct scsi_xfer *xs)
2381{
2382 struct scsi_link *link = xs->sc_link;
2383 struct mfii_softc *sc = link->bus->sb_adapter_softc;
2384 struct mfii_ccb *ccb = xs->io;
2385
2386 mfii_scrub_ccb(ccb);
2387 ccb->ccb_cookie = xs;
2388 ccb->ccb_done = mfii_scsi_cmd_done;
2389 ccb->ccb_data = xs->data;
2390 ccb->ccb_len = xs->datalen;
2391
2392 timeout_set(&xs->stimeout, mfii_scsi_cmd_tmo, xs);
2393
2394 xs->error = mfii_pd_scsi_cmd_cdb(sc, xs);
2395 if (xs->error != XS_NOERROR0)
2396 goto done;
2397
2398 xs->resid = 0;
2399
2400 if (ISSET(xs->flags, SCSI_POLL)((xs->flags) & (0x00002))) {
2401 if (mfii_poll(sc, ccb) != 0)
2402 goto stuffup;
2403 return;
2404 }
2405
2406 ccb->ccb_refcnt = 2; /* one for the chip, one for the timeout */
2407 timeout_add_msec(&xs->stimeout, xs->timeout);
2408 mfii_start(sc, ccb);
2409
2410 return;
2411
2412stuffup:
2413 xs->error = XS_DRIVER_STUFFUP2;
2414done:
2415 scsi_done(xs);
2416}
2417
2418int
2419mfii_pd_scsi_probe(struct scsi_link *link)
2420{
2421 struct mfii_softc *sc = link->bus->sb_adapter_softc;
2422 struct mfi_pd_details mpd;
2423 union mfi_mbox mbox;
2424 int rv;
2425
2426 if (link->lun > 0)
2427 return (0);
2428
2429 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
2430 mbox.s[0] = htole16(link->target)((__uint16_t)(link->target));
2431
2432 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, &mpd, sizeof(mpd),
2433 SCSI_DATA_IN0x00800|SCSI_NOSLEEP0x00001);
2434 if (rv != 0)
2435 return (EIO5);
2436
2437 if (mpd.mpd_fw_state != htole16(MFI_PD_SYSTEM)((__uint16_t)(0x40)))
2438 return (ENXIO6);
2439
2440 return (0);
2441}
2442
2443int
2444mfii_pd_scsi_cmd_cdb(struct mfii_softc *sc, struct scsi_xfer *xs)
2445{
2446 struct scsi_link *link = xs->sc_link;
2447 struct mfii_ccb *ccb = xs->io;
2448 struct mpii_msg_scsi_io *io = ccb->ccb_request;
2449 struct mfii_raid_context *ctx = (struct mfii_raid_context *)(io + 1);
2450 uint16_t dev_handle;
2451
2452 dev_handle = mfii_dev_handle(sc, link->target);
2453 if (dev_handle == htole16(0xffff)((__uint16_t)(0xffff)))
2454 return (XS_SELTIMEOUT3);
2455
2456 io->dev_handle = dev_handle;
2457 io->function = 0;
2458 io->sense_buffer_low_address = htole32(ccb->ccb_sense_dva)((__uint32_t)(ccb->ccb_sense_dva));
2459 io->sgl_flags = htole16(0x02)((__uint16_t)(0x02)); /* XXX */
2460 io->sense_buffer_length = sizeof(xs->sense);
2461 io->sgl_offset0 = (sizeof(*io) + sizeof(*ctx)) / 4;
2462 io->data_length = htole32(xs->datalen)((__uint32_t)(xs->datalen));
2463 io->io_flags = htole16(xs->cmdlen)((__uint16_t)(xs->cmdlen));
2464 io->lun[0] = htobe16(link->lun)(__uint16_t)(__builtin_constant_p(link->lun) ? (__uint16_t
)(((__uint16_t)(link->lun) & 0xffU) << 8 | ((__uint16_t
)(link->lun) & 0xff00U) >> 8) : __swap16md(link->
lun))
;
2465 switch (xs->flags & (SCSI_DATA_IN0x00800 | SCSI_DATA_OUT0x01000)) {
2466 case SCSI_DATA_IN0x00800:
2467 ccb->ccb_direction = MFII_DATA_IN1;
2468 io->direction = MPII_SCSIIO_DIR_READ(0x2);
2469 break;
2470 case SCSI_DATA_OUT0x01000:
2471 ccb->ccb_direction = MFII_DATA_OUT2;
2472 io->direction = MPII_SCSIIO_DIR_WRITE(0x1);
2473 break;
2474 default:
2475 ccb->ccb_direction = MFII_DATA_NONE0;
2476 io->direction = MPII_SCSIIO_DIR_NONE(0x0);
2477 break;
2478 }
2479 memcpy(io->cdb, &xs->cmd, xs->cmdlen)__builtin_memcpy((io->cdb), (&xs->cmd), (xs->cmdlen
))
;
2480
2481 ctx->virtual_disk_target_id = htole16(link->target)((__uint16_t)(link->target));
2482 ctx->raid_flags = MFII_RAID_CTX_IO_TYPE_SYSPD(0x1 << 4);
2483 ctx->timeout_value = sc->sc_pd->pd_timeout;
2484
2485 if (mfii_load_ccb(sc, ccb, ctx + 1,
2486 ISSET(xs->flags, SCSI_NOSLEEP)((xs->flags) & (0x00001))) != 0)
2487 return (XS_DRIVER_STUFFUP2);
2488
2489 ctx->num_sge = (ccb->ccb_len == 0) ? 0 : ccb->ccb_dmamap->dm_nsegs;
2490
2491 ccb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI(0x6 << 1);
2492 ccb->ccb_req.smid = letoh16(ccb->ccb_smid)((__uint16_t)(ccb->ccb_smid));
2493 ccb->ccb_req.dev_handle = dev_handle;
2494
2495 return (XS_NOERROR0);
2496}
2497
2498int
2499mfii_load_ccb(struct mfii_softc *sc, struct mfii_ccb *ccb, void *sglp,
2500 int nosleep)
2501{
2502 struct mpii_msg_request *req = ccb->ccb_request;
2503 struct mfii_sge *sge = NULL((void *)0), *nsge = sglp;
2504 struct mfii_sge *ce = NULL((void *)0);
2505 bus_dmamap_t dmap = ccb->ccb_dmamap;
2506 u_int space;
2507 int i;
2508
2509 int error;
2510
2511 if (ccb->ccb_len == 0)
2512 return (0);
2513
2514 error = bus_dmamap_load(sc->sc_dmat, dmap,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (ccb->ccb_data), (ccb->ccb_len), (((void *)0)), (nosleep
? 0x0001 : 0x0000))
2515 ccb->ccb_data, ccb->ccb_len, NULL,(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (ccb->ccb_data), (ccb->ccb_len), (((void *)0)), (nosleep
? 0x0001 : 0x0000))
2516 nosleep ? BUS_DMA_NOWAIT : BUS_DMA_WAITOK)(*(sc->sc_dmat)->_dmamap_load)((sc->sc_dmat), (dmap)
, (ccb->ccb_data), (ccb->ccb_len), (((void *)0)), (nosleep
? 0x0001 : 0x0000))
;
2517 if (error) {
2518 printf("%s: error %d loading dmamap\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
2519 return (1);
2520 }
2521
2522 space = (MFII_REQUEST_SIZE256 - ((u_int8_t *)nsge - (u_int8_t *)req)) /
2523 sizeof(*nsge);
2524 if (dmap->dm_nsegs > space) {
2525 space--;
2526
2527 ccb->ccb_sgl_len = (dmap->dm_nsegs - space) * sizeof(*nsge);
2528 memset(ccb->ccb_sgl, 0, ccb->ccb_sgl_len)__builtin_memset((ccb->ccb_sgl), (0), (ccb->ccb_sgl_len
))
;
2529
2530 ce = nsge + space;
2531 ce->sg_addr = htole64(ccb->ccb_sgl_dva)((__uint64_t)(ccb->ccb_sgl_dva));
2532 ce->sg_len = htole32(ccb->ccb_sgl_len)((__uint32_t)(ccb->ccb_sgl_len));
2533 ce->sg_flags = sc->sc_iop->sge_flag_chain;
2534
2535 req->chain_offset = ((u_int8_t *)ce - (u_int8_t *)req) / 16;
2536 }
2537
2538 for (i = 0; i < dmap->dm_nsegs; i++) {
2539 if (nsge == ce)
2540 nsge = ccb->ccb_sgl;
2541
2542 sge = nsge;
2543
2544 sge->sg_addr = htole64(dmap->dm_segs[i].ds_addr)((__uint64_t)(dmap->dm_segs[i].ds_addr));
2545 sge->sg_len = htole32(dmap->dm_segs[i].ds_len)((__uint32_t)(dmap->dm_segs[i].ds_len));
2546 sge->sg_flags = MFII_SGE_ADDR_SYSTEM(0x00);
2547
2548 nsge = sge + 1;
2549 }
2550 sge->sg_flags |= sc->sc_iop->sge_flag_eol;
2551
2552 bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), (ccb->ccb_direction == 2 ? 0x04
: 0x01))
2553 ccb->ccb_direction == MFII_DATA_OUT ?(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), (ccb->ccb_direction == 2 ? 0x04
: 0x01))
2554 BUS_DMASYNC_PREWRITE : BUS_DMASYNC_PREREAD)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (dmap)
, (0), (dmap->dm_mapsize), (ccb->ccb_direction == 2 ? 0x04
: 0x01))
;
2555
2556 if (ccb->ccb_sgl_len > 0) {
2557 bus_dmamap_sync(sc->sc_dmat, MFII_DMA_MAP(sc->sc_sgl),(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sgl)->mdm_map)), (ccb->ccb_sgl_offset), (ccb->ccb_sgl_len
), (0x04))
2558 ccb->ccb_sgl_offset, ccb->ccb_sgl_len,(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sgl)->mdm_map)), (ccb->ccb_sgl_offset), (ccb->ccb_sgl_len
), (0x04))
2559 BUS_DMASYNC_PREWRITE)(*(sc->sc_dmat)->_dmamap_sync)((sc->sc_dmat), (((sc->
sc_sgl)->mdm_map)), (ccb->ccb_sgl_offset), (ccb->ccb_sgl_len
), (0x04))
;
2560 }
2561
2562 return (0);
2563}
2564
2565void
2566mfii_scsi_cmd_tmo(void *xsp)
2567{
2568 struct scsi_xfer *xs = xsp;
2569 struct scsi_link *link = xs->sc_link;
2570 struct mfii_softc *sc = link->bus->sb_adapter_softc;
2571 struct mfii_ccb *ccb = xs->io;
2572
2573 mtx_enter(&sc->sc_abort_mtx);
2574 SIMPLEQ_INSERT_TAIL(&sc->sc_abort_list, ccb, ccb_link)do { (ccb)->ccb_link.sqe_next = ((void *)0); *(&sc->
sc_abort_list)->sqh_last = (ccb); (&sc->sc_abort_list
)->sqh_last = &(ccb)->ccb_link.sqe_next; } while (0
)
;
2575 mtx_leave(&sc->sc_abort_mtx);
2576
2577 task_add(systqmp, &sc->sc_abort_task);
2578}
2579
2580void
2581mfii_abort_task(void *scp)
2582{
2583 struct mfii_softc *sc = scp;
2584 struct mfii_ccb *list;
2585
2586 mtx_enter(&sc->sc_abort_mtx);
2587 list = SIMPLEQ_FIRST(&sc->sc_abort_list)((&sc->sc_abort_list)->sqh_first);
2588 SIMPLEQ_INIT(&sc->sc_abort_list)do { (&sc->sc_abort_list)->sqh_first = ((void *)0);
(&sc->sc_abort_list)->sqh_last = &(&sc->
sc_abort_list)->sqh_first; } while (0)
;
2589 mtx_leave(&sc->sc_abort_mtx);
2590
2591 while (list != NULL((void *)0)) {
2592 struct mfii_ccb *ccb = list;
2593 struct scsi_xfer *xs = ccb->ccb_cookie;
2594 struct scsi_link *link = xs->sc_link;
2595
2596 uint16_t dev_handle;
2597 struct mfii_ccb *accb;
2598
2599 list = SIMPLEQ_NEXT(ccb, ccb_link)((ccb)->ccb_link.sqe_next);
2600
2601 dev_handle = mfii_dev_handle(sc, link->target);
2602 if (dev_handle == htole16(0xffff)((__uint16_t)(0xffff))) {
2603 /* device is gone */
2604 if (atomic_dec_int_nv(&ccb->ccb_refcnt)_atomic_sub_int_nv((&ccb->ccb_refcnt), 1) == 0)
2605 scsi_done(xs);
2606 continue;
2607 }
2608
2609 accb = scsi_io_get(&sc->sc_iopool, 0);
2610 mfii_scrub_ccb(accb);
2611 mfii_abort(sc, accb, dev_handle, ccb->ccb_smid,
2612 MPII_SCSI_TASK_ABORT_TASK(0x01),
2613 htole32(MFII_TASK_MGMT_FLAGS_PD)((__uint32_t)((1 << 1))));
2614
2615 accb->ccb_cookie = ccb;
2616 accb->ccb_done = mfii_scsi_cmd_abort_done;
2617
2618 mfii_start(sc, accb);
2619 }
2620}
2621
2622void
2623mfii_abort(struct mfii_softc *sc, struct mfii_ccb *accb, uint16_t dev_handle,
2624 uint16_t smid, uint8_t type, uint32_t flags)
2625{
2626 struct mfii_task_mgmt *msg;
2627 struct mpii_msg_scsi_task_request *req;
2628
2629 msg = accb->ccb_request;
2630 req = &msg->mpii_request;
2631 req->dev_handle = dev_handle;
2632 req->function = MPII_FUNCTION_SCSI_TASK_MGMT(0x01);
2633 req->task_type = type;
2634 htolem16(&req->task_mid, smid)(*(__uint16_t *)(&req->task_mid) = ((__uint16_t)(smid)
))
;
2635 msg->flags = flags;
2636
2637 accb->ccb_req.flags = MFII_REQ_TYPE_HI_PRI(0x6 << 1);
2638 accb->ccb_req.smid = letoh16(accb->ccb_smid)((__uint16_t)(accb->ccb_smid));
2639}
2640
2641void
2642mfii_scsi_cmd_abort_done(struct mfii_softc *sc, struct mfii_ccb *accb)
2643{
2644 struct mfii_ccb *ccb = accb->ccb_cookie;
2645 struct scsi_xfer *xs = ccb->ccb_cookie;
2646
2647 /* XXX check accb completion? */
2648
2649 scsi_io_put(&sc->sc_iopool, accb);
2650
2651 if (atomic_dec_int_nv(&ccb->ccb_refcnt)_atomic_sub_int_nv((&ccb->ccb_refcnt), 1) == 0)
2652 scsi_done(xs);
2653}
2654
2655void *
2656mfii_get_ccb(void *cookie)
2657{
2658 struct mfii_softc *sc = cookie;
2659 struct mfii_ccb *ccb;
2660
2661 mtx_enter(&sc->sc_ccb_mtx);
2662 ccb = SIMPLEQ_FIRST(&sc->sc_ccb_freeq)((&sc->sc_ccb_freeq)->sqh_first);
2663 if (ccb != NULL((void *)0))
2664 SIMPLEQ_REMOVE_HEAD(&sc->sc_ccb_freeq, ccb_link)do { if (((&sc->sc_ccb_freeq)->sqh_first = (&sc
->sc_ccb_freeq)->sqh_first->ccb_link.sqe_next) == ((
void *)0)) (&sc->sc_ccb_freeq)->sqh_last = &(&
sc->sc_ccb_freeq)->sqh_first; } while (0)
;
2665 mtx_leave(&sc->sc_ccb_mtx);
2666
2667 return (ccb);
2668}
2669
2670void
2671mfii_scrub_ccb(struct mfii_ccb *ccb)
2672{
2673 ccb->ccb_cookie = NULL((void *)0);
2674 ccb->ccb_done = NULL((void *)0);
2675 ccb->ccb_flags = 0;
2676 ccb->ccb_data = NULL((void *)0);
2677 ccb->ccb_direction = 0;
2678 ccb->ccb_len = 0;
2679 ccb->ccb_sgl_len = 0;
2680 ccb->ccb_refcnt = 1;
2681
2682 memset(&ccb->ccb_req, 0, sizeof(ccb->ccb_req))__builtin_memset((&ccb->ccb_req), (0), (sizeof(ccb->
ccb_req)))
;
2683 memset(ccb->ccb_request, 0, MFII_REQUEST_SIZE)__builtin_memset((ccb->ccb_request), (0), (256));
2684 memset(ccb->ccb_mfi, 0, MFI_FRAME_SIZE)__builtin_memset((ccb->ccb_mfi), (0), (64));
2685}
2686
2687void
2688mfii_put_ccb(void *cookie, void *io)
2689{
2690 struct mfii_softc *sc = cookie;
2691 struct mfii_ccb *ccb = io;
2692
2693 mtx_enter(&sc->sc_ccb_mtx);
2694 SIMPLEQ_INSERT_HEAD(&sc->sc_ccb_freeq, ccb, ccb_link)do { if (((ccb)->ccb_link.sqe_next = (&sc->sc_ccb_freeq
)->sqh_first) == ((void *)0)) (&sc->sc_ccb_freeq)->
sqh_last = &(ccb)->ccb_link.sqe_next; (&sc->sc_ccb_freeq
)->sqh_first = (ccb); } while (0)
;
2695 mtx_leave(&sc->sc_ccb_mtx);
2696}
2697
2698int
2699mfii_init_ccb(struct mfii_softc *sc)
2700{
2701 struct mfii_ccb *ccb;
2702 u_int8_t *request = MFII_DMA_KVA(sc->sc_requests)((void *)(sc->sc_requests)->mdm_kva);
2703 u_int8_t *mfi = MFII_DMA_KVA(sc->sc_mfi)((void *)(sc->sc_mfi)->mdm_kva);
2704 u_int8_t *sense = MFII_DMA_KVA(sc->sc_sense)((void *)(sc->sc_sense)->mdm_kva);
2705 u_int8_t *sgl = MFII_DMA_KVA(sc->sc_sgl)((void *)(sc->sc_sgl)->mdm_kva);
2706 u_int i;
2707 int error;
2708
2709 sc->sc_ccb = mallocarray(sc->sc_max_cmds, sizeof(struct mfii_ccb),
2710 M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
2711
2712 for (i = 0; i < sc->sc_max_cmds; i++) {
2713 ccb = &sc->sc_ccb[i];
2714
2715 /* create a dma map for transfer */
2716 error = bus_dmamap_create(sc->sc_dmat,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002), (&ccb->ccb_dmamap))
2717 MAXPHYS, sc->sc_max_sgl, MAXPHYS, 0,(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002), (&ccb->ccb_dmamap))
2718 BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW, &ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_create)((sc->sc_dmat), ((64
* 1024)), (sc->sc_max_sgl), ((64 * 1024)), (0), (0x0001 |
0x0002), (&ccb->ccb_dmamap))
;
2719 if (error) {
2720 printf("%s: cannot create ccb dmamap (%d)\n",
2721 DEVNAME(sc)((sc)->sc_dev.dv_xname), error);
2722 goto destroy;
2723 }
2724
2725 /* select i + 1'th request. 0 is reserved for events */
2726 ccb->ccb_smid = i + 1;
2727 ccb->ccb_request_offset = MFII_REQUEST_SIZE256 * (i + 1);
2728 ccb->ccb_request = request + ccb->ccb_request_offset;
2729 ccb->ccb_request_dva = MFII_DMA_DVA(sc->sc_requests)((u_int64_t)(sc->sc_requests)->mdm_map->dm_segs[0].ds_addr
)
+
2730 ccb->ccb_request_offset;
2731
2732 /* select i'th MFI command frame */
2733 ccb->ccb_mfi_offset = MFI_FRAME_SIZE64 * i;
2734 ccb->ccb_mfi = mfi + ccb->ccb_mfi_offset;
2735 ccb->ccb_mfi_dva = MFII_DMA_DVA(sc->sc_mfi)((u_int64_t)(sc->sc_mfi)->mdm_map->dm_segs[0].ds_addr
)
+
2736 ccb->ccb_mfi_offset;
2737
2738 /* select i'th sense */
2739 ccb->ccb_sense_offset = MFI_SENSE_SIZE128 * i;
2740 ccb->ccb_sense = (struct mfi_sense *)(sense +
2741 ccb->ccb_sense_offset);
2742 ccb->ccb_sense_dva = MFII_DMA_DVA(sc->sc_sense)((u_int64_t)(sc->sc_sense)->mdm_map->dm_segs[0].ds_addr
)
+
2743 ccb->ccb_sense_offset;
2744
2745 /* select i'th sgl */
2746 ccb->ccb_sgl_offset = sizeof(struct mfii_sge) *
2747 sc->sc_max_sgl * i;
2748 ccb->ccb_sgl = (struct mfii_sge *)(sgl + ccb->ccb_sgl_offset);
2749 ccb->ccb_sgl_dva = MFII_DMA_DVA(sc->sc_sgl)((u_int64_t)(sc->sc_sgl)->mdm_map->dm_segs[0].ds_addr
)
+
2750 ccb->ccb_sgl_offset;
2751
2752 /* add ccb to queue */
2753 mfii_put_ccb(sc, ccb);
2754 }
2755
2756 return (0);
2757
2758destroy:
2759 /* free dma maps and ccb memory */
2760 while ((ccb = mfii_get_ccb(sc)) != NULL((void *)0))
2761 bus_dmamap_destroy(sc->sc_dmat, ccb->ccb_dmamap)(*(sc->sc_dmat)->_dmamap_destroy)((sc->sc_dmat), (ccb
->ccb_dmamap))
;
2762
2763 free(sc->sc_ccb, M_DEVBUF2, 0);
2764
2765 return (1);
2766}
2767
2768#if NBIO1 > 0
2769int
2770mfii_ioctl(struct device *dev, u_long cmd, caddr_t addr)
2771{
2772 struct mfii_softc *sc = (struct mfii_softc *)dev;
2773 int error = 0;
2774
2775 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl ", DEVNAME(sc));
2776
2777 rw_enter_write(&sc->sc_lock);
2778
2779 switch (cmd) {
1
Control jumps to 'case 3268428325:' at line 2805
2780 case BIOCINQ(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_inq) & 0x1fff) << 16) | ((('B')) <<
8) | ((32)))
:
2781 DNPRINTF(MFII_D_IOCTL, "inq\n");
2782 error = mfii_ioctl_inq(sc, (struct bioc_inq *)addr);
2783 break;
2784
2785 case BIOCVOL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_vol) & 0x1fff) << 16) | ((('B')) <<
8) | ((34)))
:
2786 DNPRINTF(MFII_D_IOCTL, "vol\n");
2787 error = mfii_ioctl_vol(sc, (struct bioc_vol *)addr);
2788 break;
2789
2790 case BIOCDISK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_disk) & 0x1fff) << 16) | ((('B')) <<
8) | ((33)))
:
2791 DNPRINTF(MFII_D_IOCTL, "disk\n");
2792 error = mfii_ioctl_disk(sc, (struct bioc_disk *)addr);
2793 break;
2794
2795 case BIOCALARM(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_alarm) & 0x1fff) << 16) | ((('B')) <<
8) | ((35)))
:
2796 DNPRINTF(MFII_D_IOCTL, "alarm\n");
2797 error = mfii_ioctl_alarm(sc, (struct bioc_alarm *)addr);
2798 break;
2799
2800 case BIOCBLINK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_blink) & 0x1fff) << 16) | ((('B')) <<
8) | ((36)))
:
2801 DNPRINTF(MFII_D_IOCTL, "blink\n");
2802 error = mfii_ioctl_blink(sc, (struct bioc_blink *)addr);
2803 break;
2804
2805 case BIOCSETSTATE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_setstate) & 0x1fff) << 16) | ((('B')) <<
8) | ((37)))
:
2806 DNPRINTF(MFII_D_IOCTL, "setstate\n");
2807 error = mfii_ioctl_setstate(sc, (struct bioc_setstate *)addr);
2
Calling 'mfii_ioctl_setstate'
2808 break;
2809
2810 case BIOCPATROL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof
(struct bioc_patrol) & 0x1fff) << 16) | ((('B')) <<
8) | ((42)))
:
2811 DNPRINTF(MFII_D_IOCTL, "patrol\n");
2812 error = mfii_ioctl_patrol(sc, (struct bioc_patrol *)addr);
2813 break;
2814
2815 default:
2816 DNPRINTF(MFII_D_IOCTL, " invalid ioctl\n");
2817 error = ENOTTY25;
2818 }
2819
2820 rw_exit_write(&sc->sc_lock);
2821
2822 return (error);
2823}
2824
2825int
2826mfii_bio_getitall(struct mfii_softc *sc)
2827{
2828 int i, d, rv = EINVAL22;
2829 size_t size;
2830 union mfi_mbox mbox;
2831 struct mfi_conf *cfg = NULL((void *)0);
2832 struct mfi_ld_details *ld_det = NULL((void *)0);
2833
2834 /* get info */
2835 if (mfii_get_info(sc)) {
2836 DNPRINTF(MFII_D_IOCTL, "%s: mfii_get_info failed\n",
2837 DEVNAME(sc));
2838 goto done;
2839 }
2840
2841 /* send single element command to retrieve size for full structure */
2842 cfg = malloc(sizeof *cfg, M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
2843 if (cfg == NULL((void *)0))
2844 goto done;
2845 if (mfii_mgmt(sc, MR_DCMD_CONF_GET0x04010000, NULL((void *)0), cfg, sizeof(*cfg),
2846 SCSI_DATA_IN0x00800)) {
2847 free(cfg, M_DEVBUF2, sizeof *cfg);
2848 goto done;
2849 }
2850
2851 size = cfg->mfc_size;
2852 free(cfg, M_DEVBUF2, sizeof *cfg);
2853
2854 /* memory for read config */
2855 cfg = malloc(size, M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
2856 if (cfg == NULL((void *)0))
2857 goto done;
2858 if (mfii_mgmt(sc, MR_DCMD_CONF_GET0x04010000, NULL((void *)0), cfg, size, SCSI_DATA_IN0x00800)) {
2859 free(cfg, M_DEVBUF2, size);
2860 goto done;
2861 }
2862
2863 /* replace current pointer with new one */
2864 if (sc->sc_cfg)
2865 free(sc->sc_cfg, M_DEVBUF2, 0);
2866 sc->sc_cfg = cfg;
2867
2868 /* get all ld info */
2869 if (mfii_mgmt(sc, MR_DCMD_LD_GET_LIST0x03010000, NULL((void *)0), &sc->sc_ld_list,
2870 sizeof(sc->sc_ld_list), SCSI_DATA_IN0x00800))
2871 goto done;
2872
2873 /* get memory for all ld structures */
2874 size = cfg->mfc_no_ld * sizeof(struct mfi_ld_details);
2875 if (sc->sc_ld_sz != size) {
2876 if (sc->sc_ld_details)
2877 free(sc->sc_ld_details, M_DEVBUF2, 0);
2878
2879 ld_det = malloc(size, M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
2880 if (ld_det == NULL((void *)0))
2881 goto done;
2882 sc->sc_ld_sz = size;
2883 sc->sc_ld_details = ld_det;
2884 }
2885
2886 /* find used physical disks */
2887 size = sizeof(struct mfi_ld_details);
2888 for (i = 0, d = 0; i < cfg->mfc_no_ld; i++) {
2889 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
2890 mbox.b[0] = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2891 if (mfii_mgmt(sc, MR_DCMD_LD_GET_INFO0x03020000, &mbox, &sc->sc_ld_details[i], size,
2892 SCSI_DATA_IN0x00800))
2893 goto done;
2894
2895 d += sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
2896 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
2897 }
2898 sc->sc_no_pd = d;
2899
2900 rv = 0;
2901done:
2902 return (rv);
2903}
2904
2905int
2906mfii_ioctl_inq(struct mfii_softc *sc, struct bioc_inq *bi)
2907{
2908 int rv = EINVAL22;
2909 struct mfi_conf *cfg = NULL((void *)0);
2910
2911 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_inq\n", DEVNAME(sc));
2912
2913 if (mfii_bio_getitall(sc)) {
2914 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2915 DEVNAME(sc));
2916 goto done;
2917 }
2918
2919 /* count unused disks as volumes */
2920 if (sc->sc_cfg == NULL((void *)0))
2921 goto done;
2922 cfg = sc->sc_cfg;
2923
2924 bi->bi_nodisk = sc->sc_info.mci_pd_disks_present;
2925 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs;
2926#if notyet
2927 bi->bi_novol = cfg->mfc_no_ld + cfg->mfc_no_hs +
2928 (bi->bi_nodisk - sc->sc_no_pd);
2929#endif
2930 /* tell bio who we are */
2931 strlcpy(bi->bi_dev, DEVNAME(sc)((sc)->sc_dev.dv_xname), sizeof(bi->bi_dev));
2932
2933 rv = 0;
2934done:
2935 return (rv);
2936}
2937
2938int
2939mfii_ioctl_vol(struct mfii_softc *sc, struct bioc_vol *bv)
2940{
2941 int i, per, target, rv = EINVAL22;
2942 struct scsi_link *link;
2943 struct device *dev;
2944
2945 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_vol %#x\n",
2946 DEVNAME(sc), bv->bv_volid);
2947
2948 /* we really could skip and expect that inq took care of it */
2949 if (mfii_bio_getitall(sc)) {
2950 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
2951 DEVNAME(sc));
2952 goto done;
2953 }
2954
2955 if (bv->bv_volid >= sc->sc_ld_list.mll_no_ld) {
2956 /* go do hotspares & unused disks */
2957 rv = mfii_bio_hs(sc, bv->bv_volid, MFI_MGMT_VD0x01, bv);
2958 goto done;
2959 }
2960
2961 i = bv->bv_volid;
2962 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
2963 link = scsi_get_link(sc->sc_scsibus, target, 0);
2964 if (link == NULL((void *)0)) {
2965 strlcpy(bv->bv_dev, "cache", sizeof(bv->bv_dev));
2966 } else {
2967 dev = link->device_softc;
2968 if (dev == NULL((void *)0))
2969 goto done;
2970
2971 strlcpy(bv->bv_dev, dev->dv_xname, sizeof(bv->bv_dev));
2972 }
2973
2974 switch(sc->sc_ld_list.mll_list[i].mll_state) {
2975 case MFI_LD_OFFLINE0x00:
2976 bv->bv_status = BIOC_SVOFFLINE0x01;
2977 break;
2978
2979 case MFI_LD_PART_DEGRADED0x01:
2980 case MFI_LD_DEGRADED0x02:
2981 bv->bv_status = BIOC_SVDEGRADED0x02;
2982 break;
2983
2984 case MFI_LD_ONLINE0x03:
2985 bv->bv_status = BIOC_SVONLINE0x00;
2986 break;
2987
2988 default:
2989 bv->bv_status = BIOC_SVINVALID0xff;
2990 DNPRINTF(MFII_D_IOCTL, "%s: invalid logical disk state %#x\n",
2991 DEVNAME(sc),
2992 sc->sc_ld_list.mll_list[i].mll_state);
2993 }
2994
2995 /* additional status can modify MFI status */
2996 switch (sc->sc_ld_details[i].mld_progress.mlp_in_prog) {
2997 case MFI_LD_PROG_CC0x01:
2998 bv->bv_status = BIOC_SVSCRUB0x04;
2999 per = (int)sc->sc_ld_details[i].mld_progress.mlp_cc.mp_progress;
3000 bv->bv_percent = (per * 100) / 0xffff;
3001 bv->bv_seconds =
3002 sc->sc_ld_details[i].mld_progress.mlp_cc.mp_elapsed_seconds;
3003 break;
3004
3005 case MFI_LD_PROG_BGI0x02:
3006 bv->bv_status = BIOC_SVSCRUB0x04;
3007 per = (int)sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_progress;
3008 bv->bv_percent = (per * 100) / 0xffff;
3009 bv->bv_seconds =
3010 sc->sc_ld_details[i].mld_progress.mlp_bgi.mp_elapsed_seconds;
3011 break;
3012
3013 case MFI_LD_PROG_FGI0x04:
3014 case MFI_LD_PROG_RECONSTRUCT0x08:
3015 /* nothing yet */
3016 break;
3017 }
3018
3019 if (sc->sc_ld_details[i].mld_cfg.mlc_prop.mlp_cur_cache_policy & 0x01)
3020 bv->bv_cache = BIOC_CVWRITEBACK0x01;
3021 else
3022 bv->bv_cache = BIOC_CVWRITETHROUGH0x02;
3023
3024 /*
3025 * The RAID levels are determined per the SNIA DDF spec, this is only
3026 * a subset that is valid for the MFI controller.
3027 */
3028 bv->bv_level = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_pri_raid;
3029 if (sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth > 1)
3030 bv->bv_level *= 10;
3031
3032 bv->bv_nodisk = sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_no_drv_per_span *
3033 sc->sc_ld_details[i].mld_cfg.mlc_parm.mpa_span_depth;
3034
3035 bv->bv_size = sc->sc_ld_details[i].mld_size * 512; /* bytes per block */
3036
3037 rv = 0;
3038done:
3039 return (rv);
3040}
3041
3042int
3043mfii_ioctl_disk(struct mfii_softc *sc, struct bioc_disk *bd)
3044{
3045 struct mfi_conf *cfg;
3046 struct mfi_array *ar;
3047 struct mfi_ld_cfg *ld;
3048 struct mfi_pd_details *pd;
3049 struct mfi_pd_list *pl;
3050 struct mfi_pd_progress *mfp;
3051 struct mfi_progress *mp;
3052 struct scsi_inquiry_data *inqbuf;
3053 char vend[8+16+4+1], *vendp;
3054 int i, rv = EINVAL22;
3055 int arr, vol, disk, span;
3056 union mfi_mbox mbox;
3057
3058 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_disk %#x\n",
3059 DEVNAME(sc), bd->bd_diskid);
3060
3061 /* we really could skip and expect that inq took care of it */
3062 if (mfii_bio_getitall(sc)) {
3063 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3064 DEVNAME(sc));
3065 return (rv);
3066 }
3067 cfg = sc->sc_cfg;
3068
3069 pd = malloc(sizeof *pd, M_DEVBUF2, M_WAITOK0x0001);
3070 pl = malloc(sizeof *pl, M_DEVBUF2, M_WAITOK0x0001);
3071
3072 ar = cfg->mfc_array;
3073 vol = bd->bd_volid;
3074 if (vol >= cfg->mfc_no_ld) {
3075 /* do hotspares */
3076 rv = mfii_bio_hs(sc, bd->bd_volid, MFI_MGMT_SD0x02, bd);
3077 goto freeme;
3078 }
3079
3080 /* calculate offset to ld structure */
3081 ld = (struct mfi_ld_cfg *)(
3082 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array)__builtin_offsetof(struct mfi_conf, mfc_array) +
3083 cfg->mfc_array_size * cfg->mfc_no_array);
3084
3085 /* use span 0 only when raid group is not spanned */
3086 if (ld[vol].mlc_parm.mpa_span_depth > 1)
3087 span = bd->bd_diskid / ld[vol].mlc_parm.mpa_no_drv_per_span;
3088 else
3089 span = 0;
3090 arr = ld[vol].mlc_span[span].mls_index;
3091
3092 /* offset disk into pd list */
3093 disk = bd->bd_diskid % ld[vol].mlc_parm.mpa_no_drv_per_span;
3094
3095 if (ar[arr].pd[disk].mar_pd.mfp_id == 0xffffU) {
3096 /* disk is missing but succeed command */
3097 bd->bd_status = BIOC_SDFAILED0x02;
3098 rv = 0;
3099
3100 /* try to find an unused disk for the target to rebuild */
3101 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST0x02010000, NULL((void *)0), pl, sizeof(*pl),
3102 SCSI_DATA_IN0x00800))
3103 goto freeme;
3104
3105 for (i = 0; i < pl->mpl_no_pd; i++) {
3106 if (pl->mpl_address[i].mpa_scsi_type != 0)
3107 continue;
3108
3109 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
3110 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3111 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd),
3112 SCSI_DATA_IN0x00800))
3113 continue;
3114
3115 if (pd->mpd_fw_state == MFI_PD_UNCONFIG_GOOD0x00 ||
3116 pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD0x01)
3117 break;
3118 }
3119
3120 if (i == pl->mpl_no_pd)
3121 goto freeme;
3122 } else {
3123 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
3124 mbox.s[0] = ar[arr].pd[disk].mar_pd.mfp_id;
3125 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd),
3126 SCSI_DATA_IN0x00800)) {
3127 bd->bd_status = BIOC_SDINVALID0xff;
3128 goto freeme;
3129 }
3130 }
3131
3132 /* get the remaining fields */
3133 bd->bd_channel = pd->mpd_enc_idx;
3134 bd->bd_target = pd->mpd_enc_slot;
3135
3136 /* get status */
3137 switch (pd->mpd_fw_state){
3138 case MFI_PD_UNCONFIG_GOOD0x00:
3139 case MFI_PD_UNCONFIG_BAD0x01:
3140 bd->bd_status = BIOC_SDUNUSED0x05;
3141 break;
3142
3143 case MFI_PD_HOTSPARE0x02: /* XXX dedicated hotspare part of array? */
3144 bd->bd_status = BIOC_SDHOTSPARE0x04;
3145 break;
3146
3147 case MFI_PD_OFFLINE0x10:
3148 bd->bd_status = BIOC_SDOFFLINE0x01;
3149 break;
3150
3151 case MFI_PD_FAILED0x11:
3152 bd->bd_status = BIOC_SDFAILED0x02;
3153 break;
3154
3155 case MFI_PD_REBUILD0x14:
3156 bd->bd_status = BIOC_SDREBUILD0x03;
3157 break;
3158
3159 case MFI_PD_ONLINE0x18:
3160 bd->bd_status = BIOC_SDONLINE0x00;
3161 break;
3162
3163 case MFI_PD_COPYBACK0x20:
3164 case MFI_PD_SYSTEM0x40:
3165 default:
3166 bd->bd_status = BIOC_SDINVALID0xff;
3167 break;
3168 }
3169
3170 bd->bd_size = pd->mpd_size * 512; /* bytes per block */
3171
3172 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3173 vendp = inqbuf->vendor;
3174 memcpy(vend, vendp, sizeof vend - 1)__builtin_memcpy((vend), (vendp), (sizeof vend - 1));
3175 vend[sizeof vend - 1] = '\0';
3176 strlcpy(bd->bd_vendor, vend, sizeof(bd->bd_vendor));
3177
3178 /* XXX find a way to retrieve serial nr from drive */
3179 /* XXX find a way to get bd_procdev */
3180
3181 mfp = &pd->mpd_progress;
3182 if (mfp->mfp_in_prog & MFI_PD_PROG_PR0x02) {
3183 mp = &mfp->mfp_patrol_read;
3184 bd->bd_patrol.bdp_percent = (mp->mp_progress * 100) / 0xffff;
3185 bd->bd_patrol.bdp_seconds = mp->mp_elapsed_seconds;
3186 }
3187
3188 rv = 0;
3189freeme:
3190 free(pd, M_DEVBUF2, sizeof *pd);
3191 free(pl, M_DEVBUF2, sizeof *pl);
3192
3193 return (rv);
3194}
3195
3196int
3197mfii_ioctl_alarm(struct mfii_softc *sc, struct bioc_alarm *ba)
3198{
3199 uint32_t opc, flags = 0;
3200 int rv = 0;
3201 int8_t ret;
3202
3203 switch(ba->ba_opcode) {
3204 case BIOC_SADISABLE0x00:
3205 opc = MR_DCMD_SPEAKER_DISABLE0x01030300;
3206 break;
3207
3208 case BIOC_SAENABLE0x01:
3209 opc = MR_DCMD_SPEAKER_ENABLE0x01030200;
3210 break;
3211
3212 case BIOC_SASILENCE0x02:
3213 opc = MR_DCMD_SPEAKER_SILENCE0x01030400;
3214 break;
3215
3216 case BIOC_GASTATUS0x03:
3217 opc = MR_DCMD_SPEAKER_GET0x01030100;
3218 flags = SCSI_DATA_IN0x00800;
3219 break;
3220
3221 case BIOC_SATEST0x04:
3222 opc = MR_DCMD_SPEAKER_TEST0x01030500;
3223 break;
3224
3225 default:
3226 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_alarm biocalarm invalid "
3227 "opcode %x\n", DEVNAME(sc), ba->ba_opcode);
3228 return (EINVAL22);
3229 }
3230
3231 if (mfii_mgmt(sc, opc, NULL((void *)0), &ret, sizeof(ret), flags))
3232 rv = EINVAL22;
3233 else
3234 if (ba->ba_opcode == BIOC_GASTATUS0x03)
3235 ba->ba_status = ret;
3236 else
3237 ba->ba_status = 0;
3238
3239 return (rv);
3240}
3241
3242int
3243mfii_ioctl_blink(struct mfii_softc *sc, struct bioc_blink *bb)
3244{
3245 int i, found, rv = EINVAL22;
3246 union mfi_mbox mbox;
3247 uint32_t cmd;
3248 struct mfi_pd_list *pd;
3249
3250 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink %x\n", DEVNAME(sc),
3251 bb->bb_status);
3252
3253 /* channel 0 means not in an enclosure so can't be blinked */
3254 if (bb->bb_channel == 0)
3255 return (EINVAL22);
3256
3257 pd = malloc(sizeof(*pd), M_DEVBUF2, M_WAITOK0x0001);
3258
3259 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST0x02010000, NULL((void *)0), pd, sizeof(*pd), SCSI_DATA_IN0x00800))
3260 goto done;
3261
3262 for (i = 0, found = 0; i < pd->mpl_no_pd; i++)
3263 if (bb->bb_channel == pd->mpl_address[i].mpa_enc_index &&
3264 bb->bb_target == pd->mpl_address[i].mpa_enc_slot) {
3265 found = 1;
3266 break;
3267 }
3268
3269 if (!found)
3270 goto done;
3271
3272 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
3273 mbox.s[0] = pd->mpl_address[i].mpa_pd_id;
3274
3275 switch (bb->bb_status) {
3276 case BIOC_SBUNBLINK0x00:
3277 cmd = MR_DCMD_PD_UNBLINK0x02070200;
3278 break;
3279
3280 case BIOC_SBBLINK0x01:
3281 cmd = MR_DCMD_PD_BLINK0x02070100;
3282 break;
3283
3284 case BIOC_SBALARM0x02:
3285 default:
3286 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_blink biocblink invalid "
3287 "opcode %x\n", DEVNAME(sc), bb->bb_status);
3288 goto done;
3289 }
3290
3291
3292 if (mfii_mgmt(sc, cmd, &mbox, NULL((void *)0), 0, 0) == 0)
3293 rv = 0;
3294
3295done:
3296 free(pd, M_DEVBUF2, sizeof *pd);
3297 return (rv);
3298}
3299
3300static int
3301mfii_makegood(struct mfii_softc *sc, uint16_t pd_id)
3302{
3303 struct mfii_foreign_scan_info *fsi;
3304 struct mfi_pd_details *pd;
3305 union mfi_mbox mbox;
3306 int rv;
3307
3308 fsi = malloc(sizeof *fsi, M_DEVBUF2, M_WAITOK0x0001);
3309 pd = malloc(sizeof *pd, M_DEVBUF2, M_WAITOK0x0001);
3310
3311 memset(&mbox, 0, sizeof mbox)__builtin_memset((&mbox), (0), (sizeof mbox));
3312 mbox.s[0] = pd_id;
3313 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd), SCSI_DATA_IN0x00800);
3314 if (rv != 0)
3315 goto done;
3316
3317 if (pd->mpd_fw_state == MFI_PD_UNCONFIG_BAD0x01) {
3318 mbox.s[0] = pd_id;
3319 mbox.s[1] = pd->mpd_pd.mfp_seq;
3320 mbox.b[4] = MFI_PD_UNCONFIG_GOOD0x00;
3321 rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE0x02030100, &mbox, NULL((void *)0), 0, 0);
3322 if (rv != 0)
3323 goto done;
3324 }
3325
3326 memset(&mbox, 0, sizeof mbox)__builtin_memset((&mbox), (0), (sizeof mbox));
3327 mbox.s[0] = pd_id;
3328 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd), SCSI_DATA_IN0x00800);
3329 if (rv != 0)
3330 goto done;
3331
3332 if (pd->mpd_ddf_state & MFI_DDF_FOREIGN0x10) {
3333 rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_SCAN0x04060100, NULL((void *)0), fsi, sizeof(*fsi),
3334 SCSI_DATA_IN0x00800);
3335 if (rv != 0)
3336 goto done;
3337
3338 if (fsi->count > 0) {
3339 rv = mfii_mgmt(sc, MR_DCMD_CFG_FOREIGN_CLEAR0x04060500, NULL((void *)0), NULL((void *)0), 0, 0);
3340 if (rv != 0)
3341 goto done;
3342 }
3343 }
3344
3345 memset(&mbox, 0, sizeof mbox)__builtin_memset((&mbox), (0), (sizeof mbox));
3346 mbox.s[0] = pd_id;
3347 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd), SCSI_DATA_IN0x00800);
3348 if (rv != 0)
3349 goto done;
3350
3351 if (pd->mpd_fw_state != MFI_PD_UNCONFIG_GOOD0x00 ||
3352 pd->mpd_ddf_state & MFI_DDF_FOREIGN0x10)
3353 rv = ENXIO6;
3354
3355done:
3356 free(fsi, M_DEVBUF2, sizeof *fsi);
3357 free(pd, M_DEVBUF2, sizeof *pd);
3358
3359 return (rv);
3360}
3361
3362static int
3363mfii_makespare(struct mfii_softc *sc, uint16_t pd_id)
3364{
3365 struct mfi_hotspare *hs;
3366 struct mfi_pd_details *pd;
3367 union mfi_mbox mbox;
3368 size_t size;
3369 int rv = EINVAL22;
3370
3371 /* we really could skip and expect that inq took care of it */
3372 if (mfii_bio_getitall(sc)) {
3373 DNPRINTF(MFII_D_IOCTL, "%s: mfii_bio_getitall failed\n",
3374 DEVNAME(sc));
3375 return (rv);
3376 }
3377 size = sizeof *hs + sizeof(uint16_t) * sc->sc_cfg->mfc_no_array;
3378
3379 hs = malloc(size, M_DEVBUF2, M_WAITOK0x0001);
3380 pd = malloc(sizeof *pd, M_DEVBUF2, M_WAITOK0x0001);
3381
3382 memset(&mbox, 0, sizeof mbox)__builtin_memset((&mbox), (0), (sizeof mbox));
3383 mbox.s[0] = pd_id;
3384 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd),
3385 SCSI_DATA_IN0x00800);
3386 if (rv != 0)
3387 goto done;
3388
3389 memset(hs, 0, size)__builtin_memset((hs), (0), (size));
3390 hs->mhs_pd.mfp_id = pd->mpd_pd.mfp_id;
3391 hs->mhs_pd.mfp_seq = pd->mpd_pd.mfp_seq;
3392 rv = mfii_mgmt(sc, MR_DCMD_CFG_MAKE_SPARE0x04040000, NULL((void *)0), hs, size, SCSI_DATA_OUT0x01000);
3393
3394done:
3395 free(hs, M_DEVBUF2, size);
3396 free(pd, M_DEVBUF2, sizeof *pd);
3397
3398 return (rv);
3399}
3400
3401int
3402mfii_ioctl_setstate(struct mfii_softc *sc, struct bioc_setstate *bs)
3403{
3404 struct mfi_pd_details *pd;
3405 struct mfi_pd_list *pl;
3406 int i, found, rv = EINVAL22;
3407 union mfi_mbox mbox;
3408
3409 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate %x\n", DEVNAME(sc),
3410 bs->bs_status);
3411
3412 pd = malloc(sizeof *pd, M_DEVBUF2, M_WAITOK0x0001);
3413 pl = malloc(sizeof *pl, M_DEVBUF2, M_WAITOK0x0001);
3
Value assigned to 'pl'
3414
3415 if (mfii_mgmt(sc, MR_DCMD_PD_GET_LIST0x02010000, NULL((void *)0), pl, sizeof(*pl), SCSI_DATA_IN0x00800))
4
Passing 'pl' via 4th parameter 'buf'
5
Calling 'mfii_mgmt'
3416 goto done;
3417
3418 for (i = 0, found = 0; i < pl->mpl_no_pd; i++)
3419 if (bs->bs_channel == pl->mpl_address[i].mpa_enc_index &&
3420 bs->bs_target == pl->mpl_address[i].mpa_enc_slot) {
3421 found = 1;
3422 break;
3423 }
3424
3425 if (!found)
3426 goto done;
3427
3428 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
3429 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3430
3431 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd), SCSI_DATA_IN0x00800))
3432 goto done;
3433
3434 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3435 mbox.s[1] = pd->mpd_pd.mfp_seq;
3436
3437 switch (bs->bs_status) {
3438 case BIOC_SSONLINE0x00:
3439 mbox.b[4] = MFI_PD_ONLINE0x18;
3440 break;
3441
3442 case BIOC_SSOFFLINE0x01:
3443 mbox.b[4] = MFI_PD_OFFLINE0x10;
3444 break;
3445
3446 case BIOC_SSHOTSPARE0x02:
3447 mbox.b[4] = MFI_PD_HOTSPARE0x02;
3448 break;
3449
3450 case BIOC_SSREBUILD0x03:
3451 if (pd->mpd_fw_state != MFI_PD_OFFLINE0x10) {
3452 if ((rv = mfii_makegood(sc,
3453 pl->mpl_address[i].mpa_pd_id)))
3454 goto done;
3455
3456 if ((rv = mfii_makespare(sc,
3457 pl->mpl_address[i].mpa_pd_id)))
3458 goto done;
3459
3460 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
3461 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3462 rv = mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd),
3463 SCSI_DATA_IN0x00800);
3464 if (rv != 0)
3465 goto done;
3466
3467 /* rebuilding might be started by mfii_makespare() */
3468 if (pd->mpd_fw_state == MFI_PD_REBUILD0x14) {
3469 rv = 0;
3470 goto done;
3471 }
3472
3473 mbox.s[0] = pl->mpl_address[i].mpa_pd_id;
3474 mbox.s[1] = pd->mpd_pd.mfp_seq;
3475 }
3476 mbox.b[4] = MFI_PD_REBUILD0x14;
3477 break;
3478
3479 default:
3480 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_setstate invalid "
3481 "opcode %x\n", DEVNAME(sc), bs->bs_status);
3482 goto done;
3483 }
3484
3485
3486 rv = mfii_mgmt(sc, MR_DCMD_PD_SET_STATE0x02030100, &mbox, NULL((void *)0), 0, 0);
3487done:
3488 free(pd, M_DEVBUF2, sizeof *pd);
3489 free(pl, M_DEVBUF2, sizeof *pl);
3490 return (rv);
3491}
3492
3493int
3494mfii_ioctl_patrol(struct mfii_softc *sc, struct bioc_patrol *bp)
3495{
3496 uint32_t opc;
3497 int rv = 0;
3498 struct mfi_pr_properties prop;
3499 struct mfi_pr_status status;
3500 uint32_t time, exec_freq;
3501
3502 switch (bp->bp_opcode) {
3503 case BIOC_SPSTOP0x00:
3504 case BIOC_SPSTART0x01:
3505 if (bp->bp_opcode == BIOC_SPSTART0x01)
3506 opc = MR_DCMD_PR_START0x01070400;
3507 else
3508 opc = MR_DCMD_PR_STOP0x01070500;
3509 if (mfii_mgmt(sc, opc, NULL((void *)0), NULL((void *)0), 0, SCSI_DATA_IN0x00800))
3510 return (EINVAL22);
3511 break;
3512
3513 case BIOC_SPMANUAL0x05:
3514 case BIOC_SPDISABLE0x03:
3515 case BIOC_SPAUTO0x04:
3516 /* Get device's time. */
3517 opc = MR_DCMD_TIME_SECS_GET0x01080201;
3518 if (mfii_mgmt(sc, opc, NULL((void *)0), &time, sizeof(time), SCSI_DATA_IN0x00800))
3519 return (EINVAL22);
3520
3521 opc = MR_DCMD_PR_GET_PROPERTIES0x01070200;
3522 if (mfii_mgmt(sc, opc, NULL((void *)0), &prop, sizeof(prop), SCSI_DATA_IN0x00800))
3523 return (EINVAL22);
3524
3525 switch (bp->bp_opcode) {
3526 case BIOC_SPMANUAL0x05:
3527 prop.op_mode = MFI_PR_OPMODE_MANUAL0x01;
3528 break;
3529 case BIOC_SPDISABLE0x03:
3530 prop.op_mode = MFI_PR_OPMODE_DISABLED0x02;
3531 break;
3532 case BIOC_SPAUTO0x04:
3533 if (bp->bp_autoival != 0) {
3534 if (bp->bp_autoival == -1)
3535 /* continuously */
3536 exec_freq = 0xffffffffU;
3537 else if (bp->bp_autoival > 0)
3538 exec_freq = bp->bp_autoival;
3539 else
3540 return (EINVAL22);
3541 prop.exec_freq = exec_freq;
3542 }
3543 if (bp->bp_autonext != 0) {
3544 if (bp->bp_autonext < 0)
3545 return (EINVAL22);
3546 else
3547 prop.next_exec = time + bp->bp_autonext;
3548 }
3549 prop.op_mode = MFI_PR_OPMODE_AUTO0x00;
3550 break;
3551 }
3552
3553 opc = MR_DCMD_PR_SET_PROPERTIES0x01070300;
3554 if (mfii_mgmt(sc, opc, NULL((void *)0), &prop, sizeof(prop), SCSI_DATA_OUT0x01000))
3555 return (EINVAL22);
3556
3557 break;
3558
3559 case BIOC_GPSTATUS0x02:
3560 opc = MR_DCMD_PR_GET_PROPERTIES0x01070200;
3561 if (mfii_mgmt(sc, opc, NULL((void *)0), &prop, sizeof(prop), SCSI_DATA_IN0x00800))
3562 return (EINVAL22);
3563
3564 opc = MR_DCMD_PR_GET_STATUS0x01070100;
3565 if (mfii_mgmt(sc, opc, NULL((void *)0), &status, sizeof(status), SCSI_DATA_IN0x00800))
3566 return (EINVAL22);
3567
3568 /* Get device's time. */
3569 opc = MR_DCMD_TIME_SECS_GET0x01080201;
3570 if (mfii_mgmt(sc, opc, NULL((void *)0), &time, sizeof(time), SCSI_DATA_IN0x00800))
3571 return (EINVAL22);
3572
3573 switch (prop.op_mode) {
3574 case MFI_PR_OPMODE_AUTO0x00:
3575 bp->bp_mode = BIOC_SPMAUTO0x00;
3576 bp->bp_autoival = prop.exec_freq;
3577 bp->bp_autonext = prop.next_exec;
3578 bp->bp_autonow = time;
3579 break;
3580 case MFI_PR_OPMODE_MANUAL0x01:
3581 bp->bp_mode = BIOC_SPMMANUAL0x01;
3582 break;
3583 case MFI_PR_OPMODE_DISABLED0x02:
3584 bp->bp_mode = BIOC_SPMDISABLED0x02;
3585 break;
3586 default:
3587 printf("%s: unknown patrol mode %d\n",
3588 DEVNAME(sc)((sc)->sc_dev.dv_xname), prop.op_mode);
3589 break;
3590 }
3591
3592 switch (status.state) {
3593 case MFI_PR_STATE_STOPPED0:
3594 bp->bp_status = BIOC_SPSSTOPPED0x00;
3595 break;
3596 case MFI_PR_STATE_READY1:
3597 bp->bp_status = BIOC_SPSREADY0x01;
3598 break;
3599 case MFI_PR_STATE_ACTIVE2:
3600 bp->bp_status = BIOC_SPSACTIVE0x02;
3601 break;
3602 case MFI_PR_STATE_ABORTED0xff:
3603 bp->bp_status = BIOC_SPSABORTED0xff;
3604 break;
3605 default:
3606 printf("%s: unknown patrol state %d\n",
3607 DEVNAME(sc)((sc)->sc_dev.dv_xname), status.state);
3608 break;
3609 }
3610
3611 break;
3612
3613 default:
3614 DNPRINTF(MFII_D_IOCTL, "%s: mfii_ioctl_patrol biocpatrol invalid "
3615 "opcode %x\n", DEVNAME(sc), bp->bp_opcode);
3616 return (EINVAL22);
3617 }
3618
3619 return (rv);
3620}
3621
3622int
3623mfii_bio_hs(struct mfii_softc *sc, int volid, int type, void *bio_hs)
3624{
3625 struct mfi_conf *cfg;
3626 struct mfi_hotspare *hs;
3627 struct mfi_pd_details *pd;
3628 struct bioc_disk *sdhs;
3629 struct bioc_vol *vdhs;
3630 struct scsi_inquiry_data *inqbuf;
3631 char vend[8+16+4+1], *vendp;
3632 int i, rv = EINVAL22;
3633 uint32_t size;
3634 union mfi_mbox mbox;
3635
3636 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs %d\n", DEVNAME(sc), volid);
3637
3638 if (!bio_hs)
3639 return (EINVAL22);
3640
3641 pd = malloc(sizeof *pd, M_DEVBUF2, M_WAITOK0x0001);
3642
3643 /* send single element command to retrieve size for full structure */
3644 cfg = malloc(sizeof *cfg, M_DEVBUF2, M_WAITOK0x0001);
3645 if (mfii_mgmt(sc, MR_DCMD_CONF_GET0x04010000, NULL((void *)0), cfg, sizeof(*cfg), SCSI_DATA_IN0x00800))
3646 goto freeme;
3647
3648 size = cfg->mfc_size;
3649 free(cfg, M_DEVBUF2, sizeof *cfg);
3650
3651 /* memory for read config */
3652 cfg = malloc(size, M_DEVBUF2, M_WAITOK0x0001|M_ZERO0x0008);
3653 if (mfii_mgmt(sc, MR_DCMD_CONF_GET0x04010000, NULL((void *)0), cfg, size, SCSI_DATA_IN0x00800))
3654 goto freeme;
3655
3656 /* calculate offset to hs structure */
3657 hs = (struct mfi_hotspare *)(
3658 ((uint8_t *)cfg) + offsetof(struct mfi_conf, mfc_array)__builtin_offsetof(struct mfi_conf, mfc_array) +
3659 cfg->mfc_array_size * cfg->mfc_no_array +
3660 cfg->mfc_ld_size * cfg->mfc_no_ld);
3661
3662 if (volid < cfg->mfc_no_ld)
3663 goto freeme; /* not a hotspare */
3664
3665 if (volid > (cfg->mfc_no_ld + cfg->mfc_no_hs))
3666 goto freeme; /* not a hotspare */
3667
3668 /* offset into hotspare structure */
3669 i = volid - cfg->mfc_no_ld;
3670
3671 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs i %d volid %d no_ld %d no_hs %d "
3672 "hs %p cfg %p id %02x\n", DEVNAME(sc), i, volid, cfg->mfc_no_ld,
3673 cfg->mfc_no_hs, hs, cfg, hs[i].mhs_pd.mfp_id);
3674
3675 /* get pd fields */
3676 memset(&mbox, 0, sizeof(mbox))__builtin_memset((&mbox), (0), (sizeof(mbox)));
3677 mbox.s[0] = hs[i].mhs_pd.mfp_id;
3678 if (mfii_mgmt(sc, MR_DCMD_PD_GET_INFO0x02020000, &mbox, pd, sizeof(*pd),
3679 SCSI_DATA_IN0x00800)) {
3680 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs illegal PD\n",
3681 DEVNAME(sc));
3682 goto freeme;
3683 }
3684
3685 switch (type) {
3686 case MFI_MGMT_VD0x01:
3687 vdhs = bio_hs;
3688 vdhs->bv_status = BIOC_SVONLINE0x00;
3689 vdhs->bv_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3690 vdhs->bv_level = -1; /* hotspare */
3691 vdhs->bv_nodisk = 1;
3692 break;
3693
3694 case MFI_MGMT_SD0x02:
3695 sdhs = bio_hs;
3696 sdhs->bd_status = BIOC_SDHOTSPARE0x04;
3697 sdhs->bd_size = pd->mpd_size / 2 * 1024; /* XXX why? */
3698 sdhs->bd_channel = pd->mpd_enc_idx;
3699 sdhs->bd_target = pd->mpd_enc_slot;
3700 inqbuf = (struct scsi_inquiry_data *)&pd->mpd_inq_data;
3701 vendp = inqbuf->vendor;
3702 memcpy(vend, vendp, sizeof vend - 1)__builtin_memcpy((vend), (vendp), (sizeof vend - 1));
3703 vend[sizeof vend - 1] = '\0';
3704 strlcpy(sdhs->bd_vendor, vend, sizeof(sdhs->bd_vendor));
3705 break;
3706
3707 default:
3708 goto freeme;
3709 }
3710
3711 DNPRINTF(MFII_D_IOCTL, "%s: mfii_vol_hs 6\n", DEVNAME(sc));
3712 rv = 0;
3713freeme:
3714 free(pd, M_DEVBUF2, sizeof *pd);
3715 free(cfg, M_DEVBUF2, 0);
3716
3717 return (rv);
3718}
3719
3720#ifndef SMALL_KERNEL
3721
3722#define MFI_BBU_SENSORS4 4
3723
3724void
3725mfii_bbu(struct mfii_softc *sc)
3726{
3727 struct mfi_bbu_status bbu;
3728 u_int32_t status;
3729 u_int32_t mask;
3730 u_int32_t soh_bad;
3731 int i;
3732
3733 if (mfii_mgmt(sc, MR_DCMD_BBU_GET_STATUS0x05010000, NULL((void *)0), &bbu,
3734 sizeof(bbu), SCSI_DATA_IN0x00800) != 0) {
3735 for (i = 0; i < MFI_BBU_SENSORS4; i++) {
3736 sc->sc_bbu[i].value = 0;
3737 sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3738 }
3739 for (i = 0; i < nitems(mfi_bbu_indicators)(sizeof((mfi_bbu_indicators)) / sizeof((mfi_bbu_indicators)[0
]))
; i++) {
3740 sc->sc_bbu_status[i].value = 0;
3741 sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3742 }
3743 return;
3744 }
3745
3746 switch (bbu.battery_type) {
3747 case MFI_BBU_TYPE_IBBU1:
3748 mask = MFI_BBU_STATE_BAD_IBBU( (1 << 0) | (1 << 1) | (1 << 4) | (1 <<
5) | (1 << 6) | (1 << 10) | (1 << 11))
;
3749 soh_bad = 0;
3750 break;
3751 case MFI_BBU_TYPE_BBU2:
3752 mask = MFI_BBU_STATE_BAD_BBU( (1 << 0) | (1 << 10) | (1 << 11));
3753 soh_bad = (bbu.detail.bbu.is_SOH_good == 0);
3754 break;
3755
3756 case MFI_BBU_TYPE_NONE0:
3757 default:
3758 sc->sc_bbu[0].value = 0;
3759 sc->sc_bbu[0].status = SENSOR_S_CRIT;
3760 for (i = 1; i < MFI_BBU_SENSORS4; i++) {
3761 sc->sc_bbu[i].value = 0;
3762 sc->sc_bbu[i].status = SENSOR_S_UNKNOWN;
3763 }
3764 for (i = 0; i < nitems(mfi_bbu_indicators)(sizeof((mfi_bbu_indicators)) / sizeof((mfi_bbu_indicators)[0
]))
; i++) {
3765 sc->sc_bbu_status[i].value = 0;
3766 sc->sc_bbu_status[i].status = SENSOR_S_UNKNOWN;
3767 }
3768 return;
3769 }
3770
3771 status = letoh32(bbu.fw_status)((__uint32_t)(bbu.fw_status));
3772
3773 sc->sc_bbu[0].value = ((status & mask) || soh_bad) ? 0 : 1;
3774 sc->sc_bbu[0].status = ((status & mask) || soh_bad) ? SENSOR_S_CRIT :
3775 SENSOR_S_OK;
3776
3777 sc->sc_bbu[1].value = letoh16(bbu.voltage)((__uint16_t)(bbu.voltage)) * 1000;
3778 sc->sc_bbu[2].value = (int16_t)letoh16(bbu.current)((__uint16_t)(bbu.current)) * 1000;
3779 sc->sc_bbu[3].value = letoh16(bbu.temperature)((__uint16_t)(bbu.temperature)) * 1000000 + 273150000;
3780 for (i = 1; i < MFI_BBU_SENSORS4; i++)
3781 sc->sc_bbu[i].status = SENSOR_S_UNSPEC;
3782
3783 for (i = 0; i < nitems(mfi_bbu_indicators)(sizeof((mfi_bbu_indicators)) / sizeof((mfi_bbu_indicators)[0
]))
; i++) {
3784 sc->sc_bbu_status[i].value = (status & (1 << i)) ? 1 : 0;
3785 sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3786 }
3787}
3788
3789void
3790mfii_refresh_ld_sensor(struct mfii_softc *sc, int ld)
3791{
3792 struct ksensor *sensor;
3793 int target;
3794
3795 target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3796 sensor = &sc->sc_sensors[target];
3797
3798 switch(sc->sc_ld_list.mll_list[ld].mll_state) {
3799 case MFI_LD_OFFLINE0x00:
3800 sensor->value = SENSOR_DRIVE_FAIL9;
3801 sensor->status = SENSOR_S_CRIT;
3802 break;
3803
3804 case MFI_LD_PART_DEGRADED0x01:
3805 case MFI_LD_DEGRADED0x02:
3806 sensor->value = SENSOR_DRIVE_PFAIL10;
3807 sensor->status = SENSOR_S_WARN;
3808 break;
3809
3810 case MFI_LD_ONLINE0x03:
3811 sensor->value = SENSOR_DRIVE_ONLINE4;
3812 sensor->status = SENSOR_S_OK;
3813 break;
3814
3815 default:
3816 sensor->value = 0; /* unknown */
3817 sensor->status = SENSOR_S_UNKNOWN;
3818 break;
3819 }
3820}
3821
3822void
3823mfii_init_ld_sensor(struct mfii_softc *sc, int ld)
3824{
3825 struct device *dev;
3826 struct scsi_link *link;
3827 struct ksensor *sensor;
3828 int target;
3829
3830 target = sc->sc_ld_list.mll_list[ld].mll_ld.mld_target;
3831 sensor = &sc->sc_sensors[target];
3832
3833 link = scsi_get_link(sc->sc_scsibus, target, 0);
3834 if (link == NULL((void *)0)) {
3835 strlcpy(sensor->desc, "cache", sizeof(sensor->desc));
3836 } else {
3837 dev = link->device_softc;
3838 if (dev != NULL((void *)0))
3839 strlcpy(sensor->desc, dev->dv_xname,
3840 sizeof(sensor->desc));
3841 }
3842 sensor->type = SENSOR_DRIVE;
3843 mfii_refresh_ld_sensor(sc, ld);
3844}
3845
3846int
3847mfii_create_sensors(struct mfii_softc *sc)
3848{
3849 int i, target;
3850
3851 strlcpy(sc->sc_sensordev.xname, DEVNAME(sc)((sc)->sc_dev.dv_xname),
3852 sizeof(sc->sc_sensordev.xname));
3853
3854 if (ISSET(letoh32(sc->sc_info.mci_hw_present), MFI_INFO_HW_BBU)((((__uint32_t)(sc->sc_info.mci_hw_present))) & (0x01)
)
) {
3855 sc->sc_bbu = mallocarray(4, sizeof(*sc->sc_bbu),
3856 M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
3857
3858 sc->sc_bbu[0].type = SENSOR_INDICATOR;
3859 sc->sc_bbu[0].status = SENSOR_S_UNKNOWN;
3860 strlcpy(sc->sc_bbu[0].desc, "bbu ok",
3861 sizeof(sc->sc_bbu[0].desc));
3862 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[0]);
3863
3864 sc->sc_bbu[1].type = SENSOR_VOLTS_DC;
3865 sc->sc_bbu[1].status = SENSOR_S_UNSPEC;
3866 sc->sc_bbu[2].type = SENSOR_AMPS;
3867 sc->sc_bbu[2].status = SENSOR_S_UNSPEC;
3868 sc->sc_bbu[3].type = SENSOR_TEMP;
3869 sc->sc_bbu[3].status = SENSOR_S_UNSPEC;
3870 for (i = 1; i < MFI_BBU_SENSORS4; i++) {
3871 strlcpy(sc->sc_bbu[i].desc, "bbu",
3872 sizeof(sc->sc_bbu[i].desc));
3873 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu[i]);
3874 }
3875
3876 sc->sc_bbu_status = malloc(sizeof(*sc->sc_bbu_status) *
3877 sizeof(mfi_bbu_indicators), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008);
3878
3879 for (i = 0; i < nitems(mfi_bbu_indicators)(sizeof((mfi_bbu_indicators)) / sizeof((mfi_bbu_indicators)[0
]))
; i++) {
3880 sc->sc_bbu_status[i].type = SENSOR_INDICATOR;
3881 sc->sc_bbu_status[i].status = SENSOR_S_UNSPEC;
3882 strlcpy(sc->sc_bbu_status[i].desc,
3883 mfi_bbu_indicators[i],
3884 sizeof(sc->sc_bbu_status[i].desc));
3885
3886 sensor_attach(&sc->sc_sensordev, &sc->sc_bbu_status[i]);
3887 }
3888 }
3889
3890 sc->sc_sensors = mallocarray(MFI_MAX_LD64, sizeof(struct ksensor),
3891 M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008);
3892 if (sc->sc_sensors == NULL((void *)0))
3893 return (1);
3894
3895 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++) {
3896 mfii_init_ld_sensor(sc, i);
3897 target = sc->sc_ld_list.mll_list[i].mll_ld.mld_target;
3898 sensor_attach(&sc->sc_sensordev, &sc->sc_sensors[target]);
3899 }
3900
3901 if (sensor_task_register(sc, mfii_refresh_sensors, 10) == NULL((void *)0))
3902 goto bad;
3903
3904 sensordev_install(&sc->sc_sensordev);
3905
3906 return (0);
3907
3908bad:
3909 free(sc->sc_sensors, M_DEVBUF2,
3910 MFI_MAX_LD64 * sizeof(struct ksensor));
3911
3912 return (1);
3913}
3914
3915void
3916mfii_refresh_sensors(void *arg)
3917{
3918 struct mfii_softc *sc = arg;
3919 int i;
3920
3921 rw_enter_write(&sc->sc_lock);
3922 if (sc->sc_bbu != NULL((void *)0))
3923 mfii_bbu(sc);
3924
3925 mfii_bio_getitall(sc);
3926 rw_exit_write(&sc->sc_lock);
3927
3928 for (i = 0; i < sc->sc_ld_list.mll_no_ld; i++)
3929 mfii_refresh_ld_sensor(sc, i);
3930}
3931#endif /* SMALL_KERNEL */
3932#endif /* NBIO > 0 */