Bug Summary

File:scsi/mpath.c
Warning:line 308, column 4
Value stored to 'p' is never read

Annotated Source Code

Press '?' to see keyboard shortcuts

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.0 -analyze -disable-free -disable-llvm-verifier -discard-value-names -main-file-name mpath.c -analyzer-store=region -analyzer-opt-analyze-nested-blocks -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/lib/clang/13.0.0 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I 
/usr/src/sys/dev/pci/drm/amd/pm/powerplay -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -D CONFIG_DRM_AMD_DC_DCN3_0 -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant -fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /usr/obj/sys/arch/amd64/compile/GENERIC.MP/scan-build/2022-01-12-131800-47421-1 -x c /usr/src/sys/scsi/mpath.c
1/* $OpenBSD: mpath.c,v 1.55 2021/10/24 16:57:30 mpi Exp $ */
2
3/*
4 * Copyright (c) 2009 David Gwynne <dlg@openbsd.org>
5 *
6 * Permission to use, copy, modify, and distribute this software for any
7 * purpose with or without fee is hereby granted, provided that the above
8 * copyright notice and this permission notice appear in all copies.
9 *
10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
13 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
15 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
16 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
17 */
18
19#include <sys/param.h>
20#include <sys/systm.h>
21#include <sys/kernel.h>
22#include <sys/malloc.h>
23#include <sys/device.h>
24#include <sys/conf.h>
25#include <sys/queue.h>
26#include <sys/rwlock.h>
27#include <sys/ioctl.h>
28#include <sys/poll.h>
29#include <sys/selinfo.h>
30
31#include <scsi/scsi_all.h>
32#include <scsi/scsiconf.h>
33#include <scsi/mpathvar.h>
34
35#define MPATH_BUSWIDTH256 256
36
37int mpath_match(struct device *, void *, void *);
38void mpath_attach(struct device *, struct device *, void *);
39void mpath_shutdown(void *);
40
/* An ordered list of paths; dispatch round-robins along it. */
TAILQ_HEAD(mpath_paths, mpath_path);

/*
 * A group of paths sharing the same availability state on the target
 * (identified by the driver-supplied group id).  Groups live on the
 * owning mpath_dev's d_groups list; the head group is the one that
 * round-robin wraps back to.
 */
struct mpath_group {
	TAILQ_ENTRY(mpath_group) g_entry;	/* link on d_groups */
	struct mpath_paths g_paths;		/* paths in this group */
	struct mpath_dev *g_dev;		/* owning device */
	u_int g_id;				/* group id from the path driver */
};
TAILQ_HEAD(mpath_groups, mpath_group);
50
/*
 * Per logical multipath device state.  One of these exists per unique
 * devid/ops pair; it aggregates all paths (grouped by availability)
 * and queues the xfers waiting for a path.
 */
struct mpath_dev {
	struct mutex d_mtx;			/* protects the fields below */

	struct scsi_xfer_list d_xfers;		/* xfers waiting for a path */
	struct mpath_path *d_next_path;		/* round-robin cursor */

	struct mpath_groups d_groups;		/* path groups, preferred first */

	struct mpath_group *d_failover_iter;	/* group being probed on failover */
	struct timeout d_failover_tmo;		/* reschedules a failed scan */
	u_int d_failover;			/* scsi_pending_* gate */

	const struct mpath_ops *d_ops;		/* driver callbacks (checksense/status) */
	struct devid *d_id;			/* device identity shared by all paths */
};
66
/*
 * Softc for the single mpath(4) pseudo-bus.  Each attached mpath_dev
 * occupies one target slot on the virtual scsibus.
 */
struct mpath_softc {
	struct device sc_dev;
	struct scsibus_softc *sc_scsibus;	/* the virtual bus we attach */
	struct mpath_dev *sc_devs[MPATH_BUSWIDTH];	/* indexed by target */
};
#define DEVNAME(_s) ((_s)->sc_dev.dv_xname)

/* There is at most one mpath instance; path drivers reach it via this. */
struct mpath_softc *mpath;
75
/* autoconf(9) glue for the mpath pseudo-device. */
const struct cfattach mpath_ca = {
	sizeof(struct mpath_softc),
	mpath_match,
	mpath_attach
};

struct cfdriver mpath_cd = {
	NULL,
	"mpath",
	DV_DULL
};
87
88void mpath_cmd(struct scsi_xfer *);
89void mpath_minphys(struct buf *, struct scsi_link *);
90int mpath_probe(struct scsi_link *);
91
92struct mpath_path *mpath_next_path(struct mpath_dev *);
93void mpath_done(struct scsi_xfer *);
94
95void mpath_failover(struct mpath_dev *);
96void mpath_failover_start(void *);
97void mpath_failover_check(struct mpath_dev *);
98
/* Adapter entry points for the virtual mpath scsibus. */
struct scsi_adapter mpath_switch = {
	mpath_cmd, NULL, mpath_probe, NULL, NULL
};
102
103void mpath_xs_stuffup(struct scsi_xfer *);
104
/*
 * autoconf match: the mpath pseudo-device always matches.
 */
int
mpath_match(struct device *parent, void *match, void *aux)
{
	return 1;
}
110
/*
 * Attach the mpath pseudo-device: publish the global softc pointer and
 * hang a virtual scsibus off it.  Each multipathed device later shows
 * up as one target on that bus.
 */
void
mpath_attach(struct device *parent, struct device *self, void *aux)
{
	struct mpath_softc *sc = (struct mpath_softc *)self;
	struct scsibus_attach_args saa;

	/* path drivers find us through the global before the bus exists */
	mpath = sc;

	printf("\n");

	saa.saa_adapter = &mpath_switch;
	saa.saa_adapter_softc = sc;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = MPATH_BUSWIDTH;
	saa.saa_luns = 1;
	saa.saa_openings = 1024; /* XXX magical */
	saa.saa_pool = NULL;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev,
	    &saa, scsiprint);
}
134
/*
 * Complete an xfer with XS_DRIVER_STUFFUP.  Used when no path or no
 * transfer resources are available for the request.
 */
void
mpath_xs_stuffup(struct scsi_xfer *xs)
{
	xs->error = XS_DRIVER_STUFFUP;
	scsi_done(xs);
}
141
142int
143mpath_probe(struct scsi_link *link)
144{
145 struct mpath_softc *sc = link->bus->sb_adapter_softc;
146 struct mpath_dev *d = sc->sc_devs[link->target];
147
148 if (link->lun != 0 || d == NULL((void *)0))
149 return (ENXIO6);
150
151 link->id = devid_copy(d->d_id);
152
153 return (0);
154}
155
156struct mpath_path *
157mpath_next_path(struct mpath_dev *d)
158{
159 struct mpath_group *g;
160 struct mpath_path *p;
161
162#ifdef DIAGNOSTIC1
163 if (d == NULL((void *)0))
164 panic("%s: d is NULL", __func__);
165#endif /* DIAGNOSTIC */
166
167 p = d->d_next_path;
168 if (p != NULL((void *)0)) {
169 d->d_next_path = TAILQ_NEXT(p, p_entry)((p)->p_entry.tqe_next);
170 if (d->d_next_path == NULL((void *)0) &&
171 (g = TAILQ_FIRST(&d->d_groups)((&d->d_groups)->tqh_first)) != NULL((void *)0))
172 d->d_next_path = TAILQ_FIRST(&g->g_paths)((&g->g_paths)->tqh_first);
173 }
174
175 return (p);
176}
177
/*
 * Adapter cmd entry: dispatch an xfer issued against the virtual bus.
 *
 * SCSI_POLL xfers are executed synchronously on the next path right
 * here.  Everything else is queued on the device and handed to a
 * path's xsh handler, which pulls from the queue in mpath_start().
 */
void
mpath_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct mpath_softc *sc = link->bus->sb_adapter_softc;
	struct mpath_dev *d = sc->sc_devs[link->target];
	struct mpath_path *p;
	struct scsi_xfer *mxs;

#ifdef DIAGNOSTIC
	if (d == NULL)
		panic("mpath_cmd issued against nonexistent device");
#endif /* DIAGNOSTIC */

	if (ISSET(xs->flags, SCSI_POLL)) {
		/* polled: pick a path and run the command synchronously */
		mtx_enter(&d->d_mtx);
		p = mpath_next_path(d);
		mtx_leave(&d->d_mtx);
		if (p == NULL) {
			mpath_xs_stuffup(xs);
			return;
		}

		mxs = scsi_xs_get(p->p_link, xs->flags);
		if (mxs == NULL) {
			mpath_xs_stuffup(xs);
			return;
		}

		/* clone the request onto the per-path xfer */
		memcpy(&mxs->cmd, &xs->cmd, xs->cmdlen);
		mxs->cmdlen = xs->cmdlen;
		mxs->data = xs->data;
		mxs->datalen = xs->datalen;
		mxs->retries = xs->retries;
		mxs->timeout = xs->timeout;
		mxs->bp = xs->bp;

		scsi_xs_sync(mxs);

		/* copy the result (and sense data) back to the caller's xfer */
		xs->error = mxs->error;
		xs->status = mxs->status;
		xs->resid = mxs->resid;

		memcpy(&xs->sense, &mxs->sense, sizeof(xs->sense));

		scsi_xs_put(mxs);
		scsi_done(xs);
		return;
	}

	/* async: queue the xfer and poke the next path's handler */
	mtx_enter(&d->d_mtx);
	SIMPLEQ_INSERT_TAIL(&d->d_xfers, xs, xfer_list);
	p = mpath_next_path(d);
	mtx_leave(&d->d_mtx);

	if (p != NULL)
		scsi_xsh_add(&p->p_xsh);
}
236
/*
 * Path xsh handler: the path layer has a free xfer (mxs) for us; fill
 * it from the device's pending queue and fire it.  If more xfers
 * remain queued, re-arm the handler so they get serviced too.  On a
 * dying path or an empty queue the xfer resource is simply returned.
 */
void
mpath_start(struct mpath_path *p, struct scsi_xfer *mxs)
{
	struct mpath_dev *d = p->p_group->g_dev;
	struct scsi_xfer *xs;
	int addxsh = 0;

	if (ISSET(p->p_link->state, SDEV_S_DYING) || d == NULL)
		goto fail;

	mtx_enter(&d->d_mtx);
	xs = SIMPLEQ_FIRST(&d->d_xfers);
	if (xs != NULL) {
		SIMPLEQ_REMOVE_HEAD(&d->d_xfers, xfer_list);
		/* remember whether there is more work before dropping the mutex */
		if (!SIMPLEQ_EMPTY(&d->d_xfers))
			addxsh = 1;
	}
	mtx_leave(&d->d_mtx);

	if (xs == NULL)
		goto fail;

	/* clone the queued request onto the per-path xfer */
	memcpy(&mxs->cmd, &xs->cmd, xs->cmdlen);
	mxs->cmdlen = xs->cmdlen;
	mxs->data = xs->data;
	mxs->datalen = xs->datalen;
	mxs->retries = xs->retries;
	mxs->timeout = xs->timeout;
	mxs->bp = xs->bp;
	mxs->flags = xs->flags;

	/* completion goes through mpath_done() with the original xs */
	mxs->cookie = xs;
	mxs->done = mpath_done;

	scsi_xs_exec(mxs);

	if (addxsh)
		scsi_xsh_add(&p->p_xsh);

	return;
fail:
	scsi_xs_put(mxs);
}
280
281void
282mpath_done(struct scsi_xfer *mxs)
283{
284 struct scsi_xfer *xs = mxs->cookie;
285 struct scsi_link *link = xs->sc_link;
286 struct mpath_softc *sc = link->bus->sb_adapter_softc;
287 struct mpath_dev *d = sc->sc_devs[link->target];
288 struct mpath_path *p;
289
290 switch (mxs->error) {
291 case XS_SELTIMEOUT3: /* physical path is gone, try the next */
292 case XS_RESET8:
293 mtx_enter(&d->d_mtx);
294 SIMPLEQ_INSERT_HEAD(&d->d_xfers, xs, xfer_list)do { if (((xs)->xfer_list.sqe_next = (&d->d_xfers)->
sqh_first) == ((void *)0)) (&d->d_xfers)->sqh_last =
&(xs)->xfer_list.sqe_next; (&d->d_xfers)->sqh_first
= (xs); } while (0)
;
295 p = mpath_next_path(d);
296 mtx_leave(&d->d_mtx);
297
298 scsi_xs_put(mxs);
299
300 if (p != NULL((void *)0))
301 scsi_xsh_add(&p->p_xsh);
302 return;
303 case XS_SENSE1:
304 switch (d->d_ops->op_checksense(mxs)) {
305 case MPATH_SENSE_FAILOVER1:
306 mtx_enter(&d->d_mtx);
307 SIMPLEQ_INSERT_HEAD(&d->d_xfers, xs, xfer_list)do { if (((xs)->xfer_list.sqe_next = (&d->d_xfers)->
sqh_first) == ((void *)0)) (&d->d_xfers)->sqh_last =
&(xs)->xfer_list.sqe_next; (&d->d_xfers)->sqh_first
= (xs); } while (0)
;
308 p = mpath_next_path(d);
Value stored to 'p' is never read
309 mtx_leave(&d->d_mtx);
310
311 scsi_xs_put(mxs);
312
313 mpath_failover(d);
314 return;
315 case MPATH_SENSE_DECLINED0:
316 break;
317#ifdef DIAGNOSTIC1
318 default:
319 panic("unexpected return from checksense");
320#endif /* DIAGNOSTIC */
321 }
322 break;
323 }
324
325 xs->error = mxs->error;
326 xs->status = mxs->status;
327 xs->resid = mxs->resid;
328
329 memcpy(&xs->sense, &mxs->sense, sizeof(xs->sense))__builtin_memcpy((&xs->sense), (&mxs->sense), (
sizeof(xs->sense)))
;
330
331 scsi_xs_put(mxs);
332
333 scsi_done(xs);
334}
335
336void
337mpath_failover(struct mpath_dev *d)
338{
339 if (!scsi_pending_start(&d->d_mtx, &d->d_failover))
340 return;
341
342 mpath_failover_start(d);
343}
344
/*
 * (Re)start a failover scan from the first (preferred) group.  Also
 * runs as the d_failover_tmo timeout handler, hence the void * arg.
 */
void
mpath_failover_start(void *xd)
{
	struct mpath_dev *d = xd;

	mtx_enter(&d->d_mtx);
	d->d_failover_iter = TAILQ_FIRST(&d->d_groups);
	mtx_leave(&d->d_mtx);

	mpath_failover_check(d);
}
356
357void
358mpath_failover_check(struct mpath_dev *d)
359{
360 struct mpath_group *g = d->d_failover_iter;
361 struct mpath_path *p;
362
363 if (g == NULL((void *)0))
364 timeout_add_sec(&d->d_failover_tmo, 1);
365 else {
366 p = TAILQ_FIRST(&g->g_paths)((&g->g_paths)->tqh_first);
367 d->d_ops->op_status(p->p_link);
368 }
369}
370
/*
 * Path drivers report the result of an op_status probe here.  An
 * ACTIVE group is moved to the head of the group list and becomes the
 * round-robin target, queued work is kicked, and the pending-failover
 * gate is closed (rescanning if another request arrived meanwhile).
 * Any other status advances the failover iterator to the next group.
 */
void
mpath_path_status(struct mpath_path *p, int status)
{
	struct mpath_group *g = p->p_group;
	struct mpath_dev *d = g->g_dev;

	mtx_enter(&d->d_mtx);
	if (status == MPATH_S_ACTIVE) {
		/* promote this group to preferred and restart dispatch at p */
		TAILQ_REMOVE(&d->d_groups, g, g_entry);
		TAILQ_INSERT_HEAD(&d->d_groups, g, g_entry);
		d->d_next_path = p;
	} else
		d->d_failover_iter = TAILQ_NEXT(d->d_failover_iter, g_entry);
	mtx_leave(&d->d_mtx);

	if (status == MPATH_S_ACTIVE) {
		scsi_xsh_add(&p->p_xsh);
		/* a failover request queued while we scanned? start over */
		if (!scsi_pending_finish(&d->d_mtx, &d->d_failover))
			mpath_failover_start(d);
	} else
		mpath_failover_check(d);
}
393
/*
 * Adapter minphys entry: clamp a transfer to the most restrictive
 * minphys of every physical path behind the device, so any path can
 * carry any queued xfer.
 */
void
mpath_minphys(struct buf *bp, struct scsi_link *link)
{
	struct mpath_softc *sc = link->bus->sb_adapter_softc;
	struct mpath_dev *d = sc->sc_devs[link->target];
	struct mpath_group *g;
	struct mpath_path *p;

#ifdef DIAGNOSTIC
	if (d == NULL)
		panic("mpath_minphys against nonexistent device");
#endif /* DIAGNOSTIC */

	mtx_enter(&d->d_mtx);
	TAILQ_FOREACH(g, &d->d_groups, g_entry) {
		TAILQ_FOREACH(p, &g->g_paths, p_entry) {
			/* XXX crossing layers with mutex held */
			if (p->p_link->bus->sb_adapter->dev_minphys != NULL)
				p->p_link->bus->sb_adapter->dev_minphys(bp,
				    p->p_link);
		}
	}
	mtx_leave(&d->d_mtx);
}
418
419int
420mpath_path_probe(struct scsi_link *link)
421{
422 if (mpath == NULL((void *)0))
423 return (ENXIO6);
424
425 if (link->id == NULL((void *)0))
426 return (EINVAL22);
427
428 if (ISSET(link->flags, SDEV_UMASS)((link->flags) & (0x0400)))
429 return (EINVAL22);
430
431 if (mpath == link->bus->sb_adapter_softc)
432 return (ENXIO6);
433
434 return (0);
435}
436
/*
 * Register a new path with the mpath layer.
 *
 * Finds (or creates) the mpath_dev whose devid and ops match the
 * path's link, finds (or creates) the path group g_id within it, and
 * appends the path.  A brand-new device claims a free target slot on
 * the virtual bus and is probed; an existing device with queued work
 * gets the new path's handler armed immediately.
 *
 * Returns 0 on success, ENXIO when the bus is full, ENOMEM on
 * allocation failure (with the freshly created device rolled back).
 */
int
mpath_path_attach(struct mpath_path *p, u_int g_id, const struct mpath_ops *ops)
{
	struct mpath_softc *sc = mpath;
	struct scsi_link *link = p->p_link;
	struct mpath_dev *d = NULL;
	struct mpath_group *g;
	int newdev = 0, addxsh = 0;
	int target;

#ifdef DIAGNOSTIC
	if (p->p_link == NULL)
		panic("mpath_path_attach: NULL link");
	if (p->p_group != NULL)
		panic("mpath_path_attach: group is not NULL");
#endif /* DIAGNOSTIC */

	/* look for an existing device with the same identity and ops */
	for (target = 0; target < MPATH_BUSWIDTH; target++) {
		if ((d = sc->sc_devs[target]) == NULL)
			continue;

		if (DEVID_CMP(d->d_id, link->id) && d->d_ops == ops)
			break;

		d = NULL;
	}

	if (d == NULL) {
		/* no match: claim the first free target slot */
		for (target = 0; target < MPATH_BUSWIDTH; target++) {
			if (sc->sc_devs[target] == NULL)
				break;
		}
		if (target >= MPATH_BUSWIDTH)
			return (ENXIO);

		d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
		if (d == NULL)
			return (ENOMEM);

		mtx_init(&d->d_mtx, IPL_BIO);
		TAILQ_INIT(&d->d_groups);
		SIMPLEQ_INIT(&d->d_xfers);
		d->d_id = devid_copy(link->id);
		d->d_ops = ops;

		timeout_set(&d->d_failover_tmo, mpath_failover_start, d);

		sc->sc_devs[target] = d;
		newdev = 1;
	} else {
		/*
		 * instead of carrying identical values in different devid
		 * instances, delete the new one and reference the old one in
		 * the new scsi_link.
		 */
		devid_free(link->id);
		link->id = devid_copy(d->d_id);
	}

	/* find the path group this path belongs to */
	TAILQ_FOREACH(g, &d->d_groups, g_entry) {
		if (g->g_id == g_id)
			break;
	}

	if (g == NULL) {
		g = malloc(sizeof(*g), M_DEVBUF,
		    M_WAITOK | M_CANFAIL | M_ZERO);
		if (g == NULL) {
			/* roll back a device created just above */
			if (newdev) {
				free(d, M_DEVBUF, sizeof(*d));
				sc->sc_devs[target] = NULL;
			}

			return (ENOMEM);
		}

		TAILQ_INIT(&g->g_paths);
		g->g_dev = d;
		g->g_id = g_id;

		mtx_enter(&d->d_mtx);
		TAILQ_INSERT_TAIL(&d->d_groups, g, g_entry);
		mtx_leave(&d->d_mtx);
	}

	p->p_group = g;

	mtx_enter(&d->d_mtx);
	TAILQ_INSERT_TAIL(&g->g_paths, p, p_entry);
	/* note pending work to kick the new path after dropping the mutex */
	if (!SIMPLEQ_EMPTY(&d->d_xfers))
		addxsh = 1;

	if (d->d_next_path == NULL)
		d->d_next_path = p;
	mtx_leave(&d->d_mtx);

	if (newdev)
		scsi_probe_target(mpath->sc_scsibus, target);
	else if (addxsh)
		scsi_xsh_add(&p->p_xsh);

	return (0);
}
540
/*
 * Remove a path from its device.  The round-robin cursor is moved off
 * the departing path, an emptied group is unlinked and freed, and if
 * work is still queued it is either handed to the next path or — when
 * no path remains scheduled — a failover is started to find one.
 * Always returns 0.
 */
int
mpath_path_detach(struct mpath_path *p)
{
	struct mpath_group *g = p->p_group;
	struct mpath_dev *d;
	struct mpath_path *np = NULL;

#ifdef DIAGNOSTIC
	if (g == NULL)
		panic("mpath: detaching a path from a nonexistent bus");
#endif /* DIAGNOSTIC */
	d = g->g_dev;
	p->p_group = NULL;

	mtx_enter(&d->d_mtx);
	TAILQ_REMOVE(&g->g_paths, p, p_entry);
	if (d->d_next_path == p)
		d->d_next_path = TAILQ_FIRST(&g->g_paths);

	/* g stays non-NULL only when the group emptied and must be freed */
	if (TAILQ_EMPTY(&g->g_paths))
		TAILQ_REMOVE(&d->d_groups, g, g_entry);
	else
		g = NULL;

	if (!SIMPLEQ_EMPTY(&d->d_xfers))
		np = d->d_next_path;
	mtx_leave(&d->d_mtx);

	if (g != NULL)
		free(g, M_DEVBUF, sizeof(*g));

	scsi_xsh_del(&p->p_xsh);

	if (np == NULL)
		mpath_failover(d);
	else
		scsi_xsh_add(&np->p_xsh);

	return (0);
}
581
582struct device *
583mpath_bootdv(struct device *dev)
584{
585 struct mpath_softc *sc = mpath;
586 struct mpath_dev *d;
587 struct mpath_group *g;
588 struct mpath_path *p;
589 int target;
590
591 if (sc == NULL((void *)0))
592 return (dev);
593
594 for (target = 0; target < MPATH_BUSWIDTH256; target++) {
595 if ((d = sc->sc_devs[target]) == NULL((void *)0))
596 continue;
597
598 TAILQ_FOREACH(g, &d->d_groups, g_entry)for((g) = ((&d->d_groups)->tqh_first); (g) != ((void
*)0); (g) = ((g)->g_entry.tqe_next))
{
599 TAILQ_FOREACH(p, &g->g_paths, p_entry)for((p) = ((&g->g_paths)->tqh_first); (p) != ((void
*)0); (p) = ((p)->p_entry.tqe_next))
{
600 if (p->p_link->device_softc == dev) {
601 return (scsi_get_link(mpath->sc_scsibus,
602 target, 0)->device_softc);
603 }
604 }
605 }
606 }
607
608 return (dev);
609}