File: | dev/softraid.c |
Warning: | line 3731, column 18 Result of 'malloc' is converted to a pointer of type 'struct sr_meta_opt_hdr', which is incompatible with sizeof operand type 'struct sr_meta_boot' |
Press '?' to see keyboard shortcuts
Keyboard shortcuts:
1 | /* $OpenBSD: softraid.c,v 1.421 2022/01/09 05:42:37 jsg Exp $ */ |
2 | /* |
3 | * Copyright (c) 2007, 2008, 2009 Marco Peereboom <marco@peereboom.us> |
4 | * Copyright (c) 2008 Chris Kuethe <ckuethe@openbsd.org> |
5 | * Copyright (c) 2009 Joel Sing <jsing@openbsd.org> |
6 | * |
7 | * Permission to use, copy, modify, and distribute this software for any |
8 | * purpose with or without fee is hereby granted, provided that the above |
9 | * copyright notice and this permission notice appear in all copies. |
10 | * |
11 | * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES |
12 | * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF |
13 | * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR |
14 | * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES |
15 | * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN |
16 | * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF |
17 | * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. |
18 | */ |
19 | |
20 | #include "bio.h" |
21 | |
22 | #include <sys/param.h> |
23 | #include <sys/systm.h> |
24 | #include <sys/buf.h> |
25 | #include <sys/device.h> |
26 | #include <sys/ioctl.h> |
27 | #include <sys/malloc.h> |
28 | #include <sys/pool.h> |
29 | #include <sys/kernel.h> |
30 | #include <sys/disk.h> |
31 | #include <sys/rwlock.h> |
32 | #include <sys/queue.h> |
33 | #include <sys/fcntl.h> |
34 | #include <sys/disklabel.h> |
35 | #include <sys/vnode.h> |
36 | #include <sys/lock.h> |
37 | #include <sys/mount.h> |
38 | #include <sys/sensors.h> |
39 | #include <sys/stat.h> |
40 | #include <sys/conf.h> |
41 | #include <sys/uio.h> |
42 | #include <sys/task.h> |
43 | #include <sys/kthread.h> |
44 | #include <sys/dkio.h> |
45 | #include <sys/stdint.h> |
46 | |
47 | #include <scsi/scsi_all.h> |
48 | #include <scsi/scsiconf.h> |
49 | #include <scsi/scsi_disk.h> |
50 | |
51 | #include <dev/softraidvar.h> |
52 | |
53 | #ifdef HIBERNATE1 |
54 | #include <lib/libsa/aes_xts.h> |
55 | #include <sys/hibernate.h> |
56 | #include <scsi/sdvar.h> |
57 | #endif /* HIBERNATE */ |
58 | |
59 | /* #define SR_FANCY_STATS */ |
60 | |
61 | #ifdef SR_DEBUG |
62 | #define SR_FANCY_STATS |
63 | uint32_t sr_debug = 0 |
64 | /* | SR_D_CMD */ |
65 | /* | SR_D_MISC */ |
66 | /* | SR_D_INTR */ |
67 | /* | SR_D_IOCTL */ |
68 | /* | SR_D_CCB */ |
69 | /* | SR_D_WU */ |
70 | /* | SR_D_META */ |
71 | /* | SR_D_DIS */ |
72 | /* | SR_D_STATE */ |
73 | /* | SR_D_REBUILD */ |
74 | ; |
75 | #endif |
76 | |
77 | struct sr_softc *softraid0; |
78 | struct sr_uuid sr_bootuuid; |
79 | u_int8_t sr_bootkey[SR_CRYPTO_MAXKEYBYTES32]; |
80 | |
81 | int sr_match(struct device *, void *, void *); |
82 | void sr_attach(struct device *, struct device *, void *); |
83 | int sr_detach(struct device *, int); |
84 | void sr_map_root(void); |
85 | |
86 | struct cfattach softraid_ca = { |
87 | sizeof(struct sr_softc), sr_match, sr_attach, sr_detach, |
88 | }; |
89 | |
90 | struct cfdriver softraid_cd = { |
91 | NULL((void *)0), "softraid", DV_DULL |
92 | }; |
93 | |
94 | /* scsi & discipline */ |
95 | void sr_scsi_cmd(struct scsi_xfer *); |
96 | int sr_scsi_probe(struct scsi_link *); |
97 | int sr_scsi_ioctl(struct scsi_link *, u_long, |
98 | caddr_t, int); |
99 | int sr_bio_ioctl(struct device *, u_long, caddr_t); |
100 | int sr_bio_handler(struct sr_softc *, |
101 | struct sr_discipline *, u_long, struct bio *); |
102 | int sr_ioctl_inq(struct sr_softc *, struct bioc_inq *); |
103 | int sr_ioctl_vol(struct sr_softc *, struct bioc_vol *); |
104 | int sr_ioctl_disk(struct sr_softc *, struct bioc_disk *); |
105 | int sr_ioctl_setstate(struct sr_softc *, |
106 | struct bioc_setstate *); |
107 | int sr_ioctl_createraid(struct sr_softc *, |
108 | struct bioc_createraid *, int, void *); |
109 | int sr_ioctl_deleteraid(struct sr_softc *, |
110 | struct sr_discipline *, struct bioc_deleteraid *); |
111 | int sr_ioctl_discipline(struct sr_softc *, |
112 | struct sr_discipline *, struct bioc_discipline *); |
113 | int sr_ioctl_installboot(struct sr_softc *, |
114 | struct sr_discipline *, struct bioc_installboot *); |
115 | void sr_chunks_unwind(struct sr_softc *, |
116 | struct sr_chunk_head *); |
117 | void sr_discipline_free(struct sr_discipline *); |
118 | void sr_discipline_shutdown(struct sr_discipline *, int, int); |
119 | int sr_discipline_init(struct sr_discipline *, int); |
120 | int sr_alloc_resources(struct sr_discipline *); |
121 | void sr_free_resources(struct sr_discipline *); |
122 | void sr_set_chunk_state(struct sr_discipline *, int, int); |
123 | void sr_set_vol_state(struct sr_discipline *); |
124 | |
125 | /* utility functions */ |
126 | void sr_shutdown(int); |
127 | void sr_uuid_generate(struct sr_uuid *); |
128 | char *sr_uuid_format(struct sr_uuid *); |
129 | void sr_uuid_print(struct sr_uuid *, int); |
130 | void sr_checksum_print(u_int8_t *); |
131 | int sr_boot_assembly(struct sr_softc *); |
132 | int sr_already_assembled(struct sr_discipline *); |
133 | int sr_hotspare(struct sr_softc *, dev_t); |
134 | void sr_hotspare_rebuild(struct sr_discipline *); |
135 | int sr_rebuild_init(struct sr_discipline *, dev_t, int); |
136 | void sr_rebuild_start(void *); |
137 | void sr_rebuild_thread(void *); |
138 | void sr_rebuild(struct sr_discipline *); |
139 | void sr_roam_chunks(struct sr_discipline *); |
140 | int sr_chunk_in_use(struct sr_softc *, dev_t); |
141 | int sr_rw(struct sr_softc *, dev_t, char *, size_t, |
142 | daddr_t, long); |
143 | void sr_wu_done_callback(void *); |
144 | |
145 | /* don't include these on RAMDISK */ |
146 | #ifndef SMALL_KERNEL |
147 | void sr_sensors_refresh(void *); |
148 | int sr_sensors_create(struct sr_discipline *); |
149 | void sr_sensors_delete(struct sr_discipline *); |
150 | #endif |
151 | |
152 | /* metadata */ |
153 | int sr_meta_probe(struct sr_discipline *, dev_t *, int); |
154 | int sr_meta_attach(struct sr_discipline *, int, int); |
155 | int sr_meta_rw(struct sr_discipline *, dev_t, void *, long); |
156 | int sr_meta_clear(struct sr_discipline *); |
157 | void sr_meta_init(struct sr_discipline *, int, int); |
158 | void sr_meta_init_complete(struct sr_discipline *); |
159 | void sr_meta_opt_handler(struct sr_discipline *, |
160 | struct sr_meta_opt_hdr *); |
161 | |
162 | /* hotplug magic */ |
163 | void sr_disk_attach(struct disk *, int); |
164 | |
/*
 * NOTE(review): this file is a macro-expanded static-analyzer rendering;
 * each macro invocation appears fused with its expansion. Code is left
 * byte-identical; only review comments are added.
 */
/*
 * Registration record for a disk hotplug callback: sh_hotplug is invoked
 * for the registered discipline (sh_sd) on disk attach/detach. Records
 * live on the sr_hotplug_callbacks SLIST declared below.
 */
165 | struct sr_hotplug_list { |
166 | void (*sh_hotplug)(struct sr_discipline *, |
167 | struct disk *, int); |
168 | struct sr_discipline *sh_sd; |
169 | |
170 | SLIST_ENTRY(sr_hotplug_list)struct { struct sr_hotplug_list *sle_next; } shl_link; |
171 | }; |
172 | SLIST_HEAD(sr_hotplug_list_head, sr_hotplug_list)struct sr_hotplug_list_head { struct sr_hotplug_list *slh_first ; }; |
173 | |
174 | struct sr_hotplug_list_head sr_hotplug_callbacks; |
175 | extern void (*softraid_disk_attach)(struct disk *, int); |
176 | |
177 | /* scsi glue */ |
178 | struct scsi_adapter sr_switch = { |
179 | sr_scsi_cmd, NULL((void *)0), sr_scsi_probe, NULL((void *)0), sr_scsi_ioctl |
180 | }; |
181 | |
182 | /* native metadata format */ |
183 | int sr_meta_native_bootprobe(struct sr_softc *, dev_t, |
184 | struct sr_boot_chunk_head *); |
185 | #define SR_META_NOTCLAIMED(0) (0) |
186 | #define SR_META_CLAIMED(1) (1) |
187 | int sr_meta_native_probe(struct sr_softc *, |
188 | struct sr_chunk *); |
189 | int sr_meta_native_attach(struct sr_discipline *, int); |
190 | int sr_meta_native_write(struct sr_discipline *, dev_t, |
191 | struct sr_metadata *,void *); |
192 | |
193 | #ifdef SR_DEBUG |
194 | void sr_meta_print(struct sr_metadata *); |
195 | #else |
196 | #define sr_meta_print(m) |
197 | #endif |
198 | |
/*
 * Table of metadata drivers, indexed by sd_meta_type. Entry 0 is the
 * native softraid format (probe/attach/read/write at SR_META_OFFSET,
 * SR_META_SIZE blocks); the zeroed entry terminates the table for the
 * smd[i].smd_probe scan in sr_meta_probe(). Drivers are stateless:
 * all state lives in the discipline/chunk structures passed in.
 */
199 | /* the metadata driver should remain stateless */ |
200 | struct sr_meta_driver { |
201 | daddr_t smd_offset; /* metadata location */ |
202 | u_int32_t smd_size; /* size of metadata */ |
203 | |
204 | int (*smd_probe)(struct sr_softc *, |
205 | struct sr_chunk *); |
206 | int (*smd_attach)(struct sr_discipline *, int); |
207 | int (*smd_detach)(struct sr_discipline *); |
208 | int (*smd_read)(struct sr_discipline *, dev_t, |
209 | struct sr_metadata *, void *); |
210 | int (*smd_write)(struct sr_discipline *, dev_t, |
211 | struct sr_metadata *, void *); |
212 | int (*smd_validate)(struct sr_discipline *, |
213 | struct sr_metadata *, void *); |
214 | } smd[] = { |
215 | { SR_META_OFFSET16, SR_META_SIZE64 * DEV_BSIZE(1 << 9), |
216 | sr_meta_native_probe, sr_meta_native_attach, NULL((void *)0), |
217 | sr_meta_native_read, sr_meta_native_write, NULL((void *)0) }, |
218 | { 0, 0, NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0) } |
219 | }; |
220 | |
/*
 * sr_meta_attach: allocate the in-memory metadata copies for discipline
 * `sd` (plus a foreign-format buffer when sd_meta_type is not native),
 * build the sv_chunks[] index over the chunk SLIST, call the metadata
 * driver's smd_attach hook, then re-sort the chunk list by on-disk
 * scm_chunk_id (insertion sort into a fresh SLIST) and rebuild the index.
 * Returns 0 on success, 1 on failure; on failure the caller's unwind
 * path frees sd_meta (see the "unwind frees sd_meta" comment below).
 * NOTE(review): the mallocarray() for sv_chunks uses M_WAITOK and its
 * result is not checked — consistent with M_WAITOK never returning NULL.
 */
221 | int |
222 | sr_meta_attach(struct sr_discipline *sd, int chunk_no, int force) |
223 | { |
224 | struct sr_softc *sc = sd->sd_sc; |
225 | struct sr_chunk_head *cl; |
226 | struct sr_chunk *ch_entry, *chunk1, *chunk2; |
227 | int rv = 1, i = 0; |
228 | |
229 | DNPRINTF(SR_D_META, "%s: sr_meta_attach(%d)\n", DEVNAME(sc), chunk_no); |
230 | |
231 | /* in memory copy of metadata */ |
232 | sd->sd_meta = malloc(SR_META_SIZE64 * DEV_BSIZE(1 << 9), M_DEVBUF2, |
233 | M_ZERO0x0008 | M_NOWAIT0x0002); |
234 | if (!sd->sd_meta) { |
235 | sr_error(sc, "could not allocate memory for metadata"); |
236 | goto bad; |
237 | } |
238 | |
239 | if (sd->sd_meta_type != SR_META_F_NATIVE0) { |
240 | /* in memory copy of foreign metadata */ |
241 | sd->sd_meta_foreign = malloc(smd[sd->sd_meta_type].smd_size, |
242 | M_DEVBUF2, M_ZERO0x0008 | M_NOWAIT0x0002); |
243 | if (!sd->sd_meta_foreign) { |
244 | /* unwind frees sd_meta */ |
245 | sr_error(sc, "could not allocate memory for foreign " |
246 | "metadata"); |
247 | goto bad; |
248 | } |
249 | } |
250 | |
251 | /* we have a valid list now create an array index */ |
252 | cl = &sd->sd_vol.sv_chunk_list; |
253 | sd->sd_vol.sv_chunks = mallocarray(chunk_no, sizeof(struct sr_chunk *), |
254 | M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
255 | |
256 | /* fill out chunk array */ |
257 | i = 0; |
258 | SLIST_FOREACH(ch_entry, cl, src_link)for((ch_entry) = ((cl)->slh_first); (ch_entry) != ((void * )0); (ch_entry) = ((ch_entry)->src_link.sle_next)) |
259 | sd->sd_vol.sv_chunks[i++] = ch_entry; |
260 | |
261 | /* attach metadata */ |
262 | if (smd[sd->sd_meta_type].smd_attach(sd, force)) |
263 | goto bad; |
264 | |
265 | /* Force chunks into correct order now that metadata is attached. */ |
266 | SLIST_INIT(cl){ ((cl)->slh_first) = ((void *)0); }; |
267 | for (i = 0; i < chunk_no; i++) { |
268 | ch_entry = sd->sd_vol.sv_chunks[i]; |
269 | chunk2 = NULL((void *)0); |
270 | SLIST_FOREACH(chunk1, cl, src_link)for((chunk1) = ((cl)->slh_first); (chunk1) != ((void *)0); (chunk1) = ((chunk1)->src_link.sle_next)) { |
271 | if (chunk1->src_meta.scmi_scm_invariant.scm_chunk_id > |
272 | ch_entry->src_meta.scmi_scm_invariant.scm_chunk_id) |
273 | break; |
274 | chunk2 = chunk1; |
275 | } |
276 | if (chunk2 == NULL((void *)0)) |
277 | SLIST_INSERT_HEAD(cl, ch_entry, src_link)do { (ch_entry)->src_link.sle_next = (cl)->slh_first; ( cl)->slh_first = (ch_entry); } while (0); |
278 | else |
279 | SLIST_INSERT_AFTER(chunk2, ch_entry, src_link)do { (ch_entry)->src_link.sle_next = (chunk2)->src_link .sle_next; (chunk2)->src_link.sle_next = (ch_entry); } while (0); |
280 | } |
281 | i = 0; |
282 | SLIST_FOREACH(ch_entry, cl, src_link)for((ch_entry) = ((cl)->slh_first); (ch_entry) != ((void * )0); (ch_entry) = ((ch_entry)->src_link.sle_next)) |
283 | sd->sd_vol.sv_chunks[i++] = ch_entry; |
284 | |
285 | rv = 0; |
286 | bad: |
287 | return (rv); |
288 | } |
289 | |
/*
 * sr_meta_probe: build the chunk list for the no_chunk devices in dt[],
 * opening each device's block vnode, and probe every metadata driver in
 * smd[] against each chunk. Returns the common metadata format found on
 * all chunks, or SR_META_F_INVALID if none found, formats disagree, or
 * any open fails. Chunks given as NODEV are added but marked offline and
 * skipped. NOTE(review): devices are intentionally left open (see XXX
 * below); the unwind path does not close already-opened vnodes here —
 * presumably the caller's unwind handles that; confirm against callers.
 */
290 | int |
291 | sr_meta_probe(struct sr_discipline *sd, dev_t *dt, int no_chunk) |
292 | { |
293 | struct sr_softc *sc = sd->sd_sc; |
294 | struct vnode *vn; |
295 | struct sr_chunk *ch_entry, *ch_prev = NULL((void *)0); |
296 | struct sr_chunk_head *cl; |
297 | char devname[32]; |
298 | int i, d, type, found, prevf, error; |
299 | dev_t dev; |
300 | |
301 | DNPRINTF(SR_D_META, "%s: sr_meta_probe(%d)\n", DEVNAME(sc), no_chunk); |
302 | |
303 | if (no_chunk == 0) |
304 | goto unwind; |
305 | |
306 | cl = &sd->sd_vol.sv_chunk_list; |
307 | |
308 | for (d = 0, prevf = SR_META_F_INVALID-1; d < no_chunk; d++) { |
309 | ch_entry = malloc(sizeof(struct sr_chunk), M_DEVBUF2, |
310 | M_WAITOK0x0001 | M_ZERO0x0008); |
311 | /* keep disks in user supplied order */ |
312 | if (ch_prev) |
313 | SLIST_INSERT_AFTER(ch_prev, ch_entry, src_link)do { (ch_entry)->src_link.sle_next = (ch_prev)->src_link .sle_next; (ch_prev)->src_link.sle_next = (ch_entry); } while (0); |
314 | else |
315 | SLIST_INSERT_HEAD(cl, ch_entry, src_link)do { (ch_entry)->src_link.sle_next = (cl)->slh_first; ( cl)->slh_first = (ch_entry); } while (0); |
316 | ch_prev = ch_entry; |
317 | dev = dt[d]; |
318 | ch_entry->src_dev_mm = dev; |
319 | |
320 | if (dev == NODEV(dev_t)(-1)) { |
321 | ch_entry->src_meta.scm_status = BIOC_SDOFFLINE0x01; |
322 | continue; |
323 | } else { |
324 | sr_meta_getdevname(sc, dev, devname, sizeof(devname)); |
325 | if (bdevvp(dev, &vn)) { |
326 | sr_error(sc, "sr_meta_probe: cannot allocate " |
327 | "vnode"); |
328 | goto unwind; |
329 | } |
330 | |
331 | /* |
332 | * XXX leaving dev open for now; move this to attach |
333 | * and figure out the open/close dance for unwind. |
334 | */ |
335 | error = VOP_OPEN(vn, FREAD0x0001 | FWRITE0x0002, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
336 | if (error) { |
337 | DNPRINTF(SR_D_META,"%s: sr_meta_probe can't " |
338 | "open %s\n", DEVNAME(sc), devname); |
339 | vput(vn); |
340 | goto unwind; |
341 | } |
342 | |
343 | strlcpy(ch_entry->src_devname, devname, |
344 | sizeof(ch_entry->src_devname)); |
345 | ch_entry->src_vn = vn; |
346 | } |
347 | |
348 | /* determine if this is a device we understand */ |
349 | for (i = 0, found = SR_META_F_INVALID-1; smd[i].smd_probe; i++) { |
350 | type = smd[i].smd_probe(sc, ch_entry); |
351 | if (type == SR_META_F_INVALID-1) |
352 | continue; |
353 | else { |
354 | found = type; |
355 | break; |
356 | } |
357 | } |
358 | |
359 | if (found == SR_META_F_INVALID-1) |
360 | goto unwind; |
361 | if (prevf == SR_META_F_INVALID-1) |
362 | prevf = found; |
363 | if (prevf != found) { |
364 | DNPRINTF(SR_D_META, "%s: prevf != found\n", |
365 | DEVNAME(sc)); |
366 | goto unwind; |
367 | } |
368 | } |
369 | |
370 | return (prevf); |
371 | unwind: |
372 | return (SR_META_F_INVALID-1); |
373 | } |
374 | |
/*
 * sr_meta_getdevname: format dev's name as "<driver><unit><partition>"
 * (e.g. driver name from findblkname(major), unit number, partition
 * letter 'a'+part) into buf. Silently returns without writing when buf
 * is NULL or the major number has no registered block device name.
 */
375 | void |
376 | sr_meta_getdevname(struct sr_softc *sc, dev_t dev, char *buf, int size) |
377 | { |
378 | int maj, unit, part; |
379 | char *name; |
380 | |
381 | DNPRINTF(SR_D_META, "%s: sr_meta_getdevname(%p, %d)\n", |
382 | DEVNAME(sc), buf, size); |
383 | |
384 | if (!buf) |
385 | return; |
386 | |
387 | maj = major(dev)(((unsigned)(dev) >> 8) & 0xff); |
388 | part = DISKPART(dev)(((unsigned)((dev) & 0xff) | (((dev) & 0xffff0000) >> 8)) % 16); |
389 | unit = DISKUNIT(dev)(((unsigned)((dev) & 0xff) | (((dev) & 0xffff0000) >> 8)) / 16); |
390 | |
391 | name = findblkname(maj); |
392 | if (name == NULL((void *)0)) |
393 | return; |
394 | |
395 | snprintf(buf, size, "%s%d%c", name, unit, part + 'a'); |
396 | } |
397 | |
/*
 * sr_rw: raw block I/O to device `dev` starting at `blkno`, bouncing
 * through a DMA-safe buffer in MAXPHYS-sized pieces. `flags` selects
 * direction (B_READ copies dma_buf->buf after I/O, B_WRITE copies
 * buf->dma_buf before). Builds a struct buf by hand and issues it via
 * VOP_STRATEGY/biowait. Returns 0 on success, 1 on any error.
 * NOTE(review): if bdevvp() fails, the `done:` path reads `vp` which
 * this function never initialized — verify bdevvp()'s contract on the
 * failure path (it may or may not write *vp); flagged upstream-style XXX.
 */
398 | int |
399 | sr_rw(struct sr_softc *sc, dev_t dev, char *buf, size_t size, daddr_t blkno, |
400 | long flags) |
401 | { |
402 | struct vnode *vp; |
403 | struct buf b; |
404 | size_t bufsize, dma_bufsize; |
405 | int rv = 1; |
406 | char *dma_buf; |
407 | |
408 | DNPRINTF(SR_D_MISC, "%s: sr_rw(0x%x, %p, %zu, %lld 0x%lx)\n", |
409 | DEVNAME(sc), dev, buf, size, (long long)blkno, flags); |
410 | |
411 | dma_bufsize = (size > MAXPHYS(64 * 1024)) ? MAXPHYS(64 * 1024) : size; |
412 | dma_buf = dma_alloc(dma_bufsize, PR_WAITOK0x0001); |
413 | |
414 | if (bdevvp(dev, &vp)) { |
415 | printf("%s: sr_rw: failed to allocate vnode\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
416 | goto done; |
417 | } |
418 | |
419 | while (size > 0) { |
420 | DNPRINTF(SR_D_MISC, "%s: dma_buf %p, size %zu, blkno %lld)\n", |
421 | DEVNAME(sc), dma_buf, size, (long long)blkno); |
422 | |
423 | bufsize = (size > MAXPHYS(64 * 1024)) ? MAXPHYS(64 * 1024) : size; |
424 | if (flags == B_WRITE0x00000000) |
425 | memcpy(dma_buf, buf, bufsize)__builtin_memcpy((dma_buf), (buf), (bufsize)); |
426 | |
427 | bzero(&b, sizeof(b))__builtin_bzero((&b), (sizeof(b))); |
428 | b.b_flags = flags | B_PHYS0x00002000; |
429 | b.b_proc = curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc; |
430 | b.b_dev = dev; |
431 | b.b_iodone = NULL((void *)0); |
432 | b.b_error = 0; |
433 | b.b_blkno = blkno; |
434 | b.b_data = dma_buf; |
435 | b.b_bcount = bufsize; |
436 | b.b_bufsize = bufsize; |
437 | b.b_resid = bufsize; |
438 | b.b_vp = vp; |
439 | |
440 | if ((b.b_flags & B_READ0x00008000) == 0) |
441 | vp->v_numoutput++; |
442 | |
443 | LIST_INIT(&b.b_dep)do { ((&b.b_dep)->lh_first) = ((void *)0); } while (0); |
444 | VOP_STRATEGY(vp, &b); |
445 | biowait(&b); |
446 | |
447 | if (b.b_flags & B_ERROR0x00000400) { |
448 | printf("%s: I/O error %d on dev 0x%x at block %llu\n", |
449 | DEVNAME(sc)((sc)->sc_dev.dv_xname), b.b_error, dev, b.b_blkno); |
450 | goto done; |
451 | } |
452 | |
453 | if (flags == B_READ0x00008000) |
454 | memcpy(buf, dma_buf, bufsize)__builtin_memcpy((buf), (dma_buf), (bufsize)); |
455 | |
456 | size -= bufsize; |
457 | buf += bufsize; |
458 | blkno += howmany(bufsize, DEV_BSIZE)(((bufsize) + (((1 << 9)) - 1)) / ((1 << 9))); |
459 | } |
460 | |
461 | rv = 0; |
462 | |
463 | done: |
464 | if (vp) |
465 | vput(vp); |
466 | |
467 | dma_free(dma_buf, dma_bufsize); |
468 | |
469 | return (rv); |
470 | } |
471 | |
/*
 * sr_meta_rw: read or write the native metadata area (SR_META_SIZE
 * blocks at SR_META_OFFSET) on device `dev` via sr_rw(). `md` is the
 * in-memory metadata buffer; `flags` is the B_READ/B_WRITE direction
 * passed straight through. Returns 0 on success, 1 on NULL md or I/O
 * failure.
 */
472 | int |
473 | sr_meta_rw(struct sr_discipline *sd, dev_t dev, void *md, long flags) |
474 | { |
475 | int rv = 1; |
476 | |
477 | DNPRINTF(SR_D_META, "%s: sr_meta_rw(0x%x, %p, 0x%lx)\n", |
478 | DEVNAME(sd->sd_sc), dev, md, flags); |
479 | |
480 | if (md == NULL((void *)0)) { |
481 | printf("%s: sr_meta_rw: invalid metadata pointer\n", |
482 | DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname)); |
483 | goto done; |
484 | } |
485 | |
486 | rv = sr_rw(sd->sd_sc, dev, md, SR_META_SIZE64 * DEV_BSIZE(1 << 9), |
487 | SR_META_OFFSET16, flags); |
488 | |
489 | done: |
490 | return (rv); |
491 | } |
492 | |
/*
 * sr_meta_clear: zero the on-disk native metadata of every chunk in the
 * discipline by writing a zeroed scratch buffer, then zero each chunk's
 * in-memory src_meta and the discipline's sd_meta. Refuses to operate
 * on non-native (foreign) metadata. Returns 0 on success, 1 on refusal.
 * NOTE(review): per-chunk write failures increment rv but rv is
 * unconditionally reset to 0 before return — individual clear failures
 * are logged (DNPRINTF) yet not reported to the caller.
 */
493 | int |
494 | sr_meta_clear(struct sr_discipline *sd) |
495 | { |
496 | struct sr_softc *sc = sd->sd_sc; |
497 | struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list; |
498 | struct sr_chunk *ch_entry; |
499 | void *m; |
500 | int rv = 1; |
501 | |
502 | DNPRINTF(SR_D_META, "%s: sr_meta_clear\n", DEVNAME(sc)); |
503 | |
504 | if (sd->sd_meta_type != SR_META_F_NATIVE0) { |
505 | sr_error(sc, "cannot clear foreign metadata"); |
506 | goto done; |
507 | } |
508 | |
509 | m = malloc(SR_META_SIZE64 * DEV_BSIZE(1 << 9), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
510 | SLIST_FOREACH(ch_entry, cl, src_link)for((ch_entry) = ((cl)->slh_first); (ch_entry) != ((void * )0); (ch_entry) = ((ch_entry)->src_link.sle_next)) { |
511 | if (sr_meta_native_write(sd, ch_entry->src_dev_mm, m, NULL((void *)0))) { |
512 | /* XXX mark disk offline */ |
513 | DNPRINTF(SR_D_META, "%s: sr_meta_clear failed to " |
514 | "clear %s\n", DEVNAME(sc), ch_entry->src_devname); |
515 | rv++; |
516 | continue; |
517 | } |
518 | bzero(&ch_entry->src_meta, sizeof(ch_entry->src_meta))__builtin_bzero((&ch_entry->src_meta), (sizeof(ch_entry ->src_meta))); |
519 | } |
520 | |
521 | bzero(sd->sd_meta, SR_META_SIZE * DEV_BSIZE)__builtin_bzero((sd->sd_meta), (64 * (1 << 9))); |
522 | |
523 | free(m, M_DEVBUF2, SR_META_SIZE64 * DEV_BSIZE(1 << 9)); |
524 | rv = 0; |
525 | done: |
526 | return (rv); |
527 | } |
528 | |
/*
 * sr_meta_init: populate fresh volume metadata (magic, version, level,
 * chunk count, new UUID, data offset) in sd->sd_meta and initialise each
 * chunk's metadata (size, id, status, volid, devname, copied volume
 * UUID, checksum). Tracks min/max chunk sizes across the list, records
 * the largest chunk sector size as the volume secsize, and coerces every
 * chunk to the minimum size so all chunks present equal capacity.
 * No return value; silently does nothing when sd->sd_meta is NULL.
 */
529 | void |
530 | sr_meta_init(struct sr_discipline *sd, int level, int no_chunk) |
531 | { |
532 | struct sr_softc *sc = sd->sd_sc; |
533 | struct sr_metadata *sm = sd->sd_meta; |
534 | struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list; |
535 | struct sr_meta_chunk *scm; |
536 | struct sr_chunk *chunk; |
537 | int cid = 0; |
538 | u_int64_t max_chunk_sz = 0, min_chunk_sz = 0; |
539 | u_int32_t secsize = DEV_BSIZE(1 << 9); |
540 | |
541 | DNPRINTF(SR_D_META, "%s: sr_meta_init\n", DEVNAME(sc)); |
542 | |
543 | if (!sm) |
544 | return; |
545 | |
546 | /* Initialise volume metadata. */ |
547 | sm->ssdi_sdd_invariant.ssd_magic = SR_MAGIC0x4d4152436372616dLLU; |
548 | sm->ssdi_sdd_invariant.ssd_version = SR_META_VERSION6; |
549 | sm->ssdi_sdd_invariant.ssd_vol_flags = sd->sd_meta_flags; |
550 | sm->ssdi_sdd_invariant.ssd_volid = 0; |
551 | sm->ssdi_sdd_invariant.ssd_chunk_no = no_chunk; |
552 | sm->ssdi_sdd_invariant.ssd_level = level; |
553 | |
554 | sm->ssd_data_blkno = SR_DATA_OFFSET(16 + (64 + (320 + 128))); |
555 | sm->ssd_ondisk = 0; |
556 | |
557 | sr_uuid_generate(&sm->ssdi_sdd_invariant.ssd_uuid); |
558 | |
559 | /* Initialise chunk metadata and get min/max chunk sizes & secsize. */ |
560 | SLIST_FOREACH(chunk, cl, src_link)for((chunk) = ((cl)->slh_first); (chunk) != ((void *)0); ( chunk) = ((chunk)->src_link.sle_next)) { |
561 | scm = &chunk->src_meta; |
562 | scm->scmi_scm_invariant.scm_size = chunk->src_size; |
563 | scm->scmi_scm_invariant.scm_chunk_id = cid++; |
564 | scm->scm_status = BIOC_SDONLINE0x00; |
565 | scm->scmi_scm_invariant.scm_volid = 0; |
566 | strlcpy(scm->scmi_scm_invariant.scm_devname, chunk->src_devname, |
567 | sizeof(scm->scmi_scm_invariant.scm_devname)); |
568 | memcpy(&scm->scmi.scm_uuid, &sm->ssdi.ssd_uuid,__builtin_memcpy((&scm->_scm_invariant.scm_uuid), (& sm->_sdd_invariant.ssd_uuid), (sizeof(scm->_scm_invariant .scm_uuid))) |
569 | sizeof(scm->scmi.scm_uuid))__builtin_memcpy((&scm->_scm_invariant.scm_uuid), (& sm->_sdd_invariant.ssd_uuid), (sizeof(scm->_scm_invariant .scm_uuid))); |
570 | sr_checksum(sc, scm, &scm->scm_checksum, |
571 | sizeof(scm->scm_checksum)); |
572 | |
573 | if (min_chunk_sz == 0) |
574 | min_chunk_sz = scm->scmi_scm_invariant.scm_size; |
575 | if (chunk->src_secsize > secsize) |
576 | secsize = chunk->src_secsize; |
577 | min_chunk_sz = MIN(min_chunk_sz, scm->scmi.scm_size)(((min_chunk_sz)<(scm->_scm_invariant.scm_size))?(min_chunk_sz ):(scm->_scm_invariant.scm_size)); |
578 | max_chunk_sz = MAX(max_chunk_sz, scm->scmi.scm_size)(((max_chunk_sz)>(scm->_scm_invariant.scm_size))?(max_chunk_sz ):(scm->_scm_invariant.scm_size)); |
579 | } |
580 | |
581 | sm->ssdi_sdd_invariant.ssd_secsize = secsize; |
582 | |
583 | /* Equalize chunk sizes. */ |
584 | SLIST_FOREACH(chunk, cl, src_link)for((chunk) = ((cl)->slh_first); (chunk) != ((void *)0); ( chunk) = ((chunk)->src_link.sle_next)) |
585 | chunk->src_meta.scmi_scm_invariant.scm_coerced_size = min_chunk_sz; |
586 | |
587 | sd->sd_vol.sv_chunk_minsz = min_chunk_sz; |
588 | sd->sd_vol.sv_chunk_maxsz = max_chunk_sz; |
589 | } |
590 | |
/*
 * sr_meta_init_complete: fill in the SCSI-inquiry-style identity strings
 * of the volume metadata — vendor "OPENBSD", product "SR <discipline
 * name>", and the zero-padded metadata version as the revision. Called
 * after sr_meta_init() once sd_name is known.
 */
591 | void |
592 | sr_meta_init_complete(struct sr_discipline *sd) |
593 | { |
594 | #ifdef SR_DEBUG |
595 | struct sr_softc *sc = sd->sd_sc; |
596 | #endif |
597 | struct sr_metadata *sm = sd->sd_meta; |
598 | |
599 | DNPRINTF(SR_D_META, "%s: sr_meta_complete\n", DEVNAME(sc)); |
600 | |
601 | /* Complete initialisation of volume metadata. */ |
602 | strlcpy(sm->ssdi_sdd_invariant.ssd_vendor, "OPENBSD", sizeof(sm->ssdi_sdd_invariant.ssd_vendor)); |
603 | snprintf(sm->ssdi_sdd_invariant.ssd_product, sizeof(sm->ssdi_sdd_invariant.ssd_product), |
604 | "SR %s", sd->sd_name); |
605 | snprintf(sm->ssdi_sdd_invariant.ssd_revision, sizeof(sm->ssdi_sdd_invariant.ssd_revision), |
606 | "%03d", sm->ssdi_sdd_invariant.ssd_version); |
607 | } |
608 | |
/*
 * sr_meta_opt_handler: default optional-metadata handler — accepts only
 * SR_OPT_BOOT entries and panics on anything else. Disciplines that
 * support other optional metadata types are expected to override this
 * (it is installed via the function-pointer table; see discipline init).
 */
609 | void |
610 | sr_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om) |
611 | { |
612 | if (om->som_type != SR_OPT_BOOT0x02) |
613 | panic("unknown optional metadata type"); |
614 | } |
615 | |
/*
 * sr_meta_save_callback: deferred-work entry point that flushes dirty
 * metadata for the discipline passed as the opaque argument. Runs at
 * splbio(); logs on failure and clears sd_must_flush either way.
 */
616 | void |
617 | sr_meta_save_callback(void *xsd) |
618 | { |
619 | struct sr_discipline *sd = xsd; |
620 | int s; |
621 | |
622 | s = splbio()splraise(0x6); |
623 | |
624 | if (sr_meta_save(sd, SR_META_DIRTY0x1)) |
625 | printf("%s: save metadata failed\n", DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname)); |
626 | |
627 | sd->sd_must_flush = 0; |
628 | splx(s)spllower(s); |
629 | } |
630 | |
/*
 * sr_meta_save: serialise the discipline's in-memory metadata into a
 * scratch buffer — volume header, then one sr_meta_chunk per chunk,
 * then the optional-metadata items (each re-checksummed) — and write it
 * to every online chunk via the metadata driver's smd_write, patching
 * ssd_chunk_id and recomputing the invariant checksum per chunk. A
 * failed write marks that chunk offline and restarts the whole pass
 * (which bumps the on-disk generation counter ssd_ondisk again).
 * Finishes with a discipline cache sync when sd_scsi_sync is set.
 * Returns 0 on success, 1 when sd_meta is NULL or the scratch
 * allocation fails.
 * NOTE(review): the `goto restart` loop has no retry bound; it
 * terminates only because failing chunks are marked offline and then
 * skipped on the next pass.
 */
631 | int |
632 | sr_meta_save(struct sr_discipline *sd, u_int32_t flags) |
633 | { |
634 | struct sr_softc *sc = sd->sd_sc; |
635 | struct sr_metadata *sm = sd->sd_meta, *m; |
636 | struct sr_meta_driver *s; |
637 | struct sr_chunk *src; |
638 | struct sr_meta_chunk *cm; |
639 | struct sr_workunit wu; |
640 | struct sr_meta_opt_hdr *omh; |
641 | struct sr_meta_opt_item *omi; |
642 | int i; |
643 | |
644 | DNPRINTF(SR_D_META, "%s: sr_meta_save %s\n", |
645 | DEVNAME(sc), sd->sd_meta->ssd_devname); |
646 | |
647 | if (!sm) { |
648 | printf("%s: no in memory copy of metadata\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
649 | goto bad; |
650 | } |
651 | |
652 | /* meta scratchpad */ |
653 | s = &smd[sd->sd_meta_type]; |
654 | m = malloc(SR_META_SIZE64 * DEV_BSIZE(1 << 9), M_DEVBUF2, M_ZERO0x0008 | M_NOWAIT0x0002); |
655 | if (!m) { |
656 | printf("%s: could not allocate metadata scratch area\n", |
657 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
658 | goto bad; |
659 | } |
660 | |
661 | /* from here on out metadata is updated */ |
662 | restart: |
663 | sm->ssd_ondisk++; |
664 | sm->ssd_meta_flags = flags; |
665 | memcpy(m, sm, sizeof(*m))__builtin_memcpy((m), (sm), (sizeof(*m))); |
666 | |
667 | /* Chunk metadata. */ |
668 | cm = (struct sr_meta_chunk *)(m + 1); |
669 | for (i = 0; i < sm->ssdi_sdd_invariant.ssd_chunk_no; i++) { |
670 | src = sd->sd_vol.sv_chunks[i]; |
671 | memcpy(cm, &src->src_meta, sizeof(*cm))__builtin_memcpy((cm), (&src->src_meta), (sizeof(*cm)) ); |
672 | cm++; |
673 | } |
674 | |
675 | /* Optional metadata. */ |
676 | omh = (struct sr_meta_opt_hdr *)(cm); |
677 | SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link)for((omi) = ((&sd->sd_meta_opt)->slh_first); (omi) != ((void *)0); (omi) = ((omi)->omi_link.sle_next)) { |
678 | DNPRINTF(SR_D_META, "%s: saving optional metadata type %u with " |
679 | "length %u\n", DEVNAME(sc), omi->omi_som->som_type, |
680 | omi->omi_som->som_length); |
681 | bzero(&omi->omi_som->som_checksum, MD5_DIGEST_LENGTH)__builtin_bzero((&omi->omi_som->som_checksum), (16) ); |
682 | sr_checksum(sc, omi->omi_som, &omi->omi_som->som_checksum, |
683 | omi->omi_som->som_length); |
684 | memcpy(omh, omi->omi_som, omi->omi_som->som_length)__builtin_memcpy((omh), (omi->omi_som), (omi->omi_som-> som_length)); |
685 | omh = (struct sr_meta_opt_hdr *)((u_int8_t *)omh + |
686 | omi->omi_som->som_length); |
687 | } |
688 | |
689 | for (i = 0; i < sm->ssdi_sdd_invariant.ssd_chunk_no; i++) { |
690 | src = sd->sd_vol.sv_chunks[i]; |
691 | |
692 | /* skip disks that are offline */ |
693 | if (src->src_meta.scm_status == BIOC_SDOFFLINE0x01) |
694 | continue; |
695 | |
696 | /* calculate metadata checksum for correct chunk */ |
697 | m->ssdi_sdd_invariant.ssd_chunk_id = i; |
698 | sr_checksum(sc, m, &m->ssd_checksum, |
699 | sizeof(struct sr_meta_invariant)); |
700 | |
701 | #ifdef SR_DEBUG |
702 | DNPRINTF(SR_D_META, "%s: sr_meta_save %s: volid: %d " |
703 | "chunkid: %d checksum: ", |
704 | DEVNAME(sc), src->src_meta.scmi.scm_devname, |
705 | m->ssdi.ssd_volid, m->ssdi.ssd_chunk_id); |
706 | |
707 | if (sr_debug & SR_D_META) |
708 | sr_checksum_print((u_int8_t *)&m->ssd_checksum); |
709 | DNPRINTF(SR_D_META, "\n"); |
710 | sr_meta_print(m); |
711 | #endif |
712 | |
713 | /* translate and write to disk */ |
714 | if (s->smd_write(sd, src->src_dev_mm, m, NULL((void *)0) /* XXX */)) { |
715 | printf("%s: could not write metadata to %s\n", |
716 | DEVNAME(sc)((sc)->sc_dev.dv_xname), src->src_devname); |
717 | /* restart the meta write */ |
718 | src->src_meta.scm_status = BIOC_SDOFFLINE0x01; |
719 | /* XXX recalculate volume status */ |
720 | goto restart; |
721 | } |
722 | } |
723 | |
724 | /* not all disciplines have sync */ |
725 | if (sd->sd_scsi_sync) { |
726 | bzero(&wu, sizeof(wu))__builtin_bzero((&wu), (sizeof(wu))); |
727 | wu.swu_flags |= SR_WUF_FAKE(1<<6); |
728 | wu.swu_dis = sd; |
729 | sd->sd_scsi_sync(&wu); |
730 | } |
731 | free(m, M_DEVBUF2, SR_META_SIZE64 * DEV_BSIZE(1 << 9)); |
732 | return (0); |
733 | bad: |
734 | return (1); |
735 | } |
736 | |
/*
 * sr_meta_read: read and validate metadata from every chunk of the
 * discipline. For each online chunk the metadata driver's smd_read
 * fills a scratch buffer; chunks that are offline, fail to read, or
 * lack SR_MAGIC are skipped (the per-chunk cursor `cp` is still
 * advanced so chunk metadata stays aligned with chunk index). The first
 * valid copy becomes sd->sd_meta and its optional metadata is loaded.
 * Returns the number of chunks with good metadata, or -1 when
 * sr_meta_validate() rejects a copy.
 * NOTE(review): the validate-failure path `goto done` jumps past the
 * free() calls, leaking the sm scratch buffer (and fm for non-native
 * formats) — matches the analyzer-era code; worth an upstream check.
 */
737 | int |
738 | sr_meta_read(struct sr_discipline *sd) |
739 | { |
740 | struct sr_softc *sc = sd->sd_sc; |
741 | struct sr_chunk_head *cl = &sd->sd_vol.sv_chunk_list; |
742 | struct sr_metadata *sm; |
743 | struct sr_chunk *ch_entry; |
744 | struct sr_meta_chunk *cp; |
745 | struct sr_meta_driver *s; |
746 | void *fm = NULL((void *)0); |
747 | int no_disk = 0, got_meta = 0; |
748 | |
749 | DNPRINTF(SR_D_META, "%s: sr_meta_read\n", DEVNAME(sc)); |
750 | |
751 | sm = malloc(SR_META_SIZE64 * DEV_BSIZE(1 << 9), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
752 | s = &smd[sd->sd_meta_type]; |
753 | if (sd->sd_meta_type != SR_META_F_NATIVE0) |
754 | fm = malloc(s->smd_size, M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
755 | |
756 | cp = (struct sr_meta_chunk *)(sm + 1); |
757 | SLIST_FOREACH(ch_entry, cl, src_link)for((ch_entry) = ((cl)->slh_first); (ch_entry) != ((void * )0); (ch_entry) = ((ch_entry)->src_link.sle_next)) { |
758 | /* skip disks that are offline */ |
759 | if (ch_entry->src_meta.scm_status == BIOC_SDOFFLINE0x01) { |
760 | DNPRINTF(SR_D_META, |
761 | "%s: %s chunk marked offline, spoofing status\n", |
762 | DEVNAME(sc), ch_entry->src_devname); |
763 | cp++; /* adjust chunk pointer to match failure */ |
764 | continue; |
765 | } else if (s->smd_read(sd, ch_entry->src_dev_mm, sm, fm)) { |
766 | /* read and translate */ |
767 | /* XXX mark chunk offline, elsewhere!! */ |
768 | ch_entry->src_meta.scm_status = BIOC_SDOFFLINE0x01; |
769 | cp++; /* adjust chunk pointer to match failure */ |
770 | DNPRINTF(SR_D_META, "%s: sr_meta_read failed\n", |
771 | DEVNAME(sc)); |
772 | continue; |
773 | } |
774 | |
775 | if (sm->ssdi_sdd_invariant.ssd_magic != SR_MAGIC0x4d4152436372616dLLU) { |
776 | DNPRINTF(SR_D_META, "%s: sr_meta_read !SR_MAGIC\n", |
777 | DEVNAME(sc)); |
778 | continue; |
779 | } |
780 | |
781 | /* validate metadata */ |
782 | if (sr_meta_validate(sd, ch_entry->src_dev_mm, sm, fm)) { |
783 | DNPRINTF(SR_D_META, "%s: invalid metadata\n", |
784 | DEVNAME(sc)); |
785 | no_disk = -1; |
786 | goto done; |
787 | } |
788 | |
789 | /* assume first chunk contains metadata */ |
790 | if (got_meta == 0) { |
791 | sr_meta_opt_load(sc, sm, &sd->sd_meta_opt); |
792 | memcpy(sd->sd_meta, sm, sizeof(*sd->sd_meta))__builtin_memcpy((sd->sd_meta), (sm), (sizeof(*sd->sd_meta ))); |
793 | got_meta = 1; |
794 | } |
795 | |
796 | memcpy(&ch_entry->src_meta, cp, sizeof(ch_entry->src_meta))__builtin_memcpy((&ch_entry->src_meta), (cp), (sizeof( ch_entry->src_meta))); |
797 | |
798 | no_disk++; |
799 | cp++; |
800 | } |
801 | |
802 | free(sm, M_DEVBUF2, SR_META_SIZE64 * DEV_BSIZE(1 << 9)); |
803 | free(fm, M_DEVBUF2, s->smd_size); |
804 | |
805 | done: |
806 | DNPRINTF(SR_D_META, "%s: sr_meta_read found %d parts\n", DEVNAME(sc), |
807 | no_disk); |
808 | return (no_disk); |
809 | } |
810 | |
811 | void |
812 | sr_meta_opt_load(struct sr_softc *sc, struct sr_metadata *sm, |
813 | struct sr_meta_opt_head *som) |
814 | { |
815 | struct sr_meta_opt_hdr *omh; |
816 | struct sr_meta_opt_item *omi; |
817 | u_int8_t checksum[MD5_DIGEST_LENGTH16]; |
818 | int i; |
819 | |
820 | /* Process optional metadata. */ |
821 | omh = (struct sr_meta_opt_hdr *)((u_int8_t *)(sm + 1) + |
822 | sizeof(struct sr_meta_chunk) * sm->ssdi_sdd_invariant.ssd_chunk_no); |
823 | for (i = 0; i < sm->ssdi_sdd_invariant.ssd_opt_no; i++) { |
824 | |
825 | omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF2, |
826 | M_WAITOK0x0001 | M_ZERO0x0008); |
827 | SLIST_INSERT_HEAD(som, omi, omi_link)do { (omi)->omi_link.sle_next = (som)->slh_first; (som) ->slh_first = (omi); } while (0); |
828 | |
829 | if (omh->som_length == 0) { |
830 | |
831 | /* Load old fixed length optional metadata. */ |
832 | DNPRINTF(SR_D_META, "%s: old optional metadata of type " |
833 | "%u\n", DEVNAME(sc), omh->som_type); |
834 | |
835 | /* Validate checksum. */ |
836 | sr_checksum(sc, (void *)omh, &checksum, |
837 | SR_OLD_META_OPT_SIZE2480 - MD5_DIGEST_LENGTH16); |
838 | if (bcmp(&checksum, (void *)omh + SR_OLD_META_OPT_MD5(2480 - 16), |
839 | sizeof(checksum))) |
840 | panic("%s: invalid optional metadata checksum", |
841 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
842 | |
843 | /* Determine correct length. */ |
844 | switch (omh->som_type) { |
845 | case SR_OPT_CRYPTO0x01: |
846 | omh->som_length = sizeof(struct sr_meta_crypto); |
847 | break; |
848 | case SR_OPT_BOOT0x02: |
849 | omh->som_length = sizeof(struct sr_meta_boot); |
850 | break; |
851 | case SR_OPT_KEYDISK0x03: |
852 | omh->som_length = |
853 | sizeof(struct sr_meta_keydisk); |
854 | break; |
855 | default: |
856 | panic("unknown old optional metadata type %u", |
857 | omh->som_type); |
858 | } |
859 | |
860 | omi->omi_som = malloc(omh->som_length, M_DEVBUF2, |
861 | M_WAITOK0x0001 | M_ZERO0x0008); |
862 | memcpy((u_int8_t *)omi->omi_som + sizeof(*omi->omi_som),__builtin_memcpy(((u_int8_t *)omi->omi_som + sizeof(*omi-> omi_som)), ((u_int8_t *)omh + 8), (omh->som_length - sizeof (*omi->omi_som))) |
863 | (u_int8_t *)omh + SR_OLD_META_OPT_OFFSET,__builtin_memcpy(((u_int8_t *)omi->omi_som + sizeof(*omi-> omi_som)), ((u_int8_t *)omh + 8), (omh->som_length - sizeof (*omi->omi_som))) |
864 | omh->som_length - sizeof(*omi->omi_som))__builtin_memcpy(((u_int8_t *)omi->omi_som + sizeof(*omi-> omi_som)), ((u_int8_t *)omh + 8), (omh->som_length - sizeof (*omi->omi_som))); |
865 | omi->omi_som->som_type = omh->som_type; |
866 | omi->omi_som->som_length = omh->som_length; |
867 | |
868 | omh = (struct sr_meta_opt_hdr *)((void *)omh + |
869 | SR_OLD_META_OPT_SIZE2480); |
870 | } else { |
871 | |
872 | /* Load variable length optional metadata. */ |
873 | DNPRINTF(SR_D_META, "%s: optional metadata of type %u, " |
874 | "length %u\n", DEVNAME(sc), omh->som_type, |
875 | omh->som_length); |
876 | omi->omi_som = malloc(omh->som_length, M_DEVBUF2, |
877 | M_WAITOK0x0001 | M_ZERO0x0008); |
878 | memcpy(omi->omi_som, omh, omh->som_length)__builtin_memcpy((omi->omi_som), (omh), (omh->som_length )); |
879 | |
880 | /* Validate checksum. */ |
881 | memcpy(&checksum, &omi->omi_som->som_checksum,__builtin_memcpy((&checksum), (&omi->omi_som->som_checksum ), (16)) |
882 | MD5_DIGEST_LENGTH)__builtin_memcpy((&checksum), (&omi->omi_som->som_checksum ), (16)); |
883 | bzero(&omi->omi_som->som_checksum, MD5_DIGEST_LENGTH)__builtin_bzero((&omi->omi_som->som_checksum), (16) ); |
884 | sr_checksum(sc, omi->omi_som, |
885 | &omi->omi_som->som_checksum, omh->som_length); |
886 | if (bcmp(&checksum, &omi->omi_som->som_checksum, |
887 | sizeof(checksum))) |
888 | panic("%s: invalid optional metadata checksum", |
889 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
890 | |
891 | omh = (struct sr_meta_opt_hdr *)((void *)omh + |
892 | omh->som_length); |
893 | } |
894 | } |
895 | } |
896 | |
897 | int |
898 | sr_meta_validate(struct sr_discipline *sd, dev_t dev, struct sr_metadata *sm, |
899 | void *fm) |
900 | { |
901 | struct sr_softc *sc = sd->sd_sc; |
902 | struct sr_meta_driver *s; |
903 | #ifdef SR_DEBUG |
904 | struct sr_meta_chunk *mc; |
905 | #endif |
906 | u_int8_t checksum[MD5_DIGEST_LENGTH16]; |
907 | char devname[32]; |
908 | int rv = 1; |
909 | |
910 | DNPRINTF(SR_D_META, "%s: sr_meta_validate(%p)\n", DEVNAME(sc), sm); |
911 | |
912 | sr_meta_getdevname(sc, dev, devname, sizeof(devname)); |
913 | |
914 | s = &smd[sd->sd_meta_type]; |
915 | if (sd->sd_meta_type != SR_META_F_NATIVE0) |
916 | if (s->smd_validate(sd, sm, fm)) { |
917 | sr_error(sc, "invalid foreign metadata"); |
918 | goto done; |
919 | } |
920 | |
921 | /* |
922 | * at this point all foreign metadata has been translated to the native |
923 | * format and will be treated just like the native format |
924 | */ |
925 | |
926 | if (sm->ssdi_sdd_invariant.ssd_magic != SR_MAGIC0x4d4152436372616dLLU) { |
927 | sr_error(sc, "not valid softraid metadata"); |
928 | goto done; |
929 | } |
930 | |
931 | /* Verify metadata checksum. */ |
932 | sr_checksum(sc, sm, &checksum, sizeof(struct sr_meta_invariant)); |
933 | if (bcmp(&checksum, &sm->ssd_checksum, sizeof(checksum))) { |
934 | sr_error(sc, "invalid metadata checksum"); |
935 | goto done; |
936 | } |
937 | |
938 | /* Handle changes between versions. */ |
939 | if (sm->ssdi_sdd_invariant.ssd_version == 3) { |
940 | |
941 | /* |
942 | * Version 3 - update metadata version and fix up data blkno |
943 | * value since this did not exist in version 3. |
944 | */ |
945 | if (sm->ssd_data_blkno == 0) |
946 | sm->ssd_data_blkno = SR_META_V3_DATA_OFFSET(16 + 64); |
947 | sm->ssdi_sdd_invariant.ssd_secsize = DEV_BSIZE(1 << 9); |
948 | |
949 | } else if (sm->ssdi_sdd_invariant.ssd_version == 4) { |
950 | |
951 | /* |
952 | * Version 4 - original metadata format did not store |
953 | * data blkno so fix this up if necessary. |
954 | */ |
955 | if (sm->ssd_data_blkno == 0) |
956 | sm->ssd_data_blkno = SR_DATA_OFFSET(16 + (64 + (320 + 128))); |
957 | sm->ssdi_sdd_invariant.ssd_secsize = DEV_BSIZE(1 << 9); |
958 | |
959 | } else if (sm->ssdi_sdd_invariant.ssd_version == 5) { |
960 | |
961 | /* |
962 | * Version 5 - variable length optional metadata. Migration |
963 | * from earlier fixed length optional metadata is handled |
964 | * in sr_meta_read(). |
965 | */ |
966 | sm->ssdi_sdd_invariant.ssd_secsize = DEV_BSIZE(1 << 9); |
967 | |
968 | } else if (sm->ssdi_sdd_invariant.ssd_version == SR_META_VERSION6) { |
969 | |
970 | /* |
971 | * Version 6 - store & report a sector size. |
972 | */ |
973 | |
974 | } else { |
975 | |
976 | sr_error(sc, "cannot read metadata version %u on %s, " |
977 | "expected version %u or earlier", |
978 | sm->ssdi_sdd_invariant.ssd_version, devname, SR_META_VERSION6); |
979 | goto done; |
980 | |
981 | } |
982 | |
983 | /* Update version number and revision string. */ |
984 | sm->ssdi_sdd_invariant.ssd_version = SR_META_VERSION6; |
985 | snprintf(sm->ssdi_sdd_invariant.ssd_revision, sizeof(sm->ssdi_sdd_invariant.ssd_revision), |
986 | "%03d", SR_META_VERSION6); |
987 | |
988 | #ifdef SR_DEBUG |
989 | /* warn if disk changed order */ |
990 | mc = (struct sr_meta_chunk *)(sm + 1); |
991 | if (strncmp(mc[sm->ssdi_sdd_invariant.ssd_chunk_id].scmi_scm_invariant.scm_devname, devname, |
992 | sizeof(mc[sm->ssdi_sdd_invariant.ssd_chunk_id].scmi_scm_invariant.scm_devname))) |
993 | DNPRINTF(SR_D_META, "%s: roaming device %s -> %s\n", |
994 | DEVNAME(sc), mc[sm->ssdi.ssd_chunk_id].scmi.scm_devname, |
995 | devname); |
996 | #endif |
997 | |
998 | /* we have meta data on disk */ |
999 | DNPRINTF(SR_D_META, "%s: sr_meta_validate valid metadata %s\n", |
1000 | DEVNAME(sc), devname); |
1001 | |
1002 | rv = 0; |
1003 | done: |
1004 | return (rv); |
1005 | } |
1006 | |
1007 | int |
1008 | sr_meta_native_bootprobe(struct sr_softc *sc, dev_t devno, |
1009 | struct sr_boot_chunk_head *bch) |
1010 | { |
1011 | struct vnode *vn; |
1012 | struct disklabel label; |
1013 | struct sr_metadata *md = NULL((void *)0); |
1014 | struct sr_discipline *fake_sd = NULL((void *)0); |
1015 | struct sr_boot_chunk *bc; |
1016 | char devname[32]; |
1017 | dev_t chrdev, rawdev; |
1018 | int error, i; |
1019 | int rv = SR_META_NOTCLAIMED(0); |
1020 | |
1021 | DNPRINTF(SR_D_META, "%s: sr_meta_native_bootprobe\n", DEVNAME(sc)); |
1022 | |
1023 | /* |
1024 | * Use character raw device to avoid SCSI complaints about missing |
1025 | * media on removable media devices. |
1026 | */ |
1027 | chrdev = blktochr(devno); |
1028 | rawdev = MAKEDISKDEV(major(chrdev), DISKUNIT(devno), RAW_PART)(((dev_t)((((((((unsigned)(chrdev) >> 8) & 0xff))) & 0xff) << 8) | (((((((((unsigned)((devno) & 0xff) | (((devno) & 0xffff0000) >> 8)) / 16))) * 16) + ((2 )))) & 0xff) | ((((((((((unsigned)((devno) & 0xff) | ( ((devno) & 0xffff0000) >> 8)) / 16))) * 16) + ((2)) )) & 0xffff00) << 8)))); |
1029 | if (cdevvp(rawdev, &vn)) { |
1030 | sr_error(sc, "sr_meta_native_bootprobe: cannot allocate vnode"); |
1031 | goto done; |
1032 | } |
1033 | |
1034 | /* open device */ |
1035 | error = VOP_OPEN(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1036 | if (error) { |
1037 | DNPRINTF(SR_D_META, "%s: sr_meta_native_bootprobe open " |
1038 | "failed\n", DEVNAME(sc)); |
1039 | vput(vn); |
1040 | goto done; |
1041 | } |
1042 | |
1043 | /* get disklabel */ |
1044 | error = VOP_IOCTL(vn, DIOCGDINFO((unsigned long)0x40000000 | ((sizeof(struct disklabel) & 0x1fff) << 16) | ((('d')) << 8) | ((101))), (caddr_t)&label, FREAD0x0001, NOCRED((struct ucred *)-1), |
1045 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1046 | if (error) { |
1047 | DNPRINTF(SR_D_META, "%s: sr_meta_native_bootprobe ioctl " |
1048 | "failed\n", DEVNAME(sc)); |
1049 | VOP_CLOSE(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1050 | vput(vn); |
1051 | goto done; |
1052 | } |
1053 | |
1054 | /* we are done, close device */ |
1055 | error = VOP_CLOSE(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1056 | if (error) { |
1057 | DNPRINTF(SR_D_META, "%s: sr_meta_native_bootprobe close " |
1058 | "failed\n", DEVNAME(sc)); |
1059 | vput(vn); |
1060 | goto done; |
1061 | } |
1062 | vput(vn); |
1063 | |
1064 | md = malloc(SR_META_SIZE64 * DEV_BSIZE(1 << 9), M_DEVBUF2, M_ZERO0x0008 | M_NOWAIT0x0002); |
1065 | if (md == NULL((void *)0)) { |
1066 | sr_error(sc, "not enough memory for metadata buffer"); |
1067 | goto done; |
1068 | } |
1069 | |
1070 | /* create fake sd to use utility functions */ |
1071 | fake_sd = malloc(sizeof(struct sr_discipline), M_DEVBUF2, |
1072 | M_ZERO0x0008 | M_NOWAIT0x0002); |
1073 | if (fake_sd == NULL((void *)0)) { |
1074 | sr_error(sc, "not enough memory for fake discipline"); |
1075 | goto done; |
1076 | } |
1077 | fake_sd->sd_sc = sc; |
1078 | fake_sd->sd_meta_type = SR_META_F_NATIVE0; |
1079 | |
1080 | for (i = 0; i < MAXPARTITIONS16; i++) { |
1081 | if (label.d_partitions[i].p_fstype != FS_RAID19) |
1082 | continue; |
1083 | |
1084 | /* open partition */ |
1085 | rawdev = MAKEDISKDEV(major(devno), DISKUNIT(devno), i)(((dev_t)((((((((unsigned)(devno) >> 8) & 0xff))) & 0xff) << 8) | (((((((((unsigned)((devno) & 0xff) | (((devno) & 0xffff0000) >> 8)) / 16))) * 16) + ((i )))) & 0xff) | ((((((((((unsigned)((devno) & 0xff) | ( ((devno) & 0xffff0000) >> 8)) / 16))) * 16) + ((i)) )) & 0xffff00) << 8)))); |
1086 | if (bdevvp(rawdev, &vn)) { |
1087 | sr_error(sc, "sr_meta_native_bootprobe: cannot " |
1088 | "allocate vnode for partition"); |
1089 | goto done; |
1090 | } |
1091 | error = VOP_OPEN(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1092 | if (error) { |
1093 | DNPRINTF(SR_D_META, "%s: sr_meta_native_bootprobe " |
1094 | "open failed, partition %d\n", |
1095 | DEVNAME(sc), i); |
1096 | vput(vn); |
1097 | continue; |
1098 | } |
1099 | |
1100 | if (sr_meta_native_read(fake_sd, rawdev, md, NULL((void *)0))) { |
1101 | sr_error(sc, "native bootprobe could not read native " |
1102 | "metadata"); |
1103 | VOP_CLOSE(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1104 | vput(vn); |
1105 | continue; |
1106 | } |
1107 | |
1108 | /* are we a softraid partition? */ |
1109 | if (md->ssdi_sdd_invariant.ssd_magic != SR_MAGIC0x4d4152436372616dLLU) { |
1110 | VOP_CLOSE(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1111 | vput(vn); |
1112 | continue; |
1113 | } |
1114 | |
1115 | sr_meta_getdevname(sc, rawdev, devname, sizeof(devname)); |
1116 | if (sr_meta_validate(fake_sd, rawdev, md, NULL((void *)0)) == 0) { |
1117 | /* XXX fix M_WAITOK, this is boot time */ |
1118 | bc = malloc(sizeof(struct sr_boot_chunk), |
1119 | M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
1120 | bc->sbc_metadata = malloc(sizeof(struct sr_metadata), |
1121 | M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
1122 | memcpy(bc->sbc_metadata, md, sizeof(struct sr_metadata))__builtin_memcpy((bc->sbc_metadata), (md), (sizeof(struct sr_metadata ))); |
1123 | bc->sbc_mm = rawdev; |
1124 | SLIST_INSERT_HEAD(bch, bc, sbc_link)do { (bc)->sbc_link.sle_next = (bch)->slh_first; (bch)-> slh_first = (bc); } while (0); |
1125 | rv = SR_META_CLAIMED(1); |
1126 | } |
1127 | |
1128 | /* we are done, close partition */ |
1129 | VOP_CLOSE(vn, FREAD0x0001, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
1130 | vput(vn); |
1131 | } |
1132 | |
1133 | done: |
1134 | free(fake_sd, M_DEVBUF2, sizeof(struct sr_discipline)); |
1135 | free(md, M_DEVBUF2, SR_META_SIZE64 * DEV_BSIZE(1 << 9)); |
1136 | |
1137 | return (rv); |
1138 | } |
1139 | |
1140 | int |
1141 | sr_boot_assembly(struct sr_softc *sc) |
1142 | { |
1143 | struct sr_boot_volume_head bvh; |
1144 | struct sr_boot_chunk_head bch, kdh; |
1145 | struct sr_boot_volume *bv, *bv1, *bv2; |
1146 | struct sr_boot_chunk *bc, *bcnext, *bc1, *bc2; |
1147 | struct sr_disk_head sdklist; |
1148 | struct sr_disk *sdk; |
1149 | struct disk *dk; |
1150 | struct bioc_createraid bcr; |
1151 | struct sr_meta_chunk *hm; |
1152 | struct sr_chunk_head *cl; |
1153 | struct sr_chunk *hotspare, *chunk, *last; |
1154 | u_int64_t *ondisk = NULL((void *)0); |
1155 | dev_t *devs = NULL((void *)0); |
1156 | void *data; |
1157 | char devname[32]; |
1158 | int rv = 0, i; |
1159 | |
1160 | DNPRINTF(SR_D_META, "%s: sr_boot_assembly\n", DEVNAME(sc)); |
1161 | |
1162 | SLIST_INIT(&sdklist){ ((&sdklist)->slh_first) = ((void *)0); }; |
1163 | SLIST_INIT(&bvh){ ((&bvh)->slh_first) = ((void *)0); }; |
1164 | SLIST_INIT(&bch){ ((&bch)->slh_first) = ((void *)0); }; |
1165 | SLIST_INIT(&kdh){ ((&kdh)->slh_first) = ((void *)0); }; |
1166 | |
1167 | dk = TAILQ_FIRST(&disklist)((&disklist)->tqh_first); |
1168 | while (dk != NULL((void *)0)) { |
1169 | |
1170 | /* See if this disk has been checked. */ |
1171 | SLIST_FOREACH(sdk, &sdklist, sdk_link)for((sdk) = ((&sdklist)->slh_first); (sdk) != ((void * )0); (sdk) = ((sdk)->sdk_link.sle_next)) |
1172 | if (sdk->sdk_devno == dk->dk_devno) |
1173 | break; |
1174 | |
1175 | if (sdk != NULL((void *)0) || dk->dk_devno == NODEV(dev_t)(-1)) { |
1176 | dk = TAILQ_NEXT(dk, dk_link)((dk)->dk_link.tqe_next); |
1177 | continue; |
1178 | } |
1179 | |
1180 | /* Add this disk to the list that we've checked. */ |
1181 | sdk = malloc(sizeof(struct sr_disk), M_DEVBUF2, |
1182 | M_NOWAIT0x0002 | M_ZERO0x0008); |
1183 | if (sdk == NULL((void *)0)) |
1184 | goto unwind; |
1185 | sdk->sdk_devno = dk->dk_devno; |
1186 | SLIST_INSERT_HEAD(&sdklist, sdk, sdk_link)do { (sdk)->sdk_link.sle_next = (&sdklist)->slh_first ; (&sdklist)->slh_first = (sdk); } while (0); |
1187 | |
1188 | /* Only check sd(4) and wd(4) devices. */ |
1189 | if (strncmp(dk->dk_name, "sd", 2) && |
1190 | strncmp(dk->dk_name, "wd", 2)) { |
1191 | dk = TAILQ_NEXT(dk, dk_link)((dk)->dk_link.tqe_next); |
1192 | continue; |
1193 | } |
1194 | |
1195 | /* native softraid uses partitions */ |
1196 | rw_enter_write(&sc->sc_lock); |
1197 | bio_status_init(&sc->sc_status, &sc->sc_dev); |
1198 | sr_meta_native_bootprobe(sc, dk->dk_devno, &bch); |
1199 | rw_exit_write(&sc->sc_lock); |
1200 | |
1201 | /* probe non-native disks if native failed. */ |
1202 | |
1203 | /* Restart scan since we may have slept. */ |
1204 | dk = TAILQ_FIRST(&disklist)((&disklist)->tqh_first); |
1205 | } |
1206 | |
1207 | /* |
1208 | * Create a list of volumes and associate chunks with each volume. |
1209 | */ |
1210 | for (bc = SLIST_FIRST(&bch)((&bch)->slh_first); bc != NULL((void *)0); bc = bcnext) { |
1211 | |
1212 | bcnext = SLIST_NEXT(bc, sbc_link)((bc)->sbc_link.sle_next); |
1213 | SLIST_REMOVE(&bch, bc, sr_boot_chunk, sbc_link)do { if ((&bch)->slh_first == (bc)) { do { ((&bch) )->slh_first = ((&bch))->slh_first->sbc_link.sle_next ; } while (0); } else { struct sr_boot_chunk *curelm = (& bch)->slh_first; while (curelm->sbc_link.sle_next != (bc )) curelm = curelm->sbc_link.sle_next; curelm->sbc_link .sle_next = curelm->sbc_link.sle_next->sbc_link.sle_next ; } ((bc)->sbc_link.sle_next) = ((void *)-1); } while (0); |
1214 | bc->sbc_chunk_id = bc->sbc_metadata->ssdi_sdd_invariant.ssd_chunk_id; |
1215 | |
1216 | /* Handle key disks separately. */ |
1217 | if (bc->sbc_metadata->ssdi_sdd_invariant.ssd_level == SR_KEYDISK_LEVEL0xfffffffe) { |
1218 | SLIST_INSERT_HEAD(&kdh, bc, sbc_link)do { (bc)->sbc_link.sle_next = (&kdh)->slh_first; ( &kdh)->slh_first = (bc); } while (0); |
1219 | continue; |
1220 | } |
1221 | |
1222 | SLIST_FOREACH(bv, &bvh, sbv_link)for((bv) = ((&bvh)->slh_first); (bv) != ((void *)0); ( bv) = ((bv)->sbv_link.sle_next)) { |
1223 | if (bcmp(&bc->sbc_metadata->ssdi_sdd_invariant.ssd_uuid, |
1224 | &bv->sbv_uuid, |
1225 | sizeof(bc->sbc_metadata->ssdi_sdd_invariant.ssd_uuid)) == 0) |
1226 | break; |
1227 | } |
1228 | |
1229 | if (bv == NULL((void *)0)) { |
1230 | bv = malloc(sizeof(struct sr_boot_volume), |
1231 | M_DEVBUF2, M_NOWAIT0x0002 | M_ZERO0x0008); |
1232 | if (bv == NULL((void *)0)) { |
1233 | printf("%s: failed to allocate boot volume\n", |
1234 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1235 | goto unwind; |
1236 | } |
1237 | |
1238 | bv->sbv_level = bc->sbc_metadata->ssdi_sdd_invariant.ssd_level; |
1239 | bv->sbv_volid = bc->sbc_metadata->ssdi_sdd_invariant.ssd_volid; |
1240 | bv->sbv_chunk_no = bc->sbc_metadata->ssdi_sdd_invariant.ssd_chunk_no; |
1241 | bv->sbv_flags = bc->sbc_metadata->ssdi_sdd_invariant.ssd_vol_flags; |
1242 | memcpy(&bv->sbv_uuid, &bc->sbc_metadata->ssdi.ssd_uuid,__builtin_memcpy((&bv->sbv_uuid), (&bc->sbc_metadata ->_sdd_invariant.ssd_uuid), (sizeof(bc->sbc_metadata-> _sdd_invariant.ssd_uuid))) |
1243 | sizeof(bc->sbc_metadata->ssdi.ssd_uuid))__builtin_memcpy((&bv->sbv_uuid), (&bc->sbc_metadata ->_sdd_invariant.ssd_uuid), (sizeof(bc->sbc_metadata-> _sdd_invariant.ssd_uuid))); |
1244 | SLIST_INIT(&bv->sbv_chunks){ ((&bv->sbv_chunks)->slh_first) = ((void *)0); }; |
1245 | |
1246 | /* Maintain volume order. */ |
1247 | bv2 = NULL((void *)0); |
1248 | SLIST_FOREACH(bv1, &bvh, sbv_link)for((bv1) = ((&bvh)->slh_first); (bv1) != ((void *)0); (bv1) = ((bv1)->sbv_link.sle_next)) { |
1249 | if (bv1->sbv_volid > bv->sbv_volid) |
1250 | break; |
1251 | bv2 = bv1; |
1252 | } |
1253 | if (bv2 == NULL((void *)0)) { |
1254 | DNPRINTF(SR_D_META, "%s: insert volume %u " |
1255 | "at head\n", DEVNAME(sc), bv->sbv_volid); |
1256 | SLIST_INSERT_HEAD(&bvh, bv, sbv_link)do { (bv)->sbv_link.sle_next = (&bvh)->slh_first; ( &bvh)->slh_first = (bv); } while (0); |
1257 | } else { |
1258 | DNPRINTF(SR_D_META, "%s: insert volume %u " |
1259 | "after %u\n", DEVNAME(sc), bv->sbv_volid, |
1260 | bv2->sbv_volid); |
1261 | SLIST_INSERT_AFTER(bv2, bv, sbv_link)do { (bv)->sbv_link.sle_next = (bv2)->sbv_link.sle_next ; (bv2)->sbv_link.sle_next = (bv); } while (0); |
1262 | } |
1263 | } |
1264 | |
1265 | /* Maintain chunk order. */ |
1266 | bc2 = NULL((void *)0); |
1267 | SLIST_FOREACH(bc1, &bv->sbv_chunks, sbc_link)for((bc1) = ((&bv->sbv_chunks)->slh_first); (bc1) != ((void *)0); (bc1) = ((bc1)->sbc_link.sle_next)) { |
1268 | if (bc1->sbc_chunk_id > bc->sbc_chunk_id) |
1269 | break; |
1270 | bc2 = bc1; |
1271 | } |
1272 | if (bc2 == NULL((void *)0)) { |
1273 | DNPRINTF(SR_D_META, "%s: volume %u insert chunk %u " |
1274 | "at head\n", DEVNAME(sc), bv->sbv_volid, |
1275 | bc->sbc_chunk_id); |
1276 | SLIST_INSERT_HEAD(&bv->sbv_chunks, bc, sbc_link)do { (bc)->sbc_link.sle_next = (&bv->sbv_chunks)-> slh_first; (&bv->sbv_chunks)->slh_first = (bc); } while (0); |
1277 | } else { |
1278 | DNPRINTF(SR_D_META, "%s: volume %u insert chunk %u " |
1279 | "after %u\n", DEVNAME(sc), bv->sbv_volid, |
1280 | bc->sbc_chunk_id, bc2->sbc_chunk_id); |
1281 | SLIST_INSERT_AFTER(bc2, bc, sbc_link)do { (bc)->sbc_link.sle_next = (bc2)->sbc_link.sle_next ; (bc2)->sbc_link.sle_next = (bc); } while (0); |
1282 | } |
1283 | |
1284 | bv->sbv_chunks_found++; |
1285 | } |
1286 | |
1287 | /* Allocate memory for device and ondisk version arrays. */ |
1288 | devs = mallocarray(BIOC_CRMAXLEN1024, sizeof(dev_t), M_DEVBUF2, |
1289 | M_NOWAIT0x0002); |
1290 | if (devs == NULL((void *)0)) { |
1291 | printf("%s: failed to allocate device array\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1292 | goto unwind; |
1293 | } |
1294 | ondisk = mallocarray(BIOC_CRMAXLEN1024, sizeof(u_int64_t), M_DEVBUF2, |
1295 | M_NOWAIT0x0002); |
1296 | if (ondisk == NULL((void *)0)) { |
1297 | printf("%s: failed to allocate ondisk array\n", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1298 | goto unwind; |
1299 | } |
1300 | |
1301 | /* |
1302 | * Assemble hotspare "volumes". |
1303 | */ |
1304 | SLIST_FOREACH(bv, &bvh, sbv_link)for((bv) = ((&bvh)->slh_first); (bv) != ((void *)0); ( bv) = ((bv)->sbv_link.sle_next)) { |
1305 | |
1306 | /* Check if this is a hotspare "volume". */ |
1307 | if (bv->sbv_level != SR_HOTSPARE_LEVEL0xffffffff || |
1308 | bv->sbv_chunk_no != 1) |
1309 | continue; |
1310 | |
1311 | #ifdef SR_DEBUG |
1312 | DNPRINTF(SR_D_META, "%s: assembling hotspare volume ", |
1313 | DEVNAME(sc)); |
1314 | if (sr_debug & SR_D_META) |
1315 | sr_uuid_print(&bv->sbv_uuid, 0); |
1316 | DNPRINTF(SR_D_META, " volid %u with %u chunks\n", |
1317 | bv->sbv_volid, bv->sbv_chunk_no); |
1318 | #endif |
1319 | |
1320 | /* Create hotspare chunk metadata. */ |
1321 | hotspare = malloc(sizeof(struct sr_chunk), M_DEVBUF2, |
1322 | M_NOWAIT0x0002 | M_ZERO0x0008); |
1323 | if (hotspare == NULL((void *)0)) { |
1324 | printf("%s: failed to allocate hotspare\n", |
1325 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
1326 | goto unwind; |
1327 | } |
1328 | |
1329 | bc = SLIST_FIRST(&bv->sbv_chunks)((&bv->sbv_chunks)->slh_first); |
1330 | sr_meta_getdevname(sc, bc->sbc_mm, devname, sizeof(devname)); |
1331 | hotspare->src_dev_mm = bc->sbc_mm; |
1332 | strlcpy(hotspare->src_devname, devname, |
1333 | sizeof(hotspare->src_devname)); |
1334 | hotspare->src_size = bc->sbc_metadata->ssdi_sdd_invariant.ssd_size; |
1335 | |
1336 | hm = &hotspare->src_meta; |
1337 | hm->scmi_scm_invariant.scm_volid = SR_HOTSPARE_VOLID0xffffffff; |
1338 | hm->scmi_scm_invariant.scm_chunk_id = 0; |
1339 | hm->scmi_scm_invariant.scm_size = bc->sbc_metadata->ssdi_sdd_invariant.ssd_size; |
1340 | hm->scmi_scm_invariant.scm_coerced_size = bc->sbc_metadata->ssdi_sdd_invariant.ssd_size; |
1341 | strlcpy(hm->scmi_scm_invariant.scm_devname, devname, |
1342 | sizeof(hm->scmi_scm_invariant.scm_devname)); |
1343 | memcpy(&hm->scmi.scm_uuid, &bc->sbc_metadata->ssdi.ssd_uuid,__builtin_memcpy((&hm->_scm_invariant.scm_uuid), (& bc->sbc_metadata->_sdd_invariant.ssd_uuid), (sizeof(struct sr_uuid))) |
1344 | sizeof(struct sr_uuid))__builtin_memcpy((&hm->_scm_invariant.scm_uuid), (& bc->sbc_metadata->_sdd_invariant.ssd_uuid), (sizeof(struct sr_uuid))); |
1345 | |
1346 | sr_checksum(sc, hm, &hm->scm_checksum, |
1347 | sizeof(struct sr_meta_chunk_invariant)); |
1348 | |
1349 | hm->scm_status = BIOC_SDHOTSPARE0x04; |
1350 | |
1351 | /* Add chunk to hotspare list. */ |
1352 | rw_enter_write(&sc->sc_hs_lock); |
1353 | cl = &sc->sc_hotspare_list; |
1354 | if (SLIST_EMPTY(cl)(((cl)->slh_first) == ((void *)0))) |
1355 | SLIST_INSERT_HEAD(cl, hotspare, src_link)do { (hotspare)->src_link.sle_next = (cl)->slh_first; ( cl)->slh_first = (hotspare); } while (0); |
1356 | else { |
1357 | SLIST_FOREACH(chunk, cl, src_link)for((chunk) = ((cl)->slh_first); (chunk) != ((void *)0); ( chunk) = ((chunk)->src_link.sle_next)) |
1358 | last = chunk; |
1359 | SLIST_INSERT_AFTER(last, hotspare, src_link)do { (hotspare)->src_link.sle_next = (last)->src_link.sle_next ; (last)->src_link.sle_next = (hotspare); } while (0); |
1360 | } |
1361 | sc->sc_hotspare_no++; |
1362 | rw_exit_write(&sc->sc_hs_lock); |
1363 | |
1364 | } |
1365 | |
1366 | /* |
1367 | * Assemble RAID volumes. |
1368 | */ |
1369 | SLIST_FOREACH(bv, &bvh, sbv_link)for((bv) = ((&bvh)->slh_first); (bv) != ((void *)0); ( bv) = ((bv)->sbv_link.sle_next)) { |
1370 | |
1371 | bzero(&bcr, sizeof(bcr))__builtin_bzero((&bcr), (sizeof(bcr))); |
1372 | data = NULL((void *)0); |
1373 | |
1374 | /* Check if this is a hotspare "volume". */ |
1375 | if (bv->sbv_level == SR_HOTSPARE_LEVEL0xffffffff && |
1376 | bv->sbv_chunk_no == 1) |
1377 | continue; |
1378 | |
1379 | /* |
1380 | * Skip volumes that are marked as no auto assemble, unless |
1381 | * this was the volume which we actually booted from. |
1382 | */ |
1383 | if (bcmp(&sr_bootuuid, &bv->sbv_uuid, sizeof(sr_bootuuid)) != 0) |
1384 | if (bv->sbv_flags & BIOC_SCNOAUTOASSEMBLE0x04) |
1385 | continue; |
1386 | |
1387 | #ifdef SR_DEBUG |
1388 | DNPRINTF(SR_D_META, "%s: assembling volume ", DEVNAME(sc)); |
1389 | if (sr_debug & SR_D_META) |
1390 | sr_uuid_print(&bv->sbv_uuid, 0); |
1391 | DNPRINTF(SR_D_META, " volid %u with %u chunks\n", |
1392 | bv->sbv_volid, bv->sbv_chunk_no); |
1393 | #endif |
1394 | |
1395 | /* |
1396 | * If this is a crypto volume, try to find a matching |
1397 | * key disk... |
1398 | */ |
1399 | bcr.bc_key_disk = NODEV(dev_t)(-1); |
1400 | if (bv->sbv_level == 'C' || bv->sbv_level == 0x1C) { |
1401 | SLIST_FOREACH(bc, &kdh, sbc_link)for((bc) = ((&kdh)->slh_first); (bc) != ((void *)0); ( bc) = ((bc)->sbc_link.sle_next)) { |
1402 | if (bcmp(&bc->sbc_metadata->ssdi_sdd_invariant.ssd_uuid, |
1403 | &bv->sbv_uuid, |
1404 | sizeof(bc->sbc_metadata->ssdi_sdd_invariant.ssd_uuid)) |
1405 | == 0) |
1406 | bcr.bc_key_disk = bc->sbc_mm; |
1407 | } |
1408 | } |
1409 | |
1410 | for (i = 0; i < BIOC_CRMAXLEN1024; i++) { |
1411 | devs[i] = NODEV(dev_t)(-1); /* mark device as illegal */ |
1412 | ondisk[i] = 0; |
1413 | } |
1414 | |
1415 | SLIST_FOREACH(bc, &bv->sbv_chunks, sbc_link)for((bc) = ((&bv->sbv_chunks)->slh_first); (bc) != ( (void *)0); (bc) = ((bc)->sbc_link.sle_next)) { |
1416 | if (devs[bc->sbc_chunk_id] != NODEV(dev_t)(-1)) { |
1417 | bv->sbv_chunks_found--; |
1418 | sr_meta_getdevname(sc, bc->sbc_mm, devname, |
1419 | sizeof(devname)); |
1420 | printf("%s: found duplicate chunk %u for " |
1421 | "volume %u on device %s\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
1422 | bc->sbc_chunk_id, bv->sbv_volid, devname); |
1423 | } |
1424 | |
1425 | if (devs[bc->sbc_chunk_id] == NODEV(dev_t)(-1) || |
1426 | bc->sbc_metadata->ssd_ondisk > |
1427 | ondisk[bc->sbc_chunk_id]) { |
1428 | devs[bc->sbc_chunk_id] = bc->sbc_mm; |
1429 | ondisk[bc->sbc_chunk_id] = |
1430 | bc->sbc_metadata->ssd_ondisk; |
1431 | DNPRINTF(SR_D_META, "%s: using ondisk " |
1432 | "metadata version %llu for chunk %u\n", |
1433 | DEVNAME(sc), ondisk[bc->sbc_chunk_id], |
1434 | bc->sbc_chunk_id); |
1435 | } |
1436 | } |
1437 | |
1438 | if (bv->sbv_chunk_no != bv->sbv_chunks_found) { |
1439 | printf("%s: not all chunks were provided; " |
1440 | "attempting to bring volume %d online\n", |
1441 | DEVNAME(sc)((sc)->sc_dev.dv_xname), bv->sbv_volid); |
1442 | } |
1443 | |
1444 | bcr.bc_level = bv->sbv_level; |
1445 | bcr.bc_dev_list_len = bv->sbv_chunk_no * sizeof(dev_t); |
1446 | bcr.bc_dev_list = devs; |
1447 | bcr.bc_flags = BIOC_SCDEVT0x02 | |
1448 | (bv->sbv_flags & BIOC_SCNOAUTOASSEMBLE0x04); |
1449 | |
1450 | if ((bv->sbv_level == 'C' || bv->sbv_level == 0x1C) && |
1451 | bcmp(&sr_bootuuid, &bv->sbv_uuid, sizeof(sr_bootuuid)) == 0) |
1452 | data = sr_bootkey; |
1453 | |
1454 | rw_enter_write(&sc->sc_lock); |
1455 | bio_status_init(&sc->sc_status, &sc->sc_dev); |
1456 | sr_ioctl_createraid(sc, &bcr, 0, data); |
1457 | rw_exit_write(&sc->sc_lock); |
1458 | |
1459 | rv++; |
1460 | } |
1461 | |
1462 | /* done with metadata */ |
1463 | unwind: |
1464 | /* Free boot volumes and associated chunks. */ |
1465 | for (bv1 = SLIST_FIRST(&bvh)((&bvh)->slh_first); bv1 != NULL((void *)0); bv1 = bv2) { |
1466 | bv2 = SLIST_NEXT(bv1, sbv_link)((bv1)->sbv_link.sle_next); |
1467 | for (bc1 = SLIST_FIRST(&bv1->sbv_chunks)((&bv1->sbv_chunks)->slh_first); bc1 != NULL((void *)0); |
1468 | bc1 = bc2) { |
1469 | bc2 = SLIST_NEXT(bc1, sbc_link)((bc1)->sbc_link.sle_next); |
1470 | free(bc1->sbc_metadata, M_DEVBUF2, |
1471 | sizeof(*bc1->sbc_metadata)); |
1472 | free(bc1, M_DEVBUF2, sizeof(*bc1)); |
1473 | } |
1474 | free(bv1, M_DEVBUF2, sizeof(*bv1)); |
1475 | } |
1476 | /* Free keydisks chunks. */ |
1477 | for (bc1 = SLIST_FIRST(&kdh)((&kdh)->slh_first); bc1 != NULL((void *)0); bc1 = bc2) { |
1478 | bc2 = SLIST_NEXT(bc1, sbc_link)((bc1)->sbc_link.sle_next); |
1479 | free(bc1->sbc_metadata, M_DEVBUF2, sizeof(*bc1->sbc_metadata)); |
1480 | free(bc1, M_DEVBUF2, sizeof(*bc1)); |
1481 | } |
1482 | /* Free unallocated chunks. */ |
1483 | for (bc1 = SLIST_FIRST(&bch)((&bch)->slh_first); bc1 != NULL((void *)0); bc1 = bc2) { |
1484 | bc2 = SLIST_NEXT(bc1, sbc_link)((bc1)->sbc_link.sle_next); |
1485 | free(bc1->sbc_metadata, M_DEVBUF2, sizeof(*bc1->sbc_metadata)); |
1486 | free(bc1, M_DEVBUF2, sizeof(*bc1)); |
1487 | } |
1488 | |
1489 | while (!SLIST_EMPTY(&sdklist)(((&sdklist)->slh_first) == ((void *)0))) { |
1490 | sdk = SLIST_FIRST(&sdklist)((&sdklist)->slh_first); |
1491 | SLIST_REMOVE_HEAD(&sdklist, sdk_link)do { (&sdklist)->slh_first = (&sdklist)->slh_first ->sdk_link.sle_next; } while (0); |
1492 | free(sdk, M_DEVBUF2, sizeof(*sdk)); |
1493 | } |
1494 | |
1495 | free(devs, M_DEVBUF2, BIOC_CRMAXLEN1024 * sizeof(dev_t)); |
1496 | free(ondisk, M_DEVBUF2, BIOC_CRMAXLEN1024 * sizeof(u_int64_t)); |
1497 | |
1498 | return (rv); |
1499 | } |
1500 | |
/*
 * If the configured root disk duid matches one of the boot-chunk duids
 * recorded in a discipline's SR_OPT_BOOT optional metadata, rewrite
 * rootduid to that volume's root duid so the softraid volume becomes
 * the root device.  No-op if softraid has not attached or no root duid
 * is set.
 */
void
sr_map_root(void)
{
	struct sr_softc		*sc = softraid0;
	struct sr_discipline	*sd;
	struct sr_meta_opt_item	*omi;
	struct sr_meta_boot	*sbm;
	u_char			duid[8];
	int			i;

	DNPRINTF(SR_D_MISC, "%s: sr_map_root\n", DEVNAME(sc));

	/* softraid0 is only set once sr_attach() has run. */
	if (sc == NULL)
		return;

	/* An all-zero rootduid means no root duid was configured. */
	bzero(duid, sizeof(duid));
	if (bcmp(rootduid, duid, sizeof(duid)) == 0) {
		DNPRINTF(SR_D_MISC, "%s: root duid is zero\n", DEVNAME(sc));
		return;
	}

	/* Scan every discipline's optional metadata for a boot record. */
	TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link) {
		SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link) {
			if (omi->omi_som->som_type != SR_OPT_BOOT)
				continue;
			sbm = (struct sr_meta_boot *)omi->omi_som;
			for (i = 0; i < SR_MAX_BOOT_DISKS; i++) {
				if (bcmp(rootduid, sbm->sbm_boot_duid[i],
				    sizeof(rootduid)) == 0) {
					/* First match wins. */
					memcpy(rootduid, sbm->sbm_root_duid,
					    sizeof(rootduid));
					DNPRINTF(SR_D_MISC, "%s: root duid "
					    "mapped to %s\n", DEVNAME(sc),
					    duid_format(rootduid));
					return;
				}
			}
		}
	}
}
1541 | |
/*
 * Probe a chunk for use with native softraid metadata: read the
 * disklabel, verify the partition is of type RAID and large enough to
 * hold the metadata plus some data, and record the chunk's duid, usable
 * size (in DEV_BSIZE blocks, metadata area excluded) and sector size.
 *
 * Returns SR_META_F_NATIVE on success, SR_META_F_INVALID otherwise.
 */
int
sr_meta_native_probe(struct sr_softc *sc, struct sr_chunk *ch_entry)
{
	struct disklabel	label;
	char			*devname;
	int			error, part;
	u_int64_t		size;

	DNPRINTF(SR_D_META, "%s: sr_meta_native_probe(%s)\n",
	    DEVNAME(sc), ch_entry->src_devname);

	devname = ch_entry->src_devname;
	part = DISKPART(ch_entry->src_dev_mm);

	/* get disklabel */
	error = VOP_IOCTL(ch_entry->src_vn, DIOCGDINFO, (caddr_t)&label, FREAD,
	    NOCRED, curproc);
	if (error) {
		DNPRINTF(SR_D_META, "%s: %s can't obtain disklabel\n",
		    DEVNAME(sc), devname);
		goto unwind;
	}
	memcpy(ch_entry->src_duid, label.d_uid, sizeof(ch_entry->src_duid));

	/* make sure the partition is of the right type */
	if (label.d_partitions[part].p_fstype != FS_RAID) {
		DNPRINTF(SR_D_META,
		    "%s: %s partition not of type RAID (%d)\n", DEVNAME(sc),
		    devname,
		    label.d_partitions[part].p_fstype);
		goto unwind;
	}

	/* Partition size in DEV_BSIZE blocks; must exceed metadata area. */
	size = DL_SECTOBLK(&label, DL_GETPSIZE(&label.d_partitions[part]));
	if (size <= SR_DATA_OFFSET) {
		DNPRINTF(SR_D_META, "%s: %s partition too small\n", DEVNAME(sc),
		    devname);
		goto unwind;
	}
	size -= SR_DATA_OFFSET;
	if (size > INT64_MAX) {
		DNPRINTF(SR_D_META, "%s: %s partition too large\n", DEVNAME(sc),
		    devname);
		goto unwind;
	}
	ch_entry->src_size = size;
	ch_entry->src_secsize = label.d_secsize;

	DNPRINTF(SR_D_META, "%s: probe found %s size %lld\n", DEVNAME(sc),
	    devname, (long long)size);

	return (SR_META_F_NATIVE);
unwind:
	DNPRINTF(SR_D_META, "%s: invalid device: %s\n", DEVNAME(sc),
	    devname ? devname : "nodev");
	return (SR_META_F_INVALID);
}
1599 | |
/*
 * Read and validate the native metadata on every chunk of a discipline:
 * all softraid chunks must carry the same volume UUID, and chunks whose
 * on-disk metadata generation differs from the newest one seen are
 * marked offline.  With "force" set, a mix of softraid and non-softraid
 * chunks (and a short chunk count) is tolerated.
 *
 * Returns 0 on success, 1 on failure.
 */
int
sr_meta_native_attach(struct sr_discipline *sd, int force)
{
	struct sr_softc		*sc = sd->sd_sc;
	struct sr_chunk_head	*cl = &sd->sd_vol.sv_chunk_list;
	struct sr_metadata	*md = NULL;
	struct sr_chunk		*ch_entry, *ch_next;
	struct sr_uuid		uuid;
	u_int64_t		version = 0;
	int			sr, not_sr, rv = 1, d, expected = -1, old_meta = 0;

	DNPRINTF(SR_D_META, "%s: sr_meta_native_attach\n", DEVNAME(sc));

	/* Scratch buffer reused for each chunk's metadata read. */
	md = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_ZERO | M_NOWAIT);
	if (md == NULL) {
		sr_error(sc, "not enough memory for metadata buffer");
		goto bad;
	}

	bzero(&uuid, sizeof uuid);

	/* First pass: count softraid vs non-softraid chunks, check UUIDs. */
	sr = not_sr = d = 0;
	SLIST_FOREACH(ch_entry, cl, src_link) {
		if (ch_entry->src_dev_mm == NODEV)
			continue;

		if (sr_meta_native_read(sd, ch_entry->src_dev_mm, md, NULL)) {
			sr_error(sc, "could not read native metadata");
			goto bad;
		}

		if (md->ssdi.ssd_magic == SR_MAGIC) {
			sr++;
			ch_entry->src_meta.scmi.scm_chunk_id =
			    md->ssdi.ssd_chunk_id;
			if (d == 0) {
				/* First chunk defines the expected volume. */
				memcpy(&uuid, &md->ssdi.ssd_uuid, sizeof uuid);
				expected = md->ssdi.ssd_chunk_no;
				version = md->ssd_ondisk;
				d++;
				continue;
			} else if (bcmp(&md->ssdi.ssd_uuid, &uuid,
			    sizeof uuid)) {
				sr_error(sc, "not part of the same volume");
				goto bad;
			}
			if (md->ssd_ondisk != version) {
				/* Track the newest metadata generation. */
				old_meta++;
				version = MAX(md->ssd_ondisk, version);
			}
		} else
			not_sr++;
	}

	if (sr && not_sr && !force) {
		sr_error(sc, "not all chunks are of the native metadata "
		    "format");
		goto bad;
	}

	/* mixed metadata versions; mark bad disks offline */
	if (old_meta) {
		d = 0;
		for (ch_entry = SLIST_FIRST(cl); ch_entry != NULL;
		    ch_entry = ch_next, d++) {
			ch_next = SLIST_NEXT(ch_entry, src_link);

			/* XXX do we want to read this again? */
			if (ch_entry->src_dev_mm == NODEV)
				panic("src_dev_mm == NODEV");
			if (sr_meta_native_read(sd, ch_entry->src_dev_mm, md,
			    NULL))
				sr_warn(sc, "could not read native metadata");
			if (md->ssd_ondisk != version)
				sd->sd_vol.sv_chunks[d]->src_meta.scm_status =
				    BIOC_SDOFFLINE;
		}
	}

	if (expected != sr && !force && expected != -1) {
		DNPRINTF(SR_D_META, "%s: not all chunks were provided, trying "
		    "anyway\n", DEVNAME(sc));
	}

	rv = 0;
bad:
	/* free(9) tolerates NULL, so the early-failure path is safe. */
	free(md, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
	return (rv);
}
1689 | |
/*
 * Read native metadata from a chunk device into md.  "fm" (foreign
 * metadata) is unused for the native format.  Thin wrapper around
 * sr_meta_rw() with B_READ.
 */
int
sr_meta_native_read(struct sr_discipline *sd, dev_t dev,
    struct sr_metadata *md, void *fm)
{
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif
	DNPRINTF(SR_D_META, "%s: sr_meta_native_read(0x%x, %p)\n",
	    DEVNAME(sc), dev, md);

	return (sr_meta_rw(sd, dev, md, B_READ));
}
1702 | |
/*
 * Write native metadata from md to a chunk device.  "fm" (foreign
 * metadata) is unused for the native format.  Thin wrapper around
 * sr_meta_rw() with B_WRITE.
 */
int
sr_meta_native_write(struct sr_discipline *sd, dev_t dev,
    struct sr_metadata *md, void *fm)
{
#ifdef SR_DEBUG
	struct sr_softc		*sc = sd->sd_sc;
#endif
	DNPRINTF(SR_D_META, "%s: sr_meta_native_write(0x%x, %p)\n",
	    DEVNAME(sc), dev, md);

	return (sr_meta_rw(sd, dev, md, B_WRITE));
}
1715 | |
/*
 * Register a disk hotplug callback for a discipline.  Registering the
 * same callback twice is a no-op.  Sleeps (M_WAITOK) for the list
 * entry allocation.
 */
void
sr_hotplug_register(struct sr_discipline *sd, void *func)
{
	struct sr_hotplug_list	*mhe;

	DNPRINTF(SR_D_MISC, "%s: sr_hotplug_register: %p\n",
	    DEVNAME(sd->sd_sc), func);

	/* make sure we aren't on the list yet */
	SLIST_FOREACH(mhe, &sr_hotplug_callbacks, shl_link)
		if (mhe->sh_hotplug == func)
			return;

	mhe = malloc(sizeof(struct sr_hotplug_list), M_DEVBUF,
	    M_WAITOK | M_ZERO);
	mhe->sh_hotplug = func;
	mhe->sh_sd = sd;
	SLIST_INSERT_HEAD(&sr_hotplug_callbacks, mhe, shl_link);
}
1735 | |
/*
 * Unregister a previously registered disk hotplug callback and free its
 * list entry.  Harmless if the callback was never registered.
 */
void
sr_hotplug_unregister(struct sr_discipline *sd, void *func)
{
	struct sr_hotplug_list	*mhe;

	DNPRINTF(SR_D_MISC, "%s: sr_hotplug_unregister: %s %p\n",
	    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, func);

	/* make sure we are on the list */
	SLIST_FOREACH(mhe, &sr_hotplug_callbacks, shl_link) {
		if (mhe->sh_hotplug == func)
			break;
	}
	if (mhe != NULL) {
		SLIST_REMOVE(&sr_hotplug_callbacks, mhe,
		    sr_hotplug_list, shl_link);
		free(mhe, M_DEVBUF, sizeof(*mhe));
	}
}
1755 | |
/*
 * Disk attach/detach hook: fan the event out to every registered
 * hotplug callback whose discipline is ready.  "action" indicates
 * attach or detach.
 */
void
sr_disk_attach(struct disk *diskp, int action)
{
	struct sr_hotplug_list	*mhe;

	SLIST_FOREACH(mhe, &sr_hotplug_callbacks, shl_link)
		if (mhe->sh_sd->sd_ready)
			mhe->sh_hotplug(mhe->sh_sd, diskp, action);
}
1765 | |
/*
 * Autoconf match function: softraid is a pseudo-device and always
 * matches.
 */
int
sr_match(struct device *parent, void *match, void *aux)
{
	return (1);
}
1771 | |
/*
 * Autoconf attach function: initialize softc locks and lists, register
 * with bio(4) and the sensor framework, attach the scsibus that volumes
 * appear on, hook disk hotplug events and assemble any bootable volumes
 * found at boot.  The boot-time crypto key is scrubbed once assembly
 * is done.
 */
void
sr_attach(struct device *parent, struct device *self, void *aux)
{
	struct sr_softc		*sc = (void *)self;
	struct scsibus_attach_args saa;

	DNPRINTF(SR_D_MISC, "\n%s: sr_attach", DEVNAME(sc));

	/* First instance becomes the global softc used by sr_map_root(). */
	if (softraid0 == NULL)
		softraid0 = sc;

	rw_init(&sc->sc_lock, "sr_lock");
	rw_init(&sc->sc_hs_lock, "sr_hs_lock");

	SLIST_INIT(&sr_hotplug_callbacks);
	TAILQ_INIT(&sc->sc_dis_list);
	SLIST_INIT(&sc->sc_hotspare_list);

#if NBIO > 0
	if (bio_register(&sc->sc_dev, sr_bio_ioctl) != 0)
		printf("%s: controller registration failed", DEVNAME(sc));
#endif /* NBIO > 0 */

#ifndef SMALL_KERNEL
	strlcpy(sc->sc_sensordev.xname, DEVNAME(sc),
	    sizeof(sc->sc_sensordev.xname));
	sensordev_install(&sc->sc_sensordev);
#endif /* SMALL_KERNEL */

	printf("\n");

	saa.saa_adapter_softc = sc;
	saa.saa_adapter = &sr_switch;
	saa.saa_adapter_target = SDEV_NO_ADAPTER_TARGET;
	saa.saa_adapter_buswidth = SR_MAX_LD;
	saa.saa_luns = 1;
	saa.saa_openings = 0;
	saa.saa_pool = NULL;
	saa.saa_quirks = saa.saa_flags = 0;
	saa.saa_wwpn = saa.saa_wwnn = 0;

	sc->sc_scsibus = (struct scsibus_softc *)config_found(&sc->sc_dev, &saa,
	    scsiprint);

	softraid_disk_attach = sr_disk_attach;

	sr_boot_assembly(sc);

	/* The boot key is no longer needed; scrub it from memory. */
	explicit_bzero(sr_bootkey, sizeof(sr_bootkey));
}
1822 | |
/*
 * Autoconf detach function: unhook disk hotplug, shut down all
 * disciplines, remove sensors and detach the scsibus.
 *
 * Returns 0 on success or the error from config_detach().
 */
int
sr_detach(struct device *self, int flags)
{
	struct sr_softc		*sc = (void *)self;
	int			rv;

	DNPRINTF(SR_D_MISC, "%s: sr_detach\n", DEVNAME(sc));

	softraid_disk_attach = NULL;

	sr_shutdown(0);

#ifndef SMALL_KERNEL
	if (sc->sc_sensor_task != NULL)
		sensor_task_unregister(sc->sc_sensor_task);
	sensordev_deinstall(&sc->sc_sensordev);
#endif /* SMALL_KERNEL */

	if (sc->sc_scsibus != NULL) {
		rv = config_detach((struct device *)sc->sc_scsibus, flags);
		if (rv != 0)
			return (rv);
		sc->sc_scsibus = NULL;
	}

	return (0);
}
1850 | |
/*
 * Report an informational message via bio(4) status.  Caller must hold
 * sc_lock exclusively.
 */
void
sr_info(struct sr_softc *sc, const char *fmt, ...)
{
	va_list			ap;

	rw_assert_wrlock(&sc->sc_lock);

	va_start(ap, fmt);
	bio_status(&sc->sc_status, 0, BIO_MSG_INFO, fmt, &ap);
	va_end(ap);
}
1862 | |
/*
 * Report a warning via bio(4) status (also printed to the console).
 * Caller must hold sc_lock exclusively.
 */
void
sr_warn(struct sr_softc *sc, const char *fmt, ...)
{
	va_list			ap;

	rw_assert_wrlock(&sc->sc_lock);

	va_start(ap, fmt);
	bio_status(&sc->sc_status, 1, BIO_MSG_WARN, fmt, &ap);
	va_end(ap);
}
1874 | |
/*
 * Report an error via bio(4) status (also printed to the console).
 * Caller must hold sc_lock exclusively.
 */
void
sr_error(struct sr_softc *sc, const char *fmt, ...)
{
	va_list			ap;

	rw_assert_wrlock(&sc->sc_lock);

	va_start(ap, fmt);
	bio_status(&sc->sc_status, 1, BIO_MSG_ERROR, fmt, &ap);
	va_end(ap);
}
1886 | |
/*
 * Allocate the discipline's ccb pool: sd_max_wu * sd_max_ccb_per_wu
 * ccbs in one zeroed array, all placed on the free queue.  Sleeps
 * (M_WAITOK).
 *
 * Returns 0 on success, 1 if sd is NULL or the pool already exists.
 */
int
sr_ccb_alloc(struct sr_discipline *sd)
{
	struct sr_ccb		*ccb;
	int			i;

	if (!sd)
		return (1);

	DNPRINTF(SR_D_CCB, "%s: sr_ccb_alloc\n", DEVNAME(sd->sd_sc));

	/* Already allocated. */
	if (sd->sd_ccb)
		return (1);

	sd->sd_ccb = mallocarray(sd->sd_max_wu,
	    sd->sd_max_ccb_per_wu * sizeof(struct sr_ccb),
	    M_DEVBUF, M_WAITOK | M_ZERO);
	TAILQ_INIT(&sd->sd_ccb_freeq);
	for (i = 0; i < sd->sd_max_wu * sd->sd_max_ccb_per_wu; i++) {
		ccb = &sd->sd_ccb[i];
		ccb->ccb_dis = sd;
		/* sr_ccb_put() links the ccb onto the free queue. */
		sr_ccb_put(ccb);
	}

	DNPRINTF(SR_D_CCB, "%s: sr_ccb_alloc ccb: %d\n",
	    DEVNAME(sd->sd_sc), sd->sd_max_wu * sd->sd_max_ccb_per_wu);

	return (0);
}
1916 | |
/*
 * Tear down the discipline's ccb pool: unlink every ccb from the free
 * queue and free the backing array allocated by sr_ccb_alloc().
 */
void
sr_ccb_free(struct sr_discipline *sd)
{
	struct sr_ccb		*ccb;

	if (!sd)
		return;

	DNPRINTF(SR_D_CCB, "%s: sr_ccb_free %p\n", DEVNAME(sd->sd_sc), sd);

	while ((ccb = TAILQ_FIRST(&sd->sd_ccb_freeq)) != NULL)
		TAILQ_REMOVE(&sd->sd_ccb_freeq, ccb, ccb_link);

	/* The ccbs live in a single array; one free covers them all. */
	free(sd->sd_ccb, M_DEVBUF, sd->sd_max_wu * sd->sd_max_ccb_per_wu *
	    sizeof(struct sr_ccb));
}
1933 | |
/*
 * Take a ccb off the discipline's free queue and mark it in progress.
 * The free queue is protected by splbio.
 *
 * Returns the ccb, or NULL if the pool is exhausted.
 */
struct sr_ccb *
sr_ccb_get(struct sr_discipline *sd)
{
	struct sr_ccb		*ccb;
	int			s;

	s = splbio();

	ccb = TAILQ_FIRST(&sd->sd_ccb_freeq);
	if (ccb) {
		TAILQ_REMOVE(&sd->sd_ccb_freeq, ccb, ccb_link);
		ccb->ccb_state = SR_CCB_INPROGRESS;
	}

	splx(s);

	DNPRINTF(SR_D_CCB, "%s: sr_ccb_get: %p\n", DEVNAME(sd->sd_sc),
	    ccb);

	return (ccb);
}
1955 | |
/*
 * Reset a ccb and return it to its discipline's free queue.  Also used
 * by sr_ccb_alloc() to seed the queue.  The free queue is protected by
 * splbio.
 */
void
sr_ccb_put(struct sr_ccb *ccb)
{
	struct sr_discipline	*sd = ccb->ccb_dis;
	int			s;

	DNPRINTF(SR_D_CCB, "%s: sr_ccb_put: %p\n", DEVNAME(sd->sd_sc),
	    ccb);

	s = splbio();

	/* Clear per-I/O state before recycling. */
	ccb->ccb_wu = NULL;
	ccb->ccb_state = SR_CCB_FREE;
	ccb->ccb_target = -1;
	ccb->ccb_opaque = NULL;

	TAILQ_INSERT_TAIL(&sd->sd_ccb_freeq, ccb, ccb_link);

	splx(s);
}
1976 | |
/*
 * Build a read or write ccb for "len" bytes at block "blkno" (relative
 * to the volume data area) on the given chunk.  SCSI_DATA_IN in
 * xsflags selects a read; anything else is a write.  The buf is aimed
 * at the chunk's device/vnode and completion goes to sd_scsi_intr.
 *
 * Returns the ccb, or NULL if none are free.
 */
struct sr_ccb *
sr_ccb_rw(struct sr_discipline *sd, int chunk, daddr_t blkno,
    long len, u_int8_t *data, int xsflags, int ccbflags)
{
	struct sr_chunk		*sc = sd->sd_vol.sv_chunks[chunk];
	struct sr_ccb		*ccb = NULL;

	ccb = sr_ccb_get(sd);
	if (ccb == NULL)
		goto out;

	ccb->ccb_flags = ccbflags;
	ccb->ccb_target = chunk;

	ccb->ccb_buf.b_flags = B_PHYS | B_CALL;
	if (ISSET(xsflags, SCSI_DATA_IN))
		ccb->ccb_buf.b_flags |= B_READ;
	else
		ccb->ccb_buf.b_flags |= B_WRITE;

	/* Skip the metadata area at the start of each chunk. */
	ccb->ccb_buf.b_blkno = blkno + sd->sd_meta->ssd_data_blkno;
	ccb->ccb_buf.b_bcount = len;
	ccb->ccb_buf.b_bufsize = len;
	ccb->ccb_buf.b_resid = len;
	ccb->ccb_buf.b_data = data;
	ccb->ccb_buf.b_error = 0;
	ccb->ccb_buf.b_iodone = sd->sd_scsi_intr;
	ccb->ccb_buf.b_proc = curproc;
	ccb->ccb_buf.b_dev = sc->src_dev_mm;
	ccb->ccb_buf.b_vp = sc->src_vn;
	ccb->ccb_buf.b_bq = NULL;

	/* Account the pending write on the chunk's vnode. */
	if (!ISSET(ccb->ccb_buf.b_flags, B_READ))
		ccb->ccb_buf.b_vp->v_numoutput++;

	LIST_INIT(&ccb->ccb_buf.b_dep);

	DNPRINTF(SR_D_DIS, "%s: %s %s ccb "
	    "b_bcount %ld b_blkno %lld b_flags 0x%0lx b_data %p\n",
	    DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name,
	    ccb->ccb_buf.b_bcount, (long long)ccb->ccb_buf.b_blkno,
	    ccb->ccb_buf.b_flags, ccb->ccb_buf.b_data);

out:
	return ccb;
}
2023 | |
/*
 * Per-ccb I/O completion: classify the ccb as OK or failed and update
 * the owning work unit's counters.  On error, a redundant discipline
 * takes the chunk offline; a non-redundant one just logs.  Must be
 * called at splbio (asserted).
 */
void
sr_ccb_done(struct sr_ccb *ccb)
{
	struct sr_workunit	*wu = ccb->ccb_wu;
	struct sr_discipline	*sd = wu->swu_dis;
	struct sr_softc		*sc = sd->sd_sc;

	DNPRINTF(SR_D_INTR, "%s: %s %s ccb done b_bcount %ld b_resid %zu"
	    " b_flags 0x%0lx block %lld target %d\n",
	    DEVNAME(sc), sd->sd_meta->ssd_devname, sd->sd_name,
	    ccb->ccb_buf.b_bcount, ccb->ccb_buf.b_resid, ccb->ccb_buf.b_flags,
	    (long long)ccb->ccb_buf.b_blkno, ccb->ccb_target);

	splassert(IPL_BIO);

	if (ccb->ccb_target == -1)
		panic("%s: invalid target on wu: %p", DEVNAME(sc), wu);

	if (ccb->ccb_buf.b_flags & B_ERROR) {
		DNPRINTF(SR_D_INTR, "%s: i/o error on block %lld target %d\n",
		    DEVNAME(sc), (long long)ccb->ccb_buf.b_blkno,
		    ccb->ccb_target);
		if (ISSET(sd->sd_capabilities, SR_CAP_REDUNDANT))
			sd->sd_set_chunk_state(sd, ccb->ccb_target,
			    BIOC_SDOFFLINE);
		else
			printf("%s: %s: i/o error %d @ %s block %lld\n",
			    DEVNAME(sc), sd->sd_meta->ssd_devname,
			    ccb->ccb_buf.b_error, sd->sd_name,
			    (long long)ccb->ccb_buf.b_blkno);
		ccb->ccb_state = SR_CCB_FAILED;
		wu->swu_ios_failed++;
	} else {
		ccb->ccb_state = SR_CCB_OK;
		wu->swu_ios_succeeded++;
	}

	wu->swu_ios_complete++;
}
2063 | |
/*
 * Allocate the discipline's work unit pool: sd_max_wu work units of
 * sd_wu_size bytes each, linked on sd_wu for later teardown and placed
 * on the free queue via sr_wu_put().  Also initializes the wu mutex and
 * queues.  Sleeps (M_WAITOK).  Always returns 0.
 */
int
sr_wu_alloc(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;
	int			i, no_wu;

	DNPRINTF(SR_D_WU, "%s: sr_wu_alloc %p %d\n", DEVNAME(sd->sd_sc),
	    sd, sd->sd_max_wu);

	no_wu = sd->sd_max_wu;
	sd->sd_wu_pending = no_wu;

	mtx_init(&sd->sd_wu_mtx, IPL_BIO);
	TAILQ_INIT(&sd->sd_wu);
	TAILQ_INIT(&sd->sd_wu_freeq);
	TAILQ_INIT(&sd->sd_wu_pendq);
	TAILQ_INIT(&sd->sd_wu_defq);

	for (i = 0; i < no_wu; i++) {
		/* sd_wu_size allows disciplines to embed extra state. */
		wu = malloc(sd->sd_wu_size, M_DEVBUF, M_WAITOK | M_ZERO);
		TAILQ_INSERT_TAIL(&sd->sd_wu, wu, swu_next);
		TAILQ_INIT(&wu->swu_ccb);
		wu->swu_dis = sd;
		task_set(&wu->swu_task, sr_wu_done_callback, wu);
		/* sr_wu_put() decrements sd_wu_pending back to zero. */
		sr_wu_put(sd, wu);
	}

	return (0);
}
2093 | |
/*
 * Tear down the discipline's work unit pool: drain the free, pending
 * and deferred queues, then free every work unit via the sd_wu list.
 */
void
sr_wu_free(struct sr_discipline *sd)
{
	struct sr_workunit	*wu;

	DNPRINTF(SR_D_WU, "%s: sr_wu_free %p\n", DEVNAME(sd->sd_sc), sd);

	while ((wu = TAILQ_FIRST(&sd->sd_wu_freeq)) != NULL)
		TAILQ_REMOVE(&sd->sd_wu_freeq, wu, swu_link);
	while ((wu = TAILQ_FIRST(&sd->sd_wu_pendq)) != NULL)
		TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);
	while ((wu = TAILQ_FIRST(&sd->sd_wu_defq)) != NULL)
		TAILQ_REMOVE(&sd->sd_wu_defq, wu, swu_link);

	/* sd_wu holds every wu ever allocated, regardless of queue state. */
	while ((wu = TAILQ_FIRST(&sd->sd_wu)) != NULL) {
		TAILQ_REMOVE(&sd->sd_wu, wu, swu_next);
		free(wu, M_DEVBUF, sd->sd_wu_size);
	}
}
2113 | |
/*
 * scsi_iopool get handler: take a work unit off the free queue under
 * sd_wu_mtx.  Returns NULL when none are free (the iopool machinery
 * will retry).
 */
void *
sr_wu_get(void *xsd)
{
	struct sr_discipline	*sd = (struct sr_discipline *)xsd;
	struct sr_workunit	*wu;

	mtx_enter(&sd->sd_wu_mtx);
	wu = TAILQ_FIRST(&sd->sd_wu_freeq);
	if (wu) {
		TAILQ_REMOVE(&sd->sd_wu_freeq, wu, swu_link);
		sd->sd_wu_pending++;
	}
	mtx_leave(&sd->sd_wu_mtx);

	DNPRINTF(SR_D_WU, "%s: sr_wu_get: %p\n", DEVNAME(sd->sd_sc), wu);

	return (wu);
}
2132 | |
/*
 * scsi_iopool put handler: release the work unit's ccbs, reset its
 * state and return it to the free queue under sd_wu_mtx.
 */
void
sr_wu_put(void *xsd, void *xwu)
{
	struct sr_discipline	*sd = (struct sr_discipline *)xsd;
	struct sr_workunit	*wu = (struct sr_workunit *)xwu;

	DNPRINTF(SR_D_WU, "%s: sr_wu_put: %p\n", DEVNAME(sd->sd_sc), wu);

	sr_wu_release_ccbs(wu);
	sr_wu_init(sd, wu);

	mtx_enter(&sd->sd_wu_mtx);
	TAILQ_INSERT_TAIL(&sd->sd_wu_freeq, wu, swu_link);
	sd->sd_wu_pending--;
	mtx_leave(&sd->sd_wu_mtx);
}
2149 | |
/*
 * Reset a work unit's per-I/O fields before it is recycled.  Panics if
 * the wu still has an active callback (checked at splbio).
 */
void
sr_wu_init(struct sr_discipline *sd, struct sr_workunit *wu)
{
	int			s;

	s = splbio();
	if (wu->swu_cb_active == 1)
		panic("%s: sr_wu_init got active wu", DEVNAME(sd->sd_sc));
	splx(s);

	wu->swu_xs = NULL;
	wu->swu_state = SR_WU_FREE;
	wu->swu_flags = 0;
	wu->swu_blk_start = 0;
	wu->swu_blk_end = 0;
	wu->swu_collider = NULL;
}
2167 | |
/*
 * Attach a ccb to a work unit and bump its outstanding I/O count.
 * Done at splbio; panics if the wu's callback is already running.
 */
void
sr_wu_enqueue_ccb(struct sr_workunit *wu, struct sr_ccb *ccb)
{
	struct sr_discipline	*sd = wu->swu_dis;
	int			s;

	s = splbio();
	if (wu->swu_cb_active == 1)
		panic("%s: sr_wu_enqueue_ccb got active wu",
		    DEVNAME(sd->sd_sc));
	ccb->ccb_wu = wu;
	wu->swu_io_count++;
	TAILQ_INSERT_TAIL(&wu->swu_ccb, ccb, ccb_link);
	splx(s);
}
2183 | |
/*
 * Return every ccb attached to a work unit to the free pool and zero
 * the wu's I/O counters.
 */
void
sr_wu_release_ccbs(struct sr_workunit *wu)
{
	struct sr_ccb		*ccb;

	/* Return all ccbs that are associated with this workunit. */
	while ((ccb = TAILQ_FIRST(&wu->swu_ccb)) != NULL) {
		TAILQ_REMOVE(&wu->swu_ccb, ccb, ccb_link);
		sr_ccb_put(ccb);
	}

	wu->swu_io_count = 0;
	wu->swu_ios_complete = 0;
	wu->swu_ios_failed = 0;
	wu->swu_ios_succeeded = 0;
}
2200 | |
/*
 * Work unit completion check: once every ccb of the work unit has
 * completed, defer the final processing to the discipline's taskq
 * (sr_wu_done_callback).  No-op while I/O is still outstanding.
 */
void
sr_wu_done(struct sr_workunit *wu)
{
	struct sr_discipline	*sd = wu->swu_dis;

	DNPRINTF(SR_D_INTR, "%s: sr_wu_done count %d completed %d failed %d\n",
	    DEVNAME(sd->sd_sc), wu->swu_io_count, wu->swu_ios_complete,
	    wu->swu_ios_failed);

	if (wu->swu_ios_complete < wu->swu_io_count)
		return;

	task_add(sd->sd_taskq, &wu->swu_task);
}
2215 | |
/*
 * Taskq half of work unit completion: propagate per-ccb errors to the
 * scsi_xfer, give the discipline a chance to restart the wu, remove it
 * from the pending queue, kick any collider wu that was waiting on this
 * one, and finish via the discipline's sd_scsi_done or sr_scsi_done().
 * Runs at splbio.
 */
void
sr_wu_done_callback(void *xwu)
{
	struct sr_workunit	*wu = xwu;
	struct sr_discipline	*sd = wu->swu_dis;
	struct scsi_xfer	*xs = wu->swu_xs;
	struct sr_workunit	*wup;
	int			s;

	/*
	 * The SR_WUF_DISCIPLINE or SR_WUF_REBUILD flag must be set if
	 * the work unit is not associated with a scsi_xfer.
	 */
	KASSERT(xs != NULL ||
	    (wu->swu_flags & (SR_WUF_DISCIPLINE|SR_WUF_REBUILD)));

	s = splbio();

	if (xs != NULL) {
		if (wu->swu_ios_failed)
			xs->error = XS_DRIVER_STUFFUP;
		else
			xs->error = XS_NOERROR;
	}

	/* A discipline may restart the wu (e.g. RAID1 read retry). */
	if (sd->sd_scsi_wu_done) {
		if (sd->sd_scsi_wu_done(wu) == SR_WU_RESTART)
			goto done;
	}

	/* Remove work unit from pending queue. */
	TAILQ_FOREACH(wup, &sd->sd_wu_pendq, swu_link)
		if (wup == wu)
			break;
	if (wup == NULL)
		panic("%s: wu %p not on pending queue",
		    DEVNAME(sd->sd_sc), wu);
	TAILQ_REMOVE(&sd->sd_wu_pendq, wu, swu_link);

	if (wu->swu_collider) {
		if (wu->swu_ios_failed)
			sr_raid_recreate_wu(wu->swu_collider);

		/* XXX Should the collider be failed if this xs failed? */
		sr_raid_startwu(wu->swu_collider);
	}

	/*
	 * If a discipline provides its own sd_scsi_done function, then it
	 * is responsible for calling sr_scsi_done() once I/O is complete.
	 */
	if (wu->swu_flags & SR_WUF_REBUILD)
		wu->swu_flags |= SR_WUF_REBUILDIOCOMP;
	if (wu->swu_flags & SR_WUF_WAKEUP)
		wakeup(wu);
	if (sd->sd_scsi_done)
		sd->sd_scsi_done(wu);
	else if (wu->swu_flags & SR_WUF_DISCIPLINE)
		sr_scsi_wu_put(sd, wu);
	else if (!(wu->swu_flags & SR_WUF_REBUILD))
		sr_scsi_done(sd, xs);

done:
	splx(s);
}
2281 | |
2282 | struct sr_workunit * |
2283 | sr_scsi_wu_get(struct sr_discipline *sd, int flags) |
2284 | { |
2285 | return scsi_io_get(&sd->sd_iopool, flags); |
2286 | } |
2287 | |
2288 | void |
2289 | sr_scsi_wu_put(struct sr_discipline *sd, struct sr_workunit *wu) |
2290 | { |
2291 | scsi_io_put(&sd->sd_iopool, wu); |
2292 | |
2293 | if (sd->sd_sync && sd->sd_wu_pending == 0) |
2294 | wakeup(sd); |
2295 | } |
2296 | |
2297 | void |
2298 | sr_scsi_done(struct sr_discipline *sd, struct scsi_xfer *xs) |
2299 | { |
2300 | DNPRINTF(SR_D_DIS, "%s: sr_scsi_done: xs %p\n", DEVNAME(sd->sd_sc), xs); |
2301 | |
2302 | if (xs->error == XS_NOERROR0) |
2303 | xs->resid = 0; |
2304 | |
2305 | scsi_done(xs); |
2306 | |
2307 | if (sd->sd_sync && sd->sd_wu_pending == 0) |
2308 | wakeup(sd); |
2309 | } |
2310 | |
2311 | void |
2312 | sr_scsi_cmd(struct scsi_xfer *xs) |
2313 | { |
2314 | struct scsi_link *link = xs->sc_link; |
2315 | struct sr_softc *sc = link->bus->sb_adapter_softc; |
2316 | struct sr_workunit *wu = xs->io; |
2317 | struct sr_discipline *sd; |
2318 | |
2319 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd target %d xs %p flags %#x\n", |
2320 | DEVNAME(sc), link->target, xs, xs->flags); |
2321 | |
2322 | sd = sc->sc_targets[link->target]; |
2323 | if (sd == NULL((void *)0)) |
2324 | panic("%s: sr_scsi_cmd NULL discipline", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
2325 | |
2326 | if (sd->sd_deleted) { |
2327 | printf("%s: %s device is being deleted, failing io\n", |
2328 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sd->sd_meta->ssd_devname); |
2329 | goto stuffup; |
2330 | } |
2331 | |
2332 | /* scsi layer *can* re-send wu without calling sr_wu_put(). */ |
2333 | sr_wu_release_ccbs(wu); |
2334 | sr_wu_init(sd, wu); |
2335 | wu->swu_state = SR_WU_INPROGRESS1; |
2336 | wu->swu_xs = xs; |
2337 | |
2338 | switch (xs->cmd.opcode) { |
2339 | case READ_COMMAND0x08: |
2340 | case READ_100x28: |
2341 | case READ_160x88: |
2342 | case WRITE_COMMAND0x0a: |
2343 | case WRITE_100x2a: |
2344 | case WRITE_160x8a: |
2345 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd: READ/WRITE %02x\n", |
2346 | DEVNAME(sc), xs->cmd.opcode); |
2347 | if (sd->sd_scsi_rw(wu)) |
2348 | goto stuffup; |
2349 | break; |
2350 | |
2351 | case SYNCHRONIZE_CACHE0x35: |
2352 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd: SYNCHRONIZE_CACHE\n", |
2353 | DEVNAME(sc)); |
2354 | if (sd->sd_scsi_sync(wu)) |
2355 | goto stuffup; |
2356 | goto complete; |
2357 | |
2358 | case TEST_UNIT_READY0x00: |
2359 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd: TEST_UNIT_READY\n", |
2360 | DEVNAME(sc)); |
2361 | if (sd->sd_scsi_tur(wu)) |
2362 | goto stuffup; |
2363 | goto complete; |
2364 | |
2365 | case START_STOP0x1b: |
2366 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd: START_STOP\n", |
2367 | DEVNAME(sc)); |
2368 | if (sd->sd_scsi_start_stop(wu)) |
2369 | goto stuffup; |
2370 | goto complete; |
2371 | |
2372 | case INQUIRY0x12: |
2373 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd: INQUIRY\n", |
2374 | DEVNAME(sc)); |
2375 | if (sd->sd_scsi_inquiry(wu)) |
2376 | goto stuffup; |
2377 | goto complete; |
2378 | |
2379 | case READ_CAPACITY0x25: |
2380 | case READ_CAPACITY_160x9e: |
2381 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd READ CAPACITY 0x%02x\n", |
2382 | DEVNAME(sc), xs->cmd.opcode); |
2383 | if (sd->sd_scsi_read_cap(wu)) |
2384 | goto stuffup; |
2385 | goto complete; |
2386 | |
2387 | case REQUEST_SENSE0x03: |
2388 | DNPRINTF(SR_D_CMD, "%s: sr_scsi_cmd REQUEST SENSE\n", |
2389 | DEVNAME(sc)); |
2390 | if (sd->sd_scsi_req_sense(wu)) |
2391 | goto stuffup; |
2392 | goto complete; |
2393 | |
2394 | default: |
2395 | DNPRINTF(SR_D_CMD, "%s: unsupported scsi command %x\n", |
2396 | DEVNAME(sc), xs->cmd.opcode); |
2397 | /* XXX might need to add generic function to handle others */ |
2398 | goto stuffup; |
2399 | } |
2400 | |
2401 | return; |
2402 | stuffup: |
2403 | if (sd->sd_scsi_sense.error_code) { |
2404 | xs->error = XS_SENSE1; |
2405 | memcpy(&xs->sense, &sd->sd_scsi_sense, sizeof(xs->sense))__builtin_memcpy((&xs->sense), (&sd->sd_scsi_sense ), (sizeof(xs->sense))); |
2406 | bzero(&sd->sd_scsi_sense, sizeof(sd->sd_scsi_sense))__builtin_bzero((&sd->sd_scsi_sense), (sizeof(sd->sd_scsi_sense ))); |
2407 | } else { |
2408 | xs->error = XS_DRIVER_STUFFUP2; |
2409 | } |
2410 | complete: |
2411 | sr_scsi_done(sd, xs); |
2412 | } |
2413 | |
2414 | int |
2415 | sr_scsi_probe(struct scsi_link *link) |
2416 | { |
2417 | struct sr_softc *sc = link->bus->sb_adapter_softc; |
2418 | struct sr_discipline *sd; |
2419 | |
2420 | KASSERT(link->target < SR_MAX_LD && link->lun == 0)((link->target < 256 && link->lun == 0) ? (void )0 : __assert("diagnostic ", "/usr/src/sys/dev/softraid.c", 2420 , "link->target < SR_MAX_LD && link->lun == 0" )); |
2421 | |
2422 | sd = sc->sc_targets[link->target]; |
2423 | if (sd == NULL((void *)0)) |
2424 | return (ENODEV19); |
2425 | |
2426 | link->pool = &sd->sd_iopool; |
2427 | if (sd->sd_openings) |
2428 | link->openings = sd->sd_openings(sd); |
2429 | else |
2430 | link->openings = sd->sd_max_wu; |
2431 | |
2432 | return (0); |
2433 | } |
2434 | |
2435 | int |
2436 | sr_scsi_ioctl(struct scsi_link *link, u_long cmd, caddr_t addr, int flag) |
2437 | { |
2438 | struct sr_softc *sc = link->bus->sb_adapter_softc; |
2439 | struct sr_discipline *sd; |
2440 | |
2441 | sd = sc->sc_targets[link->target]; |
2442 | if (sd == NULL((void *)0)) |
2443 | return (ENODEV19); |
2444 | |
2445 | DNPRINTF(SR_D_IOCTL, "%s: %s sr_scsi_ioctl cmd: %#lx\n", |
2446 | DEVNAME(sc), sd->sd_meta->ssd_devname, cmd); |
2447 | |
2448 | /* Pass bio ioctls through to the bio handler. */ |
2449 | if (IOCGROUP(cmd)(((cmd) >> 8) & 0xff) == 'B') |
2450 | return (sr_bio_handler(sc, sd, cmd, (struct bio *)addr)); |
2451 | |
2452 | switch (cmd) { |
2453 | case DIOCGCACHE((unsigned long)0x40000000 | ((sizeof(struct dk_cache) & 0x1fff ) << 16) | ((('d')) << 8) | ((117))): |
2454 | case DIOCSCACHE((unsigned long)0x80000000 | ((sizeof(struct dk_cache) & 0x1fff ) << 16) | ((('d')) << 8) | ((118))): |
2455 | return (EOPNOTSUPP45); |
2456 | default: |
2457 | return (ENOTTY25); |
2458 | } |
2459 | } |
2460 | |
2461 | int |
2462 | sr_bio_ioctl(struct device *dev, u_long cmd, caddr_t addr) |
2463 | { |
2464 | struct sr_softc *sc = (struct sr_softc *) dev; |
2465 | DNPRINTF(SR_D_IOCTL, "%s: sr_bio_ioctl\n", DEVNAME(sc)); |
2466 | |
2467 | return sr_bio_handler(sc, NULL((void *)0), cmd, (struct bio *)addr); |
2468 | } |
2469 | |
2470 | int |
2471 | sr_bio_handler(struct sr_softc *sc, struct sr_discipline *sd, u_long cmd, |
2472 | struct bio *bio) |
2473 | { |
2474 | int rv = 0; |
2475 | |
2476 | DNPRINTF(SR_D_IOCTL, "%s: sr_bio_handler ", DEVNAME(sc)); |
2477 | |
2478 | rw_enter_write(&sc->sc_lock); |
2479 | |
2480 | bio_status_init(&sc->sc_status, &sc->sc_dev); |
2481 | |
2482 | switch (cmd) { |
2483 | case BIOCINQ(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_inq) & 0x1fff) << 16) | ((('B')) << 8) | ((32))): |
2484 | DNPRINTF(SR_D_IOCTL, "inq\n"); |
2485 | rv = sr_ioctl_inq(sc, (struct bioc_inq *)bio); |
2486 | break; |
2487 | |
2488 | case BIOCVOL(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_vol) & 0x1fff) << 16) | ((('B')) << 8) | ((34))): |
2489 | DNPRINTF(SR_D_IOCTL, "vol\n"); |
2490 | rv = sr_ioctl_vol(sc, (struct bioc_vol *)bio); |
2491 | break; |
2492 | |
2493 | case BIOCDISK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_disk) & 0x1fff) << 16) | ((('B')) << 8) | ((33))): |
2494 | DNPRINTF(SR_D_IOCTL, "disk\n"); |
2495 | rv = sr_ioctl_disk(sc, (struct bioc_disk *)bio); |
2496 | break; |
2497 | |
2498 | case BIOCALARM(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_alarm) & 0x1fff) << 16) | ((('B')) << 8) | ((35))): |
2499 | DNPRINTF(SR_D_IOCTL, "alarm\n"); |
2500 | /*rv = sr_ioctl_alarm(sc, (struct bioc_alarm *)bio); */ |
2501 | break; |
2502 | |
2503 | case BIOCBLINK(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_blink) & 0x1fff) << 16) | ((('B')) << 8) | ((36))): |
2504 | DNPRINTF(SR_D_IOCTL, "blink\n"); |
2505 | /*rv = sr_ioctl_blink(sc, (struct bioc_blink *)bio); */ |
2506 | break; |
2507 | |
2508 | case BIOCSETSTATE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_setstate) & 0x1fff) << 16) | ((('B')) << 8) | ((37))): |
2509 | DNPRINTF(SR_D_IOCTL, "setstate\n"); |
2510 | rv = sr_ioctl_setstate(sc, (struct bioc_setstate *)bio); |
2511 | break; |
2512 | |
2513 | case BIOCCREATERAID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_createraid) & 0x1fff) << 16) | ((('B') ) << 8) | ((38))): |
2514 | DNPRINTF(SR_D_IOCTL, "createraid\n"); |
2515 | rv = sr_ioctl_createraid(sc, (struct bioc_createraid *)bio, |
2516 | 1, NULL((void *)0)); |
2517 | break; |
2518 | |
2519 | case BIOCDELETERAID(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_deleteraid) & 0x1fff) << 16) | ((('B') ) << 8) | ((39))): |
2520 | DNPRINTF(SR_D_IOCTL, "deleteraid\n"); |
2521 | rv = sr_ioctl_deleteraid(sc, sd, (struct bioc_deleteraid *)bio); |
2522 | break; |
2523 | |
2524 | case BIOCDISCIPLINE(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_discipline) & 0x1fff) << 16) | ((('B') ) << 8) | ((40))): |
2525 | DNPRINTF(SR_D_IOCTL, "discipline\n"); |
2526 | rv = sr_ioctl_discipline(sc, sd, (struct bioc_discipline *)bio); |
2527 | break; |
2528 | |
2529 | case BIOCINSTALLBOOT(((unsigned long)0x80000000|(unsigned long)0x40000000) | ((sizeof (struct bioc_installboot) & 0x1fff) << 16) | ((('B' )) << 8) | ((41))): |
2530 | DNPRINTF(SR_D_IOCTL, "installboot\n"); |
2531 | rv = sr_ioctl_installboot(sc, sd, |
2532 | (struct bioc_installboot *)bio); |
2533 | break; |
2534 | |
2535 | default: |
2536 | DNPRINTF(SR_D_IOCTL, "invalid ioctl\n"); |
2537 | rv = ENOTTY25; |
2538 | } |
2539 | |
2540 | sc->sc_status.bs_status = (rv ? BIO_STATUS_ERROR2 : BIO_STATUS_SUCCESS1); |
2541 | |
2542 | if (sc->sc_status.bs_msg_count > 0) |
2543 | rv = 0; |
2544 | |
2545 | memcpy(&bio->bio_status, &sc->sc_status, sizeof(struct bio_status))__builtin_memcpy((&bio->bio_status), (&sc->sc_status ), (sizeof(struct bio_status))); |
2546 | |
2547 | rw_exit_write(&sc->sc_lock); |
2548 | |
2549 | return (rv); |
2550 | } |
2551 | |
2552 | int |
2553 | sr_ioctl_inq(struct sr_softc *sc, struct bioc_inq *bi) |
2554 | { |
2555 | struct sr_discipline *sd; |
2556 | int vol = 0, disk = 0; |
2557 | |
2558 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
2559 | vol++; |
2560 | disk += sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; |
2561 | } |
2562 | |
2563 | strlcpy(bi->bi_dev, sc->sc_dev.dv_xname, sizeof(bi->bi_dev)); |
2564 | bi->bi_novol = vol + sc->sc_hotspare_no; |
2565 | bi->bi_nodisk = disk + sc->sc_hotspare_no; |
2566 | |
2567 | return (0); |
2568 | } |
2569 | |
2570 | int |
2571 | sr_ioctl_vol(struct sr_softc *sc, struct bioc_vol *bv) |
2572 | { |
2573 | int vol = -1, rv = EINVAL22; |
2574 | struct sr_discipline *sd; |
2575 | struct sr_chunk *hotspare; |
2576 | |
2577 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
2578 | vol++; |
2579 | if (vol != bv->bv_volid) |
2580 | continue; |
2581 | |
2582 | bv->bv_status = sd->sd_vol_status; |
2583 | bv->bv_size = sd->sd_meta->ssdi_sdd_invariant.ssd_size << DEV_BSHIFT9; |
2584 | bv->bv_level = sd->sd_meta->ssdi_sdd_invariant.ssd_level; |
2585 | bv->bv_nodisk = sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; |
2586 | |
2587 | #ifdef CRYPTO1 |
2588 | if ((sd->sd_meta->ssdi_sdd_invariant.ssd_level == 'C' || |
2589 | sd->sd_meta->ssdi_sdd_invariant.ssd_level == 0x1C) && |
2590 | sd->mdssd_dis_specific.mdd_crypto.key_disk != NULL((void *)0)) |
2591 | bv->bv_nodisk++; |
2592 | #endif |
2593 | if (bv->bv_status == BIOC_SVREBUILD0x05) |
2594 | bv->bv_percent = sr_rebuild_percent(sd); |
2595 | |
2596 | strlcpy(bv->bv_dev, sd->sd_meta->ssd_devname, |
2597 | sizeof(bv->bv_dev)); |
2598 | strlcpy(bv->bv_vendor, sd->sd_meta->ssdi_sdd_invariant.ssd_vendor, |
2599 | sizeof(bv->bv_vendor)); |
2600 | rv = 0; |
2601 | goto done; |
2602 | } |
2603 | |
2604 | /* Check hotspares list. */ |
2605 | SLIST_FOREACH(hotspare, &sc->sc_hotspare_list, src_link)for((hotspare) = ((&sc->sc_hotspare_list)->slh_first ); (hotspare) != ((void *)0); (hotspare) = ((hotspare)->src_link .sle_next)) { |
2606 | vol++; |
2607 | if (vol != bv->bv_volid) |
2608 | continue; |
2609 | |
2610 | bv->bv_status = BIOC_SVONLINE0x00; |
2611 | bv->bv_size = hotspare->src_meta.scmi_scm_invariant.scm_size << DEV_BSHIFT9; |
2612 | bv->bv_level = -1; /* Hotspare. */ |
2613 | bv->bv_nodisk = 1; |
2614 | strlcpy(bv->bv_dev, hotspare->src_meta.scmi_scm_invariant.scm_devname, |
2615 | sizeof(bv->bv_dev)); |
2616 | strlcpy(bv->bv_vendor, hotspare->src_meta.scmi_scm_invariant.scm_devname, |
2617 | sizeof(bv->bv_vendor)); |
2618 | rv = 0; |
2619 | goto done; |
2620 | } |
2621 | |
2622 | done: |
2623 | return (rv); |
2624 | } |
2625 | |
2626 | int |
2627 | sr_ioctl_disk(struct sr_softc *sc, struct bioc_disk *bd) |
2628 | { |
2629 | struct sr_discipline *sd; |
2630 | struct sr_chunk *src, *hotspare; |
2631 | int vol = -1, rv = EINVAL22; |
2632 | |
2633 | if (bd->bd_diskid < 0) |
2634 | goto done; |
2635 | |
2636 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
2637 | vol++; |
2638 | if (vol != bd->bd_volid) |
2639 | continue; |
2640 | |
2641 | if (bd->bd_diskid < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no) |
2642 | src = sd->sd_vol.sv_chunks[bd->bd_diskid]; |
2643 | #ifdef CRYPTO1 |
2644 | else if (bd->bd_diskid == sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no && |
2645 | (sd->sd_meta->ssdi_sdd_invariant.ssd_level == 'C' || |
2646 | sd->sd_meta->ssdi_sdd_invariant.ssd_level == 0x1C) && |
2647 | sd->mdssd_dis_specific.mdd_crypto.key_disk != NULL((void *)0)) |
2648 | src = sd->mdssd_dis_specific.mdd_crypto.key_disk; |
2649 | #endif |
2650 | else |
2651 | break; |
2652 | |
2653 | bd->bd_status = src->src_meta.scm_status; |
2654 | bd->bd_size = src->src_meta.scmi_scm_invariant.scm_size << DEV_BSHIFT9; |
2655 | bd->bd_channel = vol; |
2656 | bd->bd_target = bd->bd_diskid; |
2657 | strlcpy(bd->bd_vendor, src->src_meta.scmi_scm_invariant.scm_devname, |
2658 | sizeof(bd->bd_vendor)); |
2659 | rv = 0; |
2660 | goto done; |
2661 | } |
2662 | |
2663 | /* Check hotspares list. */ |
2664 | SLIST_FOREACH(hotspare, &sc->sc_hotspare_list, src_link)for((hotspare) = ((&sc->sc_hotspare_list)->slh_first ); (hotspare) != ((void *)0); (hotspare) = ((hotspare)->src_link .sle_next)) { |
2665 | vol++; |
2666 | if (vol != bd->bd_volid) |
2667 | continue; |
2668 | |
2669 | if (bd->bd_diskid != 0) |
2670 | break; |
2671 | |
2672 | bd->bd_status = hotspare->src_meta.scm_status; |
2673 | bd->bd_size = hotspare->src_meta.scmi_scm_invariant.scm_size << DEV_BSHIFT9; |
2674 | bd->bd_channel = vol; |
2675 | bd->bd_target = bd->bd_diskid; |
2676 | strlcpy(bd->bd_vendor, hotspare->src_meta.scmi_scm_invariant.scm_devname, |
2677 | sizeof(bd->bd_vendor)); |
2678 | rv = 0; |
2679 | goto done; |
2680 | } |
2681 | |
2682 | done: |
2683 | return (rv); |
2684 | } |
2685 | |
2686 | int |
2687 | sr_ioctl_setstate(struct sr_softc *sc, struct bioc_setstate *bs) |
2688 | { |
2689 | int rv = EINVAL22; |
2690 | int vol = -1, found, c; |
2691 | struct sr_discipline *sd; |
2692 | struct sr_chunk *ch_entry; |
2693 | struct sr_chunk_head *cl; |
2694 | |
2695 | if (bs->bs_other_id_type == BIOC_SSOTHER_UNUSED0x00) |
2696 | goto done; |
2697 | |
2698 | if (bs->bs_status == BIOC_SSHOTSPARE0x02) { |
2699 | rv = sr_hotspare(sc, (dev_t)bs->bs_other_id); |
2700 | goto done; |
2701 | } |
2702 | |
2703 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
2704 | vol++; |
2705 | if (vol == bs->bs_volid) |
2706 | break; |
2707 | } |
2708 | if (sd == NULL((void *)0)) |
2709 | goto done; |
2710 | |
2711 | switch (bs->bs_status) { |
2712 | case BIOC_SSOFFLINE0x01: |
2713 | /* Take chunk offline */ |
2714 | found = c = 0; |
2715 | cl = &sd->sd_vol.sv_chunk_list; |
2716 | SLIST_FOREACH(ch_entry, cl, src_link)for((ch_entry) = ((cl)->slh_first); (ch_entry) != ((void * )0); (ch_entry) = ((ch_entry)->src_link.sle_next)) { |
2717 | if (ch_entry->src_dev_mm == bs->bs_other_id) { |
2718 | found = 1; |
2719 | break; |
2720 | } |
2721 | c++; |
2722 | } |
2723 | if (found == 0) { |
2724 | sr_error(sc, "chunk not part of array"); |
2725 | goto done; |
2726 | } |
2727 | |
2728 | /* XXX: check current state first */ |
2729 | sd->sd_set_chunk_state(sd, c, BIOC_SDOFFLINE0x01); |
2730 | |
2731 | if (sr_meta_save(sd, SR_META_DIRTY0x1)) { |
2732 | sr_error(sc, "could not save metadata for %s", |
2733 | sd->sd_meta->ssd_devname); |
2734 | goto done; |
2735 | } |
2736 | rv = 0; |
2737 | break; |
2738 | |
2739 | case BIOC_SDSCRUB0x06: |
2740 | break; |
2741 | |
2742 | case BIOC_SSREBUILD0x03: |
2743 | rv = sr_rebuild_init(sd, (dev_t)bs->bs_other_id, 0); |
2744 | break; |
2745 | |
2746 | default: |
2747 | sr_error(sc, "unsupported state request %d", bs->bs_status); |
2748 | } |
2749 | |
2750 | done: |
2751 | return (rv); |
2752 | } |
2753 | |
2754 | int |
2755 | sr_chunk_in_use(struct sr_softc *sc, dev_t dev) |
2756 | { |
2757 | struct sr_discipline *sd; |
2758 | struct sr_chunk *chunk; |
2759 | int i; |
2760 | |
2761 | DNPRINTF(SR_D_MISC, "%s: sr_chunk_in_use(%d)\n", DEVNAME(sc), dev); |
2762 | |
2763 | if (dev == NODEV(dev_t)(-1)) |
2764 | return BIOC_SDINVALID0xff; |
2765 | |
2766 | /* See if chunk is already in use. */ |
2767 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
2768 | for (i = 0; i < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; i++) { |
2769 | chunk = sd->sd_vol.sv_chunks[i]; |
2770 | if (chunk->src_dev_mm == dev) |
2771 | return chunk->src_meta.scm_status; |
2772 | } |
2773 | } |
2774 | |
2775 | /* Check hotspares list. */ |
2776 | SLIST_FOREACH(chunk, &sc->sc_hotspare_list, src_link)for((chunk) = ((&sc->sc_hotspare_list)->slh_first); (chunk) != ((void *)0); (chunk) = ((chunk)->src_link.sle_next )) |
2777 | if (chunk->src_dev_mm == dev) |
2778 | return chunk->src_meta.scm_status; |
2779 | |
2780 | return BIOC_SDINVALID0xff; |
2781 | } |
2782 | |
2783 | int |
2784 | sr_hotspare(struct sr_softc *sc, dev_t dev) |
2785 | { |
2786 | struct sr_discipline *sd = NULL((void *)0); |
2787 | struct sr_metadata *sm = NULL((void *)0); |
2788 | struct sr_meta_chunk *hm; |
2789 | struct sr_chunk_head *cl; |
2790 | struct sr_chunk *chunk, *last, *hotspare = NULL((void *)0); |
2791 | struct sr_uuid uuid; |
2792 | struct disklabel label; |
2793 | struct vnode *vn; |
2794 | u_int64_t size; |
2795 | char devname[32]; |
2796 | int rv = EINVAL22; |
2797 | int c, part, open = 0; |
2798 | |
2799 | /* |
2800 | * Add device to global hotspares list. |
2801 | */ |
2802 | |
2803 | sr_meta_getdevname(sc, dev, devname, sizeof(devname)); |
2804 | |
2805 | /* Make sure chunk is not already in use. */ |
2806 | c = sr_chunk_in_use(sc, dev); |
2807 | if (c != BIOC_SDINVALID0xff && c != BIOC_SDOFFLINE0x01) { |
2808 | if (c == BIOC_SDHOTSPARE0x04) |
2809 | sr_error(sc, "%s is already a hotspare", devname); |
2810 | else |
2811 | sr_error(sc, "%s is already in use", devname); |
2812 | goto done; |
2813 | } |
2814 | |
2815 | /* XXX - See if there is an existing degraded volume... */ |
2816 | |
2817 | /* Open device. */ |
2818 | if (bdevvp(dev, &vn)) { |
2819 | sr_error(sc, "sr_hotspare: cannot allocate vnode"); |
2820 | goto done; |
2821 | } |
2822 | if (VOP_OPEN(vn, FREAD0x0001 | FWRITE0x0002, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc)) { |
2823 | DNPRINTF(SR_D_META,"%s: sr_hotspare cannot open %s\n", |
2824 | DEVNAME(sc), devname); |
2825 | vput(vn); |
2826 | goto fail; |
2827 | } |
2828 | open = 1; /* close dev on error */ |
2829 | |
2830 | /* Get partition details. */ |
2831 | part = DISKPART(dev)(((unsigned)((dev) & 0xff) | (((dev) & 0xffff0000) >> 8)) % 16); |
2832 | if (VOP_IOCTL(vn, DIOCGDINFO((unsigned long)0x40000000 | ((sizeof(struct disklabel) & 0x1fff) << 16) | ((('d')) << 8) | ((101))), (caddr_t)&label, FREAD0x0001, |
2833 | NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc)) { |
2834 | DNPRINTF(SR_D_META, "%s: sr_hotspare ioctl failed\n", |
2835 | DEVNAME(sc)); |
2836 | goto fail; |
2837 | } |
2838 | if (label.d_partitions[part].p_fstype != FS_RAID19) { |
2839 | sr_error(sc, "%s partition not of type RAID (%d)", |
2840 | devname, label.d_partitions[part].p_fstype); |
2841 | goto fail; |
2842 | } |
2843 | |
2844 | /* Calculate partition size. */ |
2845 | size = DL_SECTOBLK(&label, DL_GETPSIZE(&label.d_partitions[part]))(((((u_int64_t)(&label.d_partitions[part])->p_sizeh << 32) + (&label.d_partitions[part])->p_size)) * ((& label)->d_secsize / (1 << 9))); |
2846 | if (size <= SR_DATA_OFFSET(16 + (64 + (320 + 128)))) { |
2847 | DNPRINTF(SR_D_META, "%s: %s partition too small\n", DEVNAME(sc), |
2848 | devname); |
2849 | goto fail; |
2850 | } |
2851 | size -= SR_DATA_OFFSET(16 + (64 + (320 + 128))); |
2852 | if (size > INT64_MAX0x7fffffffffffffffLL) { |
2853 | DNPRINTF(SR_D_META, "%s: %s partition too large\n", DEVNAME(sc), |
2854 | devname); |
2855 | goto fail; |
2856 | } |
2857 | |
2858 | /* |
2859 | * Create and populate chunk metadata. |
2860 | */ |
2861 | |
2862 | sr_uuid_generate(&uuid); |
2863 | hotspare = malloc(sizeof(struct sr_chunk), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
2864 | |
2865 | hotspare->src_dev_mm = dev; |
2866 | hotspare->src_vn = vn; |
2867 | strlcpy(hotspare->src_devname, devname, sizeof(hm->scmi_scm_invariant.scm_devname)); |
2868 | hotspare->src_size = size; |
2869 | |
2870 | hm = &hotspare->src_meta; |
2871 | hm->scmi_scm_invariant.scm_volid = SR_HOTSPARE_VOLID0xffffffff; |
2872 | hm->scmi_scm_invariant.scm_chunk_id = 0; |
2873 | hm->scmi_scm_invariant.scm_size = size; |
2874 | hm->scmi_scm_invariant.scm_coerced_size = size; |
2875 | strlcpy(hm->scmi_scm_invariant.scm_devname, devname, sizeof(hm->scmi_scm_invariant.scm_devname)); |
2876 | memcpy(&hm->scmi.scm_uuid, &uuid, sizeof(struct sr_uuid))__builtin_memcpy((&hm->_scm_invariant.scm_uuid), (& uuid), (sizeof(struct sr_uuid))); |
2877 | |
2878 | sr_checksum(sc, hm, &hm->scm_checksum, |
2879 | sizeof(struct sr_meta_chunk_invariant)); |
2880 | |
2881 | hm->scm_status = BIOC_SDHOTSPARE0x04; |
2882 | |
2883 | /* |
2884 | * Create and populate our own discipline and metadata. |
2885 | */ |
2886 | |
2887 | sm = malloc(sizeof(struct sr_metadata), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
2888 | sm->ssdi_sdd_invariant.ssd_magic = SR_MAGIC0x4d4152436372616dLLU; |
2889 | sm->ssdi_sdd_invariant.ssd_version = SR_META_VERSION6; |
2890 | sm->ssd_ondisk = 0; |
2891 | sm->ssdi_sdd_invariant.ssd_vol_flags = 0; |
2892 | memcpy(&sm->ssdi.ssd_uuid, &uuid, sizeof(struct sr_uuid))__builtin_memcpy((&sm->_sdd_invariant.ssd_uuid), (& uuid), (sizeof(struct sr_uuid))); |
2893 | sm->ssdi_sdd_invariant.ssd_chunk_no = 1; |
2894 | sm->ssdi_sdd_invariant.ssd_volid = SR_HOTSPARE_VOLID0xffffffff; |
2895 | sm->ssdi_sdd_invariant.ssd_level = SR_HOTSPARE_LEVEL0xffffffff; |
2896 | sm->ssdi_sdd_invariant.ssd_size = size; |
2897 | sm->ssdi_sdd_invariant.ssd_secsize = label.d_secsize; |
2898 | strlcpy(sm->ssdi_sdd_invariant.ssd_vendor, "OPENBSD", sizeof(sm->ssdi_sdd_invariant.ssd_vendor)); |
2899 | snprintf(sm->ssdi_sdd_invariant.ssd_product, sizeof(sm->ssdi_sdd_invariant.ssd_product), |
2900 | "SR %s", "HOTSPARE"); |
2901 | snprintf(sm->ssdi_sdd_invariant.ssd_revision, sizeof(sm->ssdi_sdd_invariant.ssd_revision), |
2902 | "%03d", SR_META_VERSION6); |
2903 | |
2904 | sd = malloc(sizeof(struct sr_discipline), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
2905 | sd->sd_sc = sc; |
2906 | sd->sd_meta = sm; |
2907 | sd->sd_meta_type = SR_META_F_NATIVE0; |
2908 | sd->sd_vol_status = BIOC_SVONLINE0x00; |
2909 | strlcpy(sd->sd_name, "HOTSPARE", sizeof(sd->sd_name)); |
2910 | SLIST_INIT(&sd->sd_meta_opt){ ((&sd->sd_meta_opt)->slh_first) = ((void *)0); }; |
2911 | |
2912 | /* Add chunk to volume. */ |
2913 | sd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF2, |
2914 | M_WAITOK0x0001 | M_ZERO0x0008); |
2915 | sd->sd_vol.sv_chunks[0] = hotspare; |
2916 | SLIST_INIT(&sd->sd_vol.sv_chunk_list){ ((&sd->sd_vol.sv_chunk_list)->slh_first) = ((void *)0); }; |
2917 | SLIST_INSERT_HEAD(&sd->sd_vol.sv_chunk_list, hotspare, src_link)do { (hotspare)->src_link.sle_next = (&sd->sd_vol.sv_chunk_list )->slh_first; (&sd->sd_vol.sv_chunk_list)->slh_first = (hotspare); } while (0); |
2918 | |
2919 | /* Save metadata. */ |
2920 | if (sr_meta_save(sd, SR_META_DIRTY0x1)) { |
2921 | sr_error(sc, "could not save metadata to %s", devname); |
2922 | goto fail; |
2923 | } |
2924 | |
2925 | /* |
2926 | * Add chunk to hotspare list. |
2927 | */ |
2928 | rw_enter_write(&sc->sc_hs_lock); |
2929 | cl = &sc->sc_hotspare_list; |
2930 | if (SLIST_EMPTY(cl)(((cl)->slh_first) == ((void *)0))) |
2931 | SLIST_INSERT_HEAD(cl, hotspare, src_link)do { (hotspare)->src_link.sle_next = (cl)->slh_first; ( cl)->slh_first = (hotspare); } while (0); |
2932 | else { |
2933 | SLIST_FOREACH(chunk, cl, src_link)for((chunk) = ((cl)->slh_first); (chunk) != ((void *)0); ( chunk) = ((chunk)->src_link.sle_next)) |
2934 | last = chunk; |
2935 | SLIST_INSERT_AFTER(last, hotspare, src_link)do { (hotspare)->src_link.sle_next = (last)->src_link.sle_next ; (last)->src_link.sle_next = (hotspare); } while (0); |
2936 | } |
2937 | sc->sc_hotspare_no++; |
2938 | rw_exit_write(&sc->sc_hs_lock); |
2939 | |
2940 | rv = 0; |
2941 | goto done; |
2942 | |
2943 | fail: |
2944 | free(hotspare, M_DEVBUF2, sizeof(*hotspare)); |
2945 | |
2946 | done: |
2947 | if (sd) |
2948 | free(sd->sd_vol.sv_chunks, M_DEVBUF2, |
2949 | sizeof(sd->sd_vol.sv_chunks)); |
2950 | free(sd, M_DEVBUF2, sizeof(*sd)); |
2951 | free(sm, M_DEVBUF2, sizeof(*sm)); |
2952 | if (open) { |
2953 | VOP_CLOSE(vn, FREAD0x0001 | FWRITE0x0002, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
2954 | vput(vn); |
2955 | } |
2956 | |
2957 | return (rv); |
2958 | } |
2959 | |
2960 | void |
2961 | sr_hotspare_rebuild_callback(void *xsd) |
2962 | { |
2963 | struct sr_discipline *sd = xsd; |
2964 | sr_hotspare_rebuild(sd); |
2965 | } |
2966 | |
2967 | void |
2968 | sr_hotspare_rebuild(struct sr_discipline *sd) |
2969 | { |
2970 | struct sr_softc *sc = sd->sd_sc; |
2971 | struct sr_chunk_head *cl; |
2972 | struct sr_chunk *hotspare, *chunk = NULL((void *)0); |
2973 | struct sr_workunit *wu; |
2974 | struct sr_ccb *ccb; |
2975 | int i, s, cid, busy; |
2976 | |
2977 | /* |
2978 | * Attempt to locate a hotspare and initiate rebuild. |
2979 | */ |
2980 | |
2981 | /* Find first offline chunk. */ |
2982 | for (cid = 0; cid < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; cid++) { |
2983 | if (sd->sd_vol.sv_chunks[cid]->src_meta.scm_status == |
2984 | BIOC_SDOFFLINE0x01) { |
2985 | chunk = sd->sd_vol.sv_chunks[cid]; |
2986 | break; |
2987 | } |
2988 | } |
2989 | if (chunk == NULL((void *)0)) { |
2990 | printf("%s: no offline chunk found on %s!\n", |
2991 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sd->sd_meta->ssd_devname); |
2992 | return; |
2993 | } |
2994 | |
2995 | /* See if we have a suitable hotspare... */ |
2996 | rw_enter_write(&sc->sc_hs_lock); |
2997 | cl = &sc->sc_hotspare_list; |
2998 | SLIST_FOREACH(hotspare, cl, src_link)for((hotspare) = ((cl)->slh_first); (hotspare) != ((void * )0); (hotspare) = ((hotspare)->src_link.sle_next)) |
2999 | if (hotspare->src_size >= chunk->src_size && |
3000 | hotspare->src_secsize <= sd->sd_meta->ssdi_sdd_invariant.ssd_secsize) |
3001 | break; |
3002 | |
3003 | if (hotspare != NULL((void *)0)) { |
3004 | |
3005 | printf("%s: %s volume degraded, will attempt to " |
3006 | "rebuild on hotspare %s\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3007 | sd->sd_meta->ssd_devname, hotspare->src_devname); |
3008 | |
3009 | /* |
3010 | * Ensure that all pending I/O completes on the failed chunk |
3011 | * before trying to initiate a rebuild. |
3012 | */ |
3013 | i = 0; |
3014 | do { |
3015 | busy = 0; |
3016 | |
3017 | s = splbio()splraise(0x6); |
3018 | TAILQ_FOREACH(wu, &sd->sd_wu_pendq, swu_link)for((wu) = ((&sd->sd_wu_pendq)->tqh_first); (wu) != ((void *)0); (wu) = ((wu)->swu_link.tqe_next)) { |
3019 | TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link)for((ccb) = ((&wu->swu_ccb)->tqh_first); (ccb) != ( (void *)0); (ccb) = ((ccb)->ccb_link.tqe_next)) { |
3020 | if (ccb->ccb_target == cid) |
3021 | busy = 1; |
3022 | } |
3023 | } |
3024 | TAILQ_FOREACH(wu, &sd->sd_wu_defq, swu_link)for((wu) = ((&sd->sd_wu_defq)->tqh_first); (wu) != ( (void *)0); (wu) = ((wu)->swu_link.tqe_next)) { |
3025 | TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link)for((ccb) = ((&wu->swu_ccb)->tqh_first); (ccb) != ( (void *)0); (ccb) = ((ccb)->ccb_link.tqe_next)) { |
3026 | if (ccb->ccb_target == cid) |
3027 | busy = 1; |
3028 | } |
3029 | } |
3030 | splx(s)spllower(s); |
3031 | |
3032 | if (busy) { |
3033 | tsleep_nsec(sd, PRIBIO16, "sr_hotspare", |
3034 | SEC_TO_NSEC(1)); |
3035 | i++; |
3036 | } |
3037 | |
3038 | } while (busy && i < 120); |
3039 | |
3040 | DNPRINTF(SR_D_META, "%s: waited %i seconds for I/O to " |
3041 | "complete on failed chunk %s\n", DEVNAME(sc), |
3042 | i, chunk->src_devname); |
3043 | |
3044 | if (busy) { |
3045 | printf("%s: pending I/O failed to complete on " |
3046 | "failed chunk %s, hotspare rebuild aborted...\n", |
3047 | DEVNAME(sc)((sc)->sc_dev.dv_xname), chunk->src_devname); |
3048 | goto done; |
3049 | } |
3050 | |
3051 | s = splbio()splraise(0x6); |
3052 | rw_enter_write(&sc->sc_lock); |
3053 | bio_status_init(&sc->sc_status, &sc->sc_dev); |
3054 | if (sr_rebuild_init(sd, hotspare->src_dev_mm, 1) == 0) { |
3055 | |
3056 | /* Remove hotspare from available list. */ |
3057 | sc->sc_hotspare_no--; |
3058 | SLIST_REMOVE(cl, hotspare, sr_chunk, src_link)do { if ((cl)->slh_first == (hotspare)) { do { ((cl))-> slh_first = ((cl))->slh_first->src_link.sle_next; } while (0); } else { struct sr_chunk *curelm = (cl)->slh_first; while (curelm->src_link.sle_next != (hotspare)) curelm = curelm ->src_link.sle_next; curelm->src_link.sle_next = curelm ->src_link.sle_next->src_link.sle_next; } ((hotspare)-> src_link.sle_next) = ((void *)-1); } while (0); |
3059 | free(hotspare, M_DEVBUF2, sizeof(*hotspare)); |
3060 | |
3061 | } |
3062 | rw_exit_write(&sc->sc_lock); |
3063 | splx(s)spllower(s); |
3064 | } |
3065 | done: |
3066 | rw_exit_write(&sc->sc_hs_lock); |
3067 | } |
3068 | |
3069 | int |
3070 | sr_rebuild_init(struct sr_discipline *sd, dev_t dev, int hotspare) |
3071 | { |
3072 | struct sr_softc *sc = sd->sd_sc; |
3073 | struct sr_chunk *chunk = NULL((void *)0); |
3074 | struct sr_meta_chunk *meta; |
3075 | struct disklabel label; |
3076 | struct vnode *vn; |
3077 | u_int64_t size; |
3078 | int64_t csize; |
3079 | char devname[32]; |
3080 | int rv = EINVAL22, open = 0; |
3081 | int cid, i, part, status; |
3082 | |
3083 | /* |
3084 | * Attempt to initiate a rebuild onto the specified device. |
3085 | */ |
3086 | |
3087 | if (!(sd->sd_capabilities & SR_CAP_REBUILD0x00000004)) { |
3088 | sr_error(sc, "discipline does not support rebuild"); |
3089 | goto done; |
3090 | } |
3091 | |
3092 | /* make sure volume is in the right state */ |
3093 | if (sd->sd_vol_status == BIOC_SVREBUILD0x05) { |
3094 | sr_error(sc, "rebuild already in progress"); |
3095 | goto done; |
3096 | } |
3097 | if (sd->sd_vol_status != BIOC_SVDEGRADED0x02) { |
3098 | sr_error(sc, "volume not degraded"); |
3099 | goto done; |
3100 | } |
3101 | |
3102 | /* Find first offline chunk. */ |
3103 | for (cid = 0; cid < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; cid++) { |
3104 | if (sd->sd_vol.sv_chunks[cid]->src_meta.scm_status == |
3105 | BIOC_SDOFFLINE0x01) { |
3106 | chunk = sd->sd_vol.sv_chunks[cid]; |
3107 | break; |
3108 | } |
3109 | } |
3110 | if (chunk == NULL((void *)0)) { |
3111 | sr_error(sc, "no offline chunks available to rebuild"); |
3112 | goto done; |
3113 | } |
3114 | |
3115 | /* Get coerced size from another online chunk. */ |
3116 | csize = 0; |
3117 | for (i = 0; i < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; i++) { |
3118 | if (sd->sd_vol.sv_chunks[i]->src_meta.scm_status == |
3119 | BIOC_SDONLINE0x00) { |
3120 | meta = &sd->sd_vol.sv_chunks[i]->src_meta; |
3121 | csize = meta->scmi_scm_invariant.scm_coerced_size; |
3122 | break; |
3123 | } |
3124 | } |
3125 | if (csize == 0) { |
3126 | sr_error(sc, "no online chunks available for rebuild"); |
3127 | goto done; |
3128 | } |
3129 | |
3130 | sr_meta_getdevname(sc, dev, devname, sizeof(devname)); |
3131 | if (bdevvp(dev, &vn)) { |
3132 | printf("%s: sr_rebuild_init: can't allocate vnode\n", |
3133 | DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3134 | goto done; |
3135 | } |
3136 | if (VOP_OPEN(vn, FREAD0x0001 | FWRITE0x0002, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc)) { |
3137 | DNPRINTF(SR_D_META,"%s: sr_ioctl_setstate can't " |
3138 | "open %s\n", DEVNAME(sc), devname); |
3139 | vput(vn); |
3140 | goto done; |
3141 | } |
3142 | open = 1; /* close dev on error */ |
3143 | |
3144 | /* Get disklabel and check partition. */ |
3145 | part = DISKPART(dev)(((unsigned)((dev) & 0xff) | (((dev) & 0xffff0000) >> 8)) % 16); |
3146 | if (VOP_IOCTL(vn, DIOCGDINFO((unsigned long)0x40000000 | ((sizeof(struct disklabel) & 0x1fff) << 16) | ((('d')) << 8) | ((101))), (caddr_t)&label, FREAD0x0001, |
3147 | NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc)) { |
3148 | DNPRINTF(SR_D_META, "%s: sr_ioctl_setstate ioctl failed\n", |
3149 | DEVNAME(sc)); |
3150 | goto done; |
3151 | } |
3152 | if (label.d_partitions[part].p_fstype != FS_RAID19) { |
3153 | sr_error(sc, "%s partition not of type RAID (%d)", |
3154 | devname, label.d_partitions[part].p_fstype); |
3155 | goto done; |
3156 | } |
3157 | |
3158 | /* Is the partition large enough? */ |
3159 | size = DL_SECTOBLK(&label, DL_GETPSIZE(&label.d_partitions[part]))(((((u_int64_t)(&label.d_partitions[part])->p_sizeh << 32) + (&label.d_partitions[part])->p_size)) * ((& label)->d_secsize / (1 << 9))); |
3160 | if (size <= sd->sd_meta->ssd_data_blkno) { |
3161 | sr_error(sc, "%s: %s partition too small", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3162 | devname); |
3163 | goto done; |
3164 | } |
3165 | size -= sd->sd_meta->ssd_data_blkno; |
3166 | if (size > INT64_MAX0x7fffffffffffffffLL) { |
3167 | sr_error(sc, "%s: %s partition too large", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3168 | devname); |
3169 | goto done; |
3170 | } |
3171 | if (size < csize) { |
3172 | sr_error(sc, "%s partition too small, at least %lld bytes " |
3173 | "required", devname, (long long)(csize << DEV_BSHIFT9)); |
3174 | goto done; |
3175 | } else if (size > csize) |
3176 | sr_warn(sc, "%s partition too large, wasting %lld bytes", |
3177 | devname, (long long)((size - csize) << DEV_BSHIFT9)); |
3178 | if (label.d_secsize > sd->sd_meta->ssdi_sdd_invariant.ssd_secsize) { |
3179 | sr_error(sc, "%s sector size too large, <= %u bytes " |
3180 | "required", devname, sd->sd_meta->ssdi_sdd_invariant.ssd_secsize); |
3181 | goto done; |
3182 | } |
3183 | |
3184 | /* Ensure that this chunk is not already in use. */ |
3185 | status = sr_chunk_in_use(sc, dev); |
3186 | if (status != BIOC_SDINVALID0xff && status != BIOC_SDOFFLINE0x01 && |
3187 | !(hotspare && status == BIOC_SDHOTSPARE0x04)) { |
3188 | sr_error(sc, "%s is already in use", devname); |
3189 | goto done; |
3190 | } |
3191 | |
3192 | /* Reset rebuild counter since we rebuilding onto a new chunk. */ |
3193 | sd->sd_meta->ssd_rebuild = 0; |
3194 | |
3195 | open = 0; /* leave dev open from here on out */ |
3196 | |
3197 | /* Fix up chunk. */ |
3198 | memcpy(chunk->src_duid, label.d_uid, sizeof(chunk->src_duid))__builtin_memcpy((chunk->src_duid), (label.d_uid), (sizeof (chunk->src_duid))); |
3199 | chunk->src_dev_mm = dev; |
3200 | chunk->src_vn = vn; |
3201 | |
3202 | /* Reconstruct metadata. */ |
3203 | meta = &chunk->src_meta; |
3204 | meta->scmi_scm_invariant.scm_volid = sd->sd_meta->ssdi_sdd_invariant.ssd_volid; |
3205 | meta->scmi_scm_invariant.scm_chunk_id = cid; |
3206 | strlcpy(meta->scmi_scm_invariant.scm_devname, devname, |
3207 | sizeof(meta->scmi_scm_invariant.scm_devname)); |
3208 | meta->scmi_scm_invariant.scm_size = size; |
3209 | meta->scmi_scm_invariant.scm_coerced_size = csize; |
3210 | memcpy(&meta->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,__builtin_memcpy((&meta->_scm_invariant.scm_uuid), (& sd->sd_meta->_sdd_invariant.ssd_uuid), (sizeof(meta-> _scm_invariant.scm_uuid))) |
3211 | sizeof(meta->scmi.scm_uuid))__builtin_memcpy((&meta->_scm_invariant.scm_uuid), (& sd->sd_meta->_sdd_invariant.ssd_uuid), (sizeof(meta-> _scm_invariant.scm_uuid))); |
3212 | sr_checksum(sc, meta, &meta->scm_checksum, |
3213 | sizeof(struct sr_meta_chunk_invariant)); |
3214 | |
3215 | sd->sd_set_chunk_state(sd, cid, BIOC_SDREBUILD0x03); |
3216 | |
3217 | if (sr_meta_save(sd, SR_META_DIRTY0x1)) { |
3218 | sr_error(sc, "could not save metadata to %s", devname); |
3219 | open = 1; |
3220 | goto done; |
3221 | } |
3222 | |
3223 | sr_warn(sc, "rebuild of %s started on %s", |
3224 | sd->sd_meta->ssd_devname, devname); |
3225 | |
3226 | sd->sd_reb_abort = 0; |
3227 | kthread_create_deferred(sr_rebuild_start, sd); |
3228 | |
3229 | rv = 0; |
3230 | done: |
3231 | if (open) { |
3232 | VOP_CLOSE(vn, FREAD0x0001 | FWRITE0x0002, NOCRED((struct ucred *)-1), curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
3233 | vput(vn); |
3234 | } |
3235 | |
3236 | return (rv); |
3237 | } |
3238 | |
3239 | int |
3240 | sr_rebuild_percent(struct sr_discipline *sd) |
3241 | { |
3242 | daddr_t rb, sz; |
3243 | |
3244 | sz = sd->sd_meta->ssdi_sdd_invariant.ssd_size; |
3245 | rb = sd->sd_meta->ssd_rebuild; |
3246 | |
3247 | if (rb > 0) |
3248 | return (100 - ((sz * 100 - rb * 100) / sz) - 1); |
3249 | |
3250 | return (0); |
3251 | } |
3252 | |
3253 | void |
3254 | sr_roam_chunks(struct sr_discipline *sd) |
3255 | { |
3256 | struct sr_softc *sc = sd->sd_sc; |
3257 | struct sr_chunk *chunk; |
3258 | struct sr_meta_chunk *meta; |
3259 | int roamed = 0; |
3260 | |
3261 | /* Have any chunks roamed? */ |
3262 | SLIST_FOREACH(chunk, &sd->sd_vol.sv_chunk_list, src_link)for((chunk) = ((&sd->sd_vol.sv_chunk_list)->slh_first ); (chunk) != ((void *)0); (chunk) = ((chunk)->src_link.sle_next )) { |
3263 | meta = &chunk->src_meta; |
3264 | if (strncmp(meta->scmi_scm_invariant.scm_devname, chunk->src_devname, |
3265 | sizeof(meta->scmi_scm_invariant.scm_devname))) { |
3266 | |
3267 | printf("%s: roaming device %s -> %s\n", DEVNAME(sc)((sc)->sc_dev.dv_xname), |
3268 | meta->scmi_scm_invariant.scm_devname, chunk->src_devname); |
3269 | |
3270 | strlcpy(meta->scmi_scm_invariant.scm_devname, chunk->src_devname, |
3271 | sizeof(meta->scmi_scm_invariant.scm_devname)); |
3272 | |
3273 | roamed++; |
3274 | } |
3275 | } |
3276 | |
3277 | if (roamed) |
3278 | sr_meta_save(sd, SR_META_DIRTY0x1); |
3279 | } |
3280 | |
3281 | int |
3282 | sr_ioctl_createraid(struct sr_softc *sc, struct bioc_createraid *bc, |
3283 | int user, void *data) |
3284 | { |
3285 | struct sr_meta_opt_item *omi; |
3286 | struct sr_chunk_head *cl; |
3287 | struct sr_discipline *sd = NULL((void *)0); |
3288 | struct sr_chunk *ch_entry; |
3289 | struct scsi_link *link; |
3290 | struct device *dev; |
3291 | char *uuid, devname[32]; |
3292 | dev_t *dt = NULL((void *)0); |
3293 | int i, no_chunk, rv = EINVAL22, target, vol; |
3294 | int no_meta; |
3295 | |
3296 | DNPRINTF(SR_D_IOCTL, "%s: sr_ioctl_createraid(%d)\n", |
3297 | DEVNAME(sc), user); |
3298 | |
3299 | /* user input */ |
3300 | if (bc->bc_dev_list_len > BIOC_CRMAXLEN1024) |
3301 | goto unwind; |
3302 | |
3303 | dt = malloc(bc->bc_dev_list_len, M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
3304 | if (user) { |
3305 | if (copyin(bc->bc_dev_list, dt, bc->bc_dev_list_len) != 0) |
3306 | goto unwind; |
3307 | } else |
3308 | memcpy(dt, bc->bc_dev_list, bc->bc_dev_list_len)__builtin_memcpy((dt), (bc->bc_dev_list), (bc->bc_dev_list_len )); |
3309 | |
3310 | /* Initialise discipline. */ |
3311 | sd = malloc(sizeof(struct sr_discipline), M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
3312 | sd->sd_sc = sc; |
3313 | SLIST_INIT(&sd->sd_meta_opt){ ((&sd->sd_meta_opt)->slh_first) = ((void *)0); }; |
3314 | sd->sd_taskq = taskq_create("srdis", 1, IPL_BIO0x6, 0); |
3315 | if (sd->sd_taskq == NULL((void *)0)) { |
3316 | sr_error(sc, "could not create discipline taskq"); |
3317 | goto unwind; |
3318 | } |
3319 | if (sr_discipline_init(sd, bc->bc_level)) { |
3320 | sr_error(sc, "could not initialize discipline"); |
3321 | goto unwind; |
3322 | } |
3323 | |
3324 | no_chunk = bc->bc_dev_list_len / sizeof(dev_t); |
3325 | cl = &sd->sd_vol.sv_chunk_list; |
3326 | SLIST_INIT(cl){ ((cl)->slh_first) = ((void *)0); }; |
3327 | |
3328 | /* Ensure that chunks are not already in use. */ |
3329 | for (i = 0; i < no_chunk; i++) { |
3330 | if (sr_chunk_in_use(sc, dt[i]) != BIOC_SDINVALID0xff) { |
3331 | sr_meta_getdevname(sc, dt[i], devname, sizeof(devname)); |
3332 | sr_error(sc, "chunk %s already in use", devname); |
3333 | goto unwind; |
3334 | } |
3335 | } |
3336 | |
3337 | sd->sd_meta_type = sr_meta_probe(sd, dt, no_chunk); |
3338 | if (sd->sd_meta_type == SR_META_F_INVALID-1) { |
3339 | sr_error(sc, "invalid metadata format"); |
3340 | goto unwind; |
3341 | } |
3342 | |
3343 | if (sr_meta_attach(sd, no_chunk, bc->bc_flags & BIOC_SCFORCE0x01)) |
3344 | goto unwind; |
3345 | |
3346 | /* force the raid volume by clearing metadata region */ |
3347 | if (bc->bc_flags & BIOC_SCFORCE0x01) { |
3348 | /* make sure disk isn't up and running */ |
3349 | if (sr_meta_read(sd)) |
3350 | if (sr_already_assembled(sd)) { |
3351 | uuid = sr_uuid_format( |
3352 | &sd->sd_meta->ssdi_sdd_invariant.ssd_uuid); |
3353 | sr_error(sc, "disk %s is currently in use; " |
3354 | "cannot force create", uuid); |
3355 | free(uuid, M_DEVBUF2, 37); |
3356 | goto unwind; |
3357 | } |
3358 | |
3359 | if (sr_meta_clear(sd)) { |
3360 | sr_error(sc, "failed to clear metadata"); |
3361 | goto unwind; |
3362 | } |
3363 | } |
3364 | |
3365 | no_meta = sr_meta_read(sd); |
3366 | if (no_meta == -1) { |
3367 | |
3368 | /* Corrupt metadata on one or more chunks. */ |
3369 | sr_error(sc, "one of the chunks has corrupt metadata; " |
3370 | "aborting assembly"); |
3371 | goto unwind; |
3372 | |
3373 | } else if (no_meta == 0) { |
3374 | |
3375 | /* Initialise volume and chunk metadata. */ |
3376 | sr_meta_init(sd, bc->bc_level, no_chunk); |
3377 | sd->sd_vol_status = BIOC_SVONLINE0x00; |
3378 | sd->sd_meta_flags = bc->bc_flags & BIOC_SCNOAUTOASSEMBLE0x04; |
3379 | if (sd->sd_create) { |
3380 | if ((i = sd->sd_create(sd, bc, no_chunk, |
3381 | sd->sd_vol.sv_chunk_minsz))) { |
3382 | rv = i; |
3383 | goto unwind; |
3384 | } |
3385 | } |
3386 | sr_meta_init_complete(sd); |
3387 | |
3388 | DNPRINTF(SR_D_IOCTL, |
3389 | "%s: sr_ioctl_createraid: vol_size: %lld\n", |
3390 | DEVNAME(sc), sd->sd_meta->ssdi.ssd_size); |
3391 | |
3392 | /* Warn if we've wasted chunk space due to coercing. */ |
3393 | if ((sd->sd_capabilities & SR_CAP_NON_COERCED0x00000008) == 0 && |
3394 | sd->sd_vol.sv_chunk_minsz != sd->sd_vol.sv_chunk_maxsz) |
3395 | sr_warn(sc, "chunk sizes are not equal; up to %llu " |
3396 | "blocks wasted per chunk", |
3397 | sd->sd_vol.sv_chunk_maxsz - |
3398 | sd->sd_vol.sv_chunk_minsz); |
3399 | |
3400 | } else { |
3401 | |
3402 | /* Ensure we are assembling the correct # of chunks. */ |
3403 | if (bc->bc_level == 0x1C && |
3404 | sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no > no_chunk) { |
3405 | sr_warn(sc, "trying to bring up %s degraded", |
3406 | sd->sd_meta->ssd_devname); |
3407 | } else if (sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no != no_chunk) { |
3408 | sr_error(sc, "volume chunk count does not match metadata " |
3409 | "chunk count"); |
3410 | goto unwind; |
3411 | } |
3412 | |
3413 | /* Ensure metadata level matches requested assembly level. */ |
3414 | if (sd->sd_meta->ssdi_sdd_invariant.ssd_level != bc->bc_level) { |
3415 | sr_error(sc, "volume level does not match metadata " |
3416 | "level"); |
3417 | goto unwind; |
3418 | } |
3419 | |
3420 | if (sr_already_assembled(sd)) { |
3421 | uuid = sr_uuid_format(&sd->sd_meta->ssdi_sdd_invariant.ssd_uuid); |
3422 | sr_error(sc, "disk %s already assembled", uuid); |
3423 | free(uuid, M_DEVBUF2, 37); |
3424 | goto unwind; |
3425 | } |
3426 | |
3427 | if (user == 0 && sd->sd_meta_flags & BIOC_SCNOAUTOASSEMBLE0x04) { |
3428 | DNPRINTF(SR_D_META, "%s: disk not auto assembled from " |
3429 | "metadata\n", DEVNAME(sc)); |
3430 | goto unwind; |
3431 | } |
3432 | |
3433 | if (no_meta != no_chunk) |
3434 | sr_warn(sc, "trying to bring up %s degraded", |
3435 | sd->sd_meta->ssd_devname); |
3436 | |
3437 | if (sd->sd_meta->ssd_meta_flags & SR_META_DIRTY0x1) |
3438 | sr_warn(sc, "%s was not shutdown properly", |
3439 | sd->sd_meta->ssd_devname); |
3440 | |
3441 | SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link)for((omi) = ((&sd->sd_meta_opt)->slh_first); (omi) != ((void *)0); (omi) = ((omi)->omi_link.sle_next)) |
3442 | if (sd->sd_meta_opt_handler == NULL((void *)0) || |
3443 | sd->sd_meta_opt_handler(sd, omi->omi_som) != 0) |
3444 | sr_meta_opt_handler(sd, omi->omi_som); |
3445 | |
3446 | if (sd->sd_assemble) { |
3447 | if ((i = sd->sd_assemble(sd, bc, no_chunk, data))) { |
3448 | rv = i; |
3449 | goto unwind; |
3450 | } |
3451 | } |
3452 | |
3453 | DNPRINTF(SR_D_META, "%s: disk assembled from metadata\n", |
3454 | DEVNAME(sc)); |
3455 | |
3456 | } |
3457 | |
3458 | /* Metadata MUST be fully populated by this point. */ |
3459 | TAILQ_INSERT_TAIL(&sc->sc_dis_list, sd, sd_link)do { (sd)->sd_link.tqe_next = ((void *)0); (sd)->sd_link .tqe_prev = (&sc->sc_dis_list)->tqh_last; *(&sc ->sc_dis_list)->tqh_last = (sd); (&sc->sc_dis_list )->tqh_last = &(sd)->sd_link.tqe_next; } while (0); |
3460 | |
3461 | /* Allocate all resources. */ |
3462 | if ((rv = sd->sd_alloc_resources(sd))) |
3463 | goto unwind; |
3464 | |
3465 | /* Adjust flags if necessary. */ |
3466 | if ((sd->sd_capabilities & SR_CAP_AUTO_ASSEMBLE0x00000002) && |
3467 | (bc->bc_flags & BIOC_SCNOAUTOASSEMBLE0x04) != |
3468 | (sd->sd_meta->ssdi_sdd_invariant.ssd_vol_flags & BIOC_SCNOAUTOASSEMBLE0x04)) { |
3469 | sd->sd_meta->ssdi_sdd_invariant.ssd_vol_flags &= ~BIOC_SCNOAUTOASSEMBLE0x04; |
3470 | sd->sd_meta->ssdi_sdd_invariant.ssd_vol_flags |= |
3471 | bc->bc_flags & BIOC_SCNOAUTOASSEMBLE0x04; |
3472 | } |
3473 | |
3474 | if (sd->sd_capabilities & SR_CAP_SYSTEM_DISK0x00000001) { |
3475 | /* Initialise volume state. */ |
3476 | sd->sd_set_vol_state(sd); |
3477 | if (sd->sd_vol_status == BIOC_SVOFFLINE0x01) { |
3478 | sr_error(sc, "%s is offline, will not be brought " |
3479 | "online", sd->sd_meta->ssd_devname); |
3480 | goto unwind; |
3481 | } |
3482 | |
3483 | /* Setup SCSI iopool. */ |
3484 | scsi_iopool_init(&sd->sd_iopool, sd, sr_wu_get, sr_wu_put); |
3485 | |
3486 | /* |
3487 | * All checks passed - return ENXIO if volume cannot be created. |
3488 | */ |
3489 | rv = ENXIO6; |
3490 | |
3491 | /* |
3492 | * Find a free target. |
3493 | * |
3494 | * XXX: We reserve sd_target == 0 to indicate the |
3495 | * discipline is not linked into sc->sc_targets, so begin |
3496 | * the search with target = 1. |
3497 | */ |
3498 | for (target = 1; target < SR_MAX_LD256; target++) |
3499 | if (sc->sc_targets[target] == NULL((void *)0)) |
3500 | break; |
3501 | if (target == SR_MAX_LD256) { |
3502 | sr_error(sc, "no free target for %s", |
3503 | sd->sd_meta->ssd_devname); |
3504 | goto unwind; |
3505 | } |
3506 | |
3507 | /* Clear sense data. */ |
3508 | bzero(&sd->sd_scsi_sense, sizeof(sd->sd_scsi_sense))__builtin_bzero((&sd->sd_scsi_sense), (sizeof(sd->sd_scsi_sense ))); |
3509 | |
3510 | /* Attach discipline and get midlayer to probe it. */ |
3511 | sd->sd_target = target; |
3512 | sc->sc_targets[target] = sd; |
3513 | if (scsi_probe_lun(sc->sc_scsibus, target, 0) != 0) { |
3514 | sr_error(sc, "scsi_probe_lun failed"); |
3515 | sc->sc_targets[target] = NULL((void *)0); |
3516 | sd->sd_target = 0; |
3517 | goto unwind; |
3518 | } |
3519 | |
3520 | link = scsi_get_link(sc->sc_scsibus, target, 0); |
3521 | if (link == NULL((void *)0)) |
3522 | goto unwind; |
3523 | |
3524 | dev = link->device_softc; |
3525 | DNPRINTF(SR_D_IOCTL, "%s: sr device added: %s at target %d\n", |
3526 | DEVNAME(sc), dev->dv_xname, sd->sd_target); |
3527 | |
3528 | /* XXX - Count volumes, not targets. */ |
3529 | for (i = 0, vol = -1; i <= sd->sd_target; i++) |
3530 | if (sc->sc_targets[i]) |
3531 | vol++; |
3532 | |
3533 | rv = 0; |
3534 | |
3535 | if (sd->sd_meta->ssd_devname[0] != '\0' && |
3536 | strncmp(sd->sd_meta->ssd_devname, dev->dv_xname, |
3537 | sizeof(dev->dv_xname))) |
3538 | sr_warn(sc, "volume %s is roaming, it used to be %s, " |
3539 | "updating metadata", dev->dv_xname, |
3540 | sd->sd_meta->ssd_devname); |
3541 | |
3542 | /* Populate remaining volume metadata. */ |
3543 | sd->sd_meta->ssdi_sdd_invariant.ssd_volid = vol; |
3544 | strlcpy(sd->sd_meta->ssd_devname, dev->dv_xname, |
3545 | sizeof(sd->sd_meta->ssd_devname)); |
3546 | |
3547 | sr_info(sc, "%s volume attached as %s", |
3548 | sd->sd_name, sd->sd_meta->ssd_devname); |
3549 | |
3550 | /* Update device name on any roaming chunks. */ |
3551 | sr_roam_chunks(sd); |
3552 | |
3553 | #ifndef SMALL_KERNEL |
3554 | if (sr_sensors_create(sd)) |
3555 | sr_warn(sc, "unable to create sensor for %s", |
3556 | dev->dv_xname); |
3557 | #endif /* SMALL_KERNEL */ |
3558 | } else { |
3559 | /* This volume does not attach as a system disk. */ |
3560 | ch_entry = SLIST_FIRST(cl)((cl)->slh_first); /* XXX */ |
3561 | strlcpy(sd->sd_meta->ssd_devname, ch_entry->src_devname, |
3562 | sizeof(sd->sd_meta->ssd_devname)); |
3563 | |
3564 | if (sd->sd_start_discipline(sd)) |
3565 | goto unwind; |
3566 | } |
3567 | |
3568 | /* Save current metadata to disk. */ |
3569 | rv = sr_meta_save(sd, SR_META_DIRTY0x1); |
3570 | |
3571 | if (sd->sd_vol_status == BIOC_SVREBUILD0x05) |
3572 | kthread_create_deferred(sr_rebuild_start, sd); |
3573 | |
3574 | sd->sd_ready = 1; |
3575 | |
3576 | free(dt, M_DEVBUF2, bc->bc_dev_list_len); |
3577 | |
3578 | return (rv); |
3579 | |
3580 | unwind: |
3581 | free(dt, M_DEVBUF2, bc->bc_dev_list_len); |
3582 | |
3583 | sr_discipline_shutdown(sd, 0, 0); |
3584 | |
3585 | if (rv == EAGAIN35) |
3586 | rv = 0; |
3587 | |
3588 | return (rv); |
3589 | } |
3590 | |
3591 | int |
3592 | sr_ioctl_deleteraid(struct sr_softc *sc, struct sr_discipline *sd, |
3593 | struct bioc_deleteraid *bd) |
3594 | { |
3595 | int rv = 1; |
3596 | |
3597 | DNPRINTF(SR_D_IOCTL, "%s: sr_ioctl_deleteraid %s\n", |
3598 | DEVNAME(sc), bd->bd_dev); |
3599 | |
3600 | if (sd == NULL((void *)0)) { |
3601 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
3602 | if (!strncmp(sd->sd_meta->ssd_devname, bd->bd_dev, |
3603 | sizeof(sd->sd_meta->ssd_devname))) |
3604 | break; |
3605 | } |
3606 | if (sd == NULL((void *)0)) { |
3607 | sr_error(sc, "volume %s not found", bd->bd_dev); |
3608 | goto bad; |
3609 | } |
3610 | } |
3611 | |
3612 | sd->sd_deleted = 1; |
3613 | sd->sd_meta->ssdi_sdd_invariant.ssd_vol_flags = BIOC_SCNOAUTOASSEMBLE0x04; |
3614 | sr_discipline_shutdown(sd, 1, 0); |
3615 | |
3616 | rv = 0; |
3617 | bad: |
3618 | return (rv); |
3619 | } |
3620 | |
3621 | int |
3622 | sr_ioctl_discipline(struct sr_softc *sc, struct sr_discipline *sd, |
3623 | struct bioc_discipline *bd) |
3624 | { |
3625 | int rv = 1; |
3626 | |
3627 | /* Dispatch a discipline specific ioctl. */ |
3628 | |
3629 | DNPRINTF(SR_D_IOCTL, "%s: sr_ioctl_discipline %s\n", DEVNAME(sc), |
3630 | bd->bd_dev); |
3631 | |
3632 | if (sd == NULL((void *)0)) { |
3633 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
3634 | if (!strncmp(sd->sd_meta->ssd_devname, bd->bd_dev, |
3635 | sizeof(sd->sd_meta->ssd_devname))) |
3636 | break; |
3637 | } |
3638 | if (sd == NULL((void *)0)) { |
3639 | sr_error(sc, "volume %s not found", bd->bd_dev); |
3640 | goto bad; |
3641 | } |
3642 | } |
3643 | |
3644 | if (sd->sd_ioctl_handler) |
3645 | rv = sd->sd_ioctl_handler(sd, bd); |
3646 | |
3647 | bad: |
3648 | return (rv); |
3649 | } |
3650 | |
3651 | int |
3652 | sr_ioctl_installboot(struct sr_softc *sc, struct sr_discipline *sd, |
3653 | struct bioc_installboot *bb) |
3654 | { |
3655 | void *bootblk = NULL((void *)0), *bootldr = NULL((void *)0); |
3656 | struct sr_chunk *chunk; |
3657 | struct sr_meta_opt_item *omi; |
3658 | struct sr_meta_boot *sbm; |
3659 | struct disk *dk; |
3660 | u_int32_t bbs = 0, bls = 0, secsize; |
3661 | u_char duid[8]; |
3662 | int rv = EINVAL22; |
3663 | int i; |
3664 | |
3665 | DNPRINTF(SR_D_IOCTL, "%s: sr_ioctl_installboot %s\n", DEVNAME(sc), |
3666 | bb->bb_dev); |
3667 | |
3668 | if (sd == NULL((void *)0)) { |
3669 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
3670 | if (!strncmp(sd->sd_meta->ssd_devname, bb->bb_dev, |
3671 | sizeof(sd->sd_meta->ssd_devname))) |
3672 | break; |
3673 | } |
3674 | if (sd == NULL((void *)0)) { |
3675 | sr_error(sc, "volume %s not found", bb->bb_dev); |
3676 | goto done; |
3677 | } |
3678 | } |
3679 | |
3680 | bzero(duid, sizeof(duid))__builtin_bzero((duid), (sizeof(duid))); |
3681 | TAILQ_FOREACH(dk, &disklist, dk_link)for((dk) = ((&disklist)->tqh_first); (dk) != ((void *) 0); (dk) = ((dk)->dk_link.tqe_next)) |
3682 | if (!strncmp(dk->dk_name, bb->bb_dev, sizeof(bb->bb_dev))) |
3683 | break; |
3684 | if (dk == NULL((void *)0) || dk->dk_label == NULL((void *)0) || |
3685 | (dk->dk_flags & DKF_LABELVALID0x0008) == 0 || |
3686 | bcmp(dk->dk_label->d_uid, &duid, sizeof(duid)) == 0) { |
3687 | sr_error(sc, "failed to get DUID for softraid volume"); |
3688 | goto done; |
3689 | } |
3690 | memcpy(duid, dk->dk_label->d_uid, sizeof(duid))__builtin_memcpy((duid), (dk->dk_label->d_uid), (sizeof (duid))); |
3691 | |
3692 | /* Ensure that boot storage area is large enough. */ |
3693 | if (sd->sd_meta->ssd_data_blkno < (SR_BOOT_OFFSET(16 + 64) + SR_BOOT_SIZE(320 + 128))) { |
3694 | sr_error(sc, "insufficient boot storage"); |
3695 | goto done; |
3696 | } |
3697 | |
3698 | if (bb->bb_bootblk_size > SR_BOOT_BLOCKS_SIZE128 * DEV_BSIZE(1 << 9)) { |
3699 | sr_error(sc, "boot block too large (%d > %d)", |
3700 | bb->bb_bootblk_size, SR_BOOT_BLOCKS_SIZE128 * DEV_BSIZE(1 << 9)); |
3701 | goto done; |
3702 | } |
3703 | |
3704 | if (bb->bb_bootldr_size > SR_BOOT_LOADER_SIZE320 * DEV_BSIZE(1 << 9)) { |
3705 | sr_error(sc, "boot loader too large (%d > %d)", |
3706 | bb->bb_bootldr_size, SR_BOOT_LOADER_SIZE320 * DEV_BSIZE(1 << 9)); |
3707 | goto done; |
3708 | } |
3709 | |
3710 | secsize = sd->sd_meta->ssdi_sdd_invariant.ssd_secsize; |
3711 | |
3712 | /* Copy in boot block. */ |
3713 | bbs = howmany(bb->bb_bootblk_size, secsize)(((bb->bb_bootblk_size) + ((secsize) - 1)) / (secsize)) * secsize; |
3714 | bootblk = malloc(bbs, M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
3715 | if (copyin(bb->bb_bootblk, bootblk, bb->bb_bootblk_size) != 0) |
3716 | goto done; |
3717 | |
3718 | /* Copy in boot loader. */ |
3719 | bls = howmany(bb->bb_bootldr_size, secsize)(((bb->bb_bootldr_size) + ((secsize) - 1)) / (secsize)) * secsize; |
3720 | bootldr = malloc(bls, M_DEVBUF2, M_WAITOK0x0001 | M_ZERO0x0008); |
3721 | if (copyin(bb->bb_bootldr, bootldr, bb->bb_bootldr_size) != 0) |
3722 | goto done; |
3723 | |
3724 | /* Create or update optional meta for bootable volumes. */ |
3725 | SLIST_FOREACH(omi, &sd->sd_meta_opt, omi_link)for((omi) = ((&sd->sd_meta_opt)->slh_first); (omi) != ((void *)0); (omi) = ((omi)->omi_link.sle_next)) |
3726 | if (omi->omi_som->som_type == SR_OPT_BOOT0x02) |
3727 | break; |
3728 | if (omi == NULL((void *)0)) { |
3729 | omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF2, |
3730 | M_WAITOK0x0001 | M_ZERO0x0008); |
3731 | omi->omi_som = malloc(sizeof(struct sr_meta_boot), M_DEVBUF2, |
Result of 'malloc' is converted to a pointer of type 'struct sr_meta_opt_hdr', which is incompatible with sizeof operand type 'struct sr_meta_boot' | |
3732 | M_WAITOK0x0001 | M_ZERO0x0008); |
3733 | omi->omi_som->som_type = SR_OPT_BOOT0x02; |
3734 | omi->omi_som->som_length = sizeof(struct sr_meta_boot); |
3735 | SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link)do { (omi)->omi_link.sle_next = (&sd->sd_meta_opt)-> slh_first; (&sd->sd_meta_opt)->slh_first = (omi); } while (0); |
3736 | sd->sd_meta->ssdi_sdd_invariant.ssd_opt_no++; |
3737 | } |
3738 | sbm = (struct sr_meta_boot *)omi->omi_som; |
3739 | |
3740 | memcpy(sbm->sbm_root_duid, duid, sizeof(sbm->sbm_root_duid))__builtin_memcpy((sbm->sbm_root_duid), (duid), (sizeof(sbm ->sbm_root_duid))); |
3741 | bzero(&sbm->sbm_boot_duid, sizeof(sbm->sbm_boot_duid))__builtin_bzero((&sbm->sbm_boot_duid), (sizeof(sbm-> sbm_boot_duid))); |
3742 | sbm->sbm_bootblk_size = bbs; |
3743 | sbm->sbm_bootldr_size = bls; |
3744 | |
3745 | DNPRINTF(SR_D_IOCTL, "sr_ioctl_installboot: root duid is %s\n", |
3746 | duid_format(sbm->sbm_root_duid)); |
3747 | |
3748 | /* Save boot block and boot loader to each chunk. */ |
3749 | for (i = 0; i < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; i++) { |
3750 | |
3751 | chunk = sd->sd_vol.sv_chunks[i]; |
3752 | if (chunk->src_meta.scm_status != BIOC_SDONLINE0x00 && |
3753 | chunk->src_meta.scm_status != BIOC_SDREBUILD0x03) |
3754 | continue; |
3755 | |
3756 | if (i < SR_MAX_BOOT_DISKS16) |
3757 | memcpy(&sbm->sbm_boot_duid[i], chunk->src_duid,__builtin_memcpy((&sbm->sbm_boot_duid[i]), (chunk-> src_duid), (sizeof(sbm->sbm_boot_duid[i]))) |
3758 | sizeof(sbm->sbm_boot_duid[i]))__builtin_memcpy((&sbm->sbm_boot_duid[i]), (chunk-> src_duid), (sizeof(sbm->sbm_boot_duid[i]))); |
3759 | |
3760 | /* Save boot blocks. */ |
3761 | DNPRINTF(SR_D_IOCTL, |
3762 | "sr_ioctl_installboot: saving boot block to %s " |
3763 | "(%u bytes)\n", chunk->src_devname, bbs); |
3764 | |
3765 | if (sr_rw(sc, chunk->src_dev_mm, bootblk, bbs, |
3766 | SR_BOOT_BLOCKS_OFFSET((16 + 64) + 320), B_WRITE0x00000000)) { |
3767 | sr_error(sc, "failed to write boot block"); |
3768 | goto done; |
3769 | } |
3770 | |
3771 | /* Save boot loader.*/ |
3772 | DNPRINTF(SR_D_IOCTL, |
3773 | "sr_ioctl_installboot: saving boot loader to %s " |
3774 | "(%u bytes)\n", chunk->src_devname, bls); |
3775 | |
3776 | if (sr_rw(sc, chunk->src_dev_mm, bootldr, bls, |
3777 | SR_BOOT_LOADER_OFFSET(16 + 64), B_WRITE0x00000000)) { |
3778 | sr_error(sc, "failed to write boot loader"); |
3779 | goto done; |
3780 | } |
3781 | } |
3782 | |
3783 | /* XXX - Install boot block on disk - MD code. */ |
3784 | |
3785 | /* Mark volume as bootable and save metadata. */ |
3786 | sd->sd_meta->ssdi_sdd_invariant.ssd_vol_flags |= BIOC_SCBOOTABLE0x08; |
3787 | if (sr_meta_save(sd, SR_META_DIRTY0x1)) { |
3788 | sr_error(sc, "could not save metadata to %s", DEVNAME(sc)((sc)->sc_dev.dv_xname)); |
3789 | goto done; |
3790 | } |
3791 | |
3792 | rv = 0; |
3793 | |
3794 | done: |
3795 | free(bootblk, M_DEVBUF2, bbs); |
3796 | free(bootldr, M_DEVBUF2, bls); |
3797 | |
3798 | return (rv); |
3799 | } |
3800 | |
3801 | void |
3802 | sr_chunks_unwind(struct sr_softc *sc, struct sr_chunk_head *cl) |
3803 | { |
3804 | struct sr_chunk *ch_entry, *ch_next; |
3805 | |
3806 | DNPRINTF(SR_D_IOCTL, "%s: sr_chunks_unwind\n", DEVNAME(sc)); |
3807 | |
3808 | if (!cl) |
3809 | return; |
3810 | |
3811 | for (ch_entry = SLIST_FIRST(cl)((cl)->slh_first); ch_entry != NULL((void *)0); ch_entry = ch_next) { |
3812 | ch_next = SLIST_NEXT(ch_entry, src_link)((ch_entry)->src_link.sle_next); |
3813 | |
3814 | DNPRINTF(SR_D_IOCTL, "%s: sr_chunks_unwind closing: %s\n", |
3815 | DEVNAME(sc), ch_entry->src_devname); |
3816 | if (ch_entry->src_vn) { |
3817 | /* |
3818 | * XXX - explicitly lock the vnode until we can resolve |
3819 | * the problem introduced by vnode aliasing... specfs |
3820 | * has no locking, whereas ufs/ffs does! |
3821 | */ |
3822 | vn_lock(ch_entry->src_vn, LK_EXCLUSIVE0x0001UL | LK_RETRY0x2000UL); |
3823 | VOP_CLOSE(ch_entry->src_vn, FREAD0x0001 | FWRITE0x0002, NOCRED((struct ucred *)-1), |
3824 | curproc({struct cpu_info *__ci; asm volatile("movq %%gs:%P1,%0" : "=r" (__ci) :"n" (__builtin_offsetof(struct cpu_info, ci_self))); __ci;})->ci_curproc); |
3825 | vput(ch_entry->src_vn); |
3826 | } |
3827 | free(ch_entry, M_DEVBUF2, sizeof(*ch_entry)); |
3828 | } |
3829 | SLIST_INIT(cl){ ((cl)->slh_first) = ((void *)0); }; |
3830 | } |
3831 | |
3832 | void |
3833 | sr_discipline_free(struct sr_discipline *sd) |
3834 | { |
3835 | struct sr_softc *sc; |
3836 | struct sr_discipline *sdtmp1; |
3837 | struct sr_meta_opt_head *som; |
3838 | struct sr_meta_opt_item *omi, *omi_next; |
3839 | |
3840 | if (!sd) |
3841 | return; |
3842 | |
3843 | sc = sd->sd_sc; |
3844 | |
3845 | DNPRINTF(SR_D_DIS, "%s: sr_discipline_free %s\n", |
3846 | DEVNAME(sc), |
3847 | sd->sd_meta ? sd->sd_meta->ssd_devname : "nodev"); |
3848 | if (sd->sd_free_resources) |
3849 | sd->sd_free_resources(sd); |
3850 | free(sd->sd_vol.sv_chunks, M_DEVBUF2, 0); |
3851 | free(sd->sd_meta, M_DEVBUF2, SR_META_SIZE64 * DEV_BSIZE(1 << 9)); |
3852 | free(sd->sd_meta_foreign, M_DEVBUF2, smd[sd->sd_meta_type].smd_size); |
3853 | |
3854 | som = &sd->sd_meta_opt; |
3855 | for (omi = SLIST_FIRST(som)((som)->slh_first); omi != NULL((void *)0); omi = omi_next) { |
3856 | omi_next = SLIST_NEXT(omi, omi_link)((omi)->omi_link.sle_next); |
3857 | free(omi->omi_som, M_DEVBUF2, 0); |
3858 | free(omi, M_DEVBUF2, sizeof(*omi)); |
3859 | } |
3860 | |
3861 | if (sd->sd_target != 0) { |
3862 | KASSERT(sc->sc_targets[sd->sd_target] == sd)((sc->sc_targets[sd->sd_target] == sd) ? (void)0 : __assert ("diagnostic ", "/usr/src/sys/dev/softraid.c", 3862, "sc->sc_targets[sd->sd_target] == sd" )); |
3863 | sc->sc_targets[sd->sd_target] = NULL((void *)0); |
3864 | } |
3865 | |
3866 | TAILQ_FOREACH(sdtmp1, &sc->sc_dis_list, sd_link)for((sdtmp1) = ((&sc->sc_dis_list)->tqh_first); (sdtmp1 ) != ((void *)0); (sdtmp1) = ((sdtmp1)->sd_link.tqe_next)) { |
3867 | if (sdtmp1 == sd) |
3868 | break; |
3869 | } |
3870 | if (sdtmp1 != NULL((void *)0)) |
3871 | TAILQ_REMOVE(&sc->sc_dis_list, sd, sd_link)do { if (((sd)->sd_link.tqe_next) != ((void *)0)) (sd)-> sd_link.tqe_next->sd_link.tqe_prev = (sd)->sd_link.tqe_prev ; else (&sc->sc_dis_list)->tqh_last = (sd)->sd_link .tqe_prev; *(sd)->sd_link.tqe_prev = (sd)->sd_link.tqe_next ; ((sd)->sd_link.tqe_prev) = ((void *)-1); ((sd)->sd_link .tqe_next) = ((void *)-1); } while (0); |
3872 | |
3873 | explicit_bzero(sd, sizeof *sd); |
3874 | free(sd, M_DEVBUF2, sizeof(*sd)); |
3875 | } |
3876 | |
/*
 * Shut down a discipline: abort any active rebuild, optionally write the
 * metadata back (meta_save != 0), then drain pending sync/flush activity
 * at splbio.  dying == -1 is the quiesce path: the volume is left ready
 * and nothing is released.  Otherwise sensors, the SCSI LUN, the chunk
 * list and the taskq are torn down and the discipline itself is freed.
 */
3877 | void
3878 | sr_discipline_shutdown(struct sr_discipline *sd, int meta_save, int dying)
3879 | {
3880 | struct sr_softc *sc;
3881 | int ret, s;
3882 | 
3883 | if (!sd)
3884 | return;
3885 | sc = sd->sd_sc;
3886 | 
3887 | DNPRINTF(SR_D_DIS, "%s: sr_discipline_shutdown %s\n", DEVNAME(sc),
3888 | sd->sd_meta ? sd->sd_meta->ssd_devname : "nodev");
3889 | 
3890 | /* If rebuilding, abort rebuild and drain I/O. */
3891 | if (sd->sd_reb_active) {
3892 | sd->sd_reb_abort = 1;
3893 | while (sd->sd_reb_active)
3894 | tsleep_nsec(sd, PWAIT32, "sr_shutdown", MSEC_TO_NSEC(1));
3895 | }
3896 | 
3897 | if (meta_save)
3898 | sr_meta_save(sd, 0);
3899 | 
3900 | s = splbio()splraise(0x6);
3901 | 
3902 | sd->sd_ready = 0;
3903 | 
3904 | /* make sure there isn't a sync pending and yield */
3905 | wakeup(sd);
/* Wait for sync/flush to drain, but give up after 60s so we cannot hang. */
3906 | while (sd->sd_sync || sd->sd_must_flush) {
3907 | ret = tsleep_nsec(&sd->sd_sync, MAXPRI127, "sr_down",
3908 | SEC_TO_NSEC(60));
3909 | if (ret == EWOULDBLOCK35)
3910 | break;
3911 | }
/* Quiesce-only caller (see sr_quiesce): keep the volume usable. */
3912 | if (dying == -1) {
3913 | sd->sd_ready = 1;
3914 | splx(s)spllower(s);
3915 | return;
3916 | }
3917 | 
3918 | #ifndef SMALL_KERNEL
3919 | sr_sensors_delete(sd);
3920 | #endif /* SMALL_KERNEL */
3921 | 
3922 | if (sd->sd_target != 0)
3923 | scsi_detach_lun(sc->sc_scsibus, sd->sd_target, 0,
3924 | dying ? 0 : DETACH_FORCE0x01);
3925 | 
3926 | sr_chunks_unwind(sc, &sd->sd_vol.sv_chunk_list);
3927 | 
3928 | if (sd->sd_taskq)
3929 | taskq_destroy(sd->sd_taskq);
3930 | 
3931 | sr_discipline_free(sd);
3932 | 
3933 | splx(s)spllower(s);
3934 | }
3935 | |
/*
 * Initialise a discipline's method table with the generic sr_raid_*
 * defaults, set up its metadata-save and hotspare-rebuild tasks, then
 * dispatch to the level-specific init routine which may override any
 * of the defaults.  Returns 0 on success, 1 for an unknown level.
 */
3936 | int
3937 | sr_discipline_init(struct sr_discipline *sd, int level)
3938 | {
3939 | int rv = 1;
3940 | 
3941 | /* Initialise discipline function pointers with defaults. */
3942 | sd->sd_alloc_resources = sr_alloc_resources;
3943 | sd->sd_assemble = NULL((void *)0);
3944 | sd->sd_create = NULL((void *)0);
3945 | sd->sd_free_resources = sr_free_resources;
3946 | sd->sd_ioctl_handler = NULL((void *)0);
3947 | sd->sd_openings = NULL((void *)0);
3948 | sd->sd_meta_opt_handler = NULL((void *)0);
3949 | sd->sd_rebuild = sr_rebuild;
3950 | sd->sd_scsi_inquiry = sr_raid_inquiry;
3951 | sd->sd_scsi_read_cap = sr_raid_read_cap;
3952 | sd->sd_scsi_tur = sr_raid_tur;
3953 | sd->sd_scsi_req_sense = sr_raid_request_sense;
3954 | sd->sd_scsi_start_stop = sr_raid_start_stop;
3955 | sd->sd_scsi_sync = sr_raid_sync;
3956 | sd->sd_scsi_rw = NULL((void *)0);
3957 | sd->sd_scsi_intr = sr_raid_intr;
3958 | sd->sd_scsi_wu_done = NULL((void *)0);
3959 | sd->sd_scsi_done = NULL((void *)0);
3960 | sd->sd_set_chunk_state = sr_set_chunk_state;
3961 | sd->sd_set_vol_state = sr_set_vol_state;
3962 | sd->sd_start_discipline = NULL((void *)0);
3963 | 
3964 | task_set(&sd->sd_meta_save_task, sr_meta_save_callback, sd);
3965 | task_set(&sd->sd_hotspare_rebuild_task, sr_hotspare_rebuild_callback,
3966 | sd);
3967 | 
/* Level-specific init may enlarge sd_wu_size; this is the baseline. */
3968 | sd->sd_wu_size = sizeof(struct sr_workunit);
3969 | switch (level) {
3970 | case 0:
3971 | sr_raid0_discipline_init(sd);
3972 | break;
3973 | case 1:
3974 | sr_raid1_discipline_init(sd);
3975 | break;
3976 | case 5:
3977 | sr_raid5_discipline_init(sd);
3978 | break;
3979 | case 6:
3980 | sr_raid6_discipline_init(sd);
3981 | break;
3982 | #ifdef CRYPTO1
3983 | case 'C':
3984 | sr_crypto_discipline_init(sd);
3985 | break;
3986 | case 0x1C:
3987 | sr_raid1c_discipline_init(sd);
3988 | break;
3989 | #endif
3990 | case 'c':
3991 | sr_concat_discipline_init(sd);
3992 | break;
3993 | default:
3994 | goto bad;
3995 | }
3996 | 
3997 | rv = 0;
3998 | bad:
3999 | return (rv);
4000 | }
4001 | |
/*
 * Synthesize a SCSI INQUIRY response for a softraid volume from the
 * vendor/product/revision strings stored in the volume metadata.
 * EVPD (vital product data) pages are not supported.  Returns 0 on
 * success, EINVAL/EOPNOTSUPP on a malformed or unsupported CDB.
 */
4002 | int
4003 | sr_raid_inquiry(struct sr_workunit *wu)
4004 | {
4005 | struct sr_discipline *sd = wu->swu_dis;
4006 | struct scsi_xfer *xs = wu->swu_xs;
4007 | struct scsi_inquiry *cdb = (struct scsi_inquiry *)&xs->cmd;
4008 | struct scsi_inquiry_data inq;
4009 | 
4010 | DNPRINTF(SR_D_DIS, "%s: sr_raid_inquiry\n", DEVNAME(sd->sd_sc));
4011 | 
4012 | if (xs->cmdlen != sizeof(*cdb))
4013 | return (EINVAL22);
4014 | 
4015 | if (ISSET(cdb->flags, SI_EVPD)((cdb->flags) & (0x01)))
4016 | return (EOPNOTSUPP45);
4017 | 
4018 | bzero(&inq, sizeof(inq))__builtin_bzero((&inq), (sizeof(inq)));
4019 | inq.device = T_DIRECT0x00;
4020 | inq.dev_qual2 = 0;
4021 | inq.version = SCSI_REV_20x02;
4022 | inq.response_format = SID_SCSI2_RESPONSE0x02;
4023 | inq.additional_length = SID_SCSI2_ALEN31;
4024 | inq.flags |= SID_CmdQue0x02;
4025 | strlcpy(inq.vendor, sd->sd_meta->ssdi_sdd_invariant.ssd_vendor,
4026 | sizeof(inq.vendor));
4027 | strlcpy(inq.product, sd->sd_meta->ssdi_sdd_invariant.ssd_product,
4028 | sizeof(inq.product));
4029 | strlcpy(inq.revision, sd->sd_meta->ssdi_sdd_invariant.ssd_revision,
4030 | sizeof(inq.revision));
4031 | scsi_copy_internal_data(xs, &inq, sizeof(inq));
4032 | 
4033 | return (0);
4034 | }
4035 | |
/*
 * Answer READ CAPACITY (10) and READ CAPACITY (16) from the volume
 * metadata.  The metadata size is kept in DEV_BSIZE units and is
 * converted to the volume's logical sector size; the 10-byte form
 * clamps the last-LBA field to 0xffffffff as SCSI requires.
 * Returns 0 when the opcode was handled, 1 otherwise.
 */
4036 | int
4037 | sr_raid_read_cap(struct sr_workunit *wu)
4038 | {
4039 | struct sr_discipline *sd = wu->swu_dis;
4040 | struct scsi_xfer *xs = wu->swu_xs;
4041 | struct scsi_read_cap_data rcd;
4042 | struct scsi_read_cap_data_16 rcd16;
4043 | u_int64_t addr;
4044 | int rv = 1;
4045 | u_int32_t secsize;
4046 | 
4047 | DNPRINTF(SR_D_DIS, "%s: sr_raid_read_cap\n", DEVNAME(sd->sd_sc));
4048 | 
4049 | secsize = sd->sd_meta->ssdi_sdd_invariant.ssd_secsize;
4050 | 
/* addr is the last addressable LBA, hence the trailing "- 1". */
4051 | addr = ((sd->sd_meta->ssdi_sdd_invariant.ssd_size * DEV_BSIZE(1 << 9)) / secsize) - 1;
4052 | if (xs->cmd.opcode == READ_CAPACITY0x25) {
4053 | bzero(&rcd, sizeof(rcd))__builtin_bzero((&rcd), (sizeof(rcd)));
4054 | if (addr > 0xffffffffllu)
4055 | _lto4b(0xffffffff, rcd.addr);
4056 | else
4057 | _lto4b(addr, rcd.addr);
4058 | _lto4b(secsize, rcd.length);
4059 | scsi_copy_internal_data(xs, &rcd, sizeof(rcd));
4060 | rv = 0;
4061 | } else if (xs->cmd.opcode == READ_CAPACITY_160x9e) {
4062 | bzero(&rcd16, sizeof(rcd16))__builtin_bzero((&rcd16), (sizeof(rcd16)));
4063 | _lto8b(addr, rcd16.addr);
4064 | _lto4b(secsize, rcd16.length);
4065 | scsi_copy_internal_data(xs, &rcd16, sizeof(rcd16));
4066 | rv = 0;
4067 | }
4068 | 
4069 | return (rv);
4070 | }
4071 | |
/*
 * TEST UNIT READY for a softraid volume.  An offline volume reports
 * NOT READY (ASC 0x04/ASCQ 0x11) and an invalid volume reports a
 * HARDWARE ERROR (ASC 0x05) via the stored sense data.  Returns 0
 * when the volume is usable, 1 when sense data was set.
 */
4072 | int
4073 | sr_raid_tur(struct sr_workunit *wu)
4074 | {
4075 | struct sr_discipline *sd = wu->swu_dis;
4076 | 
4077 | DNPRINTF(SR_D_DIS, "%s: sr_raid_tur\n", DEVNAME(sd->sd_sc));
4078 | 
4079 | if (sd->sd_vol_status == BIOC_SVOFFLINE0x01) {
4080 | sd->sd_scsi_sense.error_code = SSD_ERRCODE_CURRENT0x70;
4081 | sd->sd_scsi_sense.flags = SKEY_NOT_READY0x02;
4082 | sd->sd_scsi_sense.add_sense_code = 0x04;
4083 | sd->sd_scsi_sense.add_sense_code_qual = 0x11;
4084 | sd->sd_scsi_sense.extra_len = 4;
4085 | return (1);
4086 | } else if (sd->sd_vol_status == BIOC_SVINVALID0xff) {
4087 | sd->sd_scsi_sense.error_code = SSD_ERRCODE_CURRENT0x70;
4088 | sd->sd_scsi_sense.flags = SKEY_HARDWARE_ERROR0x04;
4089 | sd->sd_scsi_sense.add_sense_code = 0x05;
4090 | sd->sd_scsi_sense.add_sense_code_qual = 0x00;
4091 | sd->sd_scsi_sense.extra_len = 4;
4092 | return (1);
4093 | }
4094 | 
4095 | return (0);
4096 | }
4097 | |
/*
 * REQUEST SENSE: hand back the most recently stored sense data for
 * this discipline, then clear it so it is reported only once.
 */
4098 | int
4099 | sr_raid_request_sense(struct sr_workunit *wu)
4100 | {
4101 | struct sr_discipline *sd = wu->swu_dis;
4102 | struct scsi_xfer *xs = wu->swu_xs;
4103 | 
4104 | DNPRINTF(SR_D_DIS, "%s: sr_raid_request_sense\n",
4105 | DEVNAME(sd->sd_sc));
4106 | 
4107 | /* use latest sense data */
4108 | memcpy(&xs->sense, &sd->sd_scsi_sense, sizeof(xs->sense))__builtin_memcpy((&xs->sense), (&sd->sd_scsi_sense ), (sizeof(xs->sense)));
4109 | 
4110 | /* clear sense data */
4111 | bzero(&sd->sd_scsi_sense, sizeof(sd->sd_scsi_sense))__builtin_bzero((&sd->sd_scsi_sense), (sizeof(sd->sd_scsi_sense )));
4112 | 
4113 | return (0);
4114 | }
4115 | |
/*
 * START STOP UNIT: deliberately a no-op that reports success — a
 * softraid discipline always reflects correct status on its own.
 * Returns 1 only if the CDB pointer is somehow absent.
 */
4116 | int
4117 | sr_raid_start_stop(struct sr_workunit *wu)
4118 | {
4119 | struct scsi_xfer *xs = wu->swu_xs;
4120 | struct scsi_start_stop *ss = (struct scsi_start_stop *)&xs->cmd;
4121 | 
4122 | DNPRINTF(SR_D_DIS, "%s: sr_raid_start_stop\n",
4123 | DEVNAME(wu->swu_dis->sd_sc));
4124 | 
4125 | if (!ss)
4126 | return (1);
4127 | 
4128 | /*
4129 | * do nothing!
4130 | * a softraid discipline should always reflect correct status
4131 | */
4132 | return (0);
4133 | }
4134 | |
/*
 * SYNCHRONIZE CACHE: wait at splbio until all outstanding work units
 * have drained (the issuing wu itself is not counted unless the sync
 * is flagged SR_WUF_FAKE).  Each wait leg times out after 15 seconds;
 * on timeout the function gives up and returns 1, otherwise 0.
 * Waiters on sd_sync (e.g. sr_discipline_shutdown) are woken at exit.
 */
4135 | int
4136 | sr_raid_sync(struct sr_workunit *wu)
4137 | {
4138 | struct sr_discipline *sd = wu->swu_dis;
4139 | int s, ret, rv = 0, ios;
4140 | 
4141 | DNPRINTF(SR_D_DIS, "%s: sr_raid_sync\n", DEVNAME(sd->sd_sc));
4142 | 
4143 | /* when doing a fake sync don't count the wu */
4144 | ios = (wu->swu_flags & SR_WUF_FAKE(1<<6)) ? 0 : 1;
4145 | 
4146 | s = splbio()splraise(0x6);
4147 | sd->sd_sync = 1;
4148 | while (sd->sd_wu_pending > ios) {
4149 | ret = tsleep_nsec(sd, PRIBIO16, "sr_sync", SEC_TO_NSEC(15));
4150 | if (ret == EWOULDBLOCK35) {
4151 | DNPRINTF(SR_D_DIS, "%s: sr_raid_sync timeout\n",
4152 | DEVNAME(sd->sd_sc));
4153 | rv = 1;
4154 | break;
4155 | }
4156 | }
4157 | sd->sd_sync = 0;
4158 | splx(s)spllower(s);
4159 | 
4160 | wakeup(&sd->sd_sync);
4161 | 
4162 | return (rv);
4163 | }
4164 | |
/*
 * Biodone-style completion handler: the struct buf is the first member
 * of the sr_ccb, so the bp can be cast back to its ccb.  Completes the
 * ccb and its owning work unit at splbio.
 */
4165 | void
4166 | sr_raid_intr(struct buf *bp)
4167 | {
4168 | struct sr_ccb *ccb = (struct sr_ccb *)bp;
4169 | struct sr_workunit *wu = ccb->ccb_wu;
4170 | #ifdef SR_DEBUG
4171 | struct sr_discipline *sd = wu->swu_dis;
4172 | struct scsi_xfer *xs = wu->swu_xs;
4173 | #endif
4174 | int s;
4175 | 
4176 | DNPRINTF(SR_D_INTR, "%s: %s %s intr bp %p xs %p\n",
4177 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, sd->sd_name, bp, xs);
4178 | 
4179 | s = splbio()splraise(0x6);
4180 | sr_ccb_done(ccb);
4181 | sr_wu_done(wu);
4182 | splx(s)spllower(s);
4183 | }
4184 | |
/*
 * Queue a work unit for execution.  CONSTRUCT and REQUEUE states are
 * accepted without starting; RESTART starts immediately.  Otherwise the
 * pending queue is scanned backwards for an LBA-range overlap: on a
 * collision the wu is appended to the end of the colliding wu's
 * collider chain and parked on the deferred queue, to be started when
 * its predecessor completes; with no collision it is started now.
 */
4185 | void
4186 | sr_schedule_wu(struct sr_workunit *wu)
4187 | {
4188 | struct sr_discipline *sd = wu->swu_dis;
4189 | struct sr_workunit *wup;
4190 | int s;
4191 | 
4192 | DNPRINTF(SR_D_WU, "sr_schedule_wu: schedule wu %p state %i "
4193 | "flags 0x%x\n", wu, wu->swu_state, wu->swu_flags);
4194 | 
4195 | KASSERT(wu->swu_io_count > 0)((wu->swu_io_count > 0) ? (void)0 : __assert("diagnostic " , "/usr/src/sys/dev/softraid.c", 4195, "wu->swu_io_count > 0" ));
4196 | 
4197 | s = splbio()splraise(0x6);
4198 | 
4199 | /* Construct the work unit, do not schedule it. */
4200 | if (wu->swu_state == SR_WU_CONSTRUCT9)
4201 | goto queued;
4202 | 
4203 | /* Deferred work unit being reconstructed, do not start. */
4204 | if (wu->swu_state == SR_WU_REQUEUE8)
4205 | goto queued;
4206 | 
4207 | /* Current work unit failed, restart. */
4208 | if (wu->swu_state == SR_WU_RESTART7)
4209 | goto start;
4210 | 
4211 | if (wu->swu_state != SR_WU_INPROGRESS1)
4212 | panic("sr_schedule_wu: work unit not in progress (state %i)",
4213 | wu->swu_state);
4214 | 
4215 | /* Walk queue backwards and fill in collider if we have one. */
4216 | TAILQ_FOREACH_REVERSE(wup, &sd->sd_wu_pendq, sr_wu_list, swu_link)for((wup) = (*(((struct sr_wu_list *)((&sd->sd_wu_pendq )->tqh_last))->tqh_last)); (wup) != ((void *)0); (wup) = (*(((struct sr_wu_list *)((wup)->swu_link.tqe_prev))-> tqh_last))) {
/* Disjoint LBA ranges cannot collide; keep scanning. */
4217 | if (wu->swu_blk_end < wup->swu_blk_start ||
4218 | wup->swu_blk_end < wu->swu_blk_start)
4219 | continue;
4220 | 
4221 | /* Defer work unit due to LBA collision. */
4222 | DNPRINTF(SR_D_WU, "sr_schedule_wu: deferring work unit %p\n",
4223 | wu);
4224 | wu->swu_state = SR_WU_DEFERRED5;
4225 | while (wup->swu_collider)
4226 | wup = wup->swu_collider;
4227 | wup->swu_collider = wu;
4228 | TAILQ_INSERT_TAIL(&sd->sd_wu_defq, wu, swu_link)do { (wu)->swu_link.tqe_next = ((void *)0); (wu)->swu_link .tqe_prev = (&sd->sd_wu_defq)->tqh_last; *(&sd-> sd_wu_defq)->tqh_last = (wu); (&sd->sd_wu_defq)-> tqh_last = &(wu)->swu_link.tqe_next; } while (0);
4229 | sd->sd_wu_collisions++;
4230 | goto queued;
4231 | }
4232 | 
4233 | start:
4234 | sr_raid_startwu(wu);
4235 | 
4236 | queued:
4237 | splx(s)spllower(s);
4238 | }
4239 | |
/*
 * Issue all CCBs belonging to a work unit.  Must be called at splbio.
 * A deferred wu is moved from the deferred queue to IN-PROGRESS; any
 * wu that is not a restart gets appended to the pending queue.  The
 * swu_cb_active flag guards against re-entrant starts of the same wu.
 */
4240 | void
4241 | sr_raid_startwu(struct sr_workunit *wu)
4242 | {
4243 | struct sr_discipline *sd = wu->swu_dis;
4244 | struct sr_ccb *ccb;
4245 | 
4246 | DNPRINTF(SR_D_WU, "sr_raid_startwu: start wu %p\n", wu);
4247 | 
4248 | splassert(IPL_BIO)do { if (splassert_ctl > 0) { splassert_check(0x6, __func__ ); } } while (0);
4249 | 
4250 | if (wu->swu_state == SR_WU_DEFERRED5) {
4251 | TAILQ_REMOVE(&sd->sd_wu_defq, wu, swu_link)do { if (((wu)->swu_link.tqe_next) != ((void *)0)) (wu)-> swu_link.tqe_next->swu_link.tqe_prev = (wu)->swu_link.tqe_prev ; else (&sd->sd_wu_defq)->tqh_last = (wu)->swu_link .tqe_prev; *(wu)->swu_link.tqe_prev = (wu)->swu_link.tqe_next ; ((wu)->swu_link.tqe_prev) = ((void *)-1); ((wu)->swu_link .tqe_next) = ((void *)-1); } while (0);
4252 | wu->swu_state = SR_WU_INPROGRESS1;
4253 | }
4254 | 
4255 | if (wu->swu_state != SR_WU_RESTART7)
4256 | TAILQ_INSERT_TAIL(&sd->sd_wu_pendq, wu, swu_link)do { (wu)->swu_link.tqe_next = ((void *)0); (wu)->swu_link .tqe_prev = (&sd->sd_wu_pendq)->tqh_last; *(&sd ->sd_wu_pendq)->tqh_last = (wu); (&sd->sd_wu_pendq )->tqh_last = &(wu)->swu_link.tqe_next; } while (0);
4257 | 
4258 | /* Start all of the individual I/Os. */
4259 | if (wu->swu_cb_active == 1)
4260 | panic("%s: sr_startwu_callback", DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname));
4261 | wu->swu_cb_active = 1;
4262 | 
4263 | TAILQ_FOREACH(ccb, &wu->swu_ccb, ccb_link)for((ccb) = ((&wu->swu_ccb)->tqh_first); (ccb) != ( (void *)0); (ccb) = ((ccb)->ccb_link.tqe_next))
4264 | VOP_STRATEGY(ccb->ccb_buf.b_vp, &ccb->ccb_buf);
4265 | 
4266 | wu->swu_cb_active = 0;
4267 | }
4268 | |
/*
 * Recreate a work unit by releasing its CCBs and reissuing the SCSI
 * I/O request via the discipline's sd_scsi_rw, then repeat for every
 * work unit on its collider chain.  Panics if a reissue fails.
 */
4269 | void
4270 | sr_raid_recreate_wu(struct sr_workunit *wu)
4271 | {
4272 | struct sr_discipline *sd = wu->swu_dis;
4273 | struct sr_workunit *wup = wu;
4274 | 
4275 | /*
4276 | * Recreate a work unit by releasing the associated CCBs and reissuing
4277 | * the SCSI I/O request. This process is then repeated for all of the
4278 | * colliding work units.
4279 | */
4280 | do {
4281 | sr_wu_release_ccbs(wup);
4282 | 
4283 | wup->swu_state = SR_WU_REQUEUE8;
4284 | if (sd->sd_scsi_rw(wup))
4285 | panic("could not requeue I/O");
4286 | 
4287 | wup = wup->swu_collider;
4288 | } while (wup);
4289 | }
4290 | |
/*
 * Default sd_alloc_resources handler: allocate the discipline's work
 * units and CCBs.  Returns 0 on success, ENOMEM on failure (partially
 * allocated resources are released later via sd_free_resources).
 */
4291 | int
4292 | sr_alloc_resources(struct sr_discipline *sd)
4293 | {
4294 | if (sr_wu_alloc(sd)) {
4295 | sr_error(sd->sd_sc, "unable to allocate work units");
4296 | return (ENOMEM12);
4297 | }
4298 | if (sr_ccb_alloc(sd)) {
4299 | sr_error(sd->sd_sc, "unable to allocate ccbs");
4300 | return (ENOMEM12);
4301 | }
4302 | 
4303 | return (0);
4304 | }
4305 | |
/* Default sd_free_resources handler: release work units and CCBs. */
4306 | void
4307 | sr_free_resources(struct sr_discipline *sd)
4308 | {
4309 | sr_wu_free(sd);
4310 | sr_ccb_free(sd);
4311 | }
4312 | |
/*
 * Transition chunk c of the volume to new_state.  Only ONLINE ->
 * OFFLINE is a legal transition here; any other change panics.  A
 * repeated transition to the same state is a no-op (multiple failing
 * I/Os to one chunk funnel through here).  On success the volume
 * state is recomputed and a metadata save is scheduled via systq.
 */
4313 | void
4314 | sr_set_chunk_state(struct sr_discipline *sd, int c, int new_state)
4315 | {
4316 | int old_state, s;
4317 | 
4318 | DNPRINTF(SR_D_STATE, "%s: %s: %s: sr_set_chunk_state %d -> %d\n",
4319 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
4320 | sd->sd_vol.sv_chunks[c]->src_meta.scmi.scm_devname, c, new_state);
4321 | 
4322 | /* ok to go to splbio since this only happens in error path */
4323 | s = splbio()splraise(0x6);
4324 | old_state = sd->sd_vol.sv_chunks[c]->src_meta.scm_status;
4325 | 
4326 | /* multiple IOs to the same chunk that fail will come through here */
4327 | if (old_state == new_state)
4328 | goto done;
4329 | 
4330 | switch (old_state) {
4331 | case BIOC_SDONLINE0x00:
4332 | if (new_state == BIOC_SDOFFLINE0x01)
4333 | break;
4334 | else
4335 | goto die;
4336 | break;
4337 | 
4338 | case BIOC_SDOFFLINE0x01:
4339 | goto die;
4340 | 
4341 | default:
4342 | die:
4343 | splx(s)spllower(s); /* XXX */
4344 | panic("%s: %s: %s: invalid chunk state transition %d -> %d",
4345 | DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname),
4346 | sd->sd_meta->ssd_devname,
4347 | sd->sd_vol.sv_chunks[c]->src_meta.scmi_scm_invariant.scm_devname,
4348 | old_state, new_state);
4349 | /* NOTREACHED */
4350 | }
4351 | 
4352 | sd->sd_vol.sv_chunks[c]->src_meta.scm_status = new_state;
4353 | sd->sd_set_vol_state(sd);
4354 | 
/* Persist the state change: flag a flush and queue the save task. */
4355 | sd->sd_must_flush = 1;
4356 | task_add(systq, &sd->sd_meta_save_task);
4357 | done:
4358 | splx(s)spllower(s);
4359 | }
4360 | |
/*
 * Recompute the volume state from its chunk states: the volume is
 * ONLINE only when every chunk is ONLINE, otherwise OFFLINE.  Only
 * ONLINE -> {ONLINE, OFFLINE} transitions are accepted; anything
 * else (including any transition out of OFFLINE) panics.  A chunk
 * status outside the known range also panics.
 */
4361 | void
4362 | sr_set_vol_state(struct sr_discipline *sd)
4363 | {
4364 | int states[SR_MAX_STATES7];
4365 | int new_state, i, nd;
4366 | int old_state = sd->sd_vol_status;
4367 | u_int32_t s;
4368 | 
4369 | DNPRINTF(SR_D_STATE, "%s: %s: sr_set_vol_state\n",
4370 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
4371 | 
4372 | nd = sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no;
4373 | 
/* Histogram of chunk states across all nd chunks. */
4374 | for (i = 0; i < SR_MAX_STATES7; i++)
4375 | states[i] = 0;
4376 | 
4377 | for (i = 0; i < nd; i++) {
4378 | s = sd->sd_vol.sv_chunks[i]->src_meta.scm_status;
4379 | if (s >= SR_MAX_STATES7)
4380 | panic("%s: %s: %s: invalid chunk state",
4381 | DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname),
4382 | sd->sd_meta->ssd_devname,
4383 | sd->sd_vol.sv_chunks[i]->src_meta.scmi_scm_invariant.scm_devname);
4384 | states[s]++;
4385 | }
4386 | 
4387 | if (states[BIOC_SDONLINE0x00] == nd)
4388 | new_state = BIOC_SVONLINE0x00;
4389 | else
4390 | new_state = BIOC_SVOFFLINE0x01;
4391 | 
4392 | DNPRINTF(SR_D_STATE, "%s: %s: sr_set_vol_state %d -> %d\n",
4393 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname,
4394 | old_state, new_state);
4395 | 
4396 | switch (old_state) {
4397 | case BIOC_SVONLINE0x00:
4398 | if (new_state == BIOC_SVOFFLINE0x01 || new_state == BIOC_SVONLINE0x00)
4399 | break;
4400 | else
4401 | goto die;
4402 | break;
4403 | 
4404 | case BIOC_SVOFFLINE0x01:
4405 | /* XXX this might be a little too much */
4406 | goto die;
4407 | 
4408 | default:
4409 | die:
4410 | panic("%s: %s: invalid volume state transition %d -> %d",
4411 | DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname),
4412 | sd->sd_meta->ssd_devname,
4413 | old_state, new_state);
4414 | /* NOTREACHED */
4415 | }
4416 | 
4417 | sd->sd_vol_status = new_state;
4418 | }
4419 | |
/*
 * Allocate a zeroed DMA-able buffer of `length` bytes without sleeping.
 * May return NULL (PR_NOWAIT).  Pair with sr_block_put to release.
 */
4420 | void *
4421 | sr_block_get(struct sr_discipline *sd, long length)
4422 | {
4423 | return dma_alloc(length, PR_NOWAIT0x0002 | PR_ZERO0x0008);
4424 | }
4425 | |
/* Release a buffer obtained from sr_block_get; length must match. */
4426 | void
4427 | sr_block_put(struct sr_discipline *sd, void *ptr, int length)
4428 | {
4429 | dma_free(ptr, length);
4430 | }
4431 | |
/* Print an MD5 digest as lowercase hex, no separator or newline. */
4432 | void
4433 | sr_checksum_print(u_int8_t *md5)
4434 | {
4435 | int i;
4436 | 
4437 | for (i = 0; i < MD5_DIGEST_LENGTH16; i++)
4438 | printf("%02x", md5[i]);
4439 | }
4440 | |
/*
 * Compute the MD5 digest of `len` bytes at `src` into `md5` (which
 * must have room for MD5_DIGEST_LENGTH bytes).  Used as an integrity
 * checksum, not for cryptographic security.
 */
4441 | void
4442 | sr_checksum(struct sr_softc *sc, void *src, void *md5, u_int32_t len)
4443 | {
4444 | MD5_CTX ctx;
4445 | 
4446 | DNPRINTF(SR_D_MISC, "%s: sr_checksum(%p %p %d)\n", DEVNAME(sc), src,
4447 | md5, len);
4448 | 
4449 | MD5Init(&ctx);
4450 | MD5Update(&ctx, src, len);
4451 | MD5Final(md5, &ctx);
4452 | }
4453 | |
/*
 * Fill in a random (version 4, RFC 4122 variant) UUID: 16 random
 * bytes with the version nibble forced to 4 and the variant bits
 * forced to 10xxxxxx.
 */
4454 | void
4455 | sr_uuid_generate(struct sr_uuid *uuid)
4456 | {
4457 | arc4random_buf(uuid->sui_id, sizeof(uuid->sui_id));
4458 | /* UUID version 4: random */
4459 | uuid->sui_id[6] &= 0x0f;
4460 | uuid->sui_id[6] |= 0x40;
4461 | /* RFC4122 variant */
4462 | uuid->sui_id[8] &= 0x3f;
4463 | uuid->sui_id[8] |= 0x80;
4464 | }
4465 | |
/*
 * Format a UUID into its canonical 8-4-4-4-12 hex string.  Returns a
 * 37-byte M_DEVBUF allocation (M_WAITOK, never NULL); the caller owns
 * it and must free(buf, M_DEVBUF, 37) — see sr_uuid_print.
 */
4466 | char *
4467 | sr_uuid_format(struct sr_uuid *uuid)
4468 | {
4469 | char *uuidstr;
4470 | 
4471 | uuidstr = malloc(37, M_DEVBUF2, M_WAITOK0x0001);
4472 | 
4473 | snprintf(uuidstr, 37,
4474 | "%02x%02x%02x%02x-%02x%02x-%02x%02x-%02x%02x-"
4475 | "%02x%02x%02x%02x%02x%02x",
4476 | uuid->sui_id[0], uuid->sui_id[1],
4477 | uuid->sui_id[2], uuid->sui_id[3],
4478 | uuid->sui_id[4], uuid->sui_id[5],
4479 | uuid->sui_id[6], uuid->sui_id[7],
4480 | uuid->sui_id[8], uuid->sui_id[9],
4481 | uuid->sui_id[10], uuid->sui_id[11],
4482 | uuid->sui_id[12], uuid->sui_id[13],
4483 | uuid->sui_id[14], uuid->sui_id[15]);
4484 | 
4485 | return uuidstr;
4486 | }
4487 | |
/*
 * Print a UUID, optionally followed by a newline (cr != 0), and
 * release the temporary string obtained from sr_uuid_format.
 */
4488 | void
4489 | sr_uuid_print(struct sr_uuid *uuid, int cr)
4490 | {
4491 | char *uuidstr;
4492 | 
4493 | uuidstr = sr_uuid_format(uuid);
4494 | printf("%s%s", uuidstr, (cr ? "\n" : ""));
4495 | free(uuidstr, M_DEVBUF2, 37);
4496 | }
4497 | |
/*
 * Return 1 if a discipline with the same volume UUID is already on the
 * softc's discipline list (i.e. the volume is already assembled),
 * 0 otherwise.
 */
4498 | int
4499 | sr_already_assembled(struct sr_discipline *sd)
4500 | {
4501 | struct sr_softc *sc = sd->sd_sc;
4502 | struct sr_discipline *sdtmp;
4503 | 
4504 | TAILQ_FOREACH(sdtmp, &sc->sc_dis_list, sd_link)for((sdtmp) = ((&sc->sc_dis_list)->tqh_first); (sdtmp ) != ((void *)0); (sdtmp) = ((sdtmp)->sd_link.tqe_next)) {
4505 | if (!bcmp(&sd->sd_meta->ssdi_sdd_invariant.ssd_uuid,
4506 | &sdtmp->sd_meta->ssdi_sdd_invariant.ssd_uuid,
4507 | sizeof(sd->sd_meta->ssdi_sdd_invariant.ssd_uuid)))
4508 | return (1);
4509 | }
4510 | 
4511 | return (0);
4512 | }
4513 | |
/*
 * Validate a strip size: it must be a multiple of DEV_BSIZE and a
 * power of two.  Returns log2(b) on success, -1 otherwise.
 */
4514 | int32_t
4515 | sr_validate_stripsize(u_int32_t b)
4516 | {
4517 | int s = 0;
4518 | 
4519 | if (b % DEV_BSIZE(1 << 9))
4520 | return (-1);
4521 | 
/* Strip trailing zero bits, counting them into s. */
4522 | while ((b & 1) == 0) {
4523 | b >>= 1;
4524 | s++;
4525 | }
4526 | 
4527 | /* only multiple of twos */
4528 | b >>= 1;
4529 | if (b)
4530 | return(-1);
4531 | 
4532 | return (s);
4533 | }
4534 | |
/*
 * Quiesce all disciplines for suspend: shut each one down in reverse
 * attach order with metadata save, using dying == -1 so the volumes
 * stay ready and no resources are freed (see sr_discipline_shutdown).
 */
4535 | void
4536 | sr_quiesce(void)
4537 | {
4538 | struct sr_softc *sc = softraid0;
4539 | struct sr_discipline *sd, *nsd;
4540 | 
4541 | /* Shutdown disciplines in reverse attach order. */
4542 | TAILQ_FOREACH_REVERSE_SAFE(sd, &sc->sc_dis_list,for ((sd) = (*(((struct sr_discipline_list *)((&sc->sc_dis_list )->tqh_last))->tqh_last)); (sd) != ((void *)0) && ((nsd) = (*(((struct sr_discipline_list *)((sd)->sd_link. tqe_prev))->tqh_last)), 1); (sd) = (nsd))
4543 | sr_discipline_list, sd_link, nsd)for ((sd) = (*(((struct sr_discipline_list *)((&sc->sc_dis_list )->tqh_last))->tqh_last)); (sd) != ((void *)0) && ((nsd) = (*(((struct sr_discipline_list *)((sd)->sd_link. tqe_prev))->tqh_last)), 1); (sd) = (nsd))
4544 | sr_discipline_shutdown(sd, 1, -1);
4545 | }
4546 | |
/*
 * Full shutdown path: propagate DVACT_POWERDOWN to softraid's children
 * (softraid is not under mainbus, so this must be done explicitly),
 * then fully shut down every discipline in reverse attach order with
 * metadata save.  `dying` is passed through to each shutdown.
 */
4547 | void
4548 | sr_shutdown(int dying)
4549 | {
4550 | struct sr_softc *sc = softraid0;
4551 | struct sr_discipline *sd;
4552 | 
4553 | DNPRINTF(SR_D_MISC, "%s: sr_shutdown\n", DEVNAME(sc));
4554 | 
4555 | /*
4556 | * Since softraid is not under mainbus, we have to explicitly
4557 | * notify its children that the power is going down, so they
4558 | * can execute their shutdown hooks.
4559 | */
4560 | config_suspend((struct device *)sc, DVACT_POWERDOWN6);
4561 | 
4562 | /* Shutdown disciplines in reverse attach order. */
4563 | while ((sd = TAILQ_LAST(&sc->sc_dis_list, sr_discipline_list)(*(((struct sr_discipline_list *)((&sc->sc_dis_list)-> tqh_last))->tqh_last))) != NULL((void *)0))
4564 | sr_discipline_shutdown(sd, 1, dying);
4565 | }
4566 | |
/*
 * Common validation for a SCSI read/write: checks the volume is not
 * offline and the transfer is non-empty, decodes the LBA from a 6-,
 * 10- or 16-byte CDB into *blkno (converted to DEV_BSIZE units), and
 * records the wu's start/end block range.  Out-of-bounds requests set
 * ILLEGAL REQUEST sense data (ASC 0x21, LBA out of range).  `func` is
 * the caller's name, used only for diagnostics.  Returns 0 if the I/O
 * is acceptable, 1 otherwise.
 */
4567 | int
4568 | sr_validate_io(struct sr_workunit *wu, daddr_t *blkno, char *func)
4569 | {
4570 | struct sr_discipline *sd = wu->swu_dis;
4571 | struct scsi_xfer *xs = wu->swu_xs;
4572 | int rv = 1;
4573 | 
4574 | DNPRINTF(SR_D_DIS, "%s: %s 0x%02x\n", DEVNAME(sd->sd_sc), func,
4575 | xs->cmd.opcode);
4576 | 
4577 | if (sd->sd_meta->ssd_data_blkno == 0)
4578 | panic("invalid data blkno");
4579 | 
4580 | if (sd->sd_vol_status == BIOC_SVOFFLINE0x01) {
4581 | DNPRINTF(SR_D_DIS, "%s: %s device offline\n",
4582 | DEVNAME(sd->sd_sc), func);
4583 | goto bad;
4584 | }
4585 | 
4586 | if (xs->datalen == 0) {
4587 | printf("%s: %s: illegal block count for %s\n",
4588 | DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname), func, sd->sd_meta->ssd_devname);
4589 | goto bad;
4590 | }
4591 | 
/* Decode the starting LBA according to the CDB length. */
4592 | if (xs->cmdlen == 10)
4593 | *blkno = _4btol(((struct scsi_rw_10 *)&xs->cmd)->addr);
4594 | else if (xs->cmdlen == 16)
4595 | *blkno = _8btol(((struct scsi_rw_16 *)&xs->cmd)->addr);
4596 | else if (xs->cmdlen == 6)
4597 | *blkno = _3btol(((struct scsi_rw *)&xs->cmd)->addr);
4598 | else {
4599 | printf("%s: %s: illegal cmdlen for %s\n",
4600 | DEVNAME(sd->sd_sc)((sd->sd_sc)->sc_dev.dv_xname), func, sd->sd_meta->ssd_devname);
4601 | goto bad;
4602 | }
4603 | 
/* Convert from logical sectors to DEV_BSIZE blocks. */
4604 | *blkno *= (sd->sd_meta->ssdi_sdd_invariant.ssd_secsize / DEV_BSIZE(1 << 9));
4605 | 
4606 | wu->swu_blk_start = *blkno;
4607 | wu->swu_blk_end = *blkno + (xs->datalen >> DEV_BSHIFT9) - 1;
4608 | 
4609 | if (wu->swu_blk_end > sd->sd_meta->ssdi_sdd_invariant.ssd_size) {
4610 | DNPRINTF(SR_D_DIS, "%s: %s out of bounds start: %lld "
4611 | "end: %lld length: %d\n",
4612 | DEVNAME(sd->sd_sc), func, (long long)wu->swu_blk_start,
4613 | (long long)wu->swu_blk_end, xs->datalen);
4614 | 
4615 | sd->sd_scsi_sense.error_code = SSD_ERRCODE_CURRENT0x70 |
4616 | SSD_ERRCODE_VALID0x80;
4617 | sd->sd_scsi_sense.flags = SKEY_ILLEGAL_REQUEST0x05;
4618 | sd->sd_scsi_sense.add_sense_code = 0x21;
4619 | sd->sd_scsi_sense.add_sense_code_qual = 0x00;
4620 | sd->sd_scsi_sense.extra_len = 4;
4621 | goto bad;
4622 | }
4623 | 
4624 | rv = 0;
4625 | bad:
4626 | return (rv);
4627 | }
4628 | |
/*
 * Kick off a rebuild by spawning sr_rebuild_thread as a kernel thread
 * (handle stored in sd_background_proc).  Failure to create the thread
 * is only logged; the rebuild simply does not start.
 */
4629 | void
4630 | sr_rebuild_start(void *arg)
4631 | {
4632 | struct sr_discipline *sd = arg;
4633 | struct sr_softc *sc = sd->sd_sc;
4634 | 
4635 | DNPRINTF(SR_D_REBUILD, "%s: %s starting rebuild thread\n",
4636 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
4637 | 
4638 | if (kthread_create(sr_rebuild_thread, sd, &sd->sd_background_proc,
4639 | DEVNAME(sc)((sc)->sc_dev.dv_xname)) != 0)
4640 | printf("%s: unable to start background operation\n",
4641 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4642 | }
4643 | |
/*
 * Kernel-thread body for a rebuild: marks the rebuild active (which
 * sr_discipline_shutdown polls to abort/drain it), runs the
 * discipline's sd_rebuild hook, then exits the thread.
 */
4644 | void
4645 | sr_rebuild_thread(void *arg)
4646 | {
4647 | struct sr_discipline *sd = arg;
4648 | 
4649 | DNPRINTF(SR_D_REBUILD, "%s: %s rebuild thread started\n",
4650 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
4651 | 
4652 | sd->sd_reb_active = 1;
4653 | sd->sd_rebuild(sd);
4654 | sd->sd_reb_active = 0;
4655 | 
4656 | kthread_exit(0);
4657 | }
4658 | |
/*
 * Default rebuild loop: copy the whole volume in SR_REBUILD_IO_SIZE
 * chunks by issuing a READ(16) work unit and a colliding WRITE(16)
 * work unit over the same LBA range — the write is deferred and is
 * started automatically when the read completes (LBA collision
 * handling in sr_schedule_wu).  Progress is checkpointed into
 * ssd_rebuild and metadata is saved every whole percent; the loop
 * resumes from that checkpoint on restart and aborts early when
 * sd_reb_abort is set (see sr_discipline_shutdown).  On completion
 * the chunk marked BIOC_SDREBUILD is flipped back to ONLINE.
 */
4659 | void
4660 | sr_rebuild(struct sr_discipline *sd)
4661 | {
4662 | struct sr_softc *sc = sd->sd_sc;
4663 | u_int64_t sz, whole_blk, partial_blk, blk, restart;
4664 | daddr_t lba;
4665 | struct sr_workunit *wu_r, *wu_w;
4666 | struct scsi_xfer xs_r, xs_w;
4667 | struct scsi_rw_16 *cr, *cw;
4668 | int c, s, slept, percent = 0, old_percent = -1;
4669 | u_int8_t *buf;
4670 | 
/* Number of full rebuild I/Os plus the size of the final partial one. */
4671 | whole_blk = sd->sd_meta->ssdi_sdd_invariant.ssd_size / SR_REBUILD_IO_SIZE128;
4672 | partial_blk = sd->sd_meta->ssdi_sdd_invariant.ssd_size % SR_REBUILD_IO_SIZE128;
4673 | 
4674 | restart = sd->sd_meta->ssd_rebuild / SR_REBUILD_IO_SIZE128;
4675 | if (restart > whole_blk) {
4676 | printf("%s: bogus rebuild restart offset, starting from 0\n",
4677 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4678 | restart = 0;
4679 | }
4680 | if (restart) {
4681 | /*
4682 | * XXX there is a hole here; there is a possibility that we
4683 | * had a restart however the chunk that was supposed to
4684 | * be rebuilt is no longer valid; we can reach this situation
4685 | * when a rebuild is in progress and the box crashes and
4686 | * on reboot the rebuild chunk is different (like zero'd or
4687 | * replaced). We need to check the uuid of the chunk that is
4688 | * being rebuilt to assert this.
4689 | */
4690 | percent = sr_rebuild_percent(sd);
4691 | printf("%s: resuming rebuild on %s at %d%%\n",
4692 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sd->sd_meta->ssd_devname, percent);
4693 | }
4694 | 
4695 | /* currently this is 64k therefore we can use dma_alloc */
4696 | buf = dma_alloc(SR_REBUILD_IO_SIZE128 << DEV_BSHIFT9, PR_WAITOK0x0001);
4697 | for (blk = restart; blk <= whole_blk; blk++) {
4698 | lba = blk * SR_REBUILD_IO_SIZE128;
4699 | sz = SR_REBUILD_IO_SIZE128;
4700 | if (blk == whole_blk) {
4701 | if (partial_blk == 0)
4702 | break;
4703 | sz = partial_blk;
4704 | }
4705 | 
4706 | /* get some wu */
4707 | wu_r = sr_scsi_wu_get(sd, 0);
4708 | wu_w = sr_scsi_wu_get(sd, 0);
4709 | 
4710 | DNPRINTF(SR_D_REBUILD, "%s: %s rebuild wu_r %p, wu_w %p\n",
4711 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, wu_r, wu_w);
4712 | 
4713 | /* setup read io */
4714 | bzero(&xs_r, sizeof xs_r)__builtin_bzero((&xs_r), (sizeof xs_r));
4715 | xs_r.error = XS_NOERROR0;
4716 | xs_r.flags = SCSI_DATA_IN0x00800;
4717 | xs_r.datalen = sz << DEV_BSHIFT9;
4718 | xs_r.data = buf;
4719 | xs_r.cmdlen = sizeof(*cr);
4720 | cr = (struct scsi_rw_16 *)&xs_r.cmd;
4721 | cr->opcode = READ_160x88;
4722 | _lto4b(sz, cr->length);
4723 | _lto8b(lba, cr->addr);
4724 | wu_r->swu_state = SR_WU_CONSTRUCT9;
4725 | wu_r->swu_flags |= SR_WUF_REBUILD(1<<0);
4726 | wu_r->swu_xs = &xs_r;
4727 | if (sd->sd_scsi_rw(wu_r)) {
4728 | printf("%s: could not create read io\n",
4729 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4730 | goto fail;
4731 | }
4732 | 
4733 | /* setup write io */
4734 | bzero(&xs_w, sizeof xs_w)__builtin_bzero((&xs_w), (sizeof xs_w));
4735 | xs_w.error = XS_NOERROR0;
4736 | xs_w.flags = SCSI_DATA_OUT0x01000;
4737 | xs_w.datalen = sz << DEV_BSHIFT9;
4738 | xs_w.data = buf;
4739 | xs_w.cmdlen = sizeof(*cw);
4740 | cw = (struct scsi_rw_16 *)&xs_w.cmd;
4741 | cw->opcode = WRITE_160x8a;
4742 | _lto4b(sz, cw->length);
4743 | _lto8b(lba, cw->addr);
4744 | wu_w->swu_state = SR_WU_CONSTRUCT9;
/* SR_WUF_WAKEUP: completion will wake the tsleep below. */
4745 | wu_w->swu_flags |= SR_WUF_REBUILD(1<<0) | SR_WUF_WAKEUP(1<<4);
4746 | wu_w->swu_xs = &xs_w;
4747 | if (sd->sd_scsi_rw(wu_w)) {
4748 | printf("%s: could not create write io\n",
4749 | DEVNAME(sc)((sc)->sc_dev.dv_xname));
4750 | goto fail;
4751 | }
4752 | 
4753 | /*
4754 | * collide with the read io so that we get automatically
4755 | * started when the read is done
4756 | */
4757 | wu_w->swu_state = SR_WU_DEFERRED5;
4758 | wu_r->swu_collider = wu_w;
4759 | s = splbio()splraise(0x6);
4760 | TAILQ_INSERT_TAIL(&sd->sd_wu_defq, wu_w, swu_link)do { (wu_w)->swu_link.tqe_next = ((void *)0); (wu_w)->swu_link .tqe_prev = (&sd->sd_wu_defq)->tqh_last; *(&sd-> sd_wu_defq)->tqh_last = (wu_w); (&sd->sd_wu_defq)-> tqh_last = &(wu_w)->swu_link.tqe_next; } while (0);
4761 | splx(s)spllower(s);
4762 | 
4763 | DNPRINTF(SR_D_REBUILD, "%s: %s rebuild scheduling wu_r %p\n",
4764 | DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname, wu_r);
4765 | 
4766 | wu_r->swu_state = SR_WU_INPROGRESS1;
4767 | sr_schedule_wu(wu_r);
4768 | 
4769 | /* wait for write completion */
4770 | slept = 0;
4771 | while ((wu_w->swu_flags & SR_WUF_REBUILDIOCOMP(1<<1)) == 0) {
4772 | tsleep_nsec(wu_w, PRIBIO16, "sr_rebuild", INFSLP0xffffffffffffffffULL);
4773 | slept = 1;
4774 | }
4775 | /* yield if we didn't sleep */
4776 | if (slept == 0)
4777 | tsleep_nsec(sc, PWAIT32, "sr_yield", MSEC_TO_NSEC(1));
4778 | 
4779 | sr_scsi_wu_put(sd, wu_r);
4780 | sr_scsi_wu_put(sd, wu_w);
4781 | 
/* Checkpoint progress so an interrupted rebuild can resume here. */
4782 | sd->sd_meta->ssd_rebuild = lba;
4783 | 
4784 | /* XXX - this should be based on size, not percentage. */
4785 | /* save metadata every percent */
4786 | percent = sr_rebuild_percent(sd);
4787 | if (percent != old_percent && blk != whole_blk) {
4788 | if (sr_meta_save(sd, SR_META_DIRTY0x1))
4789 | printf("%s: could not save metadata to %s\n",
4790 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sd->sd_meta->ssd_devname);
4791 | old_percent = percent;
4792 | }
4793 | 
4794 | if (sd->sd_reb_abort)
4795 | goto abort;
4796 | }
4797 | 
4798 | /* all done */
4799 | sd->sd_meta->ssd_rebuild = 0;
4800 | for (c = 0; c < sd->sd_meta->ssdi_sdd_invariant.ssd_chunk_no; c++) {
4801 | if (sd->sd_vol.sv_chunks[c]->src_meta.scm_status ==
4802 | BIOC_SDREBUILD0x03) {
4803 | sd->sd_set_chunk_state(sd, c, BIOC_SDONLINE0x00);
4804 | break;
4805 | }
4806 | }
4807 | 
4808 | abort:
4809 | if (sr_meta_save(sd, SR_META_DIRTY0x1))
4810 | printf("%s: could not save metadata to %s\n",
4811 | DEVNAME(sc)((sc)->sc_dev.dv_xname), sd->sd_meta->ssd_devname);
4812 | fail:
4813 | dma_free(buf, SR_REBUILD_IO_SIZE128 << DEV_BSHIFT9);
4814 | }
4815 | |
4816 | #ifndef SMALL_KERNEL |
4817 | int |
4818 | sr_sensors_create(struct sr_discipline *sd) |
4819 | { |
4820 | struct sr_softc *sc = sd->sd_sc; |
4821 | int rv = 1; |
4822 | |
4823 | DNPRINTF(SR_D_STATE, "%s: %s: sr_sensors_create\n", |
4824 | DEVNAME(sc), sd->sd_meta->ssd_devname); |
4825 | |
4826 | sd->sd_vol.sv_sensor.type = SENSOR_DRIVE; |
4827 | sd->sd_vol.sv_sensor.status = SENSOR_S_UNKNOWN; |
4828 | strlcpy(sd->sd_vol.sv_sensor.desc, sd->sd_meta->ssd_devname, |
4829 | sizeof(sd->sd_vol.sv_sensor.desc)); |
4830 | |
4831 | sensor_attach(&sc->sc_sensordev, &sd->sd_vol.sv_sensor); |
4832 | sd->sd_vol.sv_sensor_attached = 1; |
4833 | |
4834 | if (sc->sc_sensor_task == NULL((void *)0)) { |
4835 | sc->sc_sensor_task = sensor_task_register(sc, |
4836 | sr_sensors_refresh, 10); |
4837 | if (sc->sc_sensor_task == NULL((void *)0)) |
4838 | goto bad; |
4839 | } |
4840 | |
4841 | rv = 0; |
4842 | bad: |
4843 | return (rv); |
4844 | } |
4845 | |
4846 | void |
4847 | sr_sensors_delete(struct sr_discipline *sd) |
4848 | { |
4849 | DNPRINTF(SR_D_STATE, "%s: sr_sensors_delete\n", DEVNAME(sd->sd_sc)); |
4850 | |
4851 | if (sd->sd_vol.sv_sensor_attached) |
4852 | sensor_detach(&sd->sd_sc->sc_sensordev, &sd->sd_vol.sv_sensor); |
4853 | } |
4854 | |
4855 | void |
4856 | sr_sensors_refresh(void *arg) |
4857 | { |
4858 | struct sr_softc *sc = arg; |
4859 | struct sr_volume *sv; |
4860 | struct sr_discipline *sd; |
4861 | |
4862 | DNPRINTF(SR_D_STATE, "%s: sr_sensors_refresh\n", DEVNAME(sc)); |
4863 | |
4864 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
4865 | sv = &sd->sd_vol; |
4866 | |
4867 | switch(sd->sd_vol_status) { |
4868 | case BIOC_SVOFFLINE0x01: |
4869 | sv->sv_sensor.value = SENSOR_DRIVE_FAIL9; |
4870 | sv->sv_sensor.status = SENSOR_S_CRIT; |
4871 | break; |
4872 | |
4873 | case BIOC_SVDEGRADED0x02: |
4874 | sv->sv_sensor.value = SENSOR_DRIVE_PFAIL10; |
4875 | sv->sv_sensor.status = SENSOR_S_WARN; |
4876 | break; |
4877 | |
4878 | case BIOC_SVREBUILD0x05: |
4879 | sv->sv_sensor.value = SENSOR_DRIVE_REBUILD7; |
4880 | sv->sv_sensor.status = SENSOR_S_WARN; |
4881 | break; |
4882 | |
4883 | case BIOC_SVSCRUB0x04: |
4884 | case BIOC_SVONLINE0x00: |
4885 | sv->sv_sensor.value = SENSOR_DRIVE_ONLINE4; |
4886 | sv->sv_sensor.status = SENSOR_S_OK; |
4887 | break; |
4888 | |
4889 | default: |
4890 | sv->sv_sensor.value = 0; /* unknown */ |
4891 | sv->sv_sensor.status = SENSOR_S_UNKNOWN; |
4892 | } |
4893 | } |
4894 | } |
4895 | #endif /* SMALL_KERNEL */ |
4896 | |
4897 | #ifdef SR_FANCY_STATS |
4898 | void sr_print_stats(void); |
4899 | |
4900 | void |
4901 | sr_print_stats(void) |
4902 | { |
4903 | struct sr_softc *sc = softraid0; |
4904 | struct sr_discipline *sd; |
4905 | |
4906 | if (sc == NULL((void *)0)) { |
4907 | printf("no softraid softc found\n"); |
4908 | return; |
4909 | } |
4910 | |
4911 | TAILQ_FOREACH(sd, &sc->sc_dis_list, sd_link)for((sd) = ((&sc->sc_dis_list)->tqh_first); (sd) != ((void *)0); (sd) = ((sd)->sd_link.tqe_next)) { |
4912 | printf("%s: ios pending %d, collisions %llu\n", |
4913 | sd->sd_meta->ssd_devname, |
4914 | sd->sd_wu_pending, |
4915 | sd->sd_wu_collisions); |
4916 | } |
4917 | } |
4918 | #endif /* SR_FANCY_STATS */ |
4919 | |
4920 | #ifdef SR_DEBUG |
/*
 * Debug dump of an in-memory copy of the on-disk softraid metadata:
 * the volume header, the per-chunk metadata records packed directly
 * after it, and any optional metadata headers after those.  Prints
 * nothing unless SR_D_META debugging is enabled.
 */
void
sr_meta_print(struct sr_metadata *m)
{
	int i;
	struct sr_meta_chunk *mc;
	struct sr_meta_opt_hdr *omh;

	/* Only emit output when metadata debugging was requested. */
	if (!(sr_debug & SR_D_META))
		return;

	/* Volume header fields. */
	printf("\tssd_magic 0x%llx\n", m->ssdi_sdd_invariant.ssd_magic);
	printf("\tssd_version %d\n", m->ssdi_sdd_invariant.ssd_version);
	printf("\tssd_vol_flags 0x%x\n", m->ssdi_sdd_invariant.ssd_vol_flags);
	printf("\tssd_uuid ");
	sr_uuid_print(&m->ssdi_sdd_invariant.ssd_uuid, 1);
	printf("\tssd_chunk_no %d\n", m->ssdi_sdd_invariant.ssd_chunk_no);
	printf("\tssd_chunk_id %d\n", m->ssdi_sdd_invariant.ssd_chunk_id);
	printf("\tssd_opt_no %d\n", m->ssdi_sdd_invariant.ssd_opt_no);
	printf("\tssd_volid %d\n", m->ssdi_sdd_invariant.ssd_volid);
	printf("\tssd_level %d\n", m->ssdi_sdd_invariant.ssd_level);
	printf("\tssd_size %lld\n", m->ssdi_sdd_invariant.ssd_size);
	printf("\tssd_devname %s\n", m->ssd_devname);
	printf("\tssd_vendor %s\n", m->ssdi_sdd_invariant.ssd_vendor);
	printf("\tssd_product %s\n", m->ssdi_sdd_invariant.ssd_product);
	printf("\tssd_revision %s\n", m->ssdi_sdd_invariant.ssd_revision);
	printf("\tssd_strip_size %d\n", m->ssdi_sdd_invariant.ssd_strip_size);
	printf("\tssd_checksum ");
	sr_checksum_print(m->ssd_checksum);
	printf("\n");
	printf("\tssd_meta_flags 0x%x\n", m->ssd_meta_flags);
	printf("\tssd_ondisk %llu\n", m->ssd_ondisk);

	/* Chunk records are laid out contiguously right after the header. */
	mc = (struct sr_meta_chunk *)(m + 1);
	for (i = 0; i < m->ssdi_sdd_invariant.ssd_chunk_no; i++, mc++) {
		printf("\t\tscm_volid %d\n", mc->scmi_scm_invariant.scm_volid);
		printf("\t\tscm_chunk_id %d\n", mc->scmi_scm_invariant.scm_chunk_id);
		printf("\t\tscm_devname %s\n", mc->scmi_scm_invariant.scm_devname);
		printf("\t\tscm_size %lld\n", mc->scmi_scm_invariant.scm_size);
		printf("\t\tscm_coerced_size %lld\n",mc->scmi_scm_invariant.scm_coerced_size);
		printf("\t\tscm_uuid ");
		sr_uuid_print(&mc->scmi_scm_invariant.scm_uuid, 1);
		printf("\t\tscm_checksum ");
		sr_checksum_print(mc->scm_checksum);
		printf("\n");
		printf("\t\tscm_status %d\n", mc->scm_status);
	}

	/*
	 * Optional metadata headers follow the chunk records.  Each header
	 * carries its own total length (som_length), which is used as the
	 * stride to reach the next one; byte-wise (void *) arithmetic is
	 * intentional here.
	 */
	omh = (struct sr_meta_opt_hdr *)((u_int8_t *)(m + 1) +
	    sizeof(struct sr_meta_chunk) * m->ssdi_sdd_invariant.ssd_chunk_no);
	for (i = 0; i < m->ssdi_sdd_invariant.ssd_opt_no; i++) {
		printf("\t\t\tsom_type %d\n", omh->som_type);
		printf("\t\t\tsom_checksum ");
		sr_checksum_print(omh->som_checksum);
		printf("\n");
		omh = (struct sr_meta_opt_hdr *)((void *)omh +
		    omh->som_length);
	}
}
4979 | |
/*
 * Debug helper: hex/ASCII dump of an arbitrary buffer, 16 bytes per
 * output line (hex column, two spaces, then printable-ASCII column
 * with '.' for non-printable bytes).
 *
 * Fixes two bounds bugs in the previous version: the hex loop read
 * b[i + j] past 'len' whenever len was not a multiple of 16 (out of
 * bounds read), and the ASCII guard used 'i + j > len' instead of
 * '>= len', so b[len] itself was also read and printed.
 */
void
sr_dump_block(void *blk, int len)
{
	uint8_t			*b = blk;
	int			i, j, c;

	for (i = 0; i < len; i += 16) {
		/* Hex column; pad a short final line so columns align. */
		for (j = 0; j < 16; j++) {
			if (i + j < len)
				printf("%.2x ", b[i + j]);
			else
				printf("   ");
		}
		printf("  ");
		/* ASCII column; stop at the end of the buffer. */
		for (j = 0; j < 16 && i + j < len; j++) {
			c = b[i + j];
			if (c < ' ' || c > 'z')
				c = '.';
			printf("%c", c);
		}
		printf("\n");
	}
}
4999 | |
/*
 * Debug helper: dump len bytes as space-separated two-digit hex,
 * terminated by a newline.
 */
void
sr_dump_mem(u_int8_t *p, int len)
{
	int			i;

	for (i = 0; i < len; i++)
		printf("%02x ", p[i]);
	printf("\n");
}
5009 | |
5010 | #endif /* SR_DEBUG */ |
5011 | |
5012 | #ifdef HIBERNATE1 |
5013 | /* |
5014 | * Side-effect free (no malloc, printf, pool, splx) softraid crypto writer. |
5015 | * |
5016 | * This function must perform the following: |
5017 | * 1. Determine the underlying device's own side-effect free I/O function |
5018 | * (eg, ahci_hibernate_io, wd_hibernate_io, etc). |
5019 | * 2. Store enough information in the provided page argument for subsequent |
5020 | * I/O calls (such as the crypto discipline structure for the keys, the |
5021 | * offset of the softraid partition on the underlying disk, as well as |
5022 | * the offset of the swap partition within the crypto volume. |
5023 | * 3. Encrypt the incoming data using the sr_discipline keys, then pass |
5024 | * the request to the underlying device's own I/O function. |
5025 | */ |
/*
 * See the block comment above: side-effect free softraid crypto writer
 * used by hibernate.  On HIB_INIT it resolves and initializes the
 * underlying device's own hibernate I/O function; afterwards it only
 * accepts HIB_W (writes), encrypting each DEV_BSIZE block with AES-XTS
 * before handing it to the underlying device.
 */
int
sr_hibernate_io(dev_t dev, daddr_t blkno, vaddr_t addr, size_t size, int op, void *page)
{
	/* Struct for stashing data obtained on HIB_INIT.
	 * XXX
	 * We share the page with the underlying device's own
	 * side-effect free I/O function, so we pad our data to
	 * the end of the page. Presently this does not overlap
	 * with either of the two other side-effect free i/o
	 * functions (ahci/wd).
	 */
	struct {
		char pad[3072];
		struct sr_discipline *srd;
		hibio_fn subfn; /* underlying device i/o fn */
		dev_t subdev; /* underlying device dev_t */
		daddr_t sr_swapoff; /* ofs of swap part in sr volume */
		char buf[DEV_BSIZE(1 << 9)]; /* encryption performed into this buf */
	} *my = page;
	extern struct cfdriver sd_cd;
	char errstr[128], *dl_ret;
	struct sr_chunk *schunk;
	struct sd_softc *sd;
	struct aes_xts_ctx ctx;
	struct sr_softc *sc;
	struct device *dv;
	daddr_t key_blkno;
	uint32_t sub_raidoff; /* ofs of sr part in underlying dev */
	struct disklabel dl;
	struct partition *pp;
	size_t i, j;
	u_char iv[8];

	/*
	 * In HIB_INIT, we are passed the swap partition size and offset
	 * in 'size' and 'blkno' respectively. These are relative to the
	 * start of the softraid partition, and we need to save these
	 * for later translation to the underlying device's layout.
	 */
	if (op == HIB_INIT-1) {
		dv = disk_lookup(&sd_cd, DISKUNIT(dev)(((unsigned)((dev) & 0xff) | (((dev) & 0xffff0000) >> 8)) / 16));
		sd = (struct sd_softc *)dv;
		sc = (struct sr_softc *)dv->dv_parent->dv_parent;

		/*
		 * Look up the sr discipline. This is used to determine
		 * if we are SR crypto and what the underlying device is.
		 */
		my->srd = sc->sc_targets[sd->sc_link->target];
		DNPRINTF(SR_D_MISC, "sr_hibernate_io: discipline is %s\n",
		    my->srd->sd_name);
		/* Only the CRYPTO discipline is supported for hibernate. */
		if (strncmp(my->srd->sd_name, "CRYPTO",
		    sizeof(my->srd->sd_name)))
			return (ENOTSUP91);

		/* Find the underlying device */
		/* NOTE(review): only chunk 0 is consulted -- presumably
		 * CRYPTO volumes always have a single chunk; confirm. */
		schunk = my->srd->sd_vol.sv_chunks[0];
		my->subdev = schunk->src_dev_mm;

		/*
		 * Find the appropriate underlying device side effect free
		 * I/O function, based on the type of device it is.
		 */
		my->subfn = get_hibernate_io_function(my->subdev);
		if (!my->subfn)
			return (ENODEV19);

		/*
		 * Find blkno where this raid partition starts on
		 * the underlying disk.
		 */
		dl_ret = disk_readlabel(&dl, my->subdev, errstr,
		    sizeof(errstr));
		if (dl_ret) {
			printf("Hibernate error reading disklabel: %s\n", dl_ret);
			return (ENOTSUP91);
		}

		pp = &dl.d_partitions[DISKPART(my->subdev)(((unsigned)((my->subdev) & 0xff) | (((my->subdev) & 0xffff0000) >> 8)) % 16)];
		/* The underlying partition must be a non-empty RAID part. */
		if (pp->p_fstype != FS_RAID19 || DL_GETPSIZE(pp)(((u_int64_t)(pp)->p_sizeh << 32) + (pp)->p_size) == 0)
			return (ENOTSUP91);

		/* Find the blkno of the SR part in the underlying device */
		sub_raidoff = my->srd->sd_meta->ssd_data_blkno +
		    DL_SECTOBLK(&dl, DL_GETPOFFSET(pp))(((((u_int64_t)(pp)->p_offseth << 32) + (pp)->p_offset)) * ((&dl)->d_secsize / (1 << 9)));
		DNPRINTF(SR_D_MISC,"sr_hibernate_io: blk trans ofs: %d blks\n",
		    sub_raidoff);

		/* Save the blkno of the swap partition in the SR disk */
		my->sr_swapoff = blkno;

		/* Initialize the sub-device */
		/* The sub-device is initialized with the absolute offset
		 * (sub_raidoff + blkno); later writes pass swap-relative
		 * block numbers -- the sub function presumably stashes
		 * the base during its own HIB_INIT; verify against the
		 * ahci/wd hibernate I/O functions before changing. */
		return my->subfn(my->subdev, sub_raidoff + blkno,
		    addr, size, op, page);
	}

	/* Hibernate only uses (and we only support) writes */
	if (op != HIB_W1)
		return (ENOTSUP91);

	/*
	 * Blocks act as the IV for the encryption. These block numbers
	 * are relative to the start of the sr partition, but the 'blkno'
	 * passed above is relative to the start of the swap partition
	 * inside the sr partition, so bias appropriately.
	 */
	key_blkno = my->sr_swapoff + blkno;

	/* Process each disk block one at a time. */
	for (i = 0; i < size; i += DEV_BSIZE(1 << 9)) {
		int res;

		bzero(&ctx, sizeof(ctx))__builtin_bzero((&ctx), (sizeof(ctx)));

		/*
		 * Set encryption key (from the sr discipline stashed
		 * during HIB_INIT. This code is based on the softraid
		 * bootblock code.
		 */
		aes_xts_setkey(&ctx, my->srd->mdssd_dis_specific.mdd_crypto.scr_key[0], 64);
		/* We encrypt DEV_BSIZE bytes at a time in my->buf */
		memcpy(my->buf, ((char *)addr) + i, DEV_BSIZE)__builtin_memcpy((my->buf), (((char *)addr) + i), ((1 << 9)));

		/* Block number is the IV */
		/* NOTE(review): iv is 8 bytes; key_blkno (64-bit daddr_t)
		 * is copied in host byte order.  This must match the IV
		 * construction in the softraid boot code -- confirm before
		 * touching either side. */
		memcpy(&iv, &key_blkno, sizeof(key_blkno))__builtin_memcpy((&iv), (&key_blkno), (sizeof(key_blkno)));
		aes_xts_reinit(&ctx, iv);

		/* Encrypt DEV_BSIZE bytes, AES_XTS_BLOCKSIZE bytes at a time */
		for (j = 0; j < DEV_BSIZE(1 << 9); j += AES_XTS_BLOCKSIZE16)
			aes_xts_encrypt(&ctx, my->buf + j);

		/*
		 * Write one block out from my->buf to the underlying device
		 * using its own side-effect free I/O function.
		 */
		res = my->subfn(my->subdev, blkno + (i / DEV_BSIZE(1 << 9)),
		    (vaddr_t)(my->buf), DEV_BSIZE(1 << 9), op, page);
		if (res != 0)
			return (res);
		key_blkno++;
	}
	return (0);
}
5169 | #endif /* HIBERNATE */ |