Bug Summary

File: dev/softraid_crypto.c
Warning: line 169, column 17
Result of 'malloc' is converted to a pointer of type 'struct sr_meta_opt_hdr', which is incompatible with sizeof operand type 'struct sr_meta_crypto'

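The diagnostic comes from the analyzer's malloc/sizeof consistency check (unix.MallocSizeof): at line 169 the sizeof operand is struct sr_meta_crypto, but the result is stored in omi->omi_som, a struct sr_meta_opt_hdr *. In softraid's optional-metadata scheme the type-specific records embed the generic option header as their first member, which is why the code sets som_type/som_length through that header and why line 174 can cast the pointer back to struct sr_meta_crypto *; the checker simply cannot see that containment relationship. Below is a minimal sketch of the idiom, using simplified stand-in types rather than the real softraidvar.h definitions:

#include <stdlib.h>
#include <string.h>

struct opt_hdr {                /* generic header, like sr_meta_opt_hdr */
    unsigned int som_type;
    unsigned int som_length;
};

struct opt_crypto {             /* specific record, like sr_meta_crypto */
    struct opt_hdr scm_hdr;     /* generic header is the first member */
    unsigned char  scm_key[64];
};

int
main(void)
{
    struct opt_hdr *som;
    struct opt_crypto *crypto;

    /*
     * Allocate the full record but keep only the generic header pointer.
     * This is the "result of 'malloc' is converted to a pointer of type
     * ... incompatible with sizeof operand type" pattern the checker
     * reports.
     */
    som = malloc(sizeof(struct opt_crypto));
    if (som == NULL)
        return (1);
    memset(som, 0, sizeof(struct opt_crypto));
    som->som_type = 0x01;
    som->som_length = sizeof(struct opt_crypto);

    /*
     * Because the header is the first member, converting back to the
     * specific type (as line 174 of the listing does) stays inside the
     * allocation and is well defined.
     */
    crypto = (struct opt_crypto *)som;
    crypto->scm_key[0] = 0xff;

    free(som);
    return (0);
}

If only a header were needed, malloc(sizeof(struct sr_meta_opt_hdr)) would silence the warning, but here the whole crypto record is required, so the report appears to reflect this idiom rather than an under-allocation.
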
Annotated Source Code

clang -cc1 -cc1 -triple amd64-unknown-openbsd7.4 -analyze -disable-free -clear-ast-before-backend -disable-llvm-verifier -discard-value-names -main-file-name softraid_crypto.c -analyzer-checker=core -analyzer-checker=apiModeling -analyzer-checker=unix -analyzer-checker=deadcode -analyzer-checker=security.insecureAPI.UncheckedReturn -analyzer-checker=security.insecureAPI.getpw -analyzer-checker=security.insecureAPI.gets -analyzer-checker=security.insecureAPI.mktemp -analyzer-checker=security.insecureAPI.mkstemp -analyzer-checker=security.insecureAPI.vfork -analyzer-checker=nullability.NullPassedToNonnull -analyzer-checker=nullability.NullReturnedFromNonnull -analyzer-output plist -w -setup-static-analyzer -mrelocation-model static -mframe-pointer=all -relaxed-aliasing -ffp-contract=on -fno-rounding-math -mconstructor-aliases -ffreestanding -mcmodel=kernel -target-cpu x86-64 -target-feature +retpoline-indirect-calls -target-feature +retpoline-indirect-branches -target-feature -sse2 -target-feature -sse -target-feature -3dnow -target-feature -mmx -target-feature +save-args -target-feature +retpoline-external-thunk -disable-red-zone -no-implicit-float -tune-cpu generic -debugger-tuning=gdb -fcoverage-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -nostdsysteminc -nobuiltininc -resource-dir /usr/local/llvm16/lib/clang/16 -I /usr/src/sys -I /usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -I /usr/src/sys/arch -I /usr/src/sys/dev/pci/drm/include -I /usr/src/sys/dev/pci/drm/include/uapi -I /usr/src/sys/dev/pci/drm/amd/include/asic_reg -I /usr/src/sys/dev/pci/drm/amd/include -I /usr/src/sys/dev/pci/drm/amd/amdgpu -I /usr/src/sys/dev/pci/drm/amd/display -I /usr/src/sys/dev/pci/drm/amd/display/include -I /usr/src/sys/dev/pci/drm/amd/display/dc -I /usr/src/sys/dev/pci/drm/amd/display/amdgpu_dm -I /usr/src/sys/dev/pci/drm/amd/pm/inc -I /usr/src/sys/dev/pci/drm/amd/pm/legacy-dpm -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu11 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu12 -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/smu13 -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/inc -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/hwmgr -I /usr/src/sys/dev/pci/drm/amd/pm/powerplay/smumgr -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc -I /usr/src/sys/dev/pci/drm/amd/pm/swsmu/inc/pmfw_if -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc -I /usr/src/sys/dev/pci/drm/amd/display/dc/inc/hw -I /usr/src/sys/dev/pci/drm/amd/display/dc/clk_mgr -I /usr/src/sys/dev/pci/drm/amd/display/modules/inc -I /usr/src/sys/dev/pci/drm/amd/display/modules/hdcp -I /usr/src/sys/dev/pci/drm/amd/display/dmub/inc -I /usr/src/sys/dev/pci/drm/i915 -D DDB -D DIAGNOSTIC -D KTRACE -D ACCOUNTING -D KMEMSTATS -D PTRACE -D POOL_DEBUG -D CRYPTO -D SYSVMSG -D SYSVSEM -D SYSVSHM -D UVM_SWAP_ENCRYPT -D FFS -D FFS2 -D FFS_SOFTUPDATES -D UFS_DIRHASH -D QUOTA -D EXT2FS -D MFS -D NFSCLIENT -D NFSSERVER -D CD9660 -D UDF -D MSDOSFS -D FIFO -D FUSE -D SOCKET_SPLICE -D TCP_ECN -D TCP_SIGNATURE -D INET6 -D IPSEC -D PPP_BSDCOMP -D PPP_DEFLATE -D PIPEX -D MROUTING -D MPLS -D BOOT_CONFIG -D USER_PCICONF -D APERTURE -D MTRR -D NTFS -D SUSPEND -D HIBERNATE -D PCIVERBOSE -D USBVERBOSE -D WSDISPLAY_COMPAT_USL -D WSDISPLAY_COMPAT_RAWKBD -D WSDISPLAY_DEFAULTSCREENS=6 -D X86EMU -D ONEWIREVERBOSE -D MULTIPROCESSOR -D MAXUSERS=80 -D _KERNEL -O2 -Wno-pointer-sign -Wno-address-of-packed-member -Wno-constant-conversion -Wno-unused-but-set-variable -Wno-gnu-folding-constant 
-fdebug-compilation-dir=/usr/src/sys/arch/amd64/compile/GENERIC.MP/obj -ferror-limit 19 -fwrapv -D_RET_PROTECTOR -ret-protector -fcf-protection=branch -fgnuc-version=4.2.1 -vectorize-loops -vectorize-slp -fno-builtin-malloc -fno-builtin-calloc -fno-builtin-realloc -fno-builtin-valloc -fno-builtin-free -fno-builtin-strdup -fno-builtin-strndup -analyzer-output=html -faddrsig -o /home/ben/Projects/scan/2024-01-11-110808-61670-1 -x c /usr/src/sys/dev/softraid_crypto.c
1/* $OpenBSD: softraid_crypto.c,v 1.145 2021/10/24 14:50:42 tobhe Exp $ */
2/*
3 * Copyright (c) 2007 Marco Peereboom <marco@peereboom.us>
4 * Copyright (c) 2008 Hans-Joerg Hoexer <hshoexer@openbsd.org>
5 * Copyright (c) 2008 Damien Miller <djm@mindrot.org>
6 * Copyright (c) 2009 Joel Sing <jsing@openbsd.org>
7 *
8 * Permission to use, copy, modify, and distribute this software for any
9 * purpose with or without fee is hereby granted, provided that the above
10 * copyright notice and this permission notice appear in all copies.
11 *
12 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19 */
20
21#include "bio.h"
22
23#include <sys/param.h>
24#include <sys/systm.h>
25#include <sys/buf.h>
26#include <sys/device.h>
27#include <sys/ioctl.h>
28#include <sys/malloc.h>
29#include <sys/pool.h>
30#include <sys/kernel.h>
31#include <sys/disk.h>
32#include <sys/rwlock.h>
33#include <sys/queue.h>
34#include <sys/fcntl.h>
35#include <sys/disklabel.h>
36#include <sys/vnode.h>
37#include <sys/mount.h>
38#include <sys/sensors.h>
39#include <sys/stat.h>
40#include <sys/conf.h>
41#include <sys/uio.h>
42#include <sys/dkio.h>
43
44#include <crypto/cryptodev.h>
45#include <crypto/rijndael.h>
46#include <crypto/md5.h>
47#include <crypto/sha1.h>
48#include <crypto/sha2.h>
49#include <crypto/hmac.h>
50
51#include <scsi/scsi_all.h>
52#include <scsi/scsiconf.h>
53#include <scsi/scsi_disk.h>
54
55#include <dev/softraidvar.h>
56
57struct sr_crypto_wu *sr_crypto_prepare(struct sr_workunit *,
58 struct sr_crypto *, int);
59int sr_crypto_decrypt(u_char *, u_char *, u_char *, size_t, int);
60int sr_crypto_encrypt(u_char *, u_char *, u_char *, size_t, int);
61int sr_crypto_decrypt_key(struct sr_discipline *,
62 struct sr_crypto *);
63int sr_crypto_change_maskkey(struct sr_discipline *,
64 struct sr_crypto *, struct sr_crypto_kdfinfo *,
65 struct sr_crypto_kdfinfo *);
66int sr_crypto_create(struct sr_discipline *,
67 struct bioc_createraid *, int, int64_t);
68int sr_crypto_meta_create(struct sr_discipline *,
69 struct sr_crypto *, struct bioc_createraid *);
70int sr_crypto_set_key(struct sr_discipline *, struct sr_crypto *,
71 struct bioc_createraid *, int, void *);
72int sr_crypto_assemble(struct sr_discipline *,
73 struct bioc_createraid *, int, void *);
74void sr_crypto_free_sessions(struct sr_discipline *,
75 struct sr_crypto *);
76int sr_crypto_alloc_resources_internal(struct sr_discipline *,
77 struct sr_crypto *);
78int sr_crypto_alloc_resources(struct sr_discipline *);
79void sr_crypto_free_resources_internal(struct sr_discipline *,
80 struct sr_crypto *);
81void sr_crypto_free_resources(struct sr_discipline *);
82int sr_crypto_ioctl_internal(struct sr_discipline *,
83 struct sr_crypto *, struct bioc_discipline *);
84int sr_crypto_ioctl(struct sr_discipline *,
85 struct bioc_discipline *);
86int sr_crypto_meta_opt_handler_internal(struct sr_discipline *,
87 struct sr_crypto *, struct sr_meta_opt_hdr *);
88int sr_crypto_meta_opt_handler(struct sr_discipline *,
89 struct sr_meta_opt_hdr *);
90int sr_crypto_rw(struct sr_workunit *);
91int sr_crypto_dev_rw(struct sr_workunit *, struct sr_crypto_wu *);
92void sr_crypto_done_internal(struct sr_workunit *,
93 struct sr_crypto *);
94void sr_crypto_done(struct sr_workunit *);
95void sr_crypto_calculate_check_hmac_sha1(u_int8_t *, int,
96 u_int8_t *, int, u_char *);
97void sr_crypto_hotplug(struct sr_discipline *, struct disk *, int);
98
99#ifdef SR_DEBUG
100void sr_crypto_dumpkeys(struct sr_crypto *);
101#endif
102
103/* Discipline initialisation. */
104void
105sr_crypto_discipline_init(struct sr_discipline *sd)
106{
107 int i;
108
109 /* Fill out discipline members. */
110 sd->sd_wu_size = sizeof(struct sr_crypto_wu);
111 sd->sd_type = SR_MD_CRYPTO;
112 strlcpy(sd->sd_name, "CRYPTO", sizeof(sd->sd_name));
113 sd->sd_capabilities = SR_CAP_SYSTEM_DISK | SR_CAP_AUTO_ASSEMBLE;
114 sd->sd_max_wu = SR_CRYPTO_NOWU;
115
116 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
117 sd->mds.mdd_crypto.scr_sid[i] = (u_int64_t)-1;
118
119 /* Setup discipline specific function pointers. */
120 sd->sd_alloc_resources = sr_crypto_alloc_resources;
121 sd->sd_assemble = sr_crypto_assemble;
122 sd->sd_create = sr_crypto_create;
123 sd->sd_free_resources = sr_crypto_free_resources;
124 sd->sd_ioctl_handler = sr_crypto_ioctl;
125 sd->sd_meta_opt_handler = sr_crypto_meta_opt_handler;
126 sd->sd_scsi_rw = sr_crypto_rw;
127 sd->sd_scsi_done = sr_crypto_done;
128}
129
130int
131sr_crypto_create(struct sr_discipline *sd, struct bioc_createraid *bc,
132 int no_chunk, int64_t coerced_size)
133{
134 int rv = EINVAL;
135
136 if (no_chunk != 1) {
137 sr_error(sd->sd_sc, "%s requires exactly one chunk",
138 sd->sd_name);
139 return (rv);
140 }
141
142 sd->sd_meta->ssdi.ssd_size = coerced_size;
143
144 rv = sr_crypto_meta_create(sd, &sd->mds.mdd_crypto, bc);
145 if (rv)
146 return (rv);
147
148 sd->sd_max_ccb_per_wu = no_chunk;
149 return (0);
150}
151
152int
153sr_crypto_meta_create(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
154 struct bioc_createraid *bc)
155{
156 struct sr_meta_opt_item *omi;
157 int rv = EINVAL;
158
159 if (sd->sd_meta->ssdi.ssd_size > SR_CRYPTO_MAXSIZE) {
160 sr_error(sd->sd_sc, "%s exceeds maximum size (%lli > %llu)",
161 sd->sd_name, sd->sd_meta->ssdi.ssd_size,
162 SR_CRYPTO_MAXSIZE);
163 goto done;
164 }
165
166 /* Create crypto optional metadata. */
167 omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
168 M_WAITOK | M_ZERO);
169 omi->omi_som = malloc(sizeof(struct sr_meta_crypto), M_DEVBUF,
Result of 'malloc' is converted to a pointer of type 'struct sr_meta_opt_hdr', which is incompatible with sizeof operand type 'struct sr_meta_crypto'
170 M_WAITOK | M_ZERO);
171 omi->omi_som->som_type = SR_OPT_CRYPTO;
172 omi->omi_som->som_length = sizeof(struct sr_meta_crypto);
173 SLIST_INSERT_HEAD(&sd->sd_meta_opt, omi, omi_link);
174 mdd_crypto->scr_meta = (struct sr_meta_crypto *)omi->omi_som;
175 sd->sd_meta->ssdi.ssd_opt_no++;
176
177 mdd_crypto->key_disk = NULL;
178
179 if (bc->bc_key_disk != NODEV) {
180
181 /* Create a key disk. */
182 if (sr_crypto_get_kdf(bc, sd, mdd_crypto))
183 goto done;
184 mdd_crypto->key_disk =
185 sr_crypto_create_key_disk(sd, mdd_crypto, bc->bc_key_disk);
186 if (mdd_crypto->key_disk == NULL)
187 goto done;
188 sd->sd_capabilities |= SR_CAP_AUTO_ASSEMBLE;
189
190 } else if (bc->bc_opaque_flags & BIOC_SOOUT) {
191
192 /* No hint available yet. */
193 bc->bc_opaque_status = BIOC_SOINOUT_FAILED;
194 rv = EAGAIN;
195 goto done;
196
197 } else if (sr_crypto_get_kdf(bc, sd, mdd_crypto))
198 goto done;
199
200 /* Passphrase volumes cannot be automatically assembled. */
201 if (!(bc->bc_flags & BIOC_SCNOAUTOASSEMBLE) && bc->bc_key_disk == NODEV)
202 goto done;
203
204 sr_crypto_create_keys(sd, mdd_crypto);
205
206 rv = 0;
207done:
208 return (rv);
209}
210
211int
212sr_crypto_set_key(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
213 struct bioc_createraid *bc, int no_chunk, void *data)
214{
215 int rv = EINVAL;
216
217 mdd_crypto->key_disk = NULL;
218
219 /* Crypto optional metadata must already exist... */
220 if (mdd_crypto->scr_meta == NULL)
221 goto done;
222
223 if (data != NULL) {
224 /* Kernel already has mask key. */
225 memcpy(mdd_crypto->scr_maskkey, data,
226 sizeof(mdd_crypto->scr_maskkey));
227 } else if (bc->bc_key_disk != NODEV) {
228 /* Read the mask key from the key disk. */
229 mdd_crypto->key_disk =
230 sr_crypto_read_key_disk(sd, mdd_crypto, bc->bc_key_disk);
231 if (mdd_crypto->key_disk == NULL)
232 goto done;
233 } else if (bc->bc_opaque_flags & BIOC_SOOUT) {
234 /* provide userland with kdf hint */
235 if (bc->bc_opaque == NULL)
236 goto done;
237
238 if (sizeof(mdd_crypto->scr_meta->scm_kdfhint) <
239 bc->bc_opaque_size)
240 goto done;
241
242 if (copyout(mdd_crypto->scr_meta->scm_kdfhint,
243 bc->bc_opaque, bc->bc_opaque_size))
244 goto done;
245
246 /* we're done */
247 bc->bc_opaque_status = BIOC_SOINOUT_OK;
248 rv = EAGAIN;
249 goto done;
250 } else if (bc->bc_opaque_flags & BIOC_SOIN) {
251 /* get kdf with maskkey from userland */
252 if (sr_crypto_get_kdf(bc, sd, mdd_crypto))
253 goto done;
254 } else
255 goto done;
256
257
258 rv = 0;
259done:
260 return (rv);
261}
262
263int
264sr_crypto_assemble(struct sr_discipline *sd,
265 struct bioc_createraid *bc, int no_chunk, void *data)
266{
267 int rv;
268
269 rv = sr_crypto_set_key(sd, &sd->mds.mdd_crypto, bc, no_chunk, data);
270 if (rv)
271 return (rv);
272
273 sd->sd_max_ccb_per_wu = sd->sd_meta->ssdi.ssd_chunk_no;
274 return (0);
275}
276
277struct sr_crypto_wu *
278sr_crypto_prepare(struct sr_workunit *wu, struct sr_crypto *mdd_crypto,
279 int encrypt)
280{
281 struct scsi_xfer *xs = wu->swu_xs;
282 struct sr_crypto_wu *crwu;
283 struct cryptodesc *crd;
284 int flags, i, n;
285 daddr_t blkno;
286 u_int keyndx;
287
288 DNPRINTF(SR_D_DIS, "%s: sr_crypto_prepare wu %p encrypt %d\n",
289 DEVNAME(wu->swu_dis->sd_sc), wu, encrypt);
290
291 crwu = (struct sr_crypto_wu *)wu;
292 crwu->cr_uio.uio_iovcnt = 1;
293 crwu->cr_uio.uio_iov->iov_len = xs->datalen;
294 if (xs->flags & SCSI_DATA_OUT) {
295 crwu->cr_uio.uio_iov->iov_base = crwu->cr_dmabuf;
296 memcpy(crwu->cr_uio.uio_iov->iov_base, xs->data, xs->datalen);
297 } else
298 crwu->cr_uio.uio_iov->iov_base = xs->data;
299
300 blkno = wu->swu_blk_start;
301 n = xs->datalen >> DEV_BSHIFT;
302
303 /*
304 * We preallocated enough crypto descs for up to MAXPHYS of I/O.
305 * Since there may be less than that we need to tweak the amount
306 * of crypto desc structures to be just long enough for our needs.
307 */
308 KASSERT(crwu->cr_crp->crp_ndescalloc >= n);
309 crwu->cr_crp->crp_ndesc = n;
310 flags = (encrypt ? CRD_F_ENCRYPT : 0) |
311 CRD_F_IV_PRESENT | CRD_F_IV_EXPLICIT;
312
313 /*
314 * Select crypto session based on block number.
315 *
316 * XXX - this does not handle the case where the read/write spans
317 * across a different key blocks (e.g. 0.5TB boundary). Currently
318 * this is already broken by the use of scr_key[0] below.
319 */
320 keyndx = blkno >> SR_CRYPTO_KEY_BLKSHIFT;
321 crwu->cr_crp->crp_sid = mdd_crypto->scr_sid[keyndx];
322
323 crwu->cr_crp->crp_ilen = xs->datalen;
324 crwu->cr_crp->crp_alloctype = M_DEVBUF;
325 crwu->cr_crp->crp_flags = CRYPTO_F_IOV;
326 crwu->cr_crp->crp_buf = &crwu->cr_uio;
327 for (i = 0; i < crwu->cr_crp->crp_ndesc; i++, blkno++) {
328 crd = &crwu->cr_crp->crp_desc[i];
329 crd->crd_skip = i << DEV_BSHIFT;
330 crd->crd_len = DEV_BSIZE;
331 crd->crd_inject = 0;
332 crd->crd_flags = flags;
333 crd->crd_alg = mdd_crypto->scr_alg;
334 crd->crd_klen = mdd_crypto->scr_klen;
335 crd->crd_key = mdd_crypto->scr_key[0];
336 memcpy(crd->crd_iv, &blkno, sizeof(blkno));
337 }
338
339 return (crwu);
340}
341
342int
343sr_crypto_get_kdf(struct bioc_createraid *bc, struct sr_discipline *sd,
344 struct sr_crypto *mdd_crypto)
345{
346 int rv = EINVAL;
347 struct sr_crypto_kdfinfo *kdfinfo;
348
349 if (!(bc->bc_opaque_flags & BIOC_SOIN))
350 return (rv);
351 if (bc->bc_opaque == NULL)
352 return (rv);
353 if (bc->bc_opaque_size != sizeof(*kdfinfo))
354 return (rv);
355
356 kdfinfo = malloc(bc->bc_opaque_size, M_DEVBUF, M_WAITOK | M_ZERO);
357 if (copyin(bc->bc_opaque, kdfinfo, bc->bc_opaque_size))
358 goto out;
359
360 if (kdfinfo->len != bc->bc_opaque_size)
361 goto out;
362
363 /* copy KDF hint to disk meta data */
364 if (kdfinfo->flags & SR_CRYPTOKDF_HINT) {
365 if (sizeof(mdd_crypto->scr_meta->scm_kdfhint) <
366 kdfinfo->genkdf.len)
367 goto out;
368 memcpy(mdd_crypto->scr_meta->scm_kdfhint,
369 &kdfinfo->genkdf, kdfinfo->genkdf.len);
370 }
371
372 /* copy mask key to run-time meta data */
373 if ((kdfinfo->flags & SR_CRYPTOKDF_KEY)) {
374 if (sizeof(mdd_crypto->scr_maskkey) < sizeof(kdfinfo->maskkey))
375 goto out;
376 memcpy(mdd_crypto->scr_maskkey, &kdfinfo->maskkey,
377 sizeof(kdfinfo->maskkey));
378 }
379
380 bc->bc_opaque_status = BIOC_SOINOUT_OK;
381 rv = 0;
382out:
383 explicit_bzero(kdfinfo, bc->bc_opaque_size);
384 free(kdfinfo, M_DEVBUF, bc->bc_opaque_size);
385
386 return (rv);
387}
388
389int
390sr_crypto_encrypt(u_char *p, u_char *c, u_char *key, size_t size, int alg)
391{
392 rijndael_ctx ctx;
393 int i, rv = 1;
394
395 switch (alg) {
396 case SR_CRYPTOM_AES_ECB_256:
397 if (rijndael_set_key_enc_only(&ctx, key, 256) != 0)
398 goto out;
399 for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
400 rijndael_encrypt(&ctx, &p[i], &c[i]);
401 rv = 0;
402 break;
403 default:
404 DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
405 "softraid", alg);
406 rv = -1;
407 goto out;
408 }
409
410out:
411 explicit_bzero(&ctx, sizeof(ctx));
412 return (rv);
413}
414
415int
416sr_crypto_decrypt(u_char *c, u_char *p, u_char *key, size_t size, int alg)
417{
418 rijndael_ctx ctx;
419 int i, rv = 1;
420
421 switch (alg) {
422 case SR_CRYPTOM_AES_ECB_256:
423 if (rijndael_set_key(&ctx, key, 256) != 0)
424 goto out;
425 for (i = 0; i < size; i += RIJNDAEL128_BLOCK_LEN)
426 rijndael_decrypt(&ctx, &c[i], &p[i]);
427 rv = 0;
428 break;
429 default:
430 DNPRINTF(SR_D_DIS, "%s: unsupported encryption algorithm %d\n",
431 "softraid", alg);
432 rv = -1;
433 goto out;
434 }
435
436out:
437 explicit_bzero(&ctx, sizeof(ctx));
438 return (rv);
439}
440
441void
442sr_crypto_calculate_check_hmac_sha1(u_int8_t *maskkey, int maskkey_size,
443 u_int8_t *key, int key_size, u_char *check_digest)
444{
445 u_char check_key[SHA1_DIGEST_LENGTH];
446 HMAC_SHA1_CTX hmacctx;
447 SHA1_CTX shactx;
448
449 bzero(check_key, sizeof(check_key));
450 bzero(&hmacctx, sizeof(hmacctx));
451 bzero(&shactx, sizeof(shactx));
452
453 /* k = SHA1(mask_key) */
454 SHA1Init(&shactx);
455 SHA1Update(&shactx, maskkey, maskkey_size);
456 SHA1Final(check_key, &shactx);
457
458 /* mac = HMAC_SHA1_k(unencrypted key) */
459 HMAC_SHA1_Init(&hmacctx, check_key, sizeof(check_key));
460 HMAC_SHA1_Update(&hmacctx, key, key_size);
461 HMAC_SHA1_Final(check_digest, &hmacctx);
462
463 explicit_bzero(check_key, sizeof(check_key));
464 explicit_bzero(&hmacctx, sizeof(hmacctx));
465 explicit_bzero(&shactx, sizeof(shactx));
466}
467
468int
469sr_crypto_decrypt_key(struct sr_discipline *sd, struct sr_crypto *mdd_crypto)
470{
471 u_char check_digest[SHA1_DIGEST_LENGTH];
472 int rv = 1;
473
474 DNPRINTF(SR_D_DIS, "%s: sr_crypto_decrypt_key\n", DEVNAME(sd->sd_sc));
475
476 if (mdd_crypto->scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
477 goto out;
478
479 if (sr_crypto_decrypt((u_char *)mdd_crypto->scr_meta->scm_key,
480 (u_char *)mdd_crypto->scr_key,
481 mdd_crypto->scr_maskkey, sizeof(mdd_crypto->scr_key),
482 mdd_crypto->scr_meta->scm_mask_alg) == -1)
483 goto out;
484
485#ifdef SR_DEBUG
486 sr_crypto_dumpkeys(mdd_crypto);
487#endif
488
489 /* Check that the key decrypted properly. */
490 sr_crypto_calculate_check_hmac_sha1(mdd_crypto->scr_maskkey,
491 sizeof(mdd_crypto->scr_maskkey), (u_int8_t *)mdd_crypto->scr_key,
492 sizeof(mdd_crypto->scr_key), check_digest);
493 if (memcmp(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac,
494 check_digest, sizeof(check_digest)) != 0) {
495 explicit_bzero(mdd_crypto->scr_key,
496 sizeof(mdd_crypto->scr_key));
497 goto out;
498 }
499
500 rv = 0; /* Success */
501out:
502 /* we don't need the mask key anymore */
503 explicit_bzero(&mdd_crypto->scr_maskkey,
504 sizeof(mdd_crypto->scr_maskkey));
505
506 explicit_bzero(check_digest, sizeof(check_digest));
507
508 return rv;
509}
510
511int
512sr_crypto_create_keys(struct sr_discipline *sd, struct sr_crypto *mdd_crypto)
513{
514
515 DNPRINTF(SR_D_DIS, "%s: sr_crypto_create_keys\n",
516 DEVNAME(sd->sd_sc));
517
518 if (AES_MAXKEYBYTES < sizeof(mdd_crypto->scr_maskkey))
519 return (1);
520
521 /* XXX allow user to specify */
522 mdd_crypto->scr_meta->scm_alg = SR_CRYPTOA_AES_XTS_256;
523
524 /* generate crypto keys */
525 arc4random_buf(mdd_crypto->scr_key, sizeof(mdd_crypto->scr_key));
526
527 /* Mask the disk keys. */
528 mdd_crypto->scr_meta->scm_mask_alg = SR_CRYPTOM_AES_ECB_256;
529 sr_crypto_encrypt((u_char *)mdd_crypto->scr_key,
530 (u_char *)mdd_crypto->scr_meta->scm_key,
531 mdd_crypto->scr_maskkey, sizeof(mdd_crypto->scr_key),
532 mdd_crypto->scr_meta->scm_mask_alg);
533
534 /* Prepare key decryption check code. */
535 mdd_crypto->scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
536 sr_crypto_calculate_check_hmac_sha1(mdd_crypto->scr_maskkey,
537 sizeof(mdd_crypto->scr_maskkey),
538 (u_int8_t *)mdd_crypto->scr_key, sizeof(mdd_crypto->scr_key),
539 mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac);
540
541 /* Erase the plaintext disk keys */
542 explicit_bzero(mdd_crypto->scr_key, sizeof(mdd_crypto->scr_key));
543
544#ifdef SR_DEBUG
545 sr_crypto_dumpkeys(mdd_crypto);
546#endif
547
548 mdd_crypto->scr_meta->scm_flags = SR_CRYPTOF_KEY | SR_CRYPTOF_KDFHINT;
549
550 return (0);
551}
552
553int
554sr_crypto_change_maskkey(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
555 struct sr_crypto_kdfinfo *kdfinfo1, struct sr_crypto_kdfinfo *kdfinfo2)
556{
557 u_char check_digest[SHA1_DIGEST_LENGTH];
558 u_char *c, *p = NULL;
559 size_t ksz;
560 int rv = 1;
561
562 DNPRINTF(SR_D_DIS, "%s: sr_crypto_change_maskkey\n",
563 DEVNAME(sd->sd_sc));
564
565 if (mdd_crypto->scr_meta->scm_check_alg != SR_CRYPTOC_HMAC_SHA1)
566 goto out;
567
568 c = (u_char *)mdd_crypto->scr_meta->scm_key;
569 ksz = sizeof(mdd_crypto->scr_key);
570 p = malloc(ksz, M_DEVBUF, M_WAITOK | M_CANFAIL | M_ZERO);
571 if (p == NULL)
572 goto out;
573
574 if (sr_crypto_decrypt(c, p, kdfinfo1->maskkey, ksz,
575 mdd_crypto->scr_meta->scm_mask_alg) == -1)
576 goto out;
577
578#ifdef SR_DEBUG
579 sr_crypto_dumpkeys(mdd_crypto);
580#endif
581
582 sr_crypto_calculate_check_hmac_sha1(kdfinfo1->maskkey,
583 sizeof(kdfinfo1->maskkey), p, ksz, check_digest);
584 if (memcmp(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac,
585 check_digest, sizeof(check_digest)) != 0) {
586 sr_error(sd->sd_sc, "incorrect key or passphrase");
587 rv = EPERM;
588 goto out;
589 }
590
591 /* Copy new KDF hint to metadata, if supplied. */
592 if (kdfinfo2->flags & SR_CRYPTOKDF_HINT) {
593 if (kdfinfo2->genkdf.len >
594 sizeof(mdd_crypto->scr_meta->scm_kdfhint))
595 goto out;
596 explicit_bzero(mdd_crypto->scr_meta->scm_kdfhint,
597 sizeof(mdd_crypto->scr_meta->scm_kdfhint));
598 memcpy(mdd_crypto->scr_meta->scm_kdfhint,
599 &kdfinfo2->genkdf, kdfinfo2->genkdf.len);
600 }
601
602 /* Mask the disk keys. */
603 c = (u_char *)mdd_crypto->scr_meta->scm_key;
604 if (sr_crypto_encrypt(p, c, kdfinfo2->maskkey, ksz,
605 mdd_crypto->scr_meta->scm_mask_alg) == -1)
606 goto out;
607
608 /* Prepare key decryption check code. */
609 mdd_crypto->scr_meta->scm_check_alg = SR_CRYPTOC_HMAC_SHA1;
610 sr_crypto_calculate_check_hmac_sha1(kdfinfo2->maskkey,
611 sizeof(kdfinfo2->maskkey), (u_int8_t *)mdd_crypto->scr_key,
612 sizeof(mdd_crypto->scr_key), check_digest);
613
614 /* Copy new encrypted key and HMAC to metadata. */
615 memcpy(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac, check_digest,
616 sizeof(mdd_crypto->scr_meta->chk_hmac_sha1.sch_mac));
617
618 rv = 0; /* Success */
619
620out:
621 if (p) {
622 explicit_bzero(p, ksz);
623 free(p, M_DEVBUF, ksz);
624 }
625
626 explicit_bzero(check_digest, sizeof(check_digest));
627 explicit_bzero(&kdfinfo1->maskkey, sizeof(kdfinfo1->maskkey));
628 explicit_bzero(&kdfinfo2->maskkey, sizeof(kdfinfo2->maskkey));
629
630 return (rv);
631}
632
633struct sr_chunk *
634sr_crypto_create_key_disk(struct sr_discipline *sd,
635 struct sr_crypto *mdd_crypto, dev_t dev)
636{
637 struct sr_softc *sc = sd->sd_sc;
638 struct sr_discipline *fakesd = NULL;
639 struct sr_metadata *sm = NULL;
640 struct sr_meta_chunk *km;
641 struct sr_meta_opt_item *omi = NULL;
642 struct sr_meta_keydisk *skm;
643 struct sr_chunk *key_disk = NULL;
644 struct disklabel label;
645 struct vnode *vn;
646 char devname[32];
647 int c, part, open = 0;
648
649 /*
650 * Create a metadata structure on the key disk and store
651 * keying material in the optional metadata.
652 */
653
654 sr_meta_getdevname(sc, dev, devname, sizeof(devname));
655
656 /* Make sure chunk is not already in use. */
657 c = sr_chunk_in_use(sc, dev);
658 if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
659 sr_error(sc, "%s is already in use", devname);
660 goto done;
661 }
662
663 /* Open device. */
664 if (bdevvp(dev, &vn)) {
665 sr_error(sc, "cannot open key disk %s", devname);
666 goto done;
667 }
668 if (VOP_OPEN(vn, FREAD | FWRITE, NOCRED, curproc)) {
669 DNPRINTF(SR_D_META,"%s: sr_crypto_create_key_disk cannot "
670 "open %s\n", DEVNAME(sc), devname);
671 vput(vn);
672 goto done;
673 }
674 open = 1; /* close dev on error */
675
676 /* Get partition details. */
677 part = DISKPART(dev);
678 if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label,
679 FREAD, NOCRED, curproc)) {
680 DNPRINTF(SR_D_META, "%s: sr_crypto_create_key_disk ioctl "
681 "failed\n", DEVNAME(sc));
682 goto done;
683 }
684 if (label.d_partitions[part].p_fstype != FS_RAID) {
685 sr_error(sc, "%s partition not of type RAID (%d)",
686 devname, label.d_partitions[part].p_fstype);
687 goto done;
688 }
689
690 /*
691 * Create and populate chunk metadata.
692 */
693
694 key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
695 km = &key_disk->src_meta;
696
697 key_disk->src_dev_mm = dev;
698 key_disk->src_vn = vn;
699 strlcpy(key_disk->src_devname, devname, sizeof(km->scmi.scm_devname));
700 key_disk->src_size = 0;
701
702 km->scmi.scm_volid = sd->sd_meta->ssdi.ssd_level;
703 km->scmi.scm_chunk_id = 0;
704 km->scmi.scm_size = 0;
705 km->scmi.scm_coerced_size = 0;
706 strlcpy(km->scmi.scm_devname, devname, sizeof(km->scmi.scm_devname));
707 memcpy(&km->scmi.scm_uuid, &sd->sd_meta->ssdi.ssd_uuid,
708 sizeof(struct sr_uuid));
709
710 sr_checksum(sc, km, &km->scm_checksum,
711 sizeof(struct sr_meta_chunk_invariant));
712
713 km->scm_status = BIOC_SDONLINE;
714
715 /*
716 * Create and populate our own discipline and metadata.
717 */
718
719 sm = malloc(sizeof(struct sr_metadata), M_DEVBUF, M_WAITOK | M_ZERO);
720 sm->ssdi.ssd_magic = SR_MAGIC;
721 sm->ssdi.ssd_version = SR_META_VERSION;
722 sm->ssd_ondisk = 0;
723 sm->ssdi.ssd_vol_flags = 0;
724 memcpy(&sm->ssdi.ssd_uuid, &sd->sd_meta->ssdi.ssd_uuid,
725 sizeof(struct sr_uuid));
726 sm->ssdi.ssd_chunk_no = 1;
727 sm->ssdi.ssd_volid = SR_KEYDISK_VOLID;
728 sm->ssdi.ssd_level = SR_KEYDISK_LEVEL;
729 sm->ssdi.ssd_size = 0;
730 strlcpy(sm->ssdi.ssd_vendor, "OPENBSD", sizeof(sm->ssdi.ssd_vendor));
731 snprintf(sm->ssdi.ssd_product, sizeof(sm->ssdi.ssd_product),
732 "SR %s", "KEYDISK");
733 snprintf(sm->ssdi.ssd_revision, sizeof(sm->ssdi.ssd_revision),
734 "%03d", SR_META_VERSION);
735
736 fakesd = malloc(sizeof(struct sr_discipline), M_DEVBUF,
737 M_WAITOK | M_ZERO);
738 fakesd->sd_sc = sd->sd_sc;
739 fakesd->sd_meta = sm;
740 fakesd->sd_meta_type = SR_META_F_NATIVE;
741 fakesd->sd_vol_status = BIOC_SVONLINE;
742 strlcpy(fakesd->sd_name, "KEYDISK", sizeof(fakesd->sd_name));
743 SLIST_INIT(&fakesd->sd_meta_opt);
744
745 /* Add chunk to volume. */
746 fakesd->sd_vol.sv_chunks = malloc(sizeof(struct sr_chunk *), M_DEVBUF,
747 M_WAITOK | M_ZERO);
748 fakesd->sd_vol.sv_chunks[0] = key_disk;
749 SLIST_INIT(&fakesd->sd_vol.sv_chunk_list);
750 SLIST_INSERT_HEAD(&fakesd->sd_vol.sv_chunk_list, key_disk, src_link);
751
752 /* Generate mask key. */
753 arc4random_buf(mdd_crypto->scr_maskkey,
754 sizeof(mdd_crypto->scr_maskkey));
755
756 /* Copy mask key to optional metadata area. */
757 omi = malloc(sizeof(struct sr_meta_opt_item), M_DEVBUF,
758 M_WAITOK | M_ZERO);
759 omi->omi_som = malloc(sizeof(struct sr_meta_keydisk), M_DEVBUF,
760 M_WAITOK | M_ZERO);
761 omi->omi_som->som_type = SR_OPT_KEYDISK;
762 omi->omi_som->som_length = sizeof(struct sr_meta_keydisk);
763 skm = (struct sr_meta_keydisk *)omi->omi_som;
764 memcpy(&skm->skm_maskkey, mdd_crypto->scr_maskkey,
765 sizeof(skm->skm_maskkey));
766 SLIST_INSERT_HEAD(&fakesd->sd_meta_opt, omi, omi_link);
767 fakesd->sd_meta->ssdi.ssd_opt_no++;
768
769 /* Save metadata. */
770 if (sr_meta_save(fakesd, SR_META_DIRTY)) {
771 sr_error(sc, "could not save metadata to %s", devname);
772 goto fail;
773 }
774
775 goto done;
776
777fail:
778 free(key_disk, M_DEVBUF, sizeof(struct sr_chunk));
779 key_disk = NULL;
780
781done:
782 free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
783 if (fakesd && fakesd->sd_vol.sv_chunks)
784 free(fakesd->sd_vol.sv_chunks, M_DEVBUF,
785 sizeof(struct sr_chunk *));
786 free(fakesd, M_DEVBUF, sizeof(struct sr_discipline));
787 free(sm, M_DEVBUF, sizeof(struct sr_metadata));
788 if (open) {
789 VOP_CLOSE(vn, FREAD | FWRITE, NOCRED, curproc);
790 vput(vn);
791 }
792
793 return key_disk;
794}
795
796struct sr_chunk *
797sr_crypto_read_key_disk(struct sr_discipline *sd, struct sr_crypto *mdd_crypto,
798 dev_t dev)
799{
800 struct sr_softc *sc = sd->sd_sc;
801 struct sr_metadata *sm = NULL;
802 struct sr_meta_opt_item *omi, *omi_next;
803 struct sr_meta_opt_hdr *omh;
804 struct sr_meta_keydisk *skm;
805 struct sr_meta_opt_head som;
806 struct sr_chunk *key_disk = NULL;
807 struct disklabel label;
808 struct vnode *vn = NULL;
809 char devname[32];
810 int c, part, open = 0;
811
812 /*
813 * Load a key disk and load keying material into memory.
814 */
815
816 SLIST_INIT(&som);
817
818 sr_meta_getdevname(sc, dev, devname, sizeof(devname));
819
820 /* Make sure chunk is not already in use. */
821 c = sr_chunk_in_use(sc, dev);
822 if (c != BIOC_SDINVALID && c != BIOC_SDOFFLINE) {
823 sr_error(sc, "%s is already in use", devname);
824 goto done;
825 }
826
827 /* Open device. */
828 if (bdevvp(dev, &vn)) {
829 sr_error(sc, "cannot open key disk %s", devname);
830 goto done;
831 }
832 if (VOP_OPEN(vn, FREAD, NOCRED, curproc)) {
833 DNPRINTF(SR_D_META,"%s: sr_crypto_read_key_disk cannot "
834 "open %s\n", DEVNAME(sc), devname);
835 vput(vn);
836 goto done;
837 }
838 open = 1; /* close dev on error */
839
840 /* Get partition details. */
841 part = DISKPART(dev);
842 if (VOP_IOCTL(vn, DIOCGDINFO, (caddr_t)&label, FREAD,
843 NOCRED, curproc)) {
844 DNPRINTF(SR_D_META, "%s: sr_crypto_read_key_disk ioctl "
845 "failed\n", DEVNAME(sc));
846 goto done;
847 }
848 if (label.d_partitions[part].p_fstype != FS_RAID) {
849 sr_error(sc, "%s partition not of type RAID (%d)",
850 devname, label.d_partitions[part].p_fstype);
851 goto done;
852 }
853
854 /*
855 * Read and validate key disk metadata.
856 */
857 sm = malloc(SR_META_SIZE * DEV_BSIZE, M_DEVBUF, M_WAITOK | M_ZERO);
858 if (sr_meta_native_read(sd, dev, sm, NULL)) {
859 sr_error(sc, "native bootprobe could not read native metadata");
860 goto done;
861 }
862
863 if (sr_meta_validate(sd, dev, sm, NULL)) {
864 DNPRINTF(SR_D_META, "%s: invalid metadata\n",
865 DEVNAME(sc));
866 goto done;
867 }
868
869 /* Make sure this is a key disk. */
870 if (sm->ssdi.ssd_level != SR_KEYDISK_LEVEL) {
871 sr_error(sc, "%s is not a key disk", devname);
872 goto done;
873 }
874
875 /* Construct key disk chunk. */
876 key_disk = malloc(sizeof(struct sr_chunk), M_DEVBUF, M_WAITOK | M_ZERO);
877 key_disk->src_dev_mm = dev;
878 key_disk->src_vn = vn;
879 key_disk->src_size = 0;
880
881 memcpy(&key_disk->src_meta, (struct sr_meta_chunk *)(sm + 1),
882 sizeof(key_disk->src_meta));
883
884 /* Read mask key from optional metadata. */
885 sr_meta_opt_load(sc, sm, &som);
886 SLIST_FOREACH(omi, &som, omi_link) {
887 omh = omi->omi_som;
888 if (omh->som_type == SR_OPT_KEYDISK) {
889 skm = (struct sr_meta_keydisk *)omh;
890 memcpy(mdd_crypto->scr_maskkey, &skm->skm_maskkey,
891 sizeof(mdd_crypto->scr_maskkey));
892 } else if (omh->som_type == SR_OPT_CRYPTO) {
893 /* Original keydisk format with key in crypto area. */
894 memcpy(mdd_crypto->scr_maskkey,
895 omh + sizeof(struct sr_meta_opt_hdr),
896 sizeof(mdd_crypto->scr_maskkey));
897 }
898 }
899
900 open = 0;
901
902done:
903 for (omi = SLIST_FIRST(&som); omi != NULL; omi = omi_next) {
904 omi_next = SLIST_NEXT(omi, omi_link);
905 free(omi->omi_som, M_DEVBUF, 0);
906 free(omi, M_DEVBUF, sizeof(struct sr_meta_opt_item));
907 }
908
909 free(sm, M_DEVBUF, SR_META_SIZE * DEV_BSIZE);
910
911 if (vn && open) {
912 VOP_CLOSE(vn, FREAD, NOCRED, curproc);
913 vput(vn);
914 }
915
916 return key_disk;
917}
918
919void
920sr_crypto_free_sessions(struct sr_discipline *sd, struct sr_crypto *mdd_crypto)
921{
922 u_int i;
923
924 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
925 if (mdd_crypto->scr_sid[i] != (u_int64_t)-1) {
926 crypto_freesession(mdd_crypto->scr_sid[i]);
927 mdd_crypto->scr_sid[i] = (u_int64_t)-1;
928 }
929 }
930}
931
932int
933sr_crypto_alloc_resources_internal(struct sr_discipline *sd,
934 struct sr_crypto *mdd_crypto)
935{
936 struct sr_workunit *wu;
937 struct sr_crypto_wu *crwu;
938 struct cryptoini cri;
939 u_int num_keys, i;
940
941 DNPRINTF(SR_D_DIS, "%s: sr_crypto_alloc_resources\n",
942 DEVNAME(sd->sd_sc));
943
944 mdd_crypto->scr_alg = CRYPTO_AES_XTS;
945 switch (mdd_crypto->scr_meta->scm_alg) {
946 case SR_CRYPTOA_AES_XTS_128:
947 mdd_crypto->scr_klen = 256;
948 break;
949 case SR_CRYPTOA_AES_XTS_256:
950 mdd_crypto->scr_klen = 512;
951 break;
952 default:
953 sr_error(sd->sd_sc, "unknown crypto algorithm");
954 return (EINVAL);
955 }
956
957 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++)
958 mdd_crypto->scr_sid[i] = (u_int64_t)-1;
959
960 if (sr_wu_alloc(sd)) {
961 sr_error(sd->sd_sc, "unable to allocate work units");
962 return (ENOMEM);
963 }
964 if (sr_ccb_alloc(sd)) {
965 sr_error(sd->sd_sc, "unable to allocate CCBs");
966 return (ENOMEM);
967 }
968 if (sr_crypto_decrypt_key(sd, mdd_crypto)) {
969 sr_error(sd->sd_sc, "incorrect key or passphrase");
970 return (EPERM);
971 }
972
973 /*
974 * For each work unit allocate the uio, iovec and crypto structures.
975 * These have to be allocated now because during runtime we cannot
976 * fail an allocation without failing the I/O (which can cause real
977 * problems).
978 */
979 TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
980 crwu = (struct sr_crypto_wu *)wu;
981 crwu->cr_uio.uio_iov = &crwu->cr_iov;
982 crwu->cr_dmabuf = dma_alloc(MAXPHYS, PR_WAITOK);
983 crwu->cr_crp = crypto_getreq(MAXPHYS >> DEV_BSHIFT);
984 if (crwu->cr_crp == NULL)
985 return (ENOMEM);
986 }
987
988 memset(&cri, 0, sizeof(cri));
989 cri.cri_alg = mdd_crypto->scr_alg;
990 cri.cri_klen = mdd_crypto->scr_klen;
991
992 /* Allocate a session for every 2^SR_CRYPTO_KEY_BLKSHIFT blocks. */
993 num_keys = ((sd->sd_meta->ssdi.ssd_size - 1) >>
994 SR_CRYPTO_KEY_BLKSHIFT) + 1;
995 if (num_keys > SR_CRYPTO_MAXKEYS)
996 return (EFBIG);
997 for (i = 0; i < num_keys; i++) {
998 cri.cri_key = mdd_crypto->scr_key[i];
999 if (crypto_newsession(&mdd_crypto->scr_sid[i],
1000 &cri, 0) != 0) {
1001 sr_crypto_free_sessions(sd, mdd_crypto);
1002 return (EINVAL);
1003 }
1004 }
1005
1006 sr_hotplug_register(sd, sr_crypto_hotplug);
1007
1008 return (0);
1009}
1010
1011int
1012sr_crypto_alloc_resources(struct sr_discipline *sd)
1013{
1014 return sr_crypto_alloc_resources_internal(sd, &sd->mds.mdd_crypto);
1015}
1016
1017void
1018sr_crypto_free_resources_internal(struct sr_discipline *sd,
1019 struct sr_crypto *mdd_crypto)
1020{
1021 struct sr_workunit *wu;
1022 struct sr_crypto_wu *crwu;
1023
1024 DNPRINTF(SR_D_DIS, "%s: sr_crypto_free_resources\n",
1025 DEVNAME(sd->sd_sc));
1026
1027 if (mdd_crypto->key_disk != NULL) {
1028 explicit_bzero(mdd_crypto->key_disk,
1029 sizeof(*mdd_crypto->key_disk));
1030 free(mdd_crypto->key_disk, M_DEVBUF,
1031 sizeof(*mdd_crypto->key_disk));
1032 }
1033
1034 sr_hotplug_unregister(sd, sr_crypto_hotplug);
1035
1036 sr_crypto_free_sessions(sd, mdd_crypto);
1037
1038 TAILQ_FOREACH(wu, &sd->sd_wu, swu_next) {
1039 crwu = (struct sr_crypto_wu *)wu;
1040 if (crwu->cr_dmabuf)
1041 dma_free(crwu->cr_dmabuf, MAXPHYS);
1042 if (crwu->cr_crp)
1043 crypto_freereq(crwu->cr_crp);
1044 }
1045
1046 sr_wu_free(sd);
1047 sr_ccb_free(sd);
1048}
1049
1050void
1051sr_crypto_free_resources(struct sr_discipline *sd)
1052{
1053 struct sr_crypto *mdd_crypto = &sd->mds.mdd_crypto;
1054 sr_crypto_free_resources_internal(sd, mdd_crypto);
1055}
1056
1057int
1058sr_crypto_ioctl_internal(struct sr_discipline *sd,
1059 struct sr_crypto *mdd_crypto, struct bioc_discipline *bd)
1060{
1061 struct sr_crypto_kdfpair kdfpair;
1062 struct sr_crypto_kdfinfo kdfinfo1, kdfinfo2;
1063 int size, rv = 1;
1064
1065 DNPRINTF(SR_D_IOCTL, "%s: sr_crypto_ioctl %u\n",
1066 DEVNAME(sd->sd_sc), bd->bd_cmd);
1067
1068 switch (bd->bd_cmd) {
1069 case SR_IOCTL_GET_KDFHINT:
1070
1071 /* Get KDF hint for userland. */
1072 size = sizeof(mdd_crypto->scr_meta->scm_kdfhint);
1073 if (bd->bd_data == NULL || bd->bd_size > size)
1074 goto bad;
1075 if (copyout(mdd_crypto->scr_meta->scm_kdfhint,
1076 bd->bd_data, bd->bd_size))
1077 goto bad;
1078
1079 rv = 0;
1080
1081 break;
1082
1083 case SR_IOCTL_CHANGE_PASSPHRASE:
1084
1085 /* Attempt to change passphrase. */
1086
1087 size = sizeof(kdfpair);
1088 if (bd->bd_data == NULL || bd->bd_size > size)
1089 goto bad;
1090 if (copyin(bd->bd_data, &kdfpair, size))
1091 goto bad;
1092
1093 size = sizeof(kdfinfo1);
1094 if (kdfpair.kdfinfo1 == NULL || kdfpair.kdfsize1 > size)
1095 goto bad;
1096 if (copyin(kdfpair.kdfinfo1, &kdfinfo1, size))
1097 goto bad;
1098
1099 size = sizeof(kdfinfo2);
1100 if (kdfpair.kdfinfo2 == NULL || kdfpair.kdfsize2 > size)
1101 goto bad;
1102 if (copyin(kdfpair.kdfinfo2, &kdfinfo2, size))
1103 goto bad;
1104
1105 if (sr_crypto_change_maskkey(sd, mdd_crypto, &kdfinfo1,
1106 &kdfinfo2))
1107 goto bad;
1108
1109 /* Save metadata to disk. */
1110 rv = sr_meta_save(sd, SR_META_DIRTY);
1111
1112 break;
1113 }
1114
1115bad:
1116 explicit_bzero(&kdfpair, sizeof(kdfpair));
1117 explicit_bzero(&kdfinfo1, sizeof(kdfinfo1));
1118 explicit_bzero(&kdfinfo2, sizeof(kdfinfo2));
1119
1120 return (rv);
1121}
1122
1123int
1124sr_crypto_ioctl(struct sr_discipline *sd, struct bioc_discipline *bd)
1125{
1126 struct sr_crypto *mdd_crypto = &sd->mds.mdd_crypto;
1127 return sr_crypto_ioctl_internal(sd, mdd_crypto, bd);
1128}
1129
1130int
1131sr_crypto_meta_opt_handler_internal(struct sr_discipline *sd,
1132 struct sr_crypto *mdd_crypto, struct sr_meta_opt_hdr *om)
1133{
1134 int rv = EINVAL;
1135
1136 if (om->som_type == SR_OPT_CRYPTO) {
1137 mdd_crypto->scr_meta = (struct sr_meta_crypto *)om;
1138 rv = 0;
1139 }
1140
1141 return (rv);
1142}
1143
1144int
1145sr_crypto_meta_opt_handler(struct sr_discipline *sd, struct sr_meta_opt_hdr *om)
1146{
1147 struct sr_crypto *mdd_crypto = &sd->mds.mdd_crypto;
1148 return sr_crypto_meta_opt_handler_internal(sd, mdd_crypto, om);
1149}
1150
1151int
1152sr_crypto_rw(struct sr_workunit *wu)
1153{
1154 struct sr_crypto_wu *crwu;
1155 struct sr_crypto *mdd_crypto;
1156 daddr_t blkno;
1157 int rv, err;
1158 int s;
1159
1160 DNPRINTF(SR_D_DIS, "%s: sr_crypto_rw wu %p\n",
1161 DEVNAME(wu->swu_dis->sd_sc), wu);
1162
1163 if (sr_validate_io(wu, &blkno, "sr_crypto_rw"))
1164 return (1);
1165
1166 if (wu->swu_xs->flags & SCSI_DATA_OUT) {
1167 mdd_crypto = &wu->swu_dis->mds.mdd_crypto;
1168 crwu = sr_crypto_prepare(wu, mdd_crypto, 1);
1169 rv = crypto_invoke(crwu->cr_crp);
1170
1171 DNPRINTF(SR_D_INTR, "%s: sr_crypto_rw: wu %p xs: %p\n",
1172 DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1173
1174 if (rv) {
1175 /* fail io */
1176 wu->swu_xs->error = XS_DRIVER_STUFFUP;
1177 s = splbio();
1178 sr_scsi_done(wu->swu_dis, wu->swu_xs);
1179 splx(s);
1180 }
1181
1182 if ((err = sr_crypto_dev_rw(wu, crwu)) != 0)
1183 return err;
1184 } else
1185 rv = sr_crypto_dev_rw(wu, NULL);
1186
1187 return (rv);
1188}
1189
1190int
1191sr_crypto_dev_rw(struct sr_workunit *wu, struct sr_crypto_wu *crwu)
1192{
1193 struct sr_discipline *sd = wu->swu_dis;
1194 struct scsi_xfer *xs = wu->swu_xs;
1195 struct sr_ccb *ccb;
1196 struct uio *uio;
1197 daddr_t blkno;
1198
1199 blkno = wu->swu_blk_start;
1200
1201 ccb = sr_ccb_rw(sd, 0, blkno, xs->datalen, xs->data, xs->flags, 0);
1202 if (!ccb) {
1203 /* should never happen but handle more gracefully */
1204 printf("%s: %s: too many ccbs queued\n",
1205 DEVNAME(sd->sd_sc), sd->sd_meta->ssd_devname);
1206 goto bad;
1207 }
1208 if (!ISSET(xs->flags, SCSI_DATA_IN)) {
1209 uio = crwu->cr_crp->crp_buf;
1210 ccb->ccb_buf.b_data = uio->uio_iov->iov_base;
1211 ccb->ccb_opaque = crwu;
1212 }
1213 sr_wu_enqueue_ccb(wu, ccb);
1214 sr_schedule_wu(wu);
1215
1216 return (0);
1217
1218bad:
1219 return (EINVAL);
1220}
1221
1222void
1223sr_crypto_done_internal(struct sr_workunit *wu, struct sr_crypto *mdd_crypto)
1224{
1225 struct scsi_xfer *xs = wu->swu_xs;
1226 struct sr_crypto_wu *crwu;
1227 int rv;
1228 int s;
1229
1230 if (ISSET(wu->swu_flags, SR_WUF_REBUILD)) /* RAID 1C */
1231 return;
1232
1233 /* If this was a successful read, initiate decryption of the data. */
1234 if (ISSET(xs->flags, SCSI_DATA_IN) && xs->error == XS_NOERROR) {
1235 crwu = sr_crypto_prepare(wu, mdd_crypto, 0);
1236 DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: crypto_invoke %p\n",
1237 DEVNAME(wu->swu_dis->sd_sc), crwu->cr_crp);
1238 rv = crypto_invoke(crwu->cr_crp);
1239
1240 DNPRINTF(SR_D_INTR, "%s: sr_crypto_done: wu %p xs: %p\n",
1241 DEVNAME(wu->swu_dis->sd_sc), wu, wu->swu_xs);
1242
1243 if (rv)
1244 wu->swu_xs->error = XS_DRIVER_STUFFUP;
1245
1246 s = splbio();
1247 sr_scsi_done(wu->swu_dis, wu->swu_xs);
1248 splx(s);
1249 return;
1250 }
1251
1252 s = splbio();
1253 sr_scsi_done(wu->swu_dis, wu->swu_xs);
1254 splx(s);
1255}
1256
1257void
1258sr_crypto_done(struct sr_workunit *wu)
1259{
1260 struct sr_crypto *mdd_crypto = &wu->swu_dis->mds.mdd_crypto;
1261 sr_crypto_done_internal(wu, mdd_crypto);
1262}
1263
1264void
1265sr_crypto_hotplug(struct sr_discipline *sd, struct disk *diskp, int action)
1266{
1267 DNPRINTF(SR_D_MISC, "%s: sr_crypto_hotplug: %s %d\n",
1268 DEVNAME(sd->sd_sc), diskp->dk_name, action);
1269}
1270
1271#ifdef SR_DEBUG
1272void
1273sr_crypto_dumpkeys(struct sr_crypto *mdd_crypto)
1274{
1275 int i, j;
1276
1277 printf("sr_crypto_dumpkeys:\n");
1278 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
1279 printf("\tscm_key[%d]: 0x", i);
1280 for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
1281 printf("%02x", mdd_crypto->scr_meta->scm_key[i][j]);
1282 }
1283 printf("\n");
1284 }
1285 printf("sr_crypto_dumpkeys: runtime data keys:\n");
1286 for (i = 0; i < SR_CRYPTO_MAXKEYS; i++) {
1287 printf("\tscr_key[%d]: 0x", i);
1288 for (j = 0; j < SR_CRYPTO_KEYBYTES; j++) {
1289 printf("%02x", mdd_crypto->scr_key[i][j]);
1290 }
1291 printf("\n");
1292 }
1293}
1294#endif /* SR_DEBUG */