// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019 Google LLC
 */

/*
 * Refer to Documentation/block/inline-encryption.rst for detailed explanation.
 */

#define pr_fmt(fmt) "blk-crypto: " fmt

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/blk-crypto-profile.h>
#include <linux/module.h>
#include <linux/ratelimit.h>
#include <linux/slab.h>

#include "blk-crypto-internal.h"

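/*
 * Table of supported crypto modes.  For each mode, ->keysize is the size of
 * the raw key in bytes, ->security_strength is the effective cryptographic
 * strength in bytes (smaller than ->keysize for modes like XTS, whose key is
 * split into two subkeys), and ->ivsize is the size in bytes of the IV that
 * the DUN is expanded into.
 */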
const struct blk_crypto_mode blk_crypto_modes[] = {
	[BLK_ENCRYPTION_MODE_AES_256_XTS] = {
		.name = "AES-256-XTS",
		.cipher_str = "xts(aes)",
		.keysize = 64,
		.security_strength = 32,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_AES_128_CBC_ESSIV] = {
		.name = "AES-128-CBC-ESSIV",
		.cipher_str = "essiv(cbc(aes),sha256)",
		.keysize = 16,
		.security_strength = 16,
		.ivsize = 16,
	},
	[BLK_ENCRYPTION_MODE_ADIANTUM] = {
		.name = "Adiantum",
		.cipher_str = "adiantum(xchacha12,aes)",
		.keysize = 32,
		.security_strength = 32,
		.ivsize = 32,
	},
	[BLK_ENCRYPTION_MODE_SM4_XTS] = {
		.name = "SM4-XTS",
		.cipher_str = "xts(sm4)",
		.keysize = 32,
		.security_strength = 16,
		.ivsize = 16,
	},
};

/*
 * This number needs to be at least (the number of threads doing IO
 * concurrently) * (maximum recursive depth of a bio), so that we don't
 * deadlock on crypt_ctx allocations. The default is chosen to be the same
 * as the default number of post read contexts in both EXT4 and F2FS.
 */
static int num_prealloc_crypt_ctxs = 128;

module_param(num_prealloc_crypt_ctxs, int, 0444);
MODULE_PARM_DESC(num_prealloc_crypt_ctxs,
		 "Number of bio crypto contexts to preallocate");

static struct kmem_cache *bio_crypt_ctx_cache;
static mempool_t *bio_crypt_ctx_pool;

static int __init bio_crypt_ctx_init(void)
{
	size_t i;

	bio_crypt_ctx_cache = KMEM_CACHE(bio_crypt_ctx, 0);
	if (!bio_crypt_ctx_cache)
		goto out_no_mem;

	bio_crypt_ctx_pool = mempool_create_slab_pool(num_prealloc_crypt_ctxs,
						      bio_crypt_ctx_cache);
	if (!bio_crypt_ctx_pool)
		goto out_no_mem;

	/* This is assumed in various places. */
	BUILD_BUG_ON(BLK_ENCRYPTION_MODE_INVALID != 0);

	/*
	 * Validate the crypto mode properties. This ideally would be done with
	 * static assertions, but boot-time checks are the next best thing.
	 */
	for (i = 0; i < BLK_ENCRYPTION_MODE_MAX; i++) {
		BUG_ON(blk_crypto_modes[i].keysize >
		       BLK_CRYPTO_MAX_RAW_KEY_SIZE);
		BUG_ON(blk_crypto_modes[i].security_strength >
		       blk_crypto_modes[i].keysize);
		BUG_ON(blk_crypto_modes[i].ivsize > BLK_CRYPTO_MAX_IV_SIZE);
	}

	return 0;
out_no_mem:
	panic("Failed to allocate mem for bio crypt ctxs\n");
}
subsys_initcall(bio_crypt_ctx_init);

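/**
 * bio_crypt_set_ctx() - Attach an encryption context to a bio
 * @bio: the bio to attach the context to
 * @key: the key to encrypt/decrypt the bio's data with; must remain valid
 *	 for as long as the bio may use it
 * @dun: the data unit number to use; copied into the context
 * @gfp_mask: memory allocation flags; must include __GFP_DIRECT_RECLAIM so
 *	      that the mempool allocation below cannot fail
 */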
void bio_crypt_set_ctx(struct bio *bio, const struct blk_crypto_key *key,
		       const u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE], gfp_t gfp_mask)
{
	struct bio_crypt_ctx *bc;

	/*
	 * The caller must use a gfp_mask that contains __GFP_DIRECT_RECLAIM so
	 * that the mempool_alloc() can't fail.
	 */
	WARN_ON_ONCE(!(gfp_mask & __GFP_DIRECT_RECLAIM));

	bc = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);

	bc->bc_key = key;
	memcpy(bc->bc_dun, dun, sizeof(bc->bc_dun));

	bio->bi_crypt_context = bc;
}

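/* Return the bio's encryption context to the mempool and clear the pointer. */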
void __bio_crypt_free_ctx(struct bio *bio)
{
	mempool_free(bio->bi_crypt_context, bio_crypt_ctx_pool);
	bio->bi_crypt_context = NULL;
}

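/*
 * Give @dst a copy of @src's encryption context.  The copy shares @src's key,
 * so the key must remain valid for as long as either bio may use it.
 */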
int __bio_crypt_clone(struct bio *dst, struct bio *src, gfp_t gfp_mask)
{
	dst->bi_crypt_context = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
	if (!dst->bi_crypt_context)
		return -ENOMEM;
	*dst->bi_crypt_context = *src->bi_crypt_context;
	return 0;
}

/* Increments @dun by @inc, treating @dun as a multi-limb integer. */
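/*
 * For example, with 64-bit limbs stored least-significant first, incrementing
 * the DUN {0xFFFFFFFFFFFFFFFF, 0, ...} by 1 overflows the first limb and
 * carries into the second, giving {0, 1, ...}.
 */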
void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
			     unsigned int inc)
{
	int i;

	for (i = 0; inc && i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		dun[i] += inc;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if (dun[i] < inc)
			inc = 1;
		else
			inc = 0;
	}
}

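/* Advance the bio's DUN by the number of data units that @bytes spans. */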
void __bio_crypt_advance(struct bio *bio, unsigned int bytes)
{
	struct bio_crypt_ctx *bc = bio->bi_crypt_context;

	bio_crypt_dun_increment(bc->bc_dun,
				bytes >> bc->bc_key->data_unit_size_bits);
}

/*
 * Returns true if @bc->bc_dun plus @bytes converted to data units is equal to
 * @next_dun, treating the DUNs as multi-limb integers.
 */
bool bio_crypt_dun_is_contiguous(const struct bio_crypt_ctx *bc,
				 unsigned int bytes,
				 const u64 next_dun[BLK_CRYPTO_DUN_ARRAY_SIZE])
{
	int i;
	unsigned int carry = bytes >> bc->bc_key->data_unit_size_bits;

	for (i = 0; i < BLK_CRYPTO_DUN_ARRAY_SIZE; i++) {
		if (bc->bc_dun[i] + carry != next_dun[i])
			return false;
		/*
		 * If the addition in this limb overflowed, then we need to
		 * carry 1 into the next limb. Else the carry is 0.
		 */
		if ((bc->bc_dun[i] + carry) < carry)
			carry = 1;
		else
			carry = 0;
	}

	/* If the DUN wrapped through 0, don't treat it as contiguous. */
	return carry == 0;
}

/*
 * Checks that two bio crypt contexts are compatible - i.e. that
 * they are mergeable except for data_unit_num continuity.
 */
static bool bio_crypt_ctx_compatible(struct bio_crypt_ctx *bc1,
				     struct bio_crypt_ctx *bc2)
{
	if (!bc1)
		return !bc2;

	return bc2 && bc1->bc_key == bc2->bc_key;
}

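/*
 * Returns true if @bio can be merged into @rq, as far as inline encryption is
 * concerned.
 */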
bool bio_crypt_rq_ctx_compatible(struct request *rq, struct bio *bio)
{
	return bio_crypt_ctx_compatible(rq->crypt_ctx, bio->bi_crypt_context);
}

/*
 * Checks that two bio crypt contexts are compatible, and also
 * that their data_unit_nums are continuous (and can hence be merged)
 * in the order @bc1 followed by @bc2.
 */
bool bio_crypt_ctx_mergeable(struct bio_crypt_ctx *bc1, unsigned int bc1_bytes,
			     struct bio_crypt_ctx *bc2)
{
	if (!bio_crypt_ctx_compatible(bc1, bc2))
		return false;

	return !bc1 || bio_crypt_dun_is_contiguous(bc1, bc1_bytes, bc2->bc_dun);
}

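/*
 * Acquire a keyslot for the request's key from the queue's crypto profile,
 * saving the result in @rq->crypt_keyslot.
 */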
blk_status_t __blk_crypto_rq_get_keyslot(struct request *rq)
{
	return blk_crypto_get_keyslot(rq->q->crypto_profile,
				      rq->crypt_ctx->bc_key,
				      &rq->crypt_keyslot);
}

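/* Release the keyslot that __blk_crypto_rq_get_keyslot() acquired. */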
void __blk_crypto_rq_put_keyslot(struct request *rq)
{
	blk_crypto_put_keyslot(rq->crypt_keyslot);
	rq->crypt_keyslot = NULL;
}

void __blk_crypto_free_request(struct request *rq)
{
	/* The keyslot, if one was needed, should have been released earlier. */
	if (WARN_ON_ONCE(rq->crypt_keyslot))
		__blk_crypto_rq_put_keyslot(rq);

	mempool_free(rq->crypt_ctx, bio_crypt_ctx_pool);
	rq->crypt_ctx = NULL;
}

/*
 * Process a bio with a crypto context. Returns true if the caller should
 * submit the passed in bio, false if the bio is consumed.
 *
 * See the kerneldoc comment for blk_crypto_submit_bio for further details.
 */
bool __blk_crypto_submit_bio(struct bio *bio)
{
	const struct blk_crypto_key *bc_key = bio->bi_crypt_context->bc_key;
	struct block_device *bdev = bio->bi_bdev;

	/* Error if bio has no data. */
	if (WARN_ON_ONCE(!bio_has_data(bio))) {
		bio_io_error(bio);
		return false;
	}

	/*
	 * If the device does not natively support the encryption context,
	 * try to use the fallback if available.
	 */
	if (!blk_crypto_config_supported_natively(bdev, &bc_key->crypto_cfg)) {
		if (!IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK)) {
			pr_warn_once("%pg: crypto API fallback disabled; failing request.\n",
				     bdev);
			bio->bi_status = BLK_STS_NOTSUPP;
			bio_endio(bio);
			return false;
		}
		return blk_crypto_fallback_bio_prep(bio);
	}

	return true;
}
EXPORT_SYMBOL_GPL(__blk_crypto_submit_bio);

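/*
 * Copy @bio's crypt context into @rq, allocating a context for the request
 * first if it doesn't already have one.
 */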
int __blk_crypto_rq_bio_prep(struct request *rq, struct bio *bio,
			     gfp_t gfp_mask)
{
	if (!rq->crypt_ctx) {
		rq->crypt_ctx = mempool_alloc(bio_crypt_ctx_pool, gfp_mask);
		if (!rq->crypt_ctx)
			return -ENOMEM;
	}
	*rq->crypt_ctx = *bio->bi_crypt_context;
	return 0;
}

/**
 * blk_crypto_init_key() - Prepare a key for use with blk-crypto
 * @blk_key: Pointer to the blk_crypto_key to initialize.
 * @key_bytes: the bytes of the key
 * @key_size: size of the key in bytes
 * @key_type: type of the key -- either raw or hardware-wrapped
 * @crypto_mode: identifier for the encryption algorithm to use
 * @dun_bytes: number of bytes that will be used to specify the DUN when this
 *	       key is used
 * @data_unit_size: the data unit size to use for en/decryption
 *
 * Return: 0 on success, -errno on failure.  The caller is responsible for
 *	   zeroizing both blk_key and key_bytes when done with them.
 */
int blk_crypto_init_key(struct blk_crypto_key *blk_key,
			const u8 *key_bytes, size_t key_size,
			enum blk_crypto_key_type key_type,
			enum blk_crypto_mode_num crypto_mode,
			unsigned int dun_bytes,
			unsigned int data_unit_size)
{
	const struct blk_crypto_mode *mode;

	memset(blk_key, 0, sizeof(*blk_key));

	if (crypto_mode >= ARRAY_SIZE(blk_crypto_modes))
		return -EINVAL;

	mode = &blk_crypto_modes[crypto_mode];
	switch (key_type) {
	case BLK_CRYPTO_KEY_TYPE_RAW:
		if (key_size != mode->keysize)
			return -EINVAL;
		break;
	case BLK_CRYPTO_KEY_TYPE_HW_WRAPPED:
		if (key_size < mode->security_strength ||
		    key_size > BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	if (dun_bytes == 0 || dun_bytes > mode->ivsize)
		return -EINVAL;

	if (!is_power_of_2(data_unit_size))
		return -EINVAL;

	blk_key->crypto_cfg.crypto_mode = crypto_mode;
	blk_key->crypto_cfg.dun_bytes = dun_bytes;
	blk_key->crypto_cfg.data_unit_size = data_unit_size;
	blk_key->crypto_cfg.key_type = key_type;
	blk_key->data_unit_size_bits = ilog2(data_unit_size);
	blk_key->size = key_size;
	memcpy(blk_key->bytes, key_bytes, key_size);

	return 0;
}

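/*
 * Example (a hypothetical sketch, not taken from any real caller): a user of
 * blk-crypto that wants AES-256-XTS with 4096-byte data units and 64-bit DUNs
 * could set up a raw key roughly as follows, where raw_key, bdev, bio, and
 * lblk_num are placeholders:
 *
 *	struct blk_crypto_key blk_key;
 *	u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE] = { lblk_num };
 *	int err;
 *
 *	err = blk_crypto_init_key(&blk_key, raw_key, 64,
 *				  BLK_CRYPTO_KEY_TYPE_RAW,
 *				  BLK_ENCRYPTION_MODE_AES_256_XTS,
 *				  sizeof(u64), 4096);
 *	if (err)
 *		return err;
 *	err = blk_crypto_start_using_key(bdev, &blk_key);
 *	if (err)
 *		return err;
 *	bio_crypt_set_ctx(bio, &blk_key, dun, GFP_NOIO);
 *
 * When the key is no longer needed, it must be evicted with
 * blk_crypto_evict_key() and then zeroized.
 */

/*
 * Check whether the inline encryption hardware of @bdev (if any) supports
 * @cfg directly, i.e. without the blk-crypto-fallback.
 */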
bool blk_crypto_config_supported_natively(struct block_device *bdev,
					  const struct blk_crypto_config *cfg)
{
	return __blk_crypto_cfg_supported(bdev_get_queue(bdev)->crypto_profile,
					  cfg);
}

/*
 * Check if bios with @cfg can be en/decrypted by blk-crypto (i.e. either the
 * block_device it's submitted to supports inline crypto, or the
 * blk-crypto-fallback is enabled and supports the cfg).
 */
bool blk_crypto_config_supported(struct block_device *bdev,
				 const struct blk_crypto_config *cfg)
{
	if (IS_ENABLED(CONFIG_BLK_INLINE_ENCRYPTION_FALLBACK) &&
	    cfg->key_type == BLK_CRYPTO_KEY_TYPE_RAW)
		return true;
	return blk_crypto_config_supported_natively(bdev, cfg);
}

/**
 * blk_crypto_start_using_key() - Start using a blk_crypto_key on a device
 * @bdev: block device to operate on
 * @key: A key to use on the device
 *
 * Upper layers must call this function to ensure that either the hardware
 * supports the key's crypto settings, or the crypto API fallback has transforms
 * for the needed mode allocated and ready to go. This function may allocate
 * an skcipher, and *should not* be called from the data path, since that might
 * cause a deadlock.
 *
 * Return: 0 on success; -EOPNOTSUPP if the key is wrapped but the hardware does
 *	   not support wrapped keys; -ENOPKG if the key is a raw key but the
 *	   hardware does not support raw keys and blk-crypto-fallback is either
 *	   disabled or the needed algorithm is disabled in the crypto API; or
 *	   another -errno code if something else went wrong.
 */
int blk_crypto_start_using_key(struct block_device *bdev,
			       const struct blk_crypto_key *key)
{
	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		return 0;
	if (key->crypto_cfg.key_type != BLK_CRYPTO_KEY_TYPE_RAW) {
		pr_warn_ratelimited("%pg: no support for wrapped keys\n", bdev);
		return -EOPNOTSUPP;
	}
	return blk_crypto_fallback_start_using_mode(key->crypto_cfg.crypto_mode);
}

/**
 * blk_crypto_evict_key() - Evict a blk_crypto_key from a block_device
 * @bdev: a block_device on which I/O using the key may have been done
 * @key: the key to evict
 *
 * For a given block_device, this function removes the given blk_crypto_key from
 * the keyslot management structures and evicts it from any underlying hardware
 * keyslot(s) or blk-crypto-fallback keyslot it may have been programmed into.
 *
 * Upper layers must call this before freeing the blk_crypto_key. It must be
 * called for every block_device the key may have been used on. The key must no
 * longer be in use by any I/O when this function is called.
 *
 * Context: May sleep.
 */
void blk_crypto_evict_key(struct block_device *bdev,
			  const struct blk_crypto_key *key)
{
	struct request_queue *q = bdev_get_queue(bdev);
	int err;

	if (blk_crypto_config_supported_natively(bdev, &key->crypto_cfg))
		err = __blk_crypto_evict_key(q->crypto_profile, key);
	else
		err = blk_crypto_fallback_evict_key(key);
	/*
	 * An error can only occur here if the key failed to be evicted from a
	 * keyslot (due to a hardware or driver issue) or is allegedly still in
	 * use by I/O (due to a kernel bug). Even in these cases, the key is
	 * still unlinked from the keyslot management structures, and the caller
	 * is allowed and expected to free it right away. There's nothing
	 * callers can do to handle errors, so just log them and return void.
	 */
	if (err)
		pr_warn_ratelimited("%pg: error %d evicting key\n", bdev, err);
}
EXPORT_SYMBOL_GPL(blk_crypto_evict_key);

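/*
 * The following ioctls manage hardware-wrapped keys: BLKCRYPTOIMPORTKEY wraps
 * a raw key into a long-term wrapped key (lt_key), BLKCRYPTOGENERATEKEY has
 * the hardware generate a new long-term wrapped key, and BLKCRYPTOPREPAREKEY
 * converts a long-term wrapped key into an ephemerally-wrapped key (eph_key)
 * that can actually be used for inline encryption.
 */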
static int blk_crypto_ioctl_import_key(struct blk_crypto_profile *profile,
				       void __user *argp)
{
	struct blk_crypto_import_key_arg arg;
	u8 raw_key[BLK_CRYPTO_MAX_RAW_KEY_SIZE];
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int ret;

	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;

	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
		return -EINVAL;

	if (arg.raw_key_size < 16 || arg.raw_key_size > sizeof(raw_key))
		return -EINVAL;

	if (copy_from_user(raw_key, u64_to_user_ptr(arg.raw_key_ptr),
			   arg.raw_key_size)) {
		ret = -EFAULT;
		goto out;
	}
	ret = blk_crypto_import_key(profile, raw_key, arg.raw_key_size, lt_key);
	if (ret < 0)
		goto out;
	if (ret > arg.lt_key_size) {
		ret = -EOVERFLOW;
		goto out;
	}
	arg.lt_key_size = ret;
	if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
			 arg.lt_key_size) ||
	    copy_to_user(argp, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto out;
	}
	ret = 0;

out:
	memzero_explicit(raw_key, sizeof(raw_key));
	memzero_explicit(lt_key, sizeof(lt_key));
	return ret;
}

static int blk_crypto_ioctl_generate_key(struct blk_crypto_profile *profile,
					 void __user *argp)
{
	struct blk_crypto_generate_key_arg arg;
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int ret;

	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;

	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
		return -EINVAL;

	ret = blk_crypto_generate_key(profile, lt_key);
	if (ret < 0)
		goto out;
	if (ret > arg.lt_key_size) {
		ret = -EOVERFLOW;
		goto out;
	}
	arg.lt_key_size = ret;
	if (copy_to_user(u64_to_user_ptr(arg.lt_key_ptr), lt_key,
			 arg.lt_key_size) ||
	    copy_to_user(argp, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto out;
	}
	ret = 0;

out:
	memzero_explicit(lt_key, sizeof(lt_key));
	return ret;
}

static int blk_crypto_ioctl_prepare_key(struct blk_crypto_profile *profile,
					void __user *argp)
{
	struct blk_crypto_prepare_key_arg arg;
	u8 lt_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	u8 eph_key[BLK_CRYPTO_MAX_HW_WRAPPED_KEY_SIZE];
	int ret;

	if (copy_from_user(&arg, argp, sizeof(arg)))
		return -EFAULT;

	if (memchr_inv(arg.reserved, 0, sizeof(arg.reserved)))
		return -EINVAL;

	if (arg.lt_key_size > sizeof(lt_key))
		return -EINVAL;

	if (copy_from_user(lt_key, u64_to_user_ptr(arg.lt_key_ptr),
			   arg.lt_key_size)) {
		ret = -EFAULT;
		goto out;
	}
	ret = blk_crypto_prepare_key(profile, lt_key, arg.lt_key_size, eph_key);
	if (ret < 0)
		goto out;
	if (ret > arg.eph_key_size) {
		ret = -EOVERFLOW;
		goto out;
	}
	arg.eph_key_size = ret;
	if (copy_to_user(u64_to_user_ptr(arg.eph_key_ptr), eph_key,
			 arg.eph_key_size) ||
	    copy_to_user(argp, &arg, sizeof(arg))) {
		ret = -EFAULT;
		goto out;
	}
	ret = 0;

out:
	memzero_explicit(lt_key, sizeof(lt_key));
	memzero_explicit(eph_key, sizeof(eph_key));
	return ret;
}

int blk_crypto_ioctl(struct block_device *bdev, unsigned int cmd,
		     void __user *argp)
{
	struct blk_crypto_profile *profile =
		bdev_get_queue(bdev)->crypto_profile;

	if (!profile)
		return -EOPNOTSUPP;

	switch (cmd) {
	case BLKCRYPTOIMPORTKEY:
		return blk_crypto_ioctl_import_key(profile, argp);
	case BLKCRYPTOGENERATEKEY:
		return blk_crypto_ioctl_generate_key(profile, argp);
	case BLKCRYPTOPREPAREKEY:
		return blk_crypto_ioctl_prepare_key(profile, argp);
	default:
		return -ENOTTY;
	}
}