1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2012 Red Hat, Inc.
4 *
5 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 *
7 * Based on Chromium dm-verity driver (C) 2011 The Chromium OS Authors
8 *
9 * In the file "/sys/module/dm_verity/parameters/prefetch_cluster" you can set
10 * default prefetch value. Data are read in "prefetch_cluster" chunks from the
11 * hash device. Setting this greatly improves performance when data and hash
12 * are on the same disk on different partitions on devices with poor random
13 * access behavior.
14 */
15
16 #include "dm-verity.h"
17 #include "dm-verity-fec.h"
18 #include "dm-verity-verify-sig.h"
19 #include "dm-audit.h"
20 #include <linux/hex.h>
21 #include <linux/module.h>
22 #include <linux/reboot.h>
23 #include <linux/string.h>
24 #include <linux/jump_label.h>
25 #include <linux/security.h>
26
27 #define DM_MSG_PREFIX "verity"
28
29 #define DM_VERITY_ENV_LENGTH 42
30 #define DM_VERITY_ENV_VAR_NAME "DM_VERITY_ERR_BLOCK_NR"
31
32 #define DM_VERITY_DEFAULT_PREFETCH_SIZE 262144
33 #define DM_VERITY_USE_BH_DEFAULT_BYTES 8192
34
35 #define DM_VERITY_MAX_CORRUPTED_ERRS 100
36
37 #define DM_VERITY_OPT_LOGGING "ignore_corruption"
38 #define DM_VERITY_OPT_RESTART "restart_on_corruption"
39 #define DM_VERITY_OPT_PANIC "panic_on_corruption"
40 #define DM_VERITY_OPT_ERROR_RESTART "restart_on_error"
41 #define DM_VERITY_OPT_ERROR_PANIC "panic_on_error"
42 #define DM_VERITY_OPT_IGN_ZEROES "ignore_zero_blocks"
43 #define DM_VERITY_OPT_AT_MOST_ONCE "check_at_most_once"
44 #define DM_VERITY_OPT_TASKLET_VERIFY "try_verify_in_tasklet"
45
46 #define DM_VERITY_OPTS_MAX (5 + DM_VERITY_OPTS_FEC + \
47 DM_VERITY_ROOT_HASH_VERIFICATION_OPTS)
48
/*
 * Default hash-device prefetch size in bytes; tunable at runtime via
 * /sys/module/dm_verity/parameters/prefetch_cluster (see header comment).
 */
static unsigned int dm_verity_prefetch_cluster = DM_VERITY_DEFAULT_PREFETCH_SIZE;

module_param_named(prefetch_cluster, dm_verity_prefetch_cluster, uint, 0644);

/*
 * Per-I/O-priority-class byte thresholds, indexed by IOPRIO_CLASS_*.
 * Requests no larger than the threshold for their class may be verified
 * directly in BH context (see verity_use_bh()); 0 disables that fast path
 * for the class.  Tunable via the use_bh_bytes module parameter.
 */
static unsigned int dm_verity_use_bh_bytes[4] = {
	DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_NONE
	DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_RT
	DM_VERITY_USE_BH_DEFAULT_BYTES, // IOPRIO_CLASS_BE
	0				// IOPRIO_CLASS_IDLE
};

module_param_array_named(use_bh_bytes, dm_verity_use_bh_bytes, uint, NULL, 0644);

/* Incremented per target configured with try_verify_in_tasklet (see verity_dtr()). */
static DEFINE_STATIC_KEY_FALSE(use_bh_wq_enabled);
63
/*
 * Deferred prefetch request, queued to the verify workqueue by
 * verity_submit_prefetch() and executed by verity_prefetch_io().
 */
struct dm_verity_prefetch_work {
	struct work_struct work;	/* workqueue linkage */
	struct dm_verity *v;		/* target being prefetched for */
	unsigned short ioprio;		/* I/O priority propagated from the bio */
	sector_t block;			/* first data block of the request */
	unsigned int n_blocks;		/* number of data blocks covered */
};
71
72 /*
73 * Auxiliary structure appended to each dm-bufio buffer. If the value
74 * hash_verified is nonzero, hash of the block has been verified.
75 *
76 * The variable hash_verified is set to 0 when allocating the buffer, then
77 * it can be changed to 1 and it is never reset to 0 again.
78 *
79 * There is no lock around this value, a race condition can at worst cause
80 * that multiple processes verify the hash of the same buffer simultaneously
81 * and write 1 to hash_verified simultaneously.
82 * This condition is harmless, so we don't need locking.
83 */
struct buffer_aux {
	/* Nonzero once this hash block has been verified; see comment above. */
	int hash_verified;
};
87
88 /*
89 * Initialize struct buffer_aux for a freshly created buffer.
90 */
dm_bufio_alloc_callback(struct dm_buffer * buf)91 static void dm_bufio_alloc_callback(struct dm_buffer *buf)
92 {
93 struct buffer_aux *aux = dm_bufio_get_aux_data(buf);
94
95 aux->hash_verified = 0;
96 }
97
98 /*
99 * Translate input sector number to the sector number on the target device.
100 */
/*
 * Translate a sector number relative to the target into a sector number on
 * the underlying data device (thin wrapper around dm_target_offset()).
 */
static sector_t verity_map_sector(struct dm_verity *v, sector_t bi_sector)
{
	return dm_target_offset(v->ti, bi_sector);
}
105
106 /*
107 * Return hash position of a specified block at a specified tree level
108 * (0 is the lowest level).
109 * The lowest "hash_per_block_bits"-bits of the result denote hash position
110 * inside a hash block. The remaining bits denote location of the hash block.
111 */
/*
 * Return the hash position of @block at tree @level (0 is the lowest level).
 * The low "hash_per_block_bits" bits of the result give the position inside
 * a hash block; the remaining bits locate the hash block itself.
 */
static sector_t verity_position_at_level(struct dm_verity *v, sector_t block,
					 int level)
{
	const unsigned int shift = level * v->hash_per_block_bits;

	return block >> shift;
}
117
/*
 * Hash @len bytes at @data into @digest, applying the target's salt according
 * to the verity format version.  Returns 0 on success or a negative errno
 * from the crypto layer.  Uses per-io scratch state in @io, so it is safe to
 * call concurrently for distinct ios.
 */
int verity_hash(struct dm_verity *v, struct dm_verity_io *io,
		const u8 *data, size_t len, u8 *digest)
{
	struct shash_desc *desc;
	int r;

	if (likely(v->use_sha256_lib)) {
		struct sha256_ctx *ctx = &io->hash_ctx.sha256;

		/*
		 * Fast path using SHA-256 library. This is enabled only for
		 * verity version 1, where the salt is at the beginning.
		 */
		/* Start from the pre-salted state, then hash the data. */
		*ctx = *v->initial_hashstate.sha256;
		sha256_update(ctx, data, len);
		sha256_final(ctx, digest);
		return 0;
	}

	/* Generic crypto_shash path for all other algorithms. */
	desc = &io->hash_ctx.shash;
	desc->tfm = v->shash_tfm;
	if (unlikely(v->initial_hashstate.shash == NULL)) {
		/* Version 0: salt at end */
		r = crypto_shash_init(desc) ?:
		    crypto_shash_update(desc, data, len) ?:
		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
		    crypto_shash_final(desc, digest);
	} else {
		/* Version 1: salt at beginning */
		/* Import the pre-salted state instead of rehashing the salt. */
		r = crypto_shash_import(desc, v->initial_hashstate.shash) ?:
		    crypto_shash_finup(desc, data, len, digest);
	}
	if (unlikely(r))
		DMERR("Error hashing block: %d", r);
	return r;
}
154
/*
 * Compute the hash-device block (*hash_block) holding the hash of @block at
 * tree @level, and optionally the byte offset (*offset) of that hash inside
 * the block.  @offset may be NULL when only the block number is needed
 * (e.g. for prefetching).
 */
static void verity_hash_at_level(struct dm_verity *v, sector_t block, int level,
				 sector_t *hash_block, unsigned int *offset)
{
	sector_t position = verity_position_at_level(v, block, level);
	unsigned int idx;

	*hash_block = v->hash_level_block[level] + (position >> v->hash_per_block_bits);

	if (!offset)
		return;

	idx = position & ((1 << v->hash_per_block_bits) - 1);
	if (!v->version)
		/* Version 0: hashes are packed back to back. */
		*offset = idx * v->digest_size;
	else
		/* Version 1: each hash is padded to a power-of-two slot. */
		*offset = idx << (v->hash_dev_block_bits - v->hash_per_block_bits);
}
172
173 /*
174 * Handle verification errors.
175 */
/*
 * Handle a verification error for @block of the given @type.
 *
 * Logs the corruption (rate-limited to DM_VERITY_MAX_CORRUPTED_ERRS total),
 * emits a KOBJ_CHANGE uevent carrying the block number, and then acts per
 * the configured mode: returns 0 (ignore) in logging mode, restarts or
 * panics the machine in those modes, otherwise returns 1 so the caller
 * fails the I/O.
 */
static int verity_handle_err(struct dm_verity *v, enum verity_block_type type,
			     unsigned long long block)
{
	char verity_env[DM_VERITY_ENV_LENGTH];
	char *envp[] = { verity_env, NULL };
	const char *type_str = "";
	struct mapped_device *md = dm_table_get_md(v->ti->table);

	/* Corruption should be visible in device status in all modes */
	v->hash_failed = true;

	/* Past the error cap: skip logging/uevent, just apply the mode. */
	if (v->corrupted_errs >= DM_VERITY_MAX_CORRUPTED_ERRS)
		goto out;

	v->corrupted_errs++;

	switch (type) {
	case DM_VERITY_BLOCK_TYPE_DATA:
		type_str = "data";
		break;
	case DM_VERITY_BLOCK_TYPE_METADATA:
		type_str = "metadata";
		break;
	default:
		BUG();
	}

	DMERR_LIMIT("%s: %s block %llu is corrupted", v->data_dev->name,
		    type_str, block);

	if (v->corrupted_errs == DM_VERITY_MAX_CORRUPTED_ERRS) {
		DMERR("%s: reached maximum errors", v->data_dev->name);
		dm_audit_log_target(DM_MSG_PREFIX, "max-corrupted-errors", v->ti, 0);
	}

	/* Notify userspace of the corrupted block via a uevent. */
	snprintf(verity_env, DM_VERITY_ENV_LENGTH, "%s=%d,%llu",
		 DM_VERITY_ENV_VAR_NAME, type, block);

	kobject_uevent_env(&disk_to_dev(dm_disk(md))->kobj, KOBJ_CHANGE, envp);

out:
	if (v->mode == DM_VERITY_MODE_LOGGING)
		return 0;

	if (v->mode == DM_VERITY_MODE_RESTART)
		kernel_restart("dm-verity device corrupted");

	if (v->mode == DM_VERITY_MODE_PANIC)
		panic("dm-verity device corrupted");

	return 1;
}
228
229 /*
230 * Verify hash of a metadata block pertaining to the specified data block
231 * ("block" argument) at a specified level ("level" argument).
232 *
233 * On successful return, want_digest contains the hash value for a lower tree
234 * level or for the data block (if we're at the lowest level).
235 *
236 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
237 * If "skip_unverified" is false, unverified buffer is hashed and verified
238 * against current value of want_digest.
239 */
/*
 * Verify hash of a metadata block pertaining to the specified data block
 * ("block" argument) at a specified level ("level" argument).
 *
 * On successful return, want_digest contains the hash value for a lower tree
 * level or for the data block (if we're at the lowest level).
 *
 * If "skip_unverified" is true, unverified buffer is skipped and 1 is returned.
 * If "skip_unverified" is false, unverified buffer is hashed and verified
 * against current value of want_digest.
 *
 * Returns 0 on success, 1 when an unverified buffer was skipped, -EAGAIN when
 * running in BH context and the work must be redone from a kworker, or a
 * negative errno on failure.
 */
static int verity_verify_level(struct dm_verity *v, struct dm_verity_io *io,
			       sector_t block, int level, bool skip_unverified,
			       u8 *want_digest)
{
	struct dm_buffer *buf;
	struct buffer_aux *aux;
	u8 *data;
	int r;
	sector_t hash_block;
	unsigned int offset;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	verity_hash_at_level(v, block, level, &hash_block, &offset);

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/* BH context: only use a cached buffer, never block on I/O. */
		data = dm_bufio_get(v->bufio, hash_block, &buf);
		if (IS_ERR_OR_NULL(data)) {
			/*
			 * In softirq and the hash was not in the bufio cache.
			 * Return early and resume execution from a kworker to
			 * read the hash from disk.
			 */
			return -EAGAIN;
		}
	} else {
		data = dm_bufio_read_with_ioprio(v->bufio, hash_block,
						 &buf, bio->bi_ioprio);
	}

	if (IS_ERR(data)) {
		/* Read of the hash block itself failed; try FEC recovery. */
		if (skip_unverified)
			return 1;
		r = PTR_ERR(data);
		data = dm_bufio_new(v->bufio, hash_block, &buf);
		if (IS_ERR(data))
			return r;
		if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
				      want_digest, hash_block, data) == 0) {
			/* FEC reconstructed the block; mark it verified. */
			aux = dm_bufio_get_aux_data(buf);
			aux->hash_verified = 1;
			goto release_ok;
		} else {
			/* Forget the bogus buffer so it is re-read next time. */
			dm_bufio_release(buf);
			dm_bufio_forget(v->bufio, hash_block);
			return r;
		}
	}

	aux = dm_bufio_get_aux_data(buf);

	if (!aux->hash_verified) {
		if (skip_unverified) {
			r = 1;
			goto release_ret_r;
		}

		r = verity_hash(v, io, data, 1 << v->hash_dev_block_bits,
				io->tmp_digest);
		if (unlikely(r < 0))
			goto release_ret_r;

		if (likely(memcmp(io->tmp_digest, want_digest,
				  v->digest_size) == 0))
			aux->hash_verified = 1;
		else if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
			/*
			 * Error handling code (FEC included) cannot be run in a
			 * softirq since it may sleep, so fallback to a kworker.
			 */
			r = -EAGAIN;
			goto release_ret_r;
		} else if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_METADATA,
					     want_digest, hash_block, data) == 0)
			aux->hash_verified = 1;
		else if (verity_handle_err(v,
					   DM_VERITY_BLOCK_TYPE_METADATA,
					   hash_block)) {
			struct bio *bio;
			io->had_mismatch = true;
			bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
			dm_audit_log_bio(DM_MSG_PREFIX, "verify-metadata", bio,
					 block, 0);
			r = -EIO;
			goto release_ret_r;
		}
	}

release_ok:
	/* Copy out the next-lower-level hash at its offset in this block. */
	data += offset;
	memcpy(want_digest, data, v->digest_size);
	r = 0;

release_ret_r:
	dm_bufio_release(buf);
	return r;
}
336
337 /*
338 * Find a hash for a given block, write it to digest and verify the integrity
339 * of the hash tree if necessary.
340 */
/*
 * Find a hash for a given block, write it to digest and verify the integrity
 * of the hash tree if necessary.
 *
 * On success, *is_zero reports whether the digest matches the precomputed
 * all-zero-block digest (only when the ignore_zero_blocks option set
 * v->zero_digest).  Returns 0 on success or a negative errno.
 */
int verity_hash_for_block(struct dm_verity *v, struct dm_verity_io *io,
			  sector_t block, u8 *digest, bool *is_zero)
{
	int r = 0, i;

	if (likely(v->levels)) {
		/*
		 * First, we try to get the requested hash for
		 * the current block. If the hash block itself is
		 * verified, zero is returned. If it isn't, this
		 * function returns 1 and we fall back to whole
		 * chain verification.
		 */
		r = verity_verify_level(v, io, block, 0, true, digest);
		if (likely(r <= 0))
			goto out;
	}

	/* Full chain: start from the trusted root and walk down. */
	memcpy(digest, v->root_digest, v->digest_size);

	for (i = v->levels - 1; i >= 0; i--) {
		r = verity_verify_level(v, io, block, i, false, digest);
		if (unlikely(r))
			goto out;
	}
out:
	if (!r && v->zero_digest)
		*is_zero = !memcmp(v->zero_digest, digest, v->digest_size);
	else
		*is_zero = false;

	return r;
}
374
/*
 * Re-read @cur_block synchronously into a private bounce page and re-verify
 * it against @want_digest; on success copy the verified data into @dest.
 * This guards against transient read corruption (e.g. a bad in-flight DMA)
 * before escalating to FEC or error handling.  Must not be called from
 * BH context (dm_io() sleeps).  Returns 0 on success or a negative errno.
 */
static noinline int verity_recheck(struct dm_verity *v, struct dm_verity_io *io,
				   const u8 *want_digest, sector_t cur_block,
				   u8 *dest)
{
	struct page *page;
	void *buffer;
	int r;
	struct dm_io_request io_req;
	struct dm_io_region io_loc;

	/* Mempool allocation with GFP_NOIO cannot fail (it may sleep). */
	page = mempool_alloc(&v->recheck_pool, GFP_NOIO);
	buffer = page_to_virt(page);

	io_req.bi_opf = REQ_OP_READ;
	io_req.mem.type = DM_IO_KMEM;
	io_req.mem.ptr.addr = buffer;
	io_req.notify.fn = NULL;	/* synchronous dm_io */
	io_req.client = v->io;
	io_loc.bdev = v->data_dev->bdev;
	io_loc.sector = cur_block << (v->data_dev_block_bits - SECTOR_SHIFT);
	io_loc.count = 1 << (v->data_dev_block_bits - SECTOR_SHIFT);
	r = dm_io(&io_req, 1, &io_loc, NULL, IOPRIO_DEFAULT);
	if (unlikely(r))
		goto free_ret;

	r = verity_hash(v, io, buffer, 1 << v->data_dev_block_bits,
			io->tmp_digest);
	if (unlikely(r))
		goto free_ret;

	if (memcmp(io->tmp_digest, want_digest, v->digest_size)) {
		/* Still mismatching: the block really is corrupted. */
		r = -EIO;
		goto free_ret;
	}

	memcpy(dest, buffer, 1 << v->data_dev_block_bits);
	r = 0;
free_ret:
	mempool_free(page, &v->recheck_pool);

	return r;
}
417
/*
 * Handle a data block whose computed hash did not match the wanted digest.
 * Tries, in order: deferring to a kworker (if in BH context), a synchronous
 * re-read (verity_recheck), FEC decoding, and finally the configured error
 * mode via verity_handle_err().  Returns 0 if the block was recovered or the
 * error is to be ignored, -EAGAIN to retry from a kworker, or -EIO.
 */
static int verity_handle_data_hash_mismatch(struct dm_verity *v,
					    struct dm_verity_io *io,
					    struct bio *bio,
					    struct pending_block *block)
{
	const u8 *want_digest = block->want_digest;
	sector_t blkno = block->blkno;
	u8 *data = block->data;

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Error handling code (FEC included) cannot be run in a
		 * softirq since it may sleep, so fallback to a kworker.
		 */
		return -EAGAIN;
	}
	if (verity_recheck(v, io, want_digest, blkno, data) == 0) {
		/* Transient read error; re-read verified cleanly. */
		if (v->validated_blocks)
			set_bit(blkno, v->validated_blocks);
		return 0;
	}
	if (verity_fec_decode(v, io, DM_VERITY_BLOCK_TYPE_DATA, want_digest,
			      blkno, data) == 0)
		return 0;
	if (bio->bi_status)
		return -EIO; /* Error correction failed; Just return error */

	if (verity_handle_err(v, DM_VERITY_BLOCK_TYPE_DATA, blkno)) {
		io->had_mismatch = true;
		dm_audit_log_bio(DM_MSG_PREFIX, "verify-data", bio, blkno, 0);
		return -EIO;
	}
	return 0;
}
452
verity_clear_pending_blocks(struct dm_verity_io * io)453 static void verity_clear_pending_blocks(struct dm_verity_io *io)
454 {
455 int i;
456
457 for (i = io->num_pending - 1; i >= 0; i--) {
458 kunmap_local(io->pending_blocks[i].data);
459 io->pending_blocks[i].data = NULL;
460 }
461 io->num_pending = 0;
462 }
463
/*
 * Hash all queued pending blocks and compare each against its wanted digest.
 * Uses the interleaved two-buffer SHA-256 primitive when exactly two blocks
 * are pending (only possible when the algorithm is SHA-256 and
 * use_sha256_finup_2x is set), otherwise hashes one at a time.  Mismatches
 * go through verity_handle_data_hash_mismatch().  On success the pending
 * queue is drained.  Returns 0 or a negative errno.
 */
static int verity_verify_pending_blocks(struct dm_verity *v,
					struct dm_verity_io *io,
					struct bio *bio)
{
	const unsigned int block_size = 1 << v->data_dev_block_bits;
	int i, r;

	if (io->num_pending == 2) {
		/* num_pending == 2 implies that the algorithm is SHA-256 */
		sha256_finup_2x(v->initial_hashstate.sha256,
				io->pending_blocks[0].data,
				io->pending_blocks[1].data, block_size,
				io->pending_blocks[0].real_digest,
				io->pending_blocks[1].real_digest);
	} else {
		for (i = 0; i < io->num_pending; i++) {
			r = verity_hash(v, io, io->pending_blocks[i].data,
					block_size,
					io->pending_blocks[i].real_digest);
			if (unlikely(r))
				return r;
		}
	}

	for (i = 0; i < io->num_pending; i++) {
		struct pending_block *block = &io->pending_blocks[i];

		if (likely(memcmp(block->real_digest, block->want_digest,
				  v->digest_size) == 0)) {
			/* check_at_most_once: remember this block is good. */
			if (v->validated_blocks)
				set_bit(block->blkno, v->validated_blocks);
		} else {
			r = verity_handle_data_hash_mismatch(v, io, bio, block);
			if (unlikely(r))
				return r;
		}
	}
	/* All pending blocks handled; unmap and reset the queue. */
	verity_clear_pending_blocks(io);
	return 0;
}
504
505 /*
506 * Verify one "dm_verity_io" structure.
507 */
/*
 * Verify one "dm_verity_io" structure.
 *
 * Walks every data block covered by the bio: skips blocks already marked in
 * validated_blocks (check_at_most_once), zero-fills expected-zero blocks
 * (ignore_zero_blocks), and queues the rest for (possibly batched)
 * verification.  Returns 0 on success, -EAGAIN if it must be redone from a
 * kworker, or a negative errno.
 */
static int verity_verify_io(struct dm_verity_io *io)
{
	struct dm_verity *v = io->v;
	const unsigned int block_size = 1 << v->data_dev_block_bits;
	/* Batch two blocks only when the interleaved SHA-256 path is usable. */
	const int max_pending = v->use_sha256_finup_2x ? 2 : 1;
	struct bvec_iter iter_copy;
	struct bvec_iter *iter;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);
	unsigned int b;
	int r;

	io->num_pending = 0;

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->in_bh) {
		/*
		 * Copy the iterator in case we need to restart verification in
		 * a kworker.
		 */
		iter_copy = io->iter;
		iter = &iter_copy;
	} else
		iter = &io->iter;

	for (b = 0; b < io->n_blocks;
	     b++, bio_advance_iter_single(bio, iter, block_size)) {
		sector_t blkno = io->block + b;
		struct pending_block *block;
		bool is_zero;
		struct bio_vec bv;
		void *data;

		/* check_at_most_once: skip blocks already proven good. */
		if (v->validated_blocks && bio->bi_status == BLK_STS_OK &&
		    likely(test_bit(blkno, v->validated_blocks)))
			continue;

		block = &io->pending_blocks[io->num_pending];

		/* Fetch (and if needed verify) the wanted digest for blkno. */
		r = verity_hash_for_block(v, io, blkno, block->want_digest,
					  &is_zero);
		if (unlikely(r < 0))
			goto error;

		bv = bio_iter_iovec(bio, *iter);
		if (unlikely(bv.bv_len < block_size)) {
			/*
			 * Data block spans pages. This should not happen,
			 * since dm-verity sets dma_alignment to the data block
			 * size minus 1, and dm-verity also doesn't allow the
			 * data block size to be greater than PAGE_SIZE.
			 */
			DMERR_LIMIT("unaligned io (data block spans pages)");
			r = -EIO;
			goto error;
		}

		data = bvec_kmap_local(&bv);

		if (is_zero) {
			/*
			 * If we expect a zero block, don't validate, just
			 * return zeros.
			 */
			memset(data, 0, block_size);
			kunmap_local(data);
			continue;
		}
		block->data = data;
		block->blkno = blkno;
		/* Flush the batch once it reaches max_pending blocks. */
		if (++io->num_pending == max_pending) {
			r = verity_verify_pending_blocks(v, io, bio);
			if (unlikely(r))
				goto error;
		}
	}

	/* Flush any partial final batch. */
	if (io->num_pending) {
		r = verity_verify_pending_blocks(v, io, bio);
		if (unlikely(r))
			goto error;
	}

	return 0;

error:
	verity_clear_pending_blocks(io);
	return r;
}
595
596 /*
597 * Skip verity work in response to I/O error when system is shutting down.
598 */
verity_is_system_shutting_down(void)599 static inline bool verity_is_system_shutting_down(void)
600 {
601 return system_state == SYSTEM_HALT || system_state == SYSTEM_POWER_OFF
602 || system_state == SYSTEM_RESTART;
603 }
604
/* Work item body: restart the machine after an unrecoverable I/O error
 * (used by restart_on_error mode; see verity_finish_io()). */
static void restart_io_error(struct work_struct *w)
{
	kernel_restart("dm-verity device has I/O error");
}
609
610 /*
611 * End one "io" structure with a given error.
612 */
/*
 * End one "io" structure with a given error.
 *
 * Restores the original bi_end_io, applies the configured error mode for
 * genuine I/O errors (not verification mismatches, readahead, or shutdown),
 * and completes the bio.
 */
static void verity_finish_io(struct dm_verity_io *io, blk_status_t status)
{
	struct dm_verity *v = io->v;
	struct bio *bio = dm_bio_from_per_bio_data(io, v->ti->per_io_data_size);

	bio->bi_end_io = io->orig_bi_end_io;
	bio->bi_status = status;

	verity_fec_finish_io(io);

	/*
	 * error_mode only applies to real I/O errors: skip readahead
	 * failures, hash mismatches (already handled by v->mode), and
	 * errors during shutdown.
	 */
	if (unlikely(status != BLK_STS_OK) &&
	    unlikely(!(bio->bi_opf & REQ_RAHEAD)) &&
	    !io->had_mismatch &&
	    !verity_is_system_shutting_down()) {
		if (v->error_mode == DM_VERITY_MODE_PANIC) {
			panic("dm-verity device has I/O error");
		}
		if (v->error_mode == DM_VERITY_MODE_RESTART) {
			static DECLARE_WORK(restart_work, restart_io_error);
			queue_work(v->verify_wq, &restart_work);
			/*
			 * We deliberately don't call bio_endio here, because
			 * the machine will be restarted anyway.
			 */
			return;
		}
	}

	bio_endio(bio);
}
643
/* Workqueue entry point: verify an io in process context and complete it. */
static void verity_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);

	/* Process context: sleeping paths (bufio reads, FEC) are allowed. */
	io->in_bh = false;

	verity_finish_io(io, errno_to_blk_status(verity_verify_io(io)));
}
652
/*
 * BH (softirq) entry point: attempt verification without sleeping.  If the
 * fast path cannot complete (-EAGAIN: hash not cached or error handling
 * needed; -ENOMEM), requeue the io to the regular kworker path.
 */
static void verity_bh_work(struct work_struct *w)
{
	struct dm_verity_io *io = container_of(w, struct dm_verity_io, work);
	int err;

	io->in_bh = true;
	err = verity_verify_io(io);
	if (err == -EAGAIN || err == -ENOMEM) {
		/* fallback to retrying in a kworker */
		INIT_WORK(&io->work, verity_work);
		queue_work(io->v->verify_wq, &io->work);
		return;
	}

	verity_finish_io(io, errno_to_blk_status(err));
}
669
/*
 * Decide whether a completed read of @bytes at priority class @ioprio should
 * be verified in BH context: the class must be within the threshold table,
 * the size within that class's configured limit, and no reschedule pending.
 */
static inline bool verity_use_bh(unsigned int bytes, unsigned short ioprio)
{
	/* ioprio <= IOPRIO_CLASS_IDLE also bounds the table index. */
	return ioprio <= IOPRIO_CLASS_IDLE &&
		bytes <= READ_ONCE(dm_verity_use_bh_bytes[ioprio]) &&
		!need_resched();
}
676
/*
 * bi_end_io for the underlying data read: dispatch verification either
 * inline/via the BH workqueue (try_verify_in_tasklet fast path) or to the
 * regular verify workqueue.  Failed reads are completed immediately unless
 * FEC might still recover them.
 */
static void verity_end_io(struct bio *bio)
{
	struct dm_verity_io *io = bio->bi_private;
	unsigned short ioprio = IOPRIO_PRIO_CLASS(bio->bi_ioprio);
	unsigned int bytes = io->n_blocks << io->v->data_dev_block_bits;

	if (bio->bi_status &&
	    (!verity_fec_is_enabled(io->v) ||
	     verity_is_system_shutting_down() ||
	     (bio->bi_opf & REQ_RAHEAD))) {
		/* No recovery possible/worthwhile; fail the bio now. */
		verity_finish_io(io, bio->bi_status);
		return;
	}

	if (static_branch_unlikely(&use_bh_wq_enabled) && io->v->use_bh_wq &&
	    verity_use_bh(bytes, ioprio)) {
		if (in_hardirq() || irqs_disabled()) {
			/* Can't run BH work here; defer to the BH workqueue. */
			INIT_WORK(&io->work, verity_bh_work);
			queue_work(system_bh_wq, &io->work);
		} else {
			/* Already in a BH-compatible context; run inline. */
			verity_bh_work(&io->work);
		}
	} else {
		INIT_WORK(&io->work, verity_work);
		queue_work(io->v->verify_wq, &io->work);
	}
}
704
705 /*
706 * Prefetch buffers for the specified io.
707 * The root buffer is not prefetched, it is assumed that it will be cached
708 * all the time.
709 */
/*
 * Prefetch buffers for the specified io.
 * The root buffer is not prefetched, it is assumed that it will be cached
 * all the time.
 *
 * Runs as a work item; walks each tree level (bottom-up, excluding the root)
 * and prefetches the span of hash blocks covering the request.  At the
 * lowest level the span is widened to prefetch_cluster alignment.
 */
static void verity_prefetch_io(struct work_struct *work)
{
	struct dm_verity_prefetch_work *pw =
		container_of(work, struct dm_verity_prefetch_work, work);
	struct dm_verity *v = pw->v;
	int i;

	for (i = v->levels - 2; i >= 0; i--) {
		sector_t hash_block_start;
		sector_t hash_block_end;

		verity_hash_at_level(v, pw->block, i, &hash_block_start, NULL);
		verity_hash_at_level(v, pw->block + pw->n_blocks - 1, i, &hash_block_end, NULL);

		if (!i) {
			unsigned int cluster = READ_ONCE(dm_verity_prefetch_cluster);

			/* Convert the byte tunable to blocks. */
			cluster >>= v->data_dev_block_bits;
			if (unlikely(!cluster))
				goto no_prefetch_cluster;

			/* Round a non-power-of-two cluster down to one. */
			if (unlikely(cluster & (cluster - 1)))
				cluster = 1 << __fls(cluster);

			/* Widen the span to cluster alignment, clamped to the tree. */
			hash_block_start &= ~(sector_t)(cluster - 1);
			hash_block_end |= cluster - 1;
			if (unlikely(hash_block_end >= v->hash_end))
				hash_block_end = v->hash_end - 1;
		}
no_prefetch_cluster:
		dm_bufio_prefetch_with_ioprio(v->bufio, hash_block_start,
					      hash_block_end - hash_block_start + 1,
					      pw->ioprio);
	}

	kfree(pw);
}
747
/*
 * Queue a hash-prefetch work item for the blocks covered by @io, trimming
 * already-validated blocks off both ends first (check_at_most_once).
 * Allocation failure is silently ignored: prefetch is best-effort.
 */
static void verity_submit_prefetch(struct dm_verity *v, struct dm_verity_io *io,
				   unsigned short ioprio)
{
	sector_t block = io->block;
	unsigned int n_blocks = io->n_blocks;
	struct dm_verity_prefetch_work *pw;

	if (v->validated_blocks) {
		/* Trim validated blocks from the front... */
		while (n_blocks && test_bit(block, v->validated_blocks)) {
			block++;
			n_blocks--;
		}
		/* ...and from the back. */
		while (n_blocks && test_bit(block + n_blocks - 1,
					    v->validated_blocks))
			n_blocks--;
		if (!n_blocks)
			return;
	}

	pw = kmalloc_obj(struct dm_verity_prefetch_work,
			 GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);

	if (!pw)
		return;

	INIT_WORK(&pw->work, verity_prefetch_io);
	pw->v = v;
	pw->block = block;
	pw->n_blocks = n_blocks;
	pw->ioprio = ioprio;
	queue_work(v->verify_wq, &pw->work);
}
780
781 /*
782 * Bio map function. It allocates dm_verity_io structure and bio vector and
783 * fills them. Then it issues prefetches and the I/O.
784 */
/*
 * Bio map function. It allocates dm_verity_io structure and bio vector and
 * fills them. Then it issues prefetches and the I/O.
 *
 * Rejects writes and any bio not aligned to, or extending past, the data
 * area.  Hooks bi_end_io so verification runs on read completion, then
 * submits the bio itself (returns DM_MAPIO_SUBMITTED).
 */
static int verity_map(struct dm_target *ti, struct bio *bio)
{
	struct dm_verity *v = ti->private;
	struct dm_verity_io *io;

	bio_set_dev(bio, v->data_dev->bdev);
	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);

	/* Both start sector and length must be block-aligned. */
	if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
		DMERR_LIMIT("unaligned io");
		return DM_MAPIO_KILL;
	}

	if (bio_end_sector(bio) >>
	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
		DMERR_LIMIT("io out of range");
		return DM_MAPIO_KILL;
	}

	/* dm-verity is read-only. */
	if (bio_data_dir(bio) == WRITE)
		return DM_MAPIO_KILL;

	io = dm_per_bio_data(bio, ti->per_io_data_size);
	io->v = v;
	io->orig_bi_end_io = bio->bi_end_io;
	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
	io->had_mismatch = false;

	/* Intercept completion so we can verify before ending the bio. */
	bio->bi_end_io = verity_end_io;
	bio->bi_private = io;
	io->iter = bio->bi_iter;

	verity_fec_init_io(io);

	verity_submit_prefetch(v, io, bio->bi_ioprio);

	submit_bio_noacct(bio);

	return DM_MAPIO_SUBMITTED;
}
827
/*
 * Post-suspend hook: drain all in-flight verification work, then drop the
 * bufio cache so stale hash blocks are re-read after resume.
 */
static void verity_postsuspend(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;
	flush_workqueue(v->verify_wq);
	dm_bufio_client_reset(v->bufio);
}
834
835 /*
836 * Status: V (valid) or C (corruption found)
837 */
/*
 * Status: V (valid) or C (corruption found)
 *
 * INFO: validity flag plus FEC corrected-block count (or "-").
 * TABLE: the constructor arguments, including the optional-argument count
 * and each enabled option, in a fixed order so the table round-trips.
 * IMA: key=value measurement line terminated by ';'.
 */
static void verity_status(struct dm_target *ti, status_type_t type,
			  unsigned int status_flags, char *result, unsigned int maxlen)
{
	struct dm_verity *v = ti->private;
	unsigned int args = 0;
	unsigned int sz = 0;
	unsigned int x;

	switch (type) {
	case STATUSTYPE_INFO:
		DMEMIT("%c", v->hash_failed ? 'C' : 'V');
		if (verity_fec_is_enabled(v))
			DMEMIT(" %lld", atomic64_read(&v->fec->corrected));
		else
			DMEMIT(" -");
		break;
	case STATUSTYPE_TABLE:
		DMEMIT("%u %s %s %u %u %llu %llu %s ",
		       v->version,
		       v->data_dev->name,
		       v->hash_dev->name,
		       1 << v->data_dev_block_bits,
		       1 << v->hash_dev_block_bits,
		       (unsigned long long)v->data_blocks,
		       (unsigned long long)v->hash_start,
		       v->alg_name
		       );
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);
		DMEMIT(" ");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);
		/* Count optional args first; their emission order below
		 * must match this accounting. */
		if (v->mode != DM_VERITY_MODE_EIO)
			args++;
		if (v->error_mode != DM_VERITY_MODE_EIO)
			args++;
		if (verity_fec_is_enabled(v))
			args += DM_VERITY_OPTS_FEC;
		if (v->zero_digest)
			args++;
		if (v->validated_blocks)
			args++;
		if (v->use_bh_wq)
			args++;
		if (v->signature_key_desc)
			args += DM_VERITY_ROOT_HASH_VERIFICATION_OPTS;
		if (!args)
			return;
		DMEMIT(" %u", args);
		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_PANIC);
				break;
			default:
				BUG();
			}
		}
		if (v->error_mode != DM_VERITY_MODE_EIO) {
			DMEMIT(" ");
			switch (v->error_mode) {
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
				break;
			default:
				BUG();
			}
		}
		if (v->zero_digest)
			DMEMIT(" " DM_VERITY_OPT_IGN_ZEROES);
		if (v->validated_blocks)
			DMEMIT(" " DM_VERITY_OPT_AT_MOST_ONCE);
		if (v->use_bh_wq)
			DMEMIT(" " DM_VERITY_OPT_TASKLET_VERIFY);
		sz = verity_fec_status_table(v, sz, result, maxlen);
		if (v->signature_key_desc)
			DMEMIT(" " DM_VERITY_ROOT_HASH_VERIFICATION_OPT_SIG_KEY
			       " %s", v->signature_key_desc);
		break;

	case STATUSTYPE_IMA:
		DMEMIT_TARGET_NAME_VERSION(ti->type);
		DMEMIT(",hash_failed=%c", v->hash_failed ? 'C' : 'V');
		DMEMIT(",verity_version=%u", v->version);
		DMEMIT(",data_device_name=%s", v->data_dev->name);
		DMEMIT(",hash_device_name=%s", v->hash_dev->name);
		DMEMIT(",verity_algorithm=%s", v->alg_name);

		DMEMIT(",root_digest=");
		for (x = 0; x < v->digest_size; x++)
			DMEMIT("%02x", v->root_digest[x]);

		DMEMIT(",salt=");
		if (!v->salt_size)
			DMEMIT("-");
		else
			for (x = 0; x < v->salt_size; x++)
				DMEMIT("%02x", v->salt[x]);

		DMEMIT(",ignore_zero_blocks=%c", v->zero_digest ? 'y' : 'n');
		DMEMIT(",check_at_most_once=%c", v->validated_blocks ? 'y' : 'n');
		if (v->signature_key_desc)
			DMEMIT(",root_hash_sig_key_desc=%s", v->signature_key_desc);

		if (v->mode != DM_VERITY_MODE_EIO) {
			DMEMIT(",verity_mode=");
			switch (v->mode) {
			case DM_VERITY_MODE_LOGGING:
				DMEMIT(DM_VERITY_OPT_LOGGING);
				break;
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_PANIC);
				break;
			default:
				DMEMIT("invalid");
			}
		}
		if (v->error_mode != DM_VERITY_MODE_EIO) {
			DMEMIT(",verity_error_mode=");
			switch (v->error_mode) {
			case DM_VERITY_MODE_RESTART:
				DMEMIT(DM_VERITY_OPT_ERROR_RESTART);
				break;
			case DM_VERITY_MODE_PANIC:
				DMEMIT(DM_VERITY_OPT_ERROR_PANIC);
				break;
			default:
				DMEMIT("invalid");
			}
		}
		DMEMIT(";");
		break;
	}
}
988
/*
 * Report the block device ioctls should be forwarded to.  Returns 1 when
 * the target does not cover the whole data device (caller must then not
 * pass the ioctl straight through), 0 otherwise.
 */
static int verity_prepare_ioctl(struct dm_target *ti, struct block_device **bdev,
				unsigned int cmd, unsigned long arg,
				bool *forward)
{
	struct dm_verity *v = ti->private;

	*bdev = v->data_dev->bdev;

	if (ti->len != bdev_nr_sectors(v->data_dev->bdev))
		return 1;
	return 0;
}
1001
/* Invoke @fn on the single underlying data device (the hash device is not
 * part of the mapped data area). */
static int verity_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_verity *v = ti->private;

	return fn(ti, v->data_dev, 0, ti->len, data);
}
1009
/*
 * Adjust queue limits: stack the data block size as the logical block size
 * and tighten dma_alignment so data blocks never straddle pages.
 */
static void verity_io_hints(struct dm_target *ti, struct queue_limits *limits)
{
	struct dm_verity *v = ti->private;

	dm_stack_bs_limits(limits, 1 << v->data_dev_block_bits);

	/*
	 * Similar to what dm-crypt does, opt dm-verity out of support for
	 * direct I/O that is aligned to less than the traditional direct I/O
	 * alignment requirement of logical_block_size. This prevents dm-verity
	 * data blocks from crossing pages, eliminating various edge cases.
	 */
	limits->dma_alignment = limits->logical_block_size - 1;
}
1024
1025 #ifdef CONFIG_SECURITY
1026
/*
 * Record the root-hash signature: remember its size and, when a signature
 * blob is supplied, keep a private copy in v->root_digest_sig.
 * Returns 0 on success or -ENOMEM if the copy cannot be allocated.
 */
static int verity_init_sig(struct dm_verity *v, const void *sig,
			   size_t sig_size)
{
	v->sig_size = sig_size;

	if (!sig)
		return 0;

	v->root_digest_sig = kmemdup(sig, v->sig_size, GFP_KERNEL);
	return v->root_digest_sig ? 0 : -ENOMEM;
}
1040
/* Free the copied root-hash signature (kfree(NULL) is a no-op). */
static void verity_free_sig(struct dm_verity *v)
{
	kfree(v->root_digest_sig);
}
1045
1046 #else
1047
/* !CONFIG_SECURITY stub: no LSM integration, nothing to store. */
static inline int verity_init_sig(struct dm_verity *v, const void *sig,
				  size_t sig_size)
{
	return 0;
}
1053
/* !CONFIG_SECURITY stub: nothing was allocated, nothing to free. */
static inline void verity_free_sig(struct dm_verity *v)
{
}
1057
1058 #endif /* CONFIG_SECURITY */
1059
/*
 * Tear down a verity target.  Also serves as the error path of
 * verity_ctr(), so every member may legitimately still be NULL/unset;
 * each release is guarded accordingly.
 */
static void verity_dtr(struct dm_target *ti)
{
	struct dm_verity *v = ti->private;

	/* Flush and stop outstanding verification work first. */
	if (v->verify_wq)
		destroy_workqueue(v->verify_wq);

	mempool_exit(&v->recheck_pool);
	if (v->io)
		dm_io_client_destroy(v->io);

	if (v->bufio)
		dm_bufio_client_destroy(v->bufio);

	kvfree(v->validated_blocks);
	kfree(v->salt);
	kfree(v->initial_hashstate.shash);
	kfree(v->root_digest);
	kfree(v->zero_digest);
	verity_free_sig(v);

	crypto_free_shash(v->shash_tfm);

	kfree(v->alg_name);

	if (v->hash_dev)
		dm_put_device(ti, v->hash_dev);

	if (v->data_dev)
		dm_put_device(ti, v->data_dev);

	verity_fec_dtr(v);

	kfree(v->signature_key_desc);

	/* Balance the static_branch_inc() done when the option was parsed. */
	if (v->use_bh_wq)
		static_branch_dec(&use_bh_wq_enabled);

	kfree(v);

	dm_audit_log_dtr(DM_MSG_PREFIX, ti, 1);
}
1102
verity_alloc_most_once(struct dm_verity * v)1103 static int verity_alloc_most_once(struct dm_verity *v)
1104 {
1105 struct dm_target *ti = v->ti;
1106
1107 if (v->validated_blocks)
1108 return 0;
1109
1110 /* the bitset can only handle INT_MAX blocks */
1111 if (v->data_blocks > INT_MAX) {
1112 ti->error = "device too large to use check_at_most_once";
1113 return -E2BIG;
1114 }
1115
1116 v->validated_blocks = kvcalloc(BITS_TO_LONGS(v->data_blocks),
1117 sizeof(unsigned long),
1118 GFP_KERNEL);
1119 if (!v->validated_blocks) {
1120 ti->error = "failed to allocate bitset for check_at_most_once";
1121 return -ENOMEM;
1122 }
1123
1124 return 0;
1125 }
1126
/*
 * Precompute the digest of an all-zeroes data block, used by the
 * "ignore_zero_blocks" option.  A temporary per-I/O context is allocated
 * because verity_hash() requires one.  Idempotent across repeated options.
 */
static int verity_alloc_zero_digest(struct dm_verity *v)
{
	int r = -ENOMEM;
	struct dm_verity_io *io;
	u8 *zero_data;

	if (v->zero_digest)
		return 0;

	v->zero_digest = kmalloc(v->digest_size, GFP_KERNEL);

	if (!v->zero_digest)
		return r;

	io = kmalloc(v->ti->per_io_data_size, GFP_KERNEL);

	if (!io)
		return r; /* verity_dtr will free zero_digest */

	zero_data = kzalloc(1 << v->data_dev_block_bits, GFP_KERNEL);

	if (!zero_data)
		goto out;

	r = verity_hash(v, io, zero_data, 1 << v->data_dev_block_bits,
			v->zero_digest);

out:
	kfree(io);
	kfree(zero_data);

	return r;
}
1160
verity_is_verity_mode(const char * arg_name)1161 static inline bool verity_is_verity_mode(const char *arg_name)
1162 {
1163 return (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING) ||
1164 !strcasecmp(arg_name, DM_VERITY_OPT_RESTART) ||
1165 !strcasecmp(arg_name, DM_VERITY_OPT_PANIC));
1166 }
1167
verity_parse_verity_mode(struct dm_verity * v,const char * arg_name)1168 static int verity_parse_verity_mode(struct dm_verity *v, const char *arg_name)
1169 {
1170 if (v->mode)
1171 return -EINVAL;
1172
1173 if (!strcasecmp(arg_name, DM_VERITY_OPT_LOGGING))
1174 v->mode = DM_VERITY_MODE_LOGGING;
1175 else if (!strcasecmp(arg_name, DM_VERITY_OPT_RESTART))
1176 v->mode = DM_VERITY_MODE_RESTART;
1177 else if (!strcasecmp(arg_name, DM_VERITY_OPT_PANIC))
1178 v->mode = DM_VERITY_MODE_PANIC;
1179
1180 return 0;
1181 }
1182
verity_is_verity_error_mode(const char * arg_name)1183 static inline bool verity_is_verity_error_mode(const char *arg_name)
1184 {
1185 return (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART) ||
1186 !strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC));
1187 }
1188
verity_parse_verity_error_mode(struct dm_verity * v,const char * arg_name)1189 static int verity_parse_verity_error_mode(struct dm_verity *v, const char *arg_name)
1190 {
1191 if (v->error_mode)
1192 return -EINVAL;
1193
1194 if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_RESTART))
1195 v->error_mode = DM_VERITY_MODE_RESTART;
1196 else if (!strcasecmp(arg_name, DM_VERITY_OPT_ERROR_PANIC))
1197 v->error_mode = DM_VERITY_MODE_PANIC;
1198
1199 return 0;
1200 }
1201
/*
 * Parse the optional feature arguments of the table line.
 *
 * Called twice from verity_ctr(): first with @only_modifier_opts=true to
 * pick up options that modify how the primary args are handled (currently
 * "try_verify_in_tasklet"), then again with false for the full parse.
 * Any option handled in both passes must therefore be idempotent.
 *
 * Returns 0 on success or a negative errno with ti->error set.
 */
static int verity_parse_opt_args(struct dm_arg_set *as, struct dm_verity *v,
				 struct dm_verity_sig_opts *verify_args,
				 bool only_modifier_opts)
{
	int r = 0;
	unsigned int argc;
	struct dm_target *ti = v->ti;
	const char *arg_name;

	static const struct dm_arg _args[] = {
		{0, DM_VERITY_OPTS_MAX, "Invalid number of feature args"},
	};

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	if (!argc)
		return 0;

	do {
		arg_name = dm_shift_arg(as);
		argc--;

		if (verity_is_verity_mode(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_parse_verity_mode(v, arg_name);
			if (r) {
				ti->error = "Conflicting error handling parameters";
				return r;
			}
			continue;

		} else if (verity_is_verity_error_mode(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_parse_verity_error_mode(v, arg_name);
			if (r) {
				ti->error = "Conflicting error handling parameters";
				return r;
			}
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_IGN_ZEROES)) {
			if (only_modifier_opts)
				continue;
			r = verity_alloc_zero_digest(v);
			if (r) {
				ti->error = "Cannot allocate zero digest";
				return r;
			}
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_AT_MOST_ONCE)) {
			if (only_modifier_opts)
				continue;
			r = verity_alloc_most_once(v);
			if (r)
				return r;
			continue;

		} else if (!strcasecmp(arg_name, DM_VERITY_OPT_TASKLET_VERIFY)) {
			/*
			 * Handled in both passes; guard the static branch so
			 * a table line with extra args doesn't increment it
			 * twice -- verity_dtr() decrements it only once.
			 */
			if (!v->use_bh_wq) {
				v->use_bh_wq = true;
				static_branch_inc(&use_bh_wq_enabled);
			}
			continue;

		} else if (verity_is_fec_opt_arg(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_fec_parse_opt_args(as, v, &argc, arg_name);
			if (r)
				return r;
			continue;

		} else if (verity_verify_is_sig_opt_arg(arg_name)) {
			if (only_modifier_opts)
				continue;
			r = verity_verify_sig_parse_opt_args(as, v,
							     verify_args,
							     &argc, arg_name);
			if (r)
				return r;
			continue;

		} else if (only_modifier_opts) {
			/*
			 * Ignore unrecognized opt, could easily be an extra
			 * argument to an option whose parsing was skipped.
			 * Normal parsing (@only_modifier_opts=false) will
			 * properly parse all options (and their extra args).
			 */
			continue;
		}

		DMERR("Unrecognized verity feature request: %s", arg_name);
		ti->error = "Unrecognized verity feature request";
		return -EINVAL;
	} while (argc && !r);

	return r;
}
1304
/*
 * Look up the hash algorithm named on the table line and size the per-I/O
 * data accordingly.  sha256 with format version >= 1 takes a fast path via
 * the SHA-256 library API; everything else uses the generic crypto_shash
 * API.  The shash tfm is kept in both cases (e.g. verity_preresume() reads
 * its name via crypto_shash_alg_name()).
 */
static int verity_setup_hash_alg(struct dm_verity *v, const char *alg_name)
{
	struct dm_target *ti = v->ti;
	struct crypto_shash *shash;

	v->alg_name = kstrdup(alg_name, GFP_KERNEL);
	if (!v->alg_name) {
		ti->error = "Cannot allocate algorithm name";
		return -ENOMEM;
	}

	shash = crypto_alloc_shash(alg_name, 0, 0);
	if (IS_ERR(shash)) {
		ti->error = "Cannot initialize hash function";
		return PTR_ERR(shash);
	}
	v->shash_tfm = shash;
	v->digest_size = crypto_shash_digestsize(shash);
	/* A hash block must hold at least two digests to build a tree. */
	if ((1 << v->hash_dev_block_bits) < v->digest_size * 2) {
		ti->error = "Digest size too big";
		return -EINVAL;
	}
	if (likely(v->version && strcmp(alg_name, "sha256") == 0)) {
		/*
		 * Fast path: use the library API for reduced overhead and
		 * interleaved hashing support.
		 */
		v->use_sha256_lib = true;
		if (sha256_finup_2x_is_optimized())
			v->use_sha256_finup_2x = true;
		ti->per_io_data_size =
			offsetofend(struct dm_verity_io, hash_ctx.sha256);
	} else {
		/* Fallback case: use the generic crypto API. */
		ti->per_io_data_size =
			offsetofend(struct dm_verity_io, hash_ctx.shash) +
			crypto_shash_descsize(shash);
	}
	return 0;
}
1345
/*
 * Parse the salt argument (hex string, or "-" for no salt) and, for format
 * version 1 (salt hashed before the data block), precompute the salted
 * initial hash state so it doesn't have to be recomputed per block.
 */
static int verity_setup_salt_and_hashstate(struct dm_verity *v, const char *arg)
{
	struct dm_target *ti = v->ti;

	if (strcmp(arg, "-") != 0) {
		v->salt_size = strlen(arg) / 2;
		v->salt = kmalloc(v->salt_size, GFP_KERNEL);
		if (!v->salt) {
			ti->error = "Cannot allocate salt";
			return -ENOMEM;
		}
		/* Reject odd-length or non-hex salt strings. */
		if (strlen(arg) != v->salt_size * 2 ||
		    hex2bin(v->salt, arg, v->salt_size)) {
			ti->error = "Invalid salt";
			return -EINVAL;
		}
	}
	if (likely(v->use_sha256_lib)) {
		/* Implies version 1: salt at beginning */
		v->initial_hashstate.sha256 =
			kmalloc_obj(struct sha256_ctx);
		if (!v->initial_hashstate.sha256) {
			ti->error = "Cannot allocate initial hash state";
			return -ENOMEM;
		}
		sha256_init(v->initial_hashstate.sha256);
		sha256_update(v->initial_hashstate.sha256,
			      v->salt, v->salt_size);
	} else if (v->version) { /* Version 1: salt at beginning */
		SHASH_DESC_ON_STACK(desc, v->shash_tfm);
		int r;

		/*
		 * Compute the pre-salted hash state that can be passed to
		 * crypto_shash_import() for each block later.
		 */
		v->initial_hashstate.shash = kmalloc(
			crypto_shash_statesize(v->shash_tfm), GFP_KERNEL);
		if (!v->initial_hashstate.shash) {
			ti->error = "Cannot allocate initial hash state";
			return -ENOMEM;
		}
		desc->tfm = v->shash_tfm;
		r = crypto_shash_init(desc) ?:
		    crypto_shash_update(desc, v->salt, v->salt_size) ?:
		    crypto_shash_export(desc, v->initial_hashstate.shash);
		if (r) {
			ti->error = "Cannot set up initial hash state";
			return r;
		}
	}
	return 0;
}
1399
1400 /*
1401 * Target parameters:
1402 * <version> The current format is version 1.
1403 * Vsn 0 is compatible with original Chromium OS releases.
1404 * <data device>
1405 * <hash device>
1406 * <data block size>
1407 * <hash block size>
1408 * <the number of data blocks>
1409 * <hash start block>
1410 * <algorithm>
1411 * <digest>
1412 * <salt> Hex string or "-" if no salt.
1413 */
/*
 * Construct a verity target from the table line documented above.
 * On any failure after ti->private is set, "goto bad" runs verity_dtr()
 * to release whatever was set up so far.
 */
static int verity_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_verity *v;
	struct dm_verity_sig_opts verify_args = {0};
	struct dm_arg_set as;
	unsigned int num;
	unsigned long long num_ll;
	int r;
	int i;
	sector_t hash_position;
	char dummy;
	char *root_hash_digest_to_validate;

	v = kzalloc_obj(struct dm_verity);
	if (!v) {
		ti->error = "Cannot allocate verity structure";
		return -ENOMEM;
	}
	ti->private = v;
	v->ti = ti;

	r = verity_fec_ctr_alloc(v);
	if (r)
		goto bad;

	/* verity is integrity-protected and therefore read-only. */
	if ((dm_table_get_mode(ti->table) & ~BLK_OPEN_READ)) {
		ti->error = "Device must be readonly";
		r = -EINVAL;
		goto bad;
	}

	if (argc < 10) {
		ti->error = "Not enough arguments";
		r = -EINVAL;
		goto bad;
	}

	/* Parse optional parameters that modify primary args */
	if (argc > 10) {
		as.argc = argc - 10;
		as.argv = argv + 10;
		r = verity_parse_opt_args(&as, v, &verify_args, true);
		if (r < 0)
			goto bad;
	}

	/* argv[0]: on-disk format version, 0 or 1. */
	if (sscanf(argv[0], "%u%c", &num, &dummy) != 1 ||
	    num > 1) {
		ti->error = "Invalid version";
		r = -EINVAL;
		goto bad;
	}
	v->version = num;

	r = dm_get_device(ti, argv[1], BLK_OPEN_READ, &v->data_dev);
	if (r) {
		ti->error = "Data device lookup failed";
		goto bad;
	}

	r = dm_get_device(ti, argv[2], BLK_OPEN_READ, &v->hash_dev);
	if (r) {
		ti->error = "Hash device lookup failed";
		goto bad;
	}

	/* argv[3]: data block size - power of two within [logical bs, PAGE_SIZE]. */
	if (sscanf(argv[3], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->data_dev->bdev) ||
	    num > PAGE_SIZE) {
		ti->error = "Invalid data device block size";
		r = -EINVAL;
		goto bad;
	}
	v->data_dev_block_bits = __ffs(num);

	/* argv[4]: hash block size - same constraints, different upper bound. */
	if (sscanf(argv[4], "%u%c", &num, &dummy) != 1 ||
	    !num || (num & (num - 1)) ||
	    num < bdev_logical_block_size(v->hash_dev->bdev) ||
	    num > INT_MAX) {
		ti->error = "Invalid hash device block size";
		r = -EINVAL;
		goto bad;
	}
	v->hash_dev_block_bits = __ffs(num);

	/* argv[5]: data block count; the shift round-trip rejects overflow. */
	if (sscanf(argv[5], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->data_dev_block_bits - SECTOR_SHIFT))
	    >> (v->data_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid data blocks";
		r = -EINVAL;
		goto bad;
	}
	v->data_blocks = num_ll;

	if (ti->len > (v->data_blocks << (v->data_dev_block_bits - SECTOR_SHIFT))) {
		ti->error = "Data device is too small";
		r = -EINVAL;
		goto bad;
	}

	/* argv[6]: first hash block on the hash device. */
	if (sscanf(argv[6], "%llu%c", &num_ll, &dummy) != 1 ||
	    (sector_t)(num_ll << (v->hash_dev_block_bits - SECTOR_SHIFT))
	    >> (v->hash_dev_block_bits - SECTOR_SHIFT) != num_ll) {
		ti->error = "Invalid hash start";
		r = -EINVAL;
		goto bad;
	}
	v->hash_start = num_ll;

	/* argv[7]: hash algorithm name; also sets digest_size. */
	r = verity_setup_hash_alg(v, argv[7]);
	if (r)
		goto bad;

	/* argv[8]: hex-encoded root digest of the hash tree. */
	v->root_digest = kmalloc(v->digest_size, GFP_KERNEL);
	if (!v->root_digest) {
		ti->error = "Cannot allocate root digest";
		r = -ENOMEM;
		goto bad;
	}
	if (strlen(argv[8]) != v->digest_size * 2 ||
	    hex2bin(v->root_digest, argv[8], v->digest_size)) {
		ti->error = "Invalid root digest";
		r = -EINVAL;
		goto bad;
	}
	root_hash_digest_to_validate = argv[8];

	/* argv[9]: hex salt, or "-" for none. */
	r = verity_setup_salt_and_hashstate(v, argv[9]);
	if (r)
		goto bad;

	argv += 10;
	argc -= 10;

	/* Optional parameters */
	if (argc) {
		as.argc = argc;
		as.argv = argv;
		r = verity_parse_opt_args(&as, v, &verify_args, false);
		if (r < 0)
			goto bad;
	}

	/* Root hash signature is an optional parameter */
	r = verity_verify_root_hash(root_hash_digest_to_validate,
				    strlen(root_hash_digest_to_validate),
				    verify_args.sig,
				    verify_args.sig_size);
	if (r < 0) {
		ti->error = "Root hash verification failed";
		goto bad;
	}

	r = verity_init_sig(v, verify_args.sig, verify_args.sig_size);
	if (r < 0) {
		ti->error = "Cannot allocate root digest signature";
		goto bad;
	}

	/* Digests per hash block, as a power of two. */
	v->hash_per_block_bits =
		__fls((1 << v->hash_dev_block_bits) / v->digest_size);

	/* Number of tree levels needed to cover all data blocks. */
	v->levels = 0;
	if (v->data_blocks)
		while (v->hash_per_block_bits * v->levels < 64 &&
		       (unsigned long long)(v->data_blocks - 1) >>
		       (v->hash_per_block_bits * v->levels))
			v->levels++;

	if (v->levels > DM_VERITY_MAX_LEVELS) {
		ti->error = "Too many tree levels";
		r = -E2BIG;
		goto bad;
	}

	/*
	 * Lay the levels out on the hash device, top level first, checking
	 * for block-offset overflow as each level is placed.
	 */
	hash_position = v->hash_start;
	for (i = v->levels - 1; i >= 0; i--) {
		sector_t s;

		v->hash_level_block[i] = hash_position;
		s = (v->data_blocks + ((sector_t)1 << ((i + 1) * v->hash_per_block_bits)) - 1)
			>> ((i + 1) * v->hash_per_block_bits);
		if (hash_position + s < hash_position) {
			ti->error = "Hash device offset overflow";
			r = -E2BIG;
			goto bad;
		}
		hash_position += s;
	}
	v->hash_end = hash_position;

	r = mempool_init_page_pool(&v->recheck_pool, 1, 0);
	if (unlikely(r)) {
		ti->error = "Cannot allocate mempool";
		goto bad;
	}

	v->io = dm_io_client_create();
	if (IS_ERR(v->io)) {
		r = PTR_ERR(v->io);
		v->io = NULL; /* so verity_dtr() skips the destroy */
		ti->error = "Cannot allocate dm io";
		goto bad;
	}

	v->bufio = dm_bufio_client_create(v->hash_dev->bdev,
		1 << v->hash_dev_block_bits, 1, sizeof(struct buffer_aux),
		dm_bufio_alloc_callback, NULL,
		v->use_bh_wq ? DM_BUFIO_CLIENT_NO_SLEEP : 0);
	if (IS_ERR(v->bufio)) {
		ti->error = "Cannot initialize dm-bufio";
		r = PTR_ERR(v->bufio);
		v->bufio = NULL; /* so verity_dtr() skips the destroy */
		goto bad;
	}

	if (dm_bufio_get_device_size(v->bufio) < v->hash_end) {
		ti->error = "Hash device is too small";
		r = -E2BIG;
		goto bad;
	}

	/*
	 * Using WQ_HIGHPRI improves throughput and completion latency by
	 * reducing wait times when reading from a dm-verity device.
	 *
	 * Also as required for the "try_verify_in_tasklet" feature: WQ_HIGHPRI
	 * allows verify_wq to preempt softirq since verification in softirq
	 * will fall-back to using it for error handling (or if the bufio cache
	 * doesn't have required hashes).
	 */
	v->verify_wq = alloc_workqueue("kverityd",
				       WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_PERCPU,
				       0);
	if (!v->verify_wq) {
		ti->error = "Cannot allocate workqueue";
		r = -ENOMEM;
		goto bad;
	}

	r = verity_fec_ctr(v);
	if (r)
		goto bad;

	ti->per_io_data_size = roundup(ti->per_io_data_size,
				       __alignof__(struct dm_verity_io));

	verity_verify_sig_opts_cleanup(&verify_args);

	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 1);

	return 0;

bad:

	verity_verify_sig_opts_cleanup(&verify_args);
	dm_audit_log_ctr(DM_MSG_PREFIX, ti, 0);
	verity_dtr(ti);

	return r;
}
1676
1677 /*
1678 * Get the verity mode (error behavior) of a verity target.
1679 *
1680 * Returns the verity mode of the target, or -EINVAL if 'ti' is not a verity
1681 * target.
1682 */
dm_verity_get_mode(struct dm_target * ti)1683 int dm_verity_get_mode(struct dm_target *ti)
1684 {
1685 struct dm_verity *v = ti->private;
1686
1687 if (!dm_is_verity_target(ti))
1688 return -EINVAL;
1689
1690 return v->mode;
1691 }
1692
1693 /*
1694 * Get the root digest of a verity target.
1695 *
1696 * Returns a copy of the root digest, the caller is responsible for
1697 * freeing the memory of the digest.
1698 */
int dm_verity_get_root_digest(struct dm_target *ti, u8 **root_digest, unsigned int *digest_size)
{
	struct dm_verity *v;

	/* Reject targets of any other type. */
	if (!dm_is_verity_target(ti))
		return -EINVAL;

	/* Caller owns (and must kfree) the returned copy. */
	v = ti->private;
	*root_digest = kmemdup(v->root_digest, v->digest_size, GFP_KERNEL);
	if (!*root_digest)
		return -ENOMEM;

	*digest_size = v->digest_size;
	return 0;
}
1714
1715 #ifdef CONFIG_SECURITY
1716
1717 #ifdef CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG
1718
/* Report the root-hash signature (possibly NULL) to the LSM layer for @bdev. */
static int verity_security_set_signature(struct block_device *bdev,
					 struct dm_verity *v)
{
	/*
	 * if the dm-verity target is unsigned, v->root_digest_sig will
	 * be NULL, and the hook call is still required to let LSMs mark
	 * the device as unsigned. This information is crucial for LSMs to
	 * block operations such as execution on unsigned files
	 */
	return security_bdev_setintegrity(bdev,
					  LSM_INT_DMVERITY_SIG_VALID,
					  v->root_digest_sig,
					  v->sig_size);
}
1733
1734 #else
1735
/* !CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG stub: no signature to report. */
static inline int verity_security_set_signature(struct block_device *bdev,
						struct dm_verity *v)
{
	return 0;
}
1741
1742 #endif /* CONFIG_DM_VERITY_VERIFY_ROOTHASH_SIG */
1743
1744 /*
1745 * Expose verity target's root hash and signature data to LSMs before resume.
1746 *
1747 * Returns 0 on success, or -ENOMEM if the system is out of memory.
1748 */
verity_preresume(struct dm_target * ti)1749 static int verity_preresume(struct dm_target *ti)
1750 {
1751 struct block_device *bdev;
1752 struct dm_verity_digest root_digest;
1753 struct dm_verity *v;
1754 int r;
1755
1756 v = ti->private;
1757 bdev = dm_disk(dm_table_get_md(ti->table))->part0;
1758 root_digest.digest = v->root_digest;
1759 root_digest.digest_len = v->digest_size;
1760 root_digest.alg = crypto_shash_alg_name(v->shash_tfm);
1761
1762 r = security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, &root_digest,
1763 sizeof(root_digest));
1764 if (r)
1765 return r;
1766
1767 r = verity_security_set_signature(bdev, v);
1768 if (r)
1769 goto bad;
1770
1771 return 0;
1772
1773 bad:
1774
1775 security_bdev_setintegrity(bdev, LSM_INT_DMVERITY_ROOTHASH, NULL, 0);
1776
1777 return r;
1778 }
1779
1780 #endif /* CONFIG_SECURITY */
1781
/* Registration record for the "verity" device-mapper target. */
static struct target_type verity_target = {
	.name = "verity",
	/* Note: the LSMs depend on the singleton and immutable features */
	.features = DM_TARGET_SINGLETON | DM_TARGET_IMMUTABLE,
	.version = {1, 13, 0},
	.module = THIS_MODULE,
	.ctr = verity_ctr,
	.dtr = verity_dtr,
	.map = verity_map,
	.postsuspend = verity_postsuspend,
	.status = verity_status,
	.prepare_ioctl = verity_prepare_ioctl,
	.iterate_devices = verity_iterate_devices,
	.io_hints = verity_io_hints,
#ifdef CONFIG_SECURITY
	/* Registers root hash/signature with LSMs before resume. */
	.preresume = verity_preresume,
#endif /* CONFIG_SECURITY */
};
1800
dm_verity_init(void)1801 static int __init dm_verity_init(void)
1802 {
1803 int r;
1804
1805 r = dm_verity_verify_sig_init();
1806 if (r)
1807 return r;
1808
1809 r = dm_register_target(&verity_target);
1810 if (r) {
1811 dm_verity_verify_sig_exit();
1812 return r;
1813 }
1814
1815 return 0;
1816 }
1817 module_init(dm_verity_init);
1818
/* Module exit: unregister the target, then tear down sig verification. */
static void __exit dm_verity_exit(void)
{
	dm_unregister_target(&verity_target);
	dm_verity_verify_sig_exit();
}
1824 module_exit(dm_verity_exit);
1825
1826 /*
1827 * Check whether a DM target is a verity target.
1828 */
bool dm_is_verity_target(struct dm_target *ti)
{
	/* Identity check against our own target_type registration. */
	return ti->type == &verity_target;
}
1833
1834 MODULE_AUTHOR("Mikulas Patocka <mpatocka@redhat.com>");
1835 MODULE_AUTHOR("Mandeep Baines <msb@chromium.org>");
1836 MODULE_AUTHOR("Will Drewry <wad@chromium.org>");
1837 MODULE_DESCRIPTION(DM_NAME " target for transparent disk integrity checking");
1838 MODULE_LICENSE("GPL");
1839