1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
4 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
5 *
6 * This file is released under the GPL.
7 */
8
9 #include "dm-core.h"
10 #include "dm-rq.h"
11 #include "dm-uevent.h"
12 #include "dm-ima.h"
13
14 #include <linux/bio-integrity.h>
15 #include <linux/init.h>
16 #include <linux/module.h>
17 #include <linux/mutex.h>
18 #include <linux/sched/mm.h>
19 #include <linux/sched/signal.h>
20 #include <linux/blkpg.h>
21 #include <linux/bio.h>
22 #include <linux/mempool.h>
23 #include <linux/dax.h>
24 #include <linux/slab.h>
25 #include <linux/idr.h>
26 #include <linux/uio.h>
27 #include <linux/hdreg.h>
28 #include <linux/delay.h>
29 #include <linux/wait.h>
30 #include <linux/pr.h>
31 #include <linux/refcount.h>
32 #include <linux/part_stat.h>
33 #include <linux/blk-crypto.h>
34 #include <linux/blk-crypto-profile.h>
35
36 #define DM_MSG_PREFIX "core"
37
38 /*
39 * Cookies are numeric values sent with CHANGE and REMOVE
40 * uevents while resuming, removing or renaming the device.
41 */
42 #define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
43 #define DM_COOKIE_LENGTH 24
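/*
 * Illustrative note (an assumption about how DM uevent cookies are consumed,
 * not code from this file): the cookie reaches userspace as an environment
 * variable of the form "DM_COOKIE=<decimal value>", and DM_COOKIE_LENGTH is
 * sized to hold the variable name, '=', a 32-bit decimal value and the
 * terminating NUL.
 */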
44
45 /*
46 * For a REQ_POLLED fs bio, this flag is set if we link the mapped underlying
47 * dm_io into one list and reuse bio->bi_private as the list head. Before
48 * ending this fs bio, we will restore its ->bi_private.
49 */
50 #define REQ_DM_POLL_LIST REQ_DRV
51
52 static const char *_name = DM_NAME;
53
54 static unsigned int major;
55 static unsigned int _major;
56
57 static DEFINE_IDR(_minor_idr);
58
59 static DEFINE_SPINLOCK(_minor_lock);
60
61 static void do_deferred_remove(struct work_struct *w);
62
63 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
64
65 static struct workqueue_struct *deferred_remove_workqueue;
66
67 atomic_t dm_global_event_nr = ATOMIC_INIT(0);
68 DECLARE_WAIT_QUEUE_HEAD(dm_global_eventq);
69
70 void dm_issue_global_event(void)
71 {
72 atomic_inc(&dm_global_event_nr);
73 wake_up(&dm_global_eventq);
74 }
75
76 DEFINE_STATIC_KEY_FALSE(stats_enabled);
77 DEFINE_STATIC_KEY_FALSE(swap_bios_enabled);
78 DEFINE_STATIC_KEY_FALSE(zoned_enabled);
79
80 /*
81 * One of these is allocated (on-stack) per original bio.
82 */
83 struct clone_info {
84 struct dm_table *map;
85 struct bio *bio;
86 struct dm_io *io;
87 sector_t sector;
88 unsigned int sector_count;
89 bool is_abnormal_io:1;
90 bool submit_as_polled:1;
91 };
92
93 static inline struct dm_target_io *clone_to_tio(struct bio *clone)
94 {
95 return container_of(clone, struct dm_target_io, clone);
96 }
97
98 void *dm_per_bio_data(struct bio *bio, size_t data_size)
99 {
100 if (!dm_tio_flagged(clone_to_tio(bio), DM_TIO_INSIDE_DM_IO))
101 return (char *)bio - DM_TARGET_IO_BIO_OFFSET - data_size;
102 return (char *)bio - DM_IO_BIO_OFFSET - data_size;
103 }
104 EXPORT_SYMBOL_GPL(dm_per_bio_data);
105
106 struct bio *dm_bio_from_per_bio_data(void *data, size_t data_size)
107 {
108 struct dm_io *io = (struct dm_io *)((char *)data + data_size);
109
110 if (io->magic == DM_IO_MAGIC)
111 return (struct bio *)((char *)io + DM_IO_BIO_OFFSET);
112 BUG_ON(io->magic != DM_TIO_MAGIC);
113 return (struct bio *)((char *)io + DM_TARGET_IO_BIO_OFFSET);
114 }
115 EXPORT_SYMBOL_GPL(dm_bio_from_per_bio_data);
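/*
 * Illustrative usage sketch (assumed target code, not part of this file): a
 * bio-based target that set ti->per_io_data_size in its constructor can map
 * between its per-bio state and the clone bio with these two inverse helpers:
 *
 *	struct my_per_bio_data *pb;
 *
 *	pb = dm_per_bio_data(bio, sizeof(*pb));
 *	...
 *	bio = dm_bio_from_per_bio_data(pb, sizeof(*pb));
 *
 * The per-bio data sits directly in front of the dm_io or dm_target_io that
 * embeds the clone bio, which is why both helpers are simple pointer
 * arithmetic.
 */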
116
117 unsigned int dm_bio_get_target_bio_nr(const struct bio *bio)
118 {
119 return container_of(bio, struct dm_target_io, clone)->target_bio_nr;
120 }
121 EXPORT_SYMBOL_GPL(dm_bio_get_target_bio_nr);
122
123 #define MINOR_ALLOCED ((void *)-1)
124
125 #define DM_NUMA_NODE NUMA_NO_NODE
126 static int dm_numa_node = DM_NUMA_NODE;
127
128 #define DEFAULT_SWAP_BIOS (8 * 1048576 / PAGE_SIZE)
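/* For example, with 4 KiB pages DEFAULT_SWAP_BIOS evaluates to 8 MiB / 4 KiB = 2048. */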
129 static int swap_bios = DEFAULT_SWAP_BIOS;
130 static int get_swap_bios(void)
131 {
132 int latch = READ_ONCE(swap_bios);
133
134 if (unlikely(latch <= 0))
135 latch = DEFAULT_SWAP_BIOS;
136 return latch;
137 }
138
139 struct table_device {
140 struct list_head list;
141 refcount_t count;
142 struct dm_dev dm_dev;
143 };
144
145 /*
146 * Bio-based DM's mempools' reserved IOs set by the user.
147 */
148 #define RESERVED_BIO_BASED_IOS 16
149 static unsigned int reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
150
151 static int __dm_get_module_param_int(int *module_param, int min, int max)
152 {
153 int param = READ_ONCE(*module_param);
154 int modified_param = 0;
155 bool modified = true;
156
157 if (param < min)
158 modified_param = min;
159 else if (param > max)
160 modified_param = max;
161 else
162 modified = false;
163
164 if (modified) {
165 (void)cmpxchg(module_param, param, modified_param);
166 param = modified_param;
167 }
168
169 return param;
170 }
171
172 unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max)
173 {
174 unsigned int param = READ_ONCE(*module_param);
175 unsigned int modified_param = 0;
176
177 if (!param)
178 modified_param = def;
179 else if (param > max)
180 modified_param = max;
181
182 if (modified_param) {
183 (void)cmpxchg(module_param, param, modified_param);
184 param = modified_param;
185 }
186
187 return param;
188 }
189
190 unsigned int dm_get_reserved_bio_based_ios(void)
191 {
192 return __dm_get_module_param(&reserved_bio_based_ios,
193 RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
194 }
195 EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
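/*
 * Worked example of the clamping above: if a user sets
 * dm_mod.reserved_bio_based_ios=0, the helper substitutes the default (16);
 * a value above DM_RESERVED_MAX_IOS is capped to that maximum. The corrected
 * value is written back with cmpxchg() so later readers observe it too.
 */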
196
197 static unsigned int dm_get_numa_node(void)
198 {
199 return __dm_get_module_param_int(&dm_numa_node,
200 DM_NUMA_NODE, num_online_nodes() - 1);
201 }
202
203 static int __init local_init(void)
204 {
205 int r;
206
207 r = dm_uevent_init();
208 if (r)
209 return r;
210
211 deferred_remove_workqueue = alloc_ordered_workqueue("kdmremove", 0);
212 if (!deferred_remove_workqueue) {
213 r = -ENOMEM;
214 goto out_uevent_exit;
215 }
216
217 _major = major;
218 r = register_blkdev(_major, _name);
219 if (r < 0)
220 goto out_free_workqueue;
221
222 if (!_major)
223 _major = r;
224
225 return 0;
226
227 out_free_workqueue:
228 destroy_workqueue(deferred_remove_workqueue);
229 out_uevent_exit:
230 dm_uevent_exit();
231
232 return r;
233 }
234
235 static void local_exit(void)
236 {
237 destroy_workqueue(deferred_remove_workqueue);
238
239 unregister_blkdev(_major, _name);
240 dm_uevent_exit();
241
242 _major = 0;
243
244 DMINFO("cleaned up");
245 }
246
247 static int (*_inits[])(void) __initdata = {
248 local_init,
249 dm_target_init,
250 dm_linear_init,
251 dm_stripe_init,
252 dm_io_init,
253 dm_kcopyd_init,
254 dm_interface_init,
255 dm_statistics_init,
256 };
257
258 static void (*_exits[])(void) = {
259 local_exit,
260 dm_target_exit,
261 dm_linear_exit,
262 dm_stripe_exit,
263 dm_io_exit,
264 dm_kcopyd_exit,
265 dm_interface_exit,
266 dm_statistics_exit,
267 };
268
269 static int __init dm_init(void)
270 {
271 const int count = ARRAY_SIZE(_inits);
272 int r, i;
273
274 #if (IS_ENABLED(CONFIG_IMA) && !IS_ENABLED(CONFIG_IMA_DISABLE_HTABLE))
275 DMINFO("CONFIG_IMA_DISABLE_HTABLE is disabled."
276 " Duplicate IMA measurements will not be recorded in the IMA log.");
277 #endif
278
279 for (i = 0; i < count; i++) {
280 r = _inits[i]();
281 if (r)
282 goto bad;
283 }
284
285 return 0;
286 bad:
287 while (i--)
288 _exits[i]();
289
290 return r;
291 }
292
293 static void __exit dm_exit(void)
294 {
295 int i = ARRAY_SIZE(_exits);
296
297 while (i--)
298 _exits[i]();
299
300 /*
301 * Should be empty by this point.
302 */
303 idr_destroy(&_minor_idr);
304 }
305
306 /*
307 * Block device functions
308 */
309 int dm_deleting_md(struct mapped_device *md)
310 {
311 return test_bit(DMF_DELETING, &md->flags);
312 }
313
314 static int dm_blk_open(struct gendisk *disk, blk_mode_t mode)
315 {
316 struct mapped_device *md;
317
318 spin_lock(&_minor_lock);
319
320 md = disk->private_data;
321 if (!md)
322 goto out;
323
324 if (test_bit(DMF_FREEING, &md->flags) ||
325 dm_deleting_md(md)) {
326 md = NULL;
327 goto out;
328 }
329
330 dm_get(md);
331 atomic_inc(&md->open_count);
332 out:
333 spin_unlock(&_minor_lock);
334
335 return md ? 0 : -ENXIO;
336 }
337
338 static void dm_blk_close(struct gendisk *disk)
339 {
340 struct mapped_device *md;
341
342 spin_lock(&_minor_lock);
343
344 md = disk->private_data;
345 if (WARN_ON(!md))
346 goto out;
347
348 if (atomic_dec_and_test(&md->open_count) &&
349 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
350 queue_work(deferred_remove_workqueue, &deferred_remove_work);
351
352 dm_put(md);
353 out:
354 spin_unlock(&_minor_lock);
355 }
356
357 int dm_open_count(struct mapped_device *md)
358 {
359 return atomic_read(&md->open_count);
360 }
361
362 /*
363 * Guarantees nothing is using the device before it's deleted.
364 */
365 int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
366 {
367 int r = 0;
368
369 spin_lock(&_minor_lock);
370
371 if (dm_open_count(md)) {
372 r = -EBUSY;
373 if (mark_deferred)
374 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
375 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
376 r = -EEXIST;
377 else
378 set_bit(DMF_DELETING, &md->flags);
379
380 spin_unlock(&_minor_lock);
381
382 return r;
383 }
384
385 int dm_cancel_deferred_remove(struct mapped_device *md)
386 {
387 int r = 0;
388
389 spin_lock(&_minor_lock);
390
391 if (test_bit(DMF_DELETING, &md->flags))
392 r = -EBUSY;
393 else
394 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
395
396 spin_unlock(&_minor_lock);
397
398 return r;
399 }
400
401 static void do_deferred_remove(struct work_struct *w)
402 {
403 dm_deferred_remove();
404 }
405
406 static int dm_blk_getgeo(struct gendisk *disk, struct hd_geometry *geo)
407 {
408 struct mapped_device *md = disk->private_data;
409
410 return dm_get_geometry(md, geo);
411 }
412
413 static int dm_prepare_ioctl(struct mapped_device *md, int *srcu_idx,
414 struct block_device **bdev, unsigned int cmd,
415 unsigned long arg, bool *forward)
416 {
417 struct dm_target *ti;
418 struct dm_table *map;
419 int r;
420
421 retry:
422 r = -ENOTTY;
423 map = dm_get_live_table(md, srcu_idx);
424 if (!map || !dm_table_get_size(map))
425 return r;
426
427 /* We only support devices that have a single target */
428 if (map->num_targets != 1)
429 return r;
430
431 ti = dm_table_get_target(map, 0);
432 if (!ti->type->prepare_ioctl)
433 return r;
434
435 if (dm_suspended_md(md))
436 return -EAGAIN;
437
438 r = ti->type->prepare_ioctl(ti, bdev, cmd, arg, forward);
439 if (r == -ENOTCONN && *forward && !fatal_signal_pending(current)) {
440 dm_put_live_table(md, *srcu_idx);
441 fsleep(10000);
442 goto retry;
443 }
444
445 return r;
446 }
447
448 static void dm_unprepare_ioctl(struct mapped_device *md, int srcu_idx)
449 {
450 dm_put_live_table(md, srcu_idx);
451 }
452
453 static int dm_blk_ioctl(struct block_device *bdev, blk_mode_t mode,
454 unsigned int cmd, unsigned long arg)
455 {
456 struct mapped_device *md = bdev->bd_disk->private_data;
457 int r, srcu_idx;
458 bool forward = true;
459
460 r = dm_prepare_ioctl(md, &srcu_idx, &bdev, cmd, arg, &forward);
461 if (!forward || r < 0)
462 goto out;
463
464 if (r > 0) {
465 /*
466 * Target determined this ioctl is being issued against a
467 * subset of the parent bdev; require extra privileges.
468 */
469 if (!capable(CAP_SYS_RAWIO)) {
470 DMDEBUG_LIMIT(
471 "%s: sending ioctl %x to DM device without required privilege.",
472 current->comm, cmd);
473 r = -ENOIOCTLCMD;
474 goto out;
475 }
476 }
477
478 if (!bdev->bd_disk->fops->ioctl)
479 r = -ENOTTY;
480 else
481 r = bdev->bd_disk->fops->ioctl(bdev, mode, cmd, arg);
482 out:
483 dm_unprepare_ioctl(md, srcu_idx);
484 return r;
485 }
486
487 u64 dm_start_time_ns_from_clone(struct bio *bio)
488 {
489 return jiffies_to_nsecs(clone_to_tio(bio)->io->start_time);
490 }
491 EXPORT_SYMBOL_GPL(dm_start_time_ns_from_clone);
492
493 static inline unsigned int dm_io_sectors(struct dm_io *io, struct bio *bio)
494 {
495 /*
496 * If REQ_PREFLUSH is set, don't account the payload; it will be
497 * submitted (and accounted) after this flush completes.
498 */
499 if (io->requeue_flush_with_data)
500 return 0;
501 if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
502 return io->sectors;
503 return bio_sectors(bio);
504 }
505
506 static void dm_io_acct(struct dm_io *io, bool end)
507 {
508 struct bio *bio = io->orig_bio;
509
510 if (dm_io_flagged(io, DM_IO_BLK_STAT)) {
511 if (!end)
512 bdev_start_io_acct(bio->bi_bdev, bio_op(bio),
513 io->start_time);
514 else
515 bdev_end_io_acct(bio->bi_bdev, bio_op(bio),
516 dm_io_sectors(io, bio),
517 io->start_time);
518 }
519
520 if (static_branch_unlikely(&stats_enabled) &&
521 unlikely(dm_stats_used(&io->md->stats))) {
522 sector_t sector;
523
524 if (unlikely(dm_io_flagged(io, DM_IO_WAS_SPLIT)))
525 sector = bio_end_sector(bio) - io->sector_offset;
526 else
527 sector = bio->bi_iter.bi_sector;
528
529 dm_stats_account_io(&io->md->stats, bio_data_dir(bio),
530 sector, dm_io_sectors(io, bio),
531 end, io->start_time, &io->stats_aux);
532 }
533 }
534
535 static void __dm_start_io_acct(struct dm_io *io)
536 {
537 dm_io_acct(io, false);
538 }
539
540 static void dm_start_io_acct(struct dm_io *io, struct bio *clone)
541 {
542 /*
543 * Ensure IO accounting is only ever started once.
544 */
545 if (dm_io_flagged(io, DM_IO_ACCOUNTED))
546 return;
547
548 /* Expect no possibility for race unless DM_TIO_IS_DUPLICATE_BIO. */
549 if (!clone || likely(dm_tio_is_normal(clone_to_tio(clone)))) {
550 dm_io_set_flag(io, DM_IO_ACCOUNTED);
551 } else {
552 unsigned long flags;
553 /* Can afford locking given DM_TIO_IS_DUPLICATE_BIO */
554 spin_lock_irqsave(&io->lock, flags);
555 if (dm_io_flagged(io, DM_IO_ACCOUNTED)) {
556 spin_unlock_irqrestore(&io->lock, flags);
557 return;
558 }
559 dm_io_set_flag(io, DM_IO_ACCOUNTED);
560 spin_unlock_irqrestore(&io->lock, flags);
561 }
562
563 __dm_start_io_acct(io);
564 }
565
566 static void dm_end_io_acct(struct dm_io *io)
567 {
568 dm_io_acct(io, true);
569 }
570
571 static struct dm_io *alloc_io(struct mapped_device *md, struct bio *bio, gfp_t gfp_mask)
572 {
573 struct dm_io *io;
574 struct dm_target_io *tio;
575 struct bio *clone;
576
577 clone = bio_alloc_clone(NULL, bio, gfp_mask, &md->mempools->io_bs);
578 if (unlikely(!clone))
579 return NULL;
580 tio = clone_to_tio(clone);
581 tio->flags = 0;
582 dm_tio_set_flag(tio, DM_TIO_INSIDE_DM_IO);
583 tio->io = NULL;
584
585 io = container_of(tio, struct dm_io, tio);
586 io->magic = DM_IO_MAGIC;
587 io->status = BLK_STS_OK;
588 io->requeue_flush_with_data = false;
589
590 /* one ref is for submission, the other is for completion */
591 atomic_set(&io->io_count, 2);
592 this_cpu_inc(*md->pending_io);
593 io->orig_bio = bio;
594 io->md = md;
595 spin_lock_init(&io->lock);
596 io->start_time = jiffies;
597 io->flags = 0;
598 if (blk_queue_io_stat(md->queue))
599 dm_io_set_flag(io, DM_IO_BLK_STAT);
600
601 if (static_branch_unlikely(&stats_enabled) &&
602 unlikely(dm_stats_used(&md->stats)))
603 dm_stats_record_start(&md->stats, &io->stats_aux);
604
605 return io;
606 }
607
608 static void free_io(struct dm_io *io)
609 {
610 bio_put(&io->tio.clone);
611 }
612
613 static struct bio *alloc_tio(struct clone_info *ci, struct dm_target *ti,
614 unsigned int target_bio_nr, unsigned int *len, gfp_t gfp_mask)
615 {
616 struct mapped_device *md = ci->io->md;
617 struct dm_target_io *tio;
618 struct bio *clone;
619
620 if (!ci->io->tio.io) {
621 /* the dm_target_io embedded in ci->io is available */
622 tio = &ci->io->tio;
623 /* alloc_io() already initialized embedded clone */
624 clone = &tio->clone;
625 } else {
626 clone = bio_alloc_clone(NULL, ci->bio, gfp_mask,
627 &md->mempools->bs);
628 if (!clone)
629 return NULL;
630
631 /* REQ_DM_POLL_LIST shouldn't be inherited */
632 clone->bi_opf &= ~REQ_DM_POLL_LIST;
633
634 tio = clone_to_tio(clone);
635 tio->flags = 0; /* also clears DM_TIO_INSIDE_DM_IO */
636 }
637
638 tio->magic = DM_TIO_MAGIC;
639 tio->io = ci->io;
640 tio->ti = ti;
641 tio->target_bio_nr = target_bio_nr;
642 tio->len_ptr = len;
643 tio->old_sector = 0;
644
645 /* Set default bdev, but target must bio_set_dev() before issuing IO */
646 clone->bi_bdev = md->disk->part0;
647 if (likely(ti != NULL) && unlikely(ti->needs_bio_set_dev))
648 bio_set_dev(clone, md->disk->part0);
649
650 if (len) {
651 clone->bi_iter.bi_size = to_bytes(*len);
652 if (bio_integrity(clone))
653 bio_integrity_trim(clone);
654 }
655
656 return clone;
657 }
658
659 static void free_tio(struct bio *clone)
660 {
661 if (dm_tio_flagged(clone_to_tio(clone), DM_TIO_INSIDE_DM_IO))
662 return;
663 bio_put(clone);
664 }
665
666 /*
667 * Add the bio to the list of deferred io.
668 */
669 static void queue_io(struct mapped_device *md, struct bio *bio)
670 {
671 unsigned long flags;
672
673 spin_lock_irqsave(&md->deferred_lock, flags);
674 bio_list_add(&md->deferred, bio);
675 spin_unlock_irqrestore(&md->deferred_lock, flags);
676 queue_work(md->wq, &md->work);
677 }
678
679 /*
680 * Everyone (including functions in this file) should use this
681 * function to access the md->map field, and must call
682 * dm_put_live_table() when finished.
683 */
684 struct dm_table *dm_get_live_table(struct mapped_device *md,
685 int *srcu_idx) __acquires(md->io_barrier)
686 {
687 *srcu_idx = srcu_read_lock(&md->io_barrier);
688
689 return srcu_dereference(md->map, &md->io_barrier);
690 }
691
692 void dm_put_live_table(struct mapped_device *md,
693 int srcu_idx) __releases(md->io_barrier)
694 {
695 srcu_read_unlock(&md->io_barrier, srcu_idx);
696 }
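/*
 * Typical pairing of the two calls above (a sketch, mirroring the usage in
 * dm_prepare_ioctl() below):
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map) {
 *		... inspect or use the live table ...
 *	}
 *	dm_put_live_table(md, srcu_idx);
 */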
697
698 void dm_sync_table(struct mapped_device *md)
699 {
700 synchronize_srcu(&md->io_barrier);
701 synchronize_rcu_expedited();
702 }
703
704 /*
705 * A fast alternative to dm_get_live_table/dm_put_live_table.
706 * The caller must not block between these two functions.
707 */
708 static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
709 {
710 rcu_read_lock();
711 return rcu_dereference(md->map);
712 }
713
714 static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
715 {
716 rcu_read_unlock();
717 }
718
719 static char *_dm_claim_ptr = "I belong to device-mapper";
720
721 /*
722 * Open a table device so we can use it as a map destination.
723 */
724 static struct table_device *open_table_device(struct mapped_device *md,
725 dev_t dev, blk_mode_t mode)
726 {
727 struct table_device *td;
728 struct file *bdev_file;
729 struct block_device *bdev;
730 u64 part_off;
731 int r;
732
733 td = kmalloc_node(sizeof(*td), GFP_KERNEL, md->numa_node_id);
734 if (!td)
735 return ERR_PTR(-ENOMEM);
736 refcount_set(&td->count, 1);
737
738 bdev_file = bdev_file_open_by_dev(dev, mode, _dm_claim_ptr, NULL);
739 if (IS_ERR(bdev_file)) {
740 r = PTR_ERR(bdev_file);
741 goto out_free_td;
742 }
743
744 bdev = file_bdev(bdev_file);
745
746 /*
747 * We can be called before the dm disk is added. In that case we can't
748 * register the holder relation here. It will be done once add_disk() has
749 * been called.
750 */
751 if (md->disk->slave_dir) {
752 r = bd_link_disk_holder(bdev, md->disk);
753 if (r)
754 goto out_blkdev_put;
755 }
756
757 td->dm_dev.mode = mode;
758 td->dm_dev.bdev = bdev;
759 td->dm_dev.bdev_file = bdev_file;
760 td->dm_dev.dax_dev = fs_dax_get_by_bdev(bdev, &part_off,
761 NULL, NULL);
762 format_dev_t(td->dm_dev.name, dev);
763 list_add(&td->list, &md->table_devices);
764 return td;
765
766 out_blkdev_put:
767 __fput_sync(bdev_file);
768 out_free_td:
769 kfree(td);
770 return ERR_PTR(r);
771 }
772
773 /*
774 * Close a table device that we've been using.
775 */
776 static void close_table_device(struct table_device *td, struct mapped_device *md)
777 {
778 if (md->disk->slave_dir)
779 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
780
781 /* Leverage async fput() if DMF_DEFERRED_REMOVE set */
782 if (unlikely(test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
783 fput(td->dm_dev.bdev_file);
784 else
785 __fput_sync(td->dm_dev.bdev_file);
786
787 put_dax(td->dm_dev.dax_dev);
788 list_del(&td->list);
789 kfree(td);
790 }
791
792 static struct table_device *find_table_device(struct list_head *l, dev_t dev,
793 blk_mode_t mode)
794 {
795 struct table_device *td;
796
797 list_for_each_entry(td, l, list)
798 if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
799 return td;
800
801 return NULL;
802 }
803
804 int dm_get_table_device(struct mapped_device *md, dev_t dev, blk_mode_t mode,
805 struct dm_dev **result)
806 {
807 struct table_device *td;
808
809 mutex_lock(&md->table_devices_lock);
810 td = find_table_device(&md->table_devices, dev, mode);
811 if (!td) {
812 td = open_table_device(md, dev, mode);
813 if (IS_ERR(td)) {
814 mutex_unlock(&md->table_devices_lock);
815 return PTR_ERR(td);
816 }
817 } else {
818 refcount_inc(&td->count);
819 }
820 mutex_unlock(&md->table_devices_lock);
821
822 *result = &td->dm_dev;
823 return 0;
824 }
825
826 void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
827 {
828 struct table_device *td = container_of(d, struct table_device, dm_dev);
829
830 mutex_lock(&md->table_devices_lock);
831 if (refcount_dec_and_test(&td->count))
832 close_table_device(td, md);
833 mutex_unlock(&md->table_devices_lock);
834 }
835
836 /*
837 * Get the geometry associated with a dm device
838 */
839 int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
840 {
841 *geo = md->geometry;
842
843 return 0;
844 }
845
846 /*
847 * Set the geometry of a device.
848 */
849 int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
850 {
851 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
852
853 if (geo->start > sz) {
854 DMERR("Start sector is beyond the geometry limits.");
855 return -EINVAL;
856 }
857
858 md->geometry = *geo;
859
860 return 0;
861 }
862
863 static int __noflush_suspending(struct mapped_device *md)
864 {
865 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
866 }
867
868 static void dm_requeue_add_io(struct dm_io *io, bool first_stage)
869 {
870 struct mapped_device *md = io->md;
871
872 if (first_stage) {
873 struct dm_io *next = md->requeue_list;
874
875 md->requeue_list = io;
876 io->next = next;
877 } else {
878 bio_list_add_head(&md->deferred, io->orig_bio);
879 }
880 }
881
882 static void dm_kick_requeue(struct mapped_device *md, bool first_stage)
883 {
884 if (first_stage)
885 queue_work(md->wq, &md->requeue_work);
886 else
887 queue_work(md->wq, &md->work);
888 }
889
890 /*
891 * Return true if the dm_io's original bio is requeued.
892 * io->status is updated with error if requeue disallowed.
893 */
894 static bool dm_handle_requeue(struct dm_io *io, bool first_stage)
895 {
896 struct bio *bio = io->orig_bio;
897 bool handle_requeue = (io->status == BLK_STS_DM_REQUEUE);
898 bool handle_polled_eagain = ((io->status == BLK_STS_AGAIN) &&
899 (bio->bi_opf & REQ_POLLED));
900 struct mapped_device *md = io->md;
901 bool requeued = false;
902
903 if (handle_requeue || handle_polled_eagain) {
904 unsigned long flags;
905
906 if (bio->bi_opf & REQ_POLLED) {
907 /*
908 * Upper layer won't help us poll split bio
909 * (io->orig_bio may only reflect a subset of the
910 * pre-split original) so clear REQ_POLLED.
911 */
912 bio_clear_polled(bio);
913 }
914
915 /*
916 * Target requested pushing back the I/O or
917 * polled IO hit BLK_STS_AGAIN.
918 */
919 spin_lock_irqsave(&md->deferred_lock, flags);
920 if ((__noflush_suspending(md) &&
921 !WARN_ON_ONCE(dm_is_zone_write(md, bio))) ||
922 handle_polled_eagain || first_stage) {
923 dm_requeue_add_io(io, first_stage);
924 requeued = true;
925 } else {
926 /*
927 * noflush suspend was interrupted or this is
928 * a write to a zoned target.
929 */
930 io->status = BLK_STS_IOERR;
931 }
932 spin_unlock_irqrestore(&md->deferred_lock, flags);
933 }
934
935 if (requeued)
936 dm_kick_requeue(md, first_stage);
937
938 return requeued;
939 }
940
941 static void __dm_io_complete(struct dm_io *io, bool first_stage)
942 {
943 struct bio *bio = io->orig_bio;
944 struct mapped_device *md = io->md;
945 blk_status_t io_error;
946 bool requeued;
947 bool requeue_flush_with_data;
948
949 requeued = dm_handle_requeue(io, first_stage);
950 if (requeued && first_stage)
951 return;
952
953 io_error = io->status;
954 if (dm_io_flagged(io, DM_IO_ACCOUNTED))
955 dm_end_io_acct(io);
956 else if (!io_error) {
957 /*
958 * Must handle a target that returned DM_MAPIO_SUBMITTED but
959 * then called bio_endio() rather than dm_submit_bio_remap()
960 */
961 __dm_start_io_acct(io);
962 dm_end_io_acct(io);
963 }
964 requeue_flush_with_data = io->requeue_flush_with_data;
965 free_io(io);
966 smp_wmb();
967 this_cpu_dec(*md->pending_io);
968
969 /* nudge anyone waiting on suspend queue */
970 if (unlikely(wq_has_sleeper(&md->wait)))
971 wake_up(&md->wait);
972
973 /* Return early if the original bio was requeued */
974 if (requeued)
975 return;
976
977 if (unlikely(requeue_flush_with_data)) {
978 /*
979 * Preflush done for flush with data, reissue
980 * without REQ_PREFLUSH.
981 */
982 bio->bi_opf &= ~REQ_PREFLUSH;
983 queue_io(md, bio);
984 } else {
985 /* done with normal IO or empty flush */
986 if (io_error)
987 bio->bi_status = io_error;
988 bio_endio(bio);
989 }
990 }
991
992 static void dm_wq_requeue_work(struct work_struct *work)
993 {
994 struct mapped_device *md = container_of(work, struct mapped_device,
995 requeue_work);
996 unsigned long flags;
997 struct dm_io *io;
998
999 /* reuse deferred lock to simplify dm_handle_requeue */
1000 spin_lock_irqsave(&md->deferred_lock, flags);
1001 io = md->requeue_list;
1002 md->requeue_list = NULL;
1003 spin_unlock_irqrestore(&md->deferred_lock, flags);
1004
1005 while (io) {
1006 struct dm_io *next = io->next;
1007
1008 dm_io_rewind(io, &md->disk->bio_split);
1009
1010 io->next = NULL;
1011 __dm_io_complete(io, false);
1012 io = next;
1013 cond_resched();
1014 }
1015 }
1016
1017 /*
1018 * Two staged requeue:
1019 *
1020 * 1) io->orig_bio points to the real original bio, and the part mapped to
1021 * this io must be requeued, instead of other parts of the original bio.
1022 *
1023 * 2) io->orig_bio points to new cloned bio which matches the requeued dm_io.
1024 */
1025 static inline void dm_io_complete(struct dm_io *io)
1026 {
1027 /*
1028 * Only a dm_io that has been split needs the two-stage requeue; otherwise
1029 * we may run into a long bio clone chain during suspend and OOM could
1030 * be triggered.
1031 *
1032 * Also, a flush-with-data dm_io won't be marked DM_IO_WAS_SPLIT, so it
1033 * isn't handled via the first-stage requeue either.
1034 */
1035 __dm_io_complete(io, dm_io_flagged(io, DM_IO_WAS_SPLIT));
1036 }
1037
1038 /*
1039 * Decrements the number of outstanding ios that a bio has been
1040 * cloned into, completing the original io if necessary.
1041 */
1042 static inline void __dm_io_dec_pending(struct dm_io *io)
1043 {
1044 if (atomic_dec_and_test(&io->io_count))
1045 dm_io_complete(io);
1046 }
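/*
 * Reference-count protocol (a summary of how io_count is used in this file,
 * added for clarity): alloc_io() starts io_count at 2 -- one reference for
 * submission and one for completion. Paths that emit extra clones (flushes,
 * abnormal IO, zone reset-all) atomic_add() before issuing them and
 * atomic_sub() whatever was not issued, plus the submission reference once
 * submission is finished. When the count drops to zero, dm_io_complete()
 * ends the original bio.
 */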
1047
1048 static void dm_io_set_error(struct dm_io *io, blk_status_t error)
1049 {
1050 unsigned long flags;
1051
1052 /* Push-back supersedes any I/O errors */
1053 spin_lock_irqsave(&io->lock, flags);
1054 if (!(io->status == BLK_STS_DM_REQUEUE &&
1055 __noflush_suspending(io->md))) {
1056 io->status = error;
1057 }
1058 spin_unlock_irqrestore(&io->lock, flags);
1059 }
1060
1061 static void dm_io_dec_pending(struct dm_io *io, blk_status_t error)
1062 {
1063 if (unlikely(error))
1064 dm_io_set_error(io, error);
1065
1066 __dm_io_dec_pending(io);
1067 }
1068
1069 /*
1070 * The queue_limits are only valid as long as you have a reference
1071 * count on 'md'; that is deliberately not verified here, to avoid an atomic_read().
1072 */
1073 static inline struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
1074 {
1075 return &md->queue->limits;
1076 }
1077
1078 static bool swap_bios_limit(struct dm_target *ti, struct bio *bio)
1079 {
1080 return unlikely((bio->bi_opf & REQ_SWAP) != 0) && unlikely(ti->limit_swap_bios);
1081 }
1082
1083 static void clone_endio(struct bio *bio)
1084 {
1085 blk_status_t error = bio->bi_status;
1086 struct dm_target_io *tio = clone_to_tio(bio);
1087 struct dm_target *ti = tio->ti;
1088 dm_endio_fn endio = likely(ti != NULL) ? ti->type->end_io : NULL;
1089 struct dm_io *io = tio->io;
1090 struct mapped_device *md = io->md;
1091
1092 if (unlikely(error == BLK_STS_TARGET)) {
1093 if (bio_op(bio) == REQ_OP_DISCARD &&
1094 !bdev_max_discard_sectors(bio->bi_bdev))
1095 blk_queue_disable_discard(md->queue);
1096 else if (bio_op(bio) == REQ_OP_WRITE_ZEROES &&
1097 !bdev_write_zeroes_sectors(bio->bi_bdev))
1098 blk_queue_disable_write_zeroes(md->queue);
1099 }
1100
1101 if (static_branch_unlikely(&zoned_enabled) &&
1102 unlikely(bdev_is_zoned(bio->bi_bdev)))
1103 dm_zone_endio(io, bio);
1104
1105 if (endio) {
1106 int r = endio(ti, bio, &error);
1107
1108 switch (r) {
1109 case DM_ENDIO_REQUEUE:
1110 if (static_branch_unlikely(&zoned_enabled)) {
1111 /*
1112 * Requeuing writes to a sequential zone of a zoned
1113 * target will break the sequential write pattern:
1114 * fail such IO.
1115 */
1116 if (WARN_ON_ONCE(dm_is_zone_write(md, bio)))
1117 error = BLK_STS_IOERR;
1118 else
1119 error = BLK_STS_DM_REQUEUE;
1120 } else
1121 error = BLK_STS_DM_REQUEUE;
1122 fallthrough;
1123 case DM_ENDIO_DONE:
1124 break;
1125 case DM_ENDIO_INCOMPLETE:
1126 /* The target will handle the io */
1127 return;
1128 default:
1129 DMCRIT("unimplemented target endio return value: %d", r);
1130 BUG();
1131 }
1132 }
1133
1134 if (static_branch_unlikely(&swap_bios_enabled) &&
1135 likely(ti != NULL) && unlikely(swap_bios_limit(ti, bio)))
1136 up(&md->swap_bios_semaphore);
1137
1138 free_tio(bio);
1139 dm_io_dec_pending(io, error);
1140 }
1141
1142 /*
1143 * Return maximum size of I/O possible at the supplied sector up to the current
1144 * target boundary.
1145 */
1146 static inline sector_t max_io_len_target_boundary(struct dm_target *ti,
1147 sector_t target_offset)
1148 {
1149 return ti->len - target_offset;
1150 }
1151
1152 static sector_t __max_io_len(struct dm_target *ti, sector_t sector,
1153 unsigned int max_granularity,
1154 unsigned int max_sectors)
1155 {
1156 sector_t target_offset = dm_target_offset(ti, sector);
1157 sector_t len = max_io_len_target_boundary(ti, target_offset);
1158
1159 /*
1160 * Does the target need to split IO even further?
1161 * - varied (per target) IO splitting is a tenet of DM; this
1162 * explains why stacked chunk_sectors based splitting via
1163 * bio_split_to_limits() isn't possible here.
1164 */
1165 if (!max_granularity)
1166 return len;
1167 return min_t(sector_t, len,
1168 min(max_sectors ? : queue_max_sectors(ti->table->md->queue),
1169 blk_boundary_sectors_left(target_offset, max_granularity)));
1170 }
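/*
 * Worked example (illustrative numbers): with target_offset == 100 and
 * max_granularity == 64, blk_boundary_sectors_left() returns 28 (the distance
 * to the next 64-sector boundary at 128), so the clone length is capped at
 * min(remaining target length, max_sectors, 28) sectors.
 */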
1171
1172 static inline sector_t max_io_len(struct dm_target *ti, sector_t sector)
1173 {
1174 return __max_io_len(ti, sector, ti->max_io_len, 0);
1175 }
1176
1177 int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1178 {
1179 if (len > UINT_MAX) {
1180 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1181 (unsigned long long)len, UINT_MAX);
1182 ti->error = "Maximum size of target IO is too large";
1183 return -EINVAL;
1184 }
1185
1186 ti->max_io_len = (uint32_t) len;
1187
1188 return 0;
1189 }
1190 EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1191
1192 static struct dm_target *dm_dax_get_live_target(struct mapped_device *md,
1193 sector_t sector, int *srcu_idx)
1194 __acquires(md->io_barrier)
1195 {
1196 struct dm_table *map;
1197 struct dm_target *ti;
1198
1199 map = dm_get_live_table(md, srcu_idx);
1200 if (!map)
1201 return NULL;
1202
1203 ti = dm_table_find_target(map, sector);
1204 if (!ti)
1205 return NULL;
1206
1207 return ti;
1208 }
1209
1210 static long dm_dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff,
1211 long nr_pages, enum dax_access_mode mode, void **kaddr,
1212 unsigned long *pfn)
1213 {
1214 struct mapped_device *md = dax_get_private(dax_dev);
1215 sector_t sector = pgoff * PAGE_SECTORS;
1216 struct dm_target *ti;
1217 long len, ret = -EIO;
1218 int srcu_idx;
1219
1220 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1221
1222 if (!ti)
1223 goto out;
1224 if (!ti->type->direct_access)
1225 goto out;
1226 len = max_io_len(ti, sector) / PAGE_SECTORS;
1227 if (len < 1)
1228 goto out;
1229 nr_pages = min(len, nr_pages);
1230 ret = ti->type->direct_access(ti, pgoff, nr_pages, mode, kaddr, pfn);
1231
1232 out:
1233 dm_put_live_table(md, srcu_idx);
1234
1235 return ret;
1236 }
1237
1238 static int dm_dax_zero_page_range(struct dax_device *dax_dev, pgoff_t pgoff,
1239 size_t nr_pages)
1240 {
1241 struct mapped_device *md = dax_get_private(dax_dev);
1242 sector_t sector = pgoff * PAGE_SECTORS;
1243 struct dm_target *ti;
1244 int ret = -EIO;
1245 int srcu_idx;
1246
1247 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1248
1249 if (!ti)
1250 goto out;
1251 if (WARN_ON(!ti->type->dax_zero_page_range)) {
1252 /*
1253 * ->zero_page_range() is a mandatory dax operation. If we are
1254 * here, something is wrong.
1255 */
1256 goto out;
1257 }
1258 ret = ti->type->dax_zero_page_range(ti, pgoff, nr_pages);
1259 out:
1260 dm_put_live_table(md, srcu_idx);
1261
1262 return ret;
1263 }
1264
1265 static size_t dm_dax_recovery_write(struct dax_device *dax_dev, pgoff_t pgoff,
1266 void *addr, size_t bytes, struct iov_iter *i)
1267 {
1268 struct mapped_device *md = dax_get_private(dax_dev);
1269 sector_t sector = pgoff * PAGE_SECTORS;
1270 struct dm_target *ti;
1271 int srcu_idx;
1272 long ret = 0;
1273
1274 ti = dm_dax_get_live_target(md, sector, &srcu_idx);
1275 if (!ti || !ti->type->dax_recovery_write)
1276 goto out;
1277
1278 ret = ti->type->dax_recovery_write(ti, pgoff, addr, bytes, i);
1279 out:
1280 dm_put_live_table(md, srcu_idx);
1281 return ret;
1282 }
1283
1284 /*
1285 * A target may call dm_accept_partial_bio only from the map routine. It is
1286 * allowed for all bio types except REQ_PREFLUSH, REQ_OP_ZONE_* zone management
1287 * operations, zone append writes (native with REQ_OP_ZONE_APPEND or emulated
1288 * with write BIOs flagged with BIO_EMULATES_ZONE_APPEND) and any bio serviced
1289 * by __send_duplicate_bios().
1290 *
1291 * dm_accept_partial_bio informs DM that the target only wants to process
1292 * an additional n_sectors sectors of the bio and that the rest of the data
1293 * should be sent in a subsequent bio.
1294 *
1295 * A diagram that explains the arithmetic:
1296 * +--------------------+---------------+-------+
1297 * | 1 | 2 | 3 |
1298 * +--------------------+---------------+-------+
1299 *
1300 * <-------------- *tio->len_ptr --------------->
1301 * <----- bio_sectors ----->
1302 * <-- n_sectors -->
1303 *
1304 * Region 1 was already iterated over with bio_advance or similar function.
1305 * (it may be empty if the target doesn't use bio_advance)
1306 * Region 2 is the remaining bio size that the target wants to process.
1307 * (it may be empty if region 1 is non-empty, although there is no reason
1308 * to make it empty)
1309 * The target requires that region 3 is to be sent in the next bio.
1310 *
1311 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1312 * the partially processed part (the sum of regions 1+2) must be the same for all
1313 * copies of the bio.
1314 */
1315 void dm_accept_partial_bio(struct bio *bio, unsigned int n_sectors)
1316 {
1317 struct dm_target_io *tio = clone_to_tio(bio);
1318 struct dm_io *io = tio->io;
1319 unsigned int bio_sectors = bio_sectors(bio);
1320
1321 BUG_ON(dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
1322 BUG_ON(bio_sectors > *tio->len_ptr);
1323 BUG_ON(n_sectors > bio_sectors);
1324 BUG_ON(bio->bi_opf & REQ_ATOMIC);
1325
1326 if (static_branch_unlikely(&zoned_enabled) &&
1327 unlikely(bdev_is_zoned(bio->bi_bdev))) {
1328 enum req_op op = bio_op(bio);
1329
1330 BUG_ON(op_is_zone_mgmt(op));
1331 BUG_ON(op == REQ_OP_WRITE);
1332 BUG_ON(op == REQ_OP_WRITE_ZEROES);
1333 BUG_ON(op == REQ_OP_ZONE_APPEND);
1334 }
1335
1336 *tio->len_ptr -= bio_sectors - n_sectors;
1337 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1338
1339 /*
1340 * __split_and_process_bio() may have already saved mapped part
1341 * for accounting but it is being reduced so update accordingly.
1342 */
1343 dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1344 io->sectors = n_sectors;
1345 io->sector_offset = bio_sectors(io->orig_bio);
1346 }
1347 EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
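/*
 * Illustrative usage (assumed target code, not part of this file): a target's
 * .map method that can only handle MY_MAX_SECTORS at a time might do:
 *
 *	if (bio_sectors(bio) > MY_MAX_SECTORS)
 *		dm_accept_partial_bio(bio, MY_MAX_SECTORS);
 *
 * then remap and return DM_MAPIO_REMAPPED for the first MY_MAX_SECTORS;
 * DM core resubmits the remainder (region 3 above) as a new bio.
 */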
1348
1349 /*
1350 * @clone: clone bio that DM core passed to target's .map function
1351 * @tgt_clone: clone of @clone bio that target needs submitted
1352 *
1353 * Targets should use this interface to submit bios they take
1354 * ownership of when returning DM_MAPIO_SUBMITTED.
1355 *
1356 * Target should also enable ti->accounts_remapped_io
1357 */
1358 void dm_submit_bio_remap(struct bio *clone, struct bio *tgt_clone)
1359 {
1360 struct dm_target_io *tio = clone_to_tio(clone);
1361 struct dm_io *io = tio->io;
1362
1363 /* establish bio that will get submitted */
1364 if (!tgt_clone)
1365 tgt_clone = clone;
1366
1367 bio_clone_blkg_association(tgt_clone, io->orig_bio);
1368
1369 /*
1370 * Account io->orig_bio to DM dev on behalf of target
1371 * that took ownership of IO with DM_MAPIO_SUBMITTED.
1372 */
1373 dm_start_io_acct(io, clone);
1374
1375 trace_block_bio_remap(tgt_clone, disk_devt(io->md->disk),
1376 tio->old_sector);
1377 submit_bio_noacct(tgt_clone);
1378 }
1379 EXPORT_SYMBOL_GPL(dm_submit_bio_remap);
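/*
 * Illustrative usage (assumed target code, not part of this file): a target
 * that defers remapping to a worker sets ti->accounts_remapped_io, returns
 * DM_MAPIO_SUBMITTED from .map, and later calls:
 *
 *	dm_submit_bio_remap(clone, NULL);
 *
 * from its worker once the clone has been retargeted with bio_set_dev().
 */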
1380
1381 static noinline void __set_swap_bios_limit(struct mapped_device *md, int latch)
1382 {
1383 mutex_lock(&md->swap_bios_lock);
1384 while (latch < md->swap_bios) {
1385 cond_resched();
1386 down(&md->swap_bios_semaphore);
1387 md->swap_bios--;
1388 }
1389 while (latch > md->swap_bios) {
1390 cond_resched();
1391 up(&md->swap_bios_semaphore);
1392 md->swap_bios++;
1393 }
1394 mutex_unlock(&md->swap_bios_lock);
1395 }
1396
1397 static void __map_bio(struct bio *clone)
1398 {
1399 struct dm_target_io *tio = clone_to_tio(clone);
1400 struct dm_target *ti = tio->ti;
1401 struct dm_io *io = tio->io;
1402 struct mapped_device *md = io->md;
1403 int r;
1404
1405 clone->bi_end_io = clone_endio;
1406
1407 /*
1408 * Map the clone.
1409 */
1410 tio->old_sector = clone->bi_iter.bi_sector;
1411
1412 if (static_branch_unlikely(&swap_bios_enabled) &&
1413 unlikely(swap_bios_limit(ti, clone))) {
1414 int latch = get_swap_bios();
1415
1416 if (unlikely(latch != md->swap_bios))
1417 __set_swap_bios_limit(md, latch);
1418 down(&md->swap_bios_semaphore);
1419 }
1420
1421 if (likely(ti->type->map == linear_map))
1422 r = linear_map(ti, clone);
1423 else if (ti->type->map == stripe_map)
1424 r = stripe_map(ti, clone);
1425 else
1426 r = ti->type->map(ti, clone);
1427
1428 switch (r) {
1429 case DM_MAPIO_SUBMITTED:
1430 /* target has assumed ownership of this io */
1431 if (!ti->accounts_remapped_io)
1432 dm_start_io_acct(io, clone);
1433 break;
1434 case DM_MAPIO_REMAPPED:
1435 dm_submit_bio_remap(clone, NULL);
1436 break;
1437 case DM_MAPIO_KILL:
1438 case DM_MAPIO_REQUEUE:
1439 if (static_branch_unlikely(&swap_bios_enabled) &&
1440 unlikely(swap_bios_limit(ti, clone)))
1441 up(&md->swap_bios_semaphore);
1442 free_tio(clone);
1443 if (r == DM_MAPIO_KILL)
1444 dm_io_dec_pending(io, BLK_STS_IOERR);
1445 else
1446 dm_io_dec_pending(io, BLK_STS_DM_REQUEUE);
1447 break;
1448 default:
1449 DMCRIT("unimplemented target map return value: %d", r);
1450 BUG();
1451 }
1452 }
1453
1454 static void setup_split_accounting(struct clone_info *ci, unsigned int len)
1455 {
1456 struct dm_io *io = ci->io;
1457
1458 if (ci->sector_count > len) {
1459 /*
1460 * Split needed, save the mapped part for accounting.
1461 * NOTE: dm_accept_partial_bio() will update accordingly.
1462 */
1463 dm_io_set_flag(io, DM_IO_WAS_SPLIT);
1464 io->sectors = len;
1465 io->sector_offset = bio_sectors(ci->bio);
1466 }
1467 }
1468
1469 static void alloc_multiple_bios(struct bio_list *blist, struct clone_info *ci,
1470 struct dm_target *ti, unsigned int num_bios,
1471 unsigned *len)
1472 {
1473 struct bio *bio;
1474 int try;
1475
1476 for (try = 0; try < 2; try++) {
1477 int bio_nr;
1478
1479 if (try && num_bios > 1)
1480 mutex_lock(&ci->io->md->table_devices_lock);
1481 for (bio_nr = 0; bio_nr < num_bios; bio_nr++) {
1482 bio = alloc_tio(ci, ti, bio_nr, len,
1483 try ? GFP_NOIO : GFP_NOWAIT);
1484 if (!bio)
1485 break;
1486
1487 bio_list_add(blist, bio);
1488 }
1489 if (try && num_bios > 1)
1490 mutex_unlock(&ci->io->md->table_devices_lock);
1491 if (bio_nr == num_bios)
1492 return;
1493
1494 while ((bio = bio_list_pop(blist)))
1495 free_tio(bio);
1496 }
1497 }
1498
1499 static unsigned int __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1500 unsigned int num_bios, unsigned int *len)
1501 {
1502 struct bio_list blist = BIO_EMPTY_LIST;
1503 struct bio *clone;
1504 unsigned int ret = 0;
1505
1506 if (WARN_ON_ONCE(num_bios == 0)) /* num_bios = 0 is a bug in caller */
1507 return 0;
1508
1509 /* dm_accept_partial_bio() is not supported with shared tio->len_ptr */
1510 if (len)
1511 setup_split_accounting(ci, *len);
1512
1513 /*
1514 * Use alloc_multiple_bios(), even if num_bios is 1, so that allocation
1515 * consistently uses GFP_NOWAIT with a GFP_NOIO fallback.
1516 */
1517 alloc_multiple_bios(&blist, ci, ti, num_bios, len);
1518 while ((clone = bio_list_pop(&blist))) {
1519 if (num_bios > 1)
1520 dm_tio_set_flag(clone_to_tio(clone), DM_TIO_IS_DUPLICATE_BIO);
1521 __map_bio(clone);
1522 ret += 1;
1523 }
1524
1525 return ret;
1526 }
1527
1528 static void __send_empty_flush(struct clone_info *ci)
1529 {
1530 struct dm_table *t = ci->map;
1531 struct bio flush_bio;
1532 blk_opf_t opf = REQ_OP_WRITE | REQ_PREFLUSH | REQ_SYNC;
1533
1534 if ((ci->io->orig_bio->bi_opf & (REQ_IDLE | REQ_SYNC)) ==
1535 (REQ_IDLE | REQ_SYNC))
1536 opf |= REQ_IDLE;
1537
1538 /*
1539 * Use an on-stack bio for this, it's safe since we don't
1540 * need to reference it after submit. It's just used as
1541 * the basis for the clone(s).
1542 */
1543 bio_init(&flush_bio, ci->io->md->disk->part0, NULL, 0, opf);
1544
1545 ci->bio = &flush_bio;
1546 ci->sector_count = 0;
1547 ci->io->tio.clone.bi_iter.bi_size = 0;
1548
1549 if (!t->flush_bypasses_map) {
1550 for (unsigned int i = 0; i < t->num_targets; i++) {
1551 unsigned int bios;
1552 struct dm_target *ti = dm_table_get_target(t, i);
1553
1554 if (unlikely(ti->num_flush_bios == 0))
1555 continue;
1556
1557 atomic_add(ti->num_flush_bios, &ci->io->io_count);
1558 bios = __send_duplicate_bios(ci, ti, ti->num_flush_bios,
1559 NULL);
1560 atomic_sub(ti->num_flush_bios - bios, &ci->io->io_count);
1561 }
1562 } else {
1563 /*
1564 * Note that there's no need to grab t->devices_lock here
1565 * because the targets that support flush optimization don't
1566 * modify the list of devices.
1567 */
1568 struct list_head *devices = dm_table_get_devices(t);
1569 unsigned int len = 0;
1570 struct dm_dev_internal *dd;
1571 list_for_each_entry(dd, devices, list) {
1572 struct bio *clone;
1573 /*
1574 * Note that the structure dm_target_io is not
1575 * associated with any target (because the device may be
1576 * used by multiple targets), so we set tio->ti = NULL.
1577 * We must check for NULL in the I/O processing path, to
1578 * avoid NULL pointer dereference.
1579 */
1580 clone = alloc_tio(ci, NULL, 0, &len, GFP_NOIO);
1581 atomic_add(1, &ci->io->io_count);
1582 bio_set_dev(clone, dd->dm_dev->bdev);
1583 clone->bi_end_io = clone_endio;
1584 dm_submit_bio_remap(clone, NULL);
1585 }
1586 }
1587
1588 /*
1589 * alloc_io() takes one extra reference for submission, so the
1590 * reference won't reach 0 without the following subtraction
1591 */
1592 atomic_sub(1, &ci->io->io_count);
1593
1594 bio_uninit(ci->bio);
1595 }
1596
1597 static void __send_abnormal_io(struct clone_info *ci, struct dm_target *ti,
1598 unsigned int num_bios, unsigned int max_granularity,
1599 unsigned int max_sectors)
1600 {
1601 unsigned int len, bios;
1602
1603 len = min_t(sector_t, ci->sector_count,
1604 __max_io_len(ti, ci->sector, max_granularity, max_sectors));
1605
1606 atomic_add(num_bios, &ci->io->io_count);
1607 bios = __send_duplicate_bios(ci, ti, num_bios, &len);
1608 /*
1609 * alloc_io() takes one extra reference for submission, so the
1610 * reference won't reach 0 without the following (+1) subtraction
1611 */
1612 atomic_sub(num_bios - bios + 1, &ci->io->io_count);
1613
1614 ci->sector += len;
1615 ci->sector_count -= len;
1616 }
1617
1618 static bool is_abnormal_io(struct bio *bio)
1619 {
1620 switch (bio_op(bio)) {
1621 case REQ_OP_READ:
1622 case REQ_OP_WRITE:
1623 case REQ_OP_FLUSH:
1624 return false;
1625 case REQ_OP_DISCARD:
1626 case REQ_OP_SECURE_ERASE:
1627 case REQ_OP_WRITE_ZEROES:
1628 case REQ_OP_ZONE_RESET_ALL:
1629 return true;
1630 default:
1631 return false;
1632 }
1633 }
1634
1635 static blk_status_t __process_abnormal_io(struct clone_info *ci,
1636 struct dm_target *ti)
1637 {
1638 unsigned int num_bios = 0;
1639 unsigned int max_granularity = 0;
1640 unsigned int max_sectors = 0;
1641 struct queue_limits *limits = dm_get_queue_limits(ti->table->md);
1642
1643 switch (bio_op(ci->bio)) {
1644 case REQ_OP_DISCARD:
1645 num_bios = ti->num_discard_bios;
1646 max_sectors = limits->max_discard_sectors;
1647 if (ti->max_discard_granularity)
1648 max_granularity = max_sectors;
1649 break;
1650 case REQ_OP_SECURE_ERASE:
1651 num_bios = ti->num_secure_erase_bios;
1652 max_sectors = limits->max_secure_erase_sectors;
1653 break;
1654 case REQ_OP_WRITE_ZEROES:
1655 num_bios = ti->num_write_zeroes_bios;
1656 max_sectors = limits->max_write_zeroes_sectors;
1657 break;
1658 default:
1659 break;
1660 }
1661
1662 /*
1663 * Even though the device advertised support for this type of
1664 * request, that does not mean every target supports it, and
1665 * reconfiguration might also have changed that since the
1666 * check was performed.
1667 */
1668 if (unlikely(!num_bios))
1669 return BLK_STS_NOTSUPP;
1670
1671 __send_abnormal_io(ci, ti, num_bios, max_granularity, max_sectors);
1672
1673 return BLK_STS_OK;
1674 }
1675
1676 /*
1677 * Reuse ->bi_private as dm_io list head for storing all dm_io instances
1678 * associated with this bio, and this bio's bi_private needs to be
1679 * stored in dm_io->data before the reuse.
1680 *
1681 * bio->bi_private is owned by fs or upper layer, so block layer won't
1682 * touch it after splitting. Meanwhile it won't be changed by anyone after
1683 * the bio is submitted, so this reuse is safe.
1684 */
1685 static inline struct dm_io **dm_poll_list_head(struct bio *bio)
1686 {
1687 return (struct dm_io **)&bio->bi_private;
1688 }
1689
1690 static void dm_queue_poll_io(struct bio *bio, struct dm_io *io)
1691 {
1692 struct dm_io **head = dm_poll_list_head(bio);
1693
1694 if (!(bio->bi_opf & REQ_DM_POLL_LIST)) {
1695 bio->bi_opf |= REQ_DM_POLL_LIST;
1696 /*
1697 * Save .bi_private into dm_io, so that we can reuse
1698 * .bi_private as dm_io list head for storing dm_io list
1699 */
1700 io->data = bio->bi_private;
1701
1702 /* tell block layer to poll for completion */
1703 bio->bi_cookie = ~BLK_QC_T_NONE;
1704
1705 io->next = NULL;
1706 } else {
1707 /*
1708 * bio recursed due to split, reuse original poll list,
1709 * and save bio->bi_private too.
1710 */
1711 io->data = (*head)->data;
1712 io->next = *head;
1713 }
1714
1715 *head = io;
1716 }
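/*
 * Resulting layout (sketch): bio->bi_private points at the most recently
 * queued dm_io; each dm_io->next points at the one queued before it; and the
 * original ->bi_private value is preserved in every dm_io->data so it can be
 * restored before the fs bio is ended.
 */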
1717
1718 /*
1719 * Select the correct strategy for processing a non-flush bio.
1720 */
1721 static blk_status_t __split_and_process_bio(struct clone_info *ci)
1722 {
1723 struct bio *clone;
1724 struct dm_target *ti;
1725 unsigned int len;
1726
1727 ti = dm_table_find_target(ci->map, ci->sector);
1728 if (unlikely(!ti))
1729 return BLK_STS_IOERR;
1730
1731 if (unlikely(ci->is_abnormal_io))
1732 return __process_abnormal_io(ci, ti);
1733
1734 /*
1735 * Only support bio polling for normal IO, and the target io is
1736 * exactly inside the dm_io instance (verified in dm_poll_dm_io)
1737 */
1738 ci->submit_as_polled = !!(ci->bio->bi_opf & REQ_POLLED);
1739
1740 len = min_t(sector_t, max_io_len(ti, ci->sector), ci->sector_count);
1741 if (ci->bio->bi_opf & REQ_ATOMIC) {
1742 if (unlikely(!dm_target_supports_atomic_writes(ti->type)))
1743 return BLK_STS_IOERR;
1744 if (unlikely(len != ci->sector_count))
1745 return BLK_STS_IOERR;
1746 }
1747
1748 setup_split_accounting(ci, len);
1749
1750 if (unlikely(ci->bio->bi_opf & REQ_NOWAIT)) {
1751 if (unlikely(!dm_target_supports_nowait(ti->type)))
1752 return BLK_STS_NOTSUPP;
1753
1754 clone = alloc_tio(ci, ti, 0, &len, GFP_NOWAIT);
1755 if (unlikely(!clone))
1756 return BLK_STS_AGAIN;
1757 } else {
1758 clone = alloc_tio(ci, ti, 0, &len, GFP_NOIO);
1759 }
1760 __map_bio(clone);
1761
1762 ci->sector += len;
1763 ci->sector_count -= len;
1764
1765 return BLK_STS_OK;
1766 }
1767
1768 static void init_clone_info(struct clone_info *ci, struct dm_io *io,
1769 struct dm_table *map, struct bio *bio, bool is_abnormal)
1770 {
1771 ci->map = map;
1772 ci->io = io;
1773 ci->bio = bio;
1774 ci->is_abnormal_io = is_abnormal;
1775 ci->submit_as_polled = false;
1776 ci->sector = bio->bi_iter.bi_sector;
1777 ci->sector_count = bio_sectors(bio);
1778
1779 /* Shouldn't happen but sector_count was being set to 0 so... */
1780 if (static_branch_unlikely(&zoned_enabled) &&
1781 WARN_ON_ONCE(op_is_zone_mgmt(bio_op(bio)) && ci->sector_count))
1782 ci->sector_count = 0;
1783 }
1784
1785 #ifdef CONFIG_BLK_DEV_ZONED
1786 static inline bool dm_zone_bio_needs_split(struct bio *bio)
1787 {
1788 /*
1789 * Special case the zone operations that cannot or should not be split.
1790 */
1791 switch (bio_op(bio)) {
1792 case REQ_OP_ZONE_APPEND:
1793 case REQ_OP_ZONE_FINISH:
1794 case REQ_OP_ZONE_RESET:
1795 case REQ_OP_ZONE_RESET_ALL:
1796 return false;
1797 default:
1798 break;
1799 }
1800
1801 /*
1802 * When mapped devices use the block layer zone write plugging, we must
1803 * split any large BIO to the mapped device limits to not submit BIOs
1804 * that span zone boundaries and to avoid potential deadlocks with
1805 * queue freeze operations.
1806 */
1807 return bio_needs_zone_write_plugging(bio) || bio_straddles_zones(bio);
1808 }
1809
1810 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1811 {
1812 if (!bio_needs_zone_write_plugging(bio))
1813 return false;
1814 return blk_zone_plug_bio(bio, 0);
1815 }
1816
1817 static blk_status_t __send_zone_reset_all_emulated(struct clone_info *ci,
1818 struct dm_target *ti)
1819 {
1820 struct bio_list blist = BIO_EMPTY_LIST;
1821 struct mapped_device *md = ci->io->md;
1822 unsigned int zone_sectors = md->disk->queue->limits.chunk_sectors;
1823 unsigned long *need_reset;
1824 unsigned int i, nr_zones, nr_reset;
1825 unsigned int num_bios = 0;
1826 blk_status_t sts = BLK_STS_OK;
1827 sector_t sector = ti->begin;
1828 struct bio *clone;
1829 int ret;
1830
1831 nr_zones = ti->len >> ilog2(zone_sectors);
1832 need_reset = bitmap_zalloc(nr_zones, GFP_NOIO);
1833 if (!need_reset)
1834 return BLK_STS_RESOURCE;
1835
1836 ret = dm_zone_get_reset_bitmap(md, ci->map, ti->begin,
1837 nr_zones, need_reset);
1838 if (ret) {
1839 sts = BLK_STS_IOERR;
1840 goto free_bitmap;
1841 }
1842
1843 /* If we have no zone to reset, we are done. */
1844 nr_reset = bitmap_weight(need_reset, nr_zones);
1845 if (!nr_reset)
1846 goto free_bitmap;
1847
1848 atomic_add(nr_zones, &ci->io->io_count);
1849
1850 for (i = 0; i < nr_zones; i++) {
1851
1852 if (!test_bit(i, need_reset)) {
1853 sector += zone_sectors;
1854 continue;
1855 }
1856
1857 if (bio_list_empty(&blist)) {
1858 /* This may take a while, so be nice to others */
1859 if (num_bios)
1860 cond_resched();
1861
1862 /*
1863 * We may need to reset thousands of zones, so let's
1864 * not go crazy with the clone allocation.
1865 */
1866 alloc_multiple_bios(&blist, ci, ti, min(nr_reset, 32),
1867 NULL);
1868 }
1869
1870 /* Get a clone and change it to a regular reset operation. */
1871 clone = bio_list_pop(&blist);
1872 clone->bi_opf &= ~REQ_OP_MASK;
1873 clone->bi_opf |= REQ_OP_ZONE_RESET | REQ_SYNC;
1874 clone->bi_iter.bi_sector = sector;
1875 clone->bi_iter.bi_size = 0;
1876 __map_bio(clone);
1877
1878 sector += zone_sectors;
1879 num_bios++;
1880 nr_reset--;
1881 }
1882
1883 WARN_ON_ONCE(!bio_list_empty(&blist));
1884 atomic_sub(nr_zones - num_bios, &ci->io->io_count);
1885 ci->sector_count = 0;
1886
1887 free_bitmap:
1888 bitmap_free(need_reset);
1889
1890 return sts;
1891 }
1892
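/*
 * The target handles REQ_OP_ZONE_RESET_ALL itself, so a single clone of
 * the original BIO is enough.
 */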
1893 static void __send_zone_reset_all_native(struct clone_info *ci,
1894 struct dm_target *ti)
1895 {
1896 unsigned int bios;
1897
1898 atomic_add(1, &ci->io->io_count);
1899 bios = __send_duplicate_bios(ci, ti, 1, NULL);
1900 atomic_sub(1 - bios, &ci->io->io_count);
1901
1902 ci->sector_count = 0;
1903 }
1904
1905 static blk_status_t __send_zone_reset_all(struct clone_info *ci)
1906 {
1907 struct dm_table *t = ci->map;
1908 blk_status_t sts = BLK_STS_OK;
1909
1910 for (unsigned int i = 0; i < t->num_targets; i++) {
1911 struct dm_target *ti = dm_table_get_target(t, i);
1912
1913 if (ti->zone_reset_all_supported) {
1914 __send_zone_reset_all_native(ci, ti);
1915 continue;
1916 }
1917
1918 sts = __send_zone_reset_all_emulated(ci, ti);
1919 if (sts != BLK_STS_OK)
1920 break;
1921 }
1922
1923 /* Release the reference that alloc_io() took for submission. */
1924 atomic_sub(1, &ci->io->io_count);
1925
1926 return sts;
1927 }
1928
1929 #else
1930 static inline bool dm_zone_bio_needs_split(struct bio *bio)
1931 {
1932 return false;
1933 }
1934 static inline bool dm_zone_plug_bio(struct mapped_device *md, struct bio *bio)
1935 {
1936 return false;
1937 }
1938 static blk_status_t __send_zone_reset_all(struct clone_info *ci)
1939 {
1940 return BLK_STS_NOTSUPP;
1941 }
1942 #endif
1943
1944 /*
1945 * Entry point to split a bio into clones and submit them to the targets.
1946 */
1947 static void dm_split_and_process_bio(struct mapped_device *md,
1948 struct dm_table *map, struct bio *bio)
1949 {
1950 struct clone_info ci;
1951 struct dm_io *io;
1952 blk_status_t error = BLK_STS_OK;
1953 bool is_abnormal, need_split;
1954
1955 is_abnormal = is_abnormal_io(bio);
1956 if (static_branch_unlikely(&zoned_enabled)) {
1957 need_split = is_abnormal || dm_zone_bio_needs_split(bio);
1958 } else {
1959 need_split = is_abnormal;
1960 }
1961
1962 if (unlikely(need_split)) {
1963 /*
1964 * Use bio_split_to_limits() for abnormal IO (e.g. discard, etc)
1965 * otherwise associated queue_limits won't be imposed.
1966 * Also split the BIO for mapped devices needing zone append
1967 * emulation to ensure that the BIO does not cross zone
1968 * boundaries.
1969 */
1970 bio = bio_split_to_limits(bio);
1971 if (!bio)
1972 return;
1973 }
1974
1975 /*
1976 * Use the block layer zone write plugging for mapped devices that
1977 * need zone append emulation (e.g. dm-crypt).
1978 */
1979 if (static_branch_unlikely(&zoned_enabled) && dm_zone_plug_bio(md, bio))
1980 return;
1981
1982 /* Only support nowait for normal IO */
1983 if (unlikely(bio->bi_opf & REQ_NOWAIT) && !is_abnormal) {
1984 /*
1985 * Don't support NOWAIT for FLUSH because it may allocate
1986 * multiple bios and there's no easy way to undo the
1987 * allocations.
1988 */
1989 if (bio->bi_opf & REQ_PREFLUSH) {
1990 bio_wouldblock_error(bio);
1991 return;
1992 }
1993 io = alloc_io(md, bio, GFP_NOWAIT);
1994 if (unlikely(!io)) {
1995 /* Unable to do anything without dm_io. */
1996 bio_wouldblock_error(bio);
1997 return;
1998 }
1999 } else {
2000 io = alloc_io(md, bio, GFP_NOIO);
2001 }
2002 init_clone_info(&ci, io, map, bio, is_abnormal);
2003
2004 if (unlikely((bio->bi_opf & REQ_PREFLUSH) != 0)) {
2005 /*
2006 * The "flush_bypasses_map" is set on targets where it is safe
2007 * to skip the map function and submit bios directly to the
2008 * underlying block devices - currently, it is set for dm-linear
2009 * and dm-stripe.
2010 *
2011 * If we have just one underlying device (i.e. there is one
2012 * linear target or multiple linear targets pointing to the same
2013 * device), we can send the flush with data directly to it.
2014 */
2015 if (bio->bi_iter.bi_size && map->flush_bypasses_map) {
2016 struct list_head *devices = dm_table_get_devices(map);
2017 if (devices->next == devices->prev)
2018 goto send_preflush_with_data;
2019 }
2020 if (bio->bi_iter.bi_size)
2021 io->requeue_flush_with_data = true;
2022 __send_empty_flush(&ci);
2023 /* dm_io_complete submits any data associated with flush */
2024 goto out;
2025 }
2026
2027 send_preflush_with_data:
2028 if (static_branch_unlikely(&zoned_enabled) &&
2029 (bio_op(bio) == REQ_OP_ZONE_RESET_ALL)) {
2030 error = __send_zone_reset_all(&ci);
2031 goto out;
2032 }
2033
2034 error = __split_and_process_bio(&ci);
2035 if (error || !ci.sector_count)
2036 goto out;
2037 /*
2038 * Remainder must be passed to submit_bio_noacct() so it gets handled
2039 * *after* bios already submitted have been completely processed.
2040 */
2041 bio_trim(bio, io->sectors, ci.sector_count);
2042 trace_block_split(bio, bio->bi_iter.bi_sector);
2043 bio_inc_remaining(bio);
2044 submit_bio_noacct(bio);
2045 out:
2046 /*
2047 * Drop the extra reference count for non-POLLED bio, and hold one
2048 * reference for POLLED bio, which will be released in dm_poll_bio
2049 *
2050 * Add every dm_io instance into the dm_io list head which is stored
2051 * in bio->bi_private, so that dm_poll_bio can poll them all.
2052 */
2053 if (error || !ci.submit_as_polled) {
2054 /*
2055 * In case of submission failure, the extra reference for
2056 * submitting io isn't consumed yet
2057 */
2058 if (error)
2059 atomic_dec(&io->io_count);
2060 dm_io_dec_pending(io, error);
2061 } else
2062 dm_queue_poll_io(bio, io);
2063 }
2064
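/*
 * block_device_operations->submit_bio entry point for bio-based mapped
 * devices: look up the live table and either error, defer (while
 * suspended) or split and process the BIO.
 */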
2065 static void dm_submit_bio(struct bio *bio)
2066 {
2067 struct mapped_device *md = bio->bi_bdev->bd_disk->private_data;
2068 int srcu_idx;
2069 struct dm_table *map;
2070
2071 map = dm_get_live_table(md, &srcu_idx);
2072 if (unlikely(!map)) {
2073 DMERR_LIMIT("%s: mapping table unavailable, erroring io",
2074 dm_device_name(md));
2075 bio_io_error(bio);
2076 goto out;
2077 }
2078
2079 /* If suspended, queue this IO for later */
2080 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
2081 if (bio->bi_opf & REQ_NOWAIT)
2082 bio_wouldblock_error(bio);
2083 else if (bio->bi_opf & REQ_RAHEAD)
2084 bio_io_error(bio);
2085 else
2086 queue_io(md, bio);
2087 goto out;
2088 }
2089
2090 dm_split_and_process_bio(md, map, bio);
2091 out:
2092 dm_put_live_table(md, srcu_idx);
2093 }
2094
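/*
 * Poll one dm_io's clone. Returns true once the mapped I/O has completed
 * and only the submitter's reference on the dm_io remains.
 */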
2095 static bool dm_poll_dm_io(struct dm_io *io, struct io_comp_batch *iob,
2096 unsigned int flags)
2097 {
2098 WARN_ON_ONCE(!dm_tio_is_normal(&io->tio));
2099
2100 /* don't poll if the mapped io is done */
2101 if (atomic_read(&io->io_count) > 1)
2102 bio_poll(&io->tio.clone, iob, flags);
2103
2104 /* bio_poll holds the last reference */
2105 return atomic_read(&io->io_count) == 1;
2106 }
2107
2108 static int dm_poll_bio(struct bio *bio, struct io_comp_batch *iob,
2109 unsigned int flags)
2110 {
2111 struct dm_io **head = dm_poll_list_head(bio);
2112 struct dm_io *list = *head;
2113 struct dm_io *tmp = NULL;
2114 struct dm_io *curr, *next;
2115
2116 /* Only poll normal bio which was marked as REQ_DM_POLL_LIST */
2117 if (!(bio->bi_opf & REQ_DM_POLL_LIST))
2118 return 0;
2119
2120 WARN_ON_ONCE(!list);
2121
2122 /*
2123 * Restore .bi_private before possibly completing dm_io.
2124 *
2125 * bio_poll() is only possible once @bio has been completely
2126 * submitted via submit_bio_noacct()'s depth-first submission.
2127 * So there is no dm_queue_poll_io() race associated with
2128 * clearing REQ_DM_POLL_LIST here.
2129 */
2130 bio->bi_opf &= ~REQ_DM_POLL_LIST;
2131 bio->bi_private = list->data;
2132
2133 for (curr = list, next = curr->next; curr; curr = next, next =
2134 curr ? curr->next : NULL) {
2135 if (dm_poll_dm_io(curr, iob, flags)) {
2136 /*
2137 * clone_endio() has already occurred, so no
2138 * error handling is needed here.
2139 */
2140 __dm_io_dec_pending(curr);
2141 } else {
2142 curr->next = tmp;
2143 tmp = curr;
2144 }
2145 }
2146
2147 /* Not done? */
2148 if (tmp) {
2149 bio->bi_opf |= REQ_DM_POLL_LIST;
2150 /* Reset bio->bi_private to dm_io list head */
2151 *head = tmp;
2152 return 0;
2153 }
2154 return 1;
2155 }
2156
2157 /*
2158 *---------------------------------------------------------------
2159 * An IDR is used to keep track of allocated minor numbers.
2160 *---------------------------------------------------------------
2161 */
2162 static void free_minor(int minor)
2163 {
2164 spin_lock(&_minor_lock);
2165 idr_remove(&_minor_idr, minor);
2166 spin_unlock(&_minor_lock);
2167 }
2168
2169 /*
2170 * See if the device with a specific minor # is free.
2171 */
2172 static int specific_minor(int minor)
2173 {
2174 int r;
2175
2176 if (minor >= (1 << MINORBITS))
2177 return -EINVAL;
2178
2179 idr_preload(GFP_KERNEL);
2180 spin_lock(&_minor_lock);
2181
2182 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
2183
2184 spin_unlock(&_minor_lock);
2185 idr_preload_end();
2186 if (r < 0)
2187 return r == -ENOSPC ? -EBUSY : r;
2188 return 0;
2189 }
2190
2191 static int next_free_minor(int *minor)
2192 {
2193 int r;
2194
2195 idr_preload(GFP_KERNEL);
2196 spin_lock(&_minor_lock);
2197
2198 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
2199
2200 spin_unlock(&_minor_lock);
2201 idr_preload_end();
2202 if (r < 0)
2203 return r;
2204 *minor = r;
2205 return 0;
2206 }
2207
2208 static const struct block_device_operations dm_blk_dops;
2209 static const struct block_device_operations dm_rq_blk_dops;
2210 static const struct dax_operations dm_dax_ops;
2211
2212 static void dm_wq_work(struct work_struct *work);
2213
2214 #ifdef CONFIG_BLK_INLINE_ENCRYPTION
2215 static void dm_queue_destroy_crypto_profile(struct request_queue *q)
2216 {
2217 dm_destroy_crypto_profile(q->crypto_profile);
2218 }
2219
2220 #else /* CONFIG_BLK_INLINE_ENCRYPTION */
2221
2222 static inline void dm_queue_destroy_crypto_profile(struct request_queue *q)
2223 {
2224 }
2225 #endif /* !CONFIG_BLK_INLINE_ENCRYPTION */
2226
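/*
 * Undo what alloc_dev() set up. Each member is checked before being torn
 * down, so this is also safe to call on a partially initialised
 * mapped_device from alloc_dev()'s error path.
 */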
2227 static void cleanup_mapped_device(struct mapped_device *md)
2228 {
2229 if (md->wq)
2230 destroy_workqueue(md->wq);
2231 dm_free_md_mempools(md->mempools);
2232
2233 if (md->dax_dev) {
2234 dax_remove_host(md->disk);
2235 kill_dax(md->dax_dev);
2236 put_dax(md->dax_dev);
2237 md->dax_dev = NULL;
2238 }
2239
2240 if (md->disk) {
2241 spin_lock(&_minor_lock);
2242 md->disk->private_data = NULL;
2243 spin_unlock(&_minor_lock);
2244 if (dm_get_md_type(md) != DM_TYPE_NONE) {
2245 struct table_device *td;
2246
2247 dm_sysfs_exit(md);
2248 list_for_each_entry(td, &md->table_devices, list) {
2249 bd_unlink_disk_holder(td->dm_dev.bdev,
2250 md->disk);
2251 }
2252
2253 /*
2254 * Hold the lock to make sure del_gendisk() won't race
2255 * with open/close_table_device().
2256 */
2257 mutex_lock(&md->table_devices_lock);
2258 del_gendisk(md->disk);
2259 mutex_unlock(&md->table_devices_lock);
2260 }
2261 dm_queue_destroy_crypto_profile(md->queue);
2262 put_disk(md->disk);
2263 }
2264
2265 if (md->pending_io) {
2266 free_percpu(md->pending_io);
2267 md->pending_io = NULL;
2268 }
2269
2270 cleanup_srcu_struct(&md->io_barrier);
2271
2272 mutex_destroy(&md->suspend_lock);
2273 mutex_destroy(&md->type_lock);
2274 mutex_destroy(&md->table_devices_lock);
2275 mutex_destroy(&md->swap_bios_lock);
2276
2277 dm_mq_cleanup_mapped_device(md);
2278 }
2279
2280 /*
2281 * Allocate and initialise a blank device with a given minor.
2282 */
2283 static struct mapped_device *alloc_dev(int minor)
2284 {
2285 int r, numa_node_id = dm_get_numa_node();
2286 struct dax_device *dax_dev;
2287 struct mapped_device *md;
2288 void *old_md;
2289
2290 md = kvzalloc_node(sizeof(*md), GFP_KERNEL, numa_node_id);
2291 if (!md) {
2292 DMERR("unable to allocate device, out of memory.");
2293 return NULL;
2294 }
2295
2296 if (!try_module_get(THIS_MODULE))
2297 goto bad_module_get;
2298
2299 /* get a minor number for the dev */
2300 if (minor == DM_ANY_MINOR)
2301 r = next_free_minor(&minor);
2302 else
2303 r = specific_minor(minor);
2304 if (r < 0)
2305 goto bad_minor;
2306
2307 r = init_srcu_struct(&md->io_barrier);
2308 if (r < 0)
2309 goto bad_io_barrier;
2310
2311 md->numa_node_id = numa_node_id;
2312 md->init_tio_pdu = false;
2313 md->type = DM_TYPE_NONE;
2314 mutex_init(&md->suspend_lock);
2315 mutex_init(&md->type_lock);
2316 mutex_init(&md->table_devices_lock);
2317 spin_lock_init(&md->deferred_lock);
2318 atomic_set(&md->holders, 1);
2319 atomic_set(&md->open_count, 0);
2320 atomic_set(&md->event_nr, 0);
2321 atomic_set(&md->uevent_seq, 0);
2322 INIT_LIST_HEAD(&md->uevent_list);
2323 INIT_LIST_HEAD(&md->table_devices);
2324 spin_lock_init(&md->uevent_lock);
2325
2326 /*
2327 * Default to bio-based until a DM table is loaded and md->type is
2328 * established. If a request-based table is loaded, blk-mq will
2329 * override accordingly.
2330 */
2331 md->disk = blk_alloc_disk(NULL, md->numa_node_id);
2332 if (IS_ERR(md->disk)) {
2333 md->disk = NULL;
2334 goto bad;
2335 }
2336 md->queue = md->disk->queue;
2337
2338 init_waitqueue_head(&md->wait);
2339 INIT_WORK(&md->work, dm_wq_work);
2340 INIT_WORK(&md->requeue_work, dm_wq_requeue_work);
2341 init_waitqueue_head(&md->eventq);
2342 init_completion(&md->kobj_holder.completion);
2343
2344 md->requeue_list = NULL;
2345 md->swap_bios = get_swap_bios();
2346 sema_init(&md->swap_bios_semaphore, md->swap_bios);
2347 mutex_init(&md->swap_bios_lock);
2348
2349 md->disk->major = _major;
2350 md->disk->first_minor = minor;
2351 md->disk->minors = 1;
2352 md->disk->flags |= GENHD_FL_NO_PART;
2353 md->disk->fops = &dm_blk_dops;
2354 md->disk->private_data = md;
2355 sprintf(md->disk->disk_name, "dm-%d", minor);
2356
2357 dax_dev = alloc_dax(md, &dm_dax_ops);
2358 if (IS_ERR(dax_dev)) {
2359 if (PTR_ERR(dax_dev) != -EOPNOTSUPP)
2360 goto bad;
2361 } else {
2362 set_dax_nocache(dax_dev);
2363 set_dax_nomc(dax_dev);
2364 md->dax_dev = dax_dev;
2365 if (dax_add_host(dax_dev, md->disk))
2366 goto bad;
2367 }
2368
2369 format_dev_t(md->name, MKDEV(_major, minor));
2370
2371 md->wq = alloc_workqueue("kdmflush/%s", WQ_MEM_RECLAIM | WQ_PERCPU, 0,
2372 md->name);
2373 if (!md->wq)
2374 goto bad;
2375
2376 md->pending_io = alloc_percpu(unsigned long);
2377 if (!md->pending_io)
2378 goto bad;
2379
2380 r = dm_stats_init(&md->stats);
2381 if (r < 0)
2382 goto bad;
2383
2384 /* Populate the mapping, nobody knows we exist yet */
2385 spin_lock(&_minor_lock);
2386 old_md = idr_replace(&_minor_idr, md, minor);
2387 spin_unlock(&_minor_lock);
2388
2389 BUG_ON(old_md != MINOR_ALLOCED);
2390
2391 return md;
2392
2393 bad:
2394 cleanup_mapped_device(md);
2395 bad_io_barrier:
2396 free_minor(minor);
2397 bad_minor:
2398 module_put(THIS_MODULE);
2399 bad_module_get:
2400 kvfree(md);
2401 return NULL;
2402 }
2403
2404 static void unlock_fs(struct mapped_device *md);
2405
2406 static void free_dev(struct mapped_device *md)
2407 {
2408 int minor = MINOR(disk_devt(md->disk));
2409
2410 unlock_fs(md);
2411
2412 cleanup_mapped_device(md);
2413
2414 WARN_ON_ONCE(!list_empty(&md->table_devices));
2415 dm_stats_cleanup(&md->stats);
2416 free_minor(minor);
2417
2418 module_put(THIS_MODULE);
2419 kvfree(md);
2420 }
2421
2422 /*
2423 * Deliver queued uevents and notify waiters that a table event occurred.
2424 */
2425 static void event_callback(void *context)
2426 {
2427 unsigned long flags;
2428 LIST_HEAD(uevents);
2429 struct mapped_device *md = context;
2430
2431 spin_lock_irqsave(&md->uevent_lock, flags);
2432 list_splice_init(&md->uevent_list, &uevents);
2433 spin_unlock_irqrestore(&md->uevent_lock, flags);
2434
2435 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
2436
2437 atomic_inc(&md->event_nr);
2438 wake_up(&md->eventq);
2439 dm_issue_global_event();
2440 }
2441
2442 /*
2443 * Bind a table to the device. Returns the old map, which the caller must destroy.
2444 */
2445 static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2446 struct queue_limits *limits)
2447 {
2448 struct dm_table *old_map;
2449 sector_t size, old_size;
2450
2451 lockdep_assert_held(&md->suspend_lock);
2452
2453 size = dm_table_get_size(t);
2454
2455 old_size = dm_get_size(md);
2456
2457 if (!dm_table_supports_size_change(t, old_size, size)) {
2458 old_map = ERR_PTR(-EINVAL);
2459 goto out;
2460 }
2461
2462 set_capacity(md->disk, size);
2463
2464 if (limits) {
2465 int ret = dm_table_set_restrictions(t, md->queue, limits);
2466 if (ret) {
2467 set_capacity(md->disk, old_size);
2468 old_map = ERR_PTR(ret);
2469 goto out;
2470 }
2471 }
2472
2473 /*
2474 * Wipe any geometry if the size of the table changed.
2475 */
2476 if (size != old_size)
2477 memset(&md->geometry, 0, sizeof(md->geometry));
2478
2479 dm_table_event_callback(t, event_callback, md);
2480
2481 if (dm_table_request_based(t)) {
2482 /*
2483 * Leverage the fact that request-based DM targets are
2484 * immutable singletons - used to optimize dm_mq_queue_rq.
2485 */
2486 md->immutable_target = dm_table_get_immutable_target(t);
2487
2488 /*
2489 * There is no need to reload with request-based dm because the
2490 * size of front_pad doesn't change.
2491 *
2492 * Note for future: if you reload the bioset, prepped
2493 * requests in the queue may refer to bios from the old bioset,
2494 * so you must walk through the queue to unprep them.
2495 */
2496 if (!md->mempools)
2497 md->mempools = t->mempools;
2498 else
2499 dm_free_md_mempools(t->mempools);
2500 } else {
2501 /*
2502 * The md may already have mempools that need changing.
2503 * If so, reload bioset because front_pad may have changed
2504 * because a different table was loaded.
2505 */
2506 dm_free_md_mempools(md->mempools);
2507 md->mempools = t->mempools;
2508 }
2509 t->mempools = NULL;
2510
2511 old_map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
2512 rcu_assign_pointer(md->map, (void *)t);
2513 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2514
2515 if (old_map)
2516 dm_sync_table(md);
2517 out:
2518 return old_map;
2519 }
2520
2521 /*
2522 * Returns unbound table for the caller to free.
2523 */
2524 static struct dm_table *__unbind(struct mapped_device *md)
2525 {
2526 struct dm_table *map = rcu_dereference_protected(md->map, 1);
2527
2528 if (!map)
2529 return NULL;
2530
2531 dm_table_event_callback(map, NULL, NULL);
2532 RCU_INIT_POINTER(md->map, NULL);
2533 dm_sync_table(md);
2534
2535 return map;
2536 }
2537
2538 /*
2539 * Constructor for a new device.
2540 */
2541 int dm_create(int minor, struct mapped_device **result)
2542 {
2543 struct mapped_device *md;
2544
2545 md = alloc_dev(minor);
2546 if (!md)
2547 return -ENXIO;
2548
2549 dm_ima_reset_data(md);
2550
2551 *result = md;
2552 return 0;
2553 }
2554
2555 /*
2556 * Functions to manage md->type.
2557 * All are required to hold md->type_lock.
2558 */
2559 void dm_lock_md_type(struct mapped_device *md)
2560 {
2561 mutex_lock(&md->type_lock);
2562 }
2563
2564 void dm_unlock_md_type(struct mapped_device *md)
2565 {
2566 mutex_unlock(&md->type_lock);
2567 }
2568
2569 enum dm_queue_mode dm_get_md_type(struct mapped_device *md)
2570 {
2571 return md->type;
2572 }
2573
2574 struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2575 {
2576 return md->immutable_target_type;
2577 }
2578
2579 /*
2580 * Set up the DM device's queue based on md's type.
2581 */
2582 int dm_setup_md_queue(struct mapped_device *md, struct dm_table *t)
2583 {
2584 enum dm_queue_mode type = dm_table_get_type(t);
2585 struct queue_limits limits;
2586 struct table_device *td;
2587 int r;
2588
2589 WARN_ON_ONCE(type == DM_TYPE_NONE);
2590
2591 if (type == DM_TYPE_REQUEST_BASED) {
2592 md->disk->fops = &dm_rq_blk_dops;
2593 r = dm_mq_init_request_queue(md, t);
2594 if (r) {
2595 DMERR("Cannot initialize queue for request-based dm mapped device");
2596 return r;
2597 }
2598 }
2599
2600 r = dm_calculate_queue_limits(t, &limits);
2601 if (r) {
2602 DMERR("Cannot calculate initial queue limits");
2603 return r;
2604 }
2605 r = dm_table_set_restrictions(t, md->queue, &limits);
2606 if (r)
2607 return r;
2608
2609 /*
2610 * Hold the lock to make sure add_disk() and del_gendisk() won't race
2611 * with open_table_device() and close_table_device().
2612 */
2613 mutex_lock(&md->table_devices_lock);
2614 r = add_disk(md->disk);
2615 mutex_unlock(&md->table_devices_lock);
2616 if (r)
2617 return r;
2618
2619 /*
2620 * Register the holder relationship for devices added before the disk
2621 * was live.
2622 */
2623 list_for_each_entry(td, &md->table_devices, list) {
2624 r = bd_link_disk_holder(td->dm_dev.bdev, md->disk);
2625 if (r)
2626 goto out_undo_holders;
2627 }
2628
2629 r = dm_sysfs_init(md);
2630 if (r)
2631 goto out_undo_holders;
2632
2633 md->type = type;
2634 return 0;
2635
2636 out_undo_holders:
2637 list_for_each_entry_continue_reverse(td, &md->table_devices, list)
2638 bd_unlink_disk_holder(td->dm_dev.bdev, md->disk);
2639 mutex_lock(&md->table_devices_lock);
2640 del_gendisk(md->disk);
2641 mutex_unlock(&md->table_devices_lock);
2642 return r;
2643 }
2644
2645 struct mapped_device *dm_get_md(dev_t dev)
2646 {
2647 struct mapped_device *md;
2648 unsigned int minor = MINOR(dev);
2649
2650 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2651 return NULL;
2652
2653 spin_lock(&_minor_lock);
2654
2655 md = idr_find(&_minor_idr, minor);
2656 if (!md || md == MINOR_ALLOCED || (MINOR(disk_devt(dm_disk(md))) != minor) ||
2657 test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
2658 md = NULL;
2659 goto out;
2660 }
2661 dm_get(md);
2662 out:
2663 spin_unlock(&_minor_lock);
2664
2665 return md;
2666 }
2667 EXPORT_SYMBOL_GPL(dm_get_md);
2668
2669 void *dm_get_mdptr(struct mapped_device *md)
2670 {
2671 return md->interface_ptr;
2672 }
2673
2674 void dm_set_mdptr(struct mapped_device *md, void *ptr)
2675 {
2676 md->interface_ptr = ptr;
2677 }
2678
2679 void dm_get(struct mapped_device *md)
2680 {
2681 atomic_inc(&md->holders);
2682 BUG_ON(test_bit(DMF_FREEING, &md->flags));
2683 }
2684
2685 int dm_hold(struct mapped_device *md)
2686 {
2687 spin_lock(&_minor_lock);
2688 if (test_bit(DMF_FREEING, &md->flags)) {
2689 spin_unlock(&_minor_lock);
2690 return -EBUSY;
2691 }
2692 dm_get(md);
2693 spin_unlock(&_minor_lock);
2694 return 0;
2695 }
2696 EXPORT_SYMBOL_GPL(dm_hold);
2697
2698 const char *dm_device_name(struct mapped_device *md)
2699 {
2700 return md->name;
2701 }
2702 EXPORT_SYMBOL_GPL(dm_device_name);
2703
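/*
 * Common teardown for dm_destroy() and dm_destroy_immediate(): mark the
 * device as DMF_FREEING, suspend the targets if needed, optionally wait
 * for the remaining holders to drop their references, then unbind the
 * table and free the device.
 */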
2704 static void __dm_destroy(struct mapped_device *md, bool wait)
2705 {
2706 struct dm_table *map;
2707 int srcu_idx;
2708
2709 might_sleep();
2710
2711 spin_lock(&_minor_lock);
2712 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2713 set_bit(DMF_FREEING, &md->flags);
2714 spin_unlock(&_minor_lock);
2715
2716 blk_mark_disk_dead(md->disk);
2717
2718 /*
2719 * Take suspend_lock so that presuspend and postsuspend methods
2720 * do not race with internal suspend.
2721 */
2722 mutex_lock(&md->suspend_lock);
2723 map = dm_get_live_table(md, &srcu_idx);
2724 if (!dm_suspended_md(md)) {
2725 dm_table_presuspend_targets(map);
2726 set_bit(DMF_SUSPENDED, &md->flags);
2727 set_bit(DMF_POST_SUSPENDING, &md->flags);
2728 dm_table_postsuspend_targets(map);
2729 }
2730 /* dm_put_live_table must be before fsleep, otherwise deadlock is possible */
2731 dm_put_live_table(md, srcu_idx);
2732 mutex_unlock(&md->suspend_lock);
2733
2734 /*
2735 * Rare, but there may be I/O requests still in flight that have
2736 * yet to complete. Wait for all references to disappear.
2737 * No one should increment the reference count of the mapped_device
2738 * after its state becomes DMF_FREEING.
2739 */
2740 if (wait)
2741 while (atomic_read(&md->holders))
2742 fsleep(1000);
2743 else if (atomic_read(&md->holders))
2744 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2745 dm_device_name(md), atomic_read(&md->holders));
2746
2747 dm_table_destroy(__unbind(md));
2748 free_dev(md);
2749 }
2750
2751 void dm_destroy(struct mapped_device *md)
2752 {
2753 __dm_destroy(md, true);
2754 }
2755
2756 void dm_destroy_immediate(struct mapped_device *md)
2757 {
2758 __dm_destroy(md, false);
2759 }
2760
2761 void dm_put(struct mapped_device *md)
2762 {
2763 atomic_dec(&md->holders);
2764 }
2765 EXPORT_SYMBOL_GPL(dm_put);
2766
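/*
 * Sum the per-CPU in-flight counters; a non-zero total means bios are
 * still being processed by this mapped device.
 */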
2767 static bool dm_in_flight_bios(struct mapped_device *md)
2768 {
2769 int cpu;
2770 unsigned long sum = 0;
2771
2772 for_each_possible_cpu(cpu)
2773 sum += *per_cpu_ptr(md->pending_io, cpu);
2774
2775 return sum != 0;
2776 }
2777
2778 static int dm_wait_for_bios_completion(struct mapped_device *md, unsigned int task_state)
2779 {
2780 int r = 0;
2781 DEFINE_WAIT(wait);
2782
2783 while (true) {
2784 prepare_to_wait(&md->wait, &wait, task_state);
2785
2786 if (!dm_in_flight_bios(md))
2787 break;
2788
2789 if (signal_pending_state(task_state, current)) {
2790 r = -ERESTARTSYS;
2791 break;
2792 }
2793
2794 io_schedule();
2795 }
2796 finish_wait(&md->wait, &wait);
2797
2798 smp_rmb();
2799
2800 return r;
2801 }
2802
2803 static int dm_wait_for_completion(struct mapped_device *md, unsigned int task_state)
2804 {
2805 int r = 0;
2806
2807 if (!queue_is_mq(md->queue))
2808 return dm_wait_for_bios_completion(md, task_state);
2809
2810 while (true) {
2811 if (!blk_mq_queue_inflight(md->queue))
2812 break;
2813
2814 if (signal_pending_state(task_state, current)) {
2815 r = -ERESTARTSYS;
2816 break;
2817 }
2818
2819 fsleep(5000);
2820 }
2821
2822 return r;
2823 }
2824
2825 /*
2826 * Process the deferred bios
2827 */
2828 static void dm_wq_work(struct work_struct *work)
2829 {
2830 struct mapped_device *md = container_of(work, struct mapped_device, work);
2831 struct bio *bio;
2832
2833 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
2834 spin_lock_irq(&md->deferred_lock);
2835 bio = bio_list_pop(&md->deferred);
2836 spin_unlock_irq(&md->deferred_lock);
2837
2838 if (!bio)
2839 break;
2840
2841 submit_bio_noacct(bio);
2842 cond_resched();
2843 }
2844 }
2845
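/*
 * Re-enable bio submission after a suspend and kick the worker so that
 * bios deferred while suspended get resubmitted.
 */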
2846 static void dm_queue_flush(struct mapped_device *md)
2847 {
2848 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2849 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2850 smp_mb__after_atomic();
2851 queue_work(md->wq, &md->work);
2852 }
2853
2854 /*
2855 * Swap in a new table, returning the old one for the caller to destroy.
2856 */
2857 struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
2858 {
2859 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
2860 struct queue_limits limits;
2861 bool update_limits = true;
2862 int r;
2863
2864 mutex_lock(&md->suspend_lock);
2865
2866 /* device must be suspended */
2867 if (!dm_suspended_md(md))
2868 goto out;
2869
2870 /*
2871 * To avoid a potential deadlock locking the queue limits, do not
2872 * update the queue limits when swapping the table of an immutable
2873 * request-based dm device (dm-multipath) during a noflush suspend.
2874 * It is userspace's responsibility to make sure that the new table
2875 * uses the same limits as the existing table if it asks for a
2876 * noflush suspend.
2877 */
2878 if (dm_request_based(md) && md->immutable_target &&
2879 __noflush_suspending(md))
2880 update_limits = false;
2881 /*
2882 * If the new table has no data devices, retain the existing limits.
2883 * This helps multipath with queue_if_no_path: if all paths disappear,
2884 * new I/O is queued based on these limits until some paths
2885 * reappear.
2886 */
2887 else if (dm_table_has_no_data_devices(table)) {
2888 live_map = dm_get_live_table_fast(md);
2889 if (live_map)
2890 limits = md->queue->limits;
2891 dm_put_live_table_fast(md);
2892 }
2893
2894 if (update_limits && !live_map) {
2895 r = dm_calculate_queue_limits(table, &limits);
2896 if (r) {
2897 map = ERR_PTR(r);
2898 goto out;
2899 }
2900 }
2901
2902 map = __bind(md, table, update_limits ? &limits : NULL);
2903 dm_issue_global_event();
2904
2905 out:
2906 mutex_unlock(&md->suspend_lock);
2907 return map;
2908 }
2909
2910 /*
2911 * Functions to lock and unlock any filesystem running on the
2912 * device.
2913 */
2914 static int lock_fs(struct mapped_device *md)
2915 {
2916 int r;
2917
2918 WARN_ON(test_bit(DMF_FROZEN, &md->flags));
2919
2920 r = bdev_freeze(md->disk->part0);
2921 if (!r)
2922 set_bit(DMF_FROZEN, &md->flags);
2923 return r;
2924 }
2925
2926 static void unlock_fs(struct mapped_device *md)
2927 {
2928 if (!test_bit(DMF_FROZEN, &md->flags))
2929 return;
2930 bdev_thaw(md->disk->part0);
2931 clear_bit(DMF_FROZEN, &md->flags);
2932 }
2933
2934 /*
2935 * @suspend_flags: DM_SUSPEND_LOCKFS_FLAG and/or DM_SUSPEND_NOFLUSH_FLAG
2936 * @task_state: e.g. TASK_INTERRUPTIBLE or TASK_UNINTERRUPTIBLE
2937 * @dmf_suspended_flag: DMF_SUSPENDED or DMF_SUSPENDED_INTERNALLY
2938 *
2939 * If __dm_suspend returns 0, the device is completely quiescent
2940 * now. There is no request-processing activity. All new requests
2941 * are being added to md->deferred list.
2942 */
2943 static int __dm_suspend(struct mapped_device *md, struct dm_table *map,
2944 unsigned int suspend_flags, unsigned int task_state,
2945 int dmf_suspended_flag)
2946 {
2947 bool do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG;
2948 bool noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG;
2949 int r = 0;
2950
2951 lockdep_assert_held(&md->suspend_lock);
2952
2953 /*
2954 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2955 */
2956 if (noflush)
2957 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2958 else
2959 DMDEBUG("%s: suspending with flush", dm_device_name(md));
2960
2961 /*
2962 * This gets reverted if there's an error later and the targets
2963 * provide the .presuspend_undo hook.
2964 */
2965 dm_table_presuspend_targets(map);
2966
2967 /*
2968 * Flush I/O to the device.
2969 * Any I/O submitted after lock_fs() may not be flushed.
2970 * noflush takes precedence over do_lockfs.
2971 * (lock_fs() flushes I/Os and waits for them to complete.)
2972 */
2973 if (!noflush && do_lockfs) {
2974 r = lock_fs(md);
2975 if (r) {
2976 dm_table_presuspend_undo_targets(map);
2977 return r;
2978 }
2979 }
2980
2981 /*
2982 * Here we must make sure that no processes are submitting requests
2983 * to target drivers i.e. no one may be executing
2984 * dm_split_and_process_bio from dm_submit_bio.
2985 *
2986 * To get all processes out of dm_split_and_process_bio in dm_submit_bio,
2987 * we take the write lock. To prevent any process from reentering
2988 * dm_split_and_process_bio from dm_submit_bio and quiesce the thread
2989 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2990 * flush_workqueue(md->wq).
2991 */
2992 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2993 if (map)
2994 synchronize_srcu(&md->io_barrier);
2995
2996 /*
2997 * Stop md->queue before flushing md->wq in case request-based
2998 * dm defers requests to md->wq from md->queue.
2999 */
3000 if (map && dm_request_based(md)) {
3001 dm_stop_queue(md->queue);
3002 set_bit(DMF_QUEUE_STOPPED, &md->flags);
3003 }
3004
3005 flush_workqueue(md->wq);
3006
3007 /*
3008 * At this point no more requests are entering target request routines.
3009 * We call dm_wait_for_completion to wait for all existing requests
3010 * to finish.
3011 */
3012 if (map)
3013 r = dm_wait_for_completion(md, task_state);
3014 if (!r)
3015 set_bit(dmf_suspended_flag, &md->flags);
3016
3017 if (map)
3018 synchronize_srcu(&md->io_barrier);
3019
3020 /* were we interrupted? */
3021 if (r < 0) {
3022 dm_queue_flush(md);
3023
3024 if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
3025 dm_start_queue(md->queue);
3026
3027 unlock_fs(md);
3028 dm_table_presuspend_undo_targets(map);
3029 /* pushback list is already flushed, so skip flush */
3030 }
3031
3032 return r;
3033 }
3034
3035 /*
3036 * We need to be able to change a mapping table under a mounted
3037 * filesystem. For example we might want to move some data in
3038 * the background. Before the table can be swapped with
3039 * dm_bind_table, dm_suspend must be called to flush any in-flight
3040 * bios and ensure that any further I/O gets deferred.
3041 */
3042 /*
3043 * Suspend mechanism in request-based dm.
3044 *
3045 * 1. Flush all I/Os by lock_fs() if needed.
3046 * 2. Stop dispatching any I/O by stopping the request_queue.
3047 * 3. Wait for all in-flight I/Os to be completed or requeued.
3048 *
3049 * To abort suspend, start the request_queue.
3050 */
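/*
 * Illustrative caller sequence for swapping in a new table (a sketch,
 * not the exact ioctl-path code):
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	if (!IS_ERR(old_map))
 *		dm_table_destroy(old_map);
 *	dm_resume(md);
 */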
3051 int dm_suspend(struct mapped_device *md, unsigned int suspend_flags)
3052 {
3053 struct dm_table *map = NULL;
3054 int r = 0;
3055
3056 retry:
3057 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3058
3059 if (dm_suspended_md(md)) {
3060 r = -EINVAL;
3061 goto out_unlock;
3062 }
3063
3064 if (dm_suspended_internally_md(md)) {
3065 /* already internally suspended, wait for internal resume */
3066 mutex_unlock(&md->suspend_lock);
3067 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3068 if (r)
3069 return r;
3070 goto retry;
3071 }
3072
3073 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3074 if (!map) {
3075 /* avoid deadlock with fs/namespace.c:do_mount() */
3076 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
3077 }
3078
3079 r = __dm_suspend(md, map, suspend_flags, TASK_INTERRUPTIBLE, DMF_SUSPENDED);
3080 if (r)
3081 goto out_unlock;
3082
3083 set_bit(DMF_POST_SUSPENDING, &md->flags);
3084 dm_table_postsuspend_targets(map);
3085 clear_bit(DMF_POST_SUSPENDING, &md->flags);
3086
3087 out_unlock:
3088 mutex_unlock(&md->suspend_lock);
3089 return r;
3090 }
3091
3092 static int __dm_resume(struct mapped_device *md, struct dm_table *map)
3093 {
3094 if (map) {
3095 int r = dm_table_resume_targets(map);
3096
3097 if (r)
3098 return r;
3099 }
3100
3101 dm_queue_flush(md);
3102
3103 /*
3104 * Flushing deferred I/Os must be done after targets are resumed
3105 * so that mapping of targets can work correctly.
3106 * Request-based dm is queueing the deferred I/Os in its request_queue.
3107 */
3108 if (test_and_clear_bit(DMF_QUEUE_STOPPED, &md->flags))
3109 dm_start_queue(md->queue);
3110
3111 unlock_fs(md);
3112
3113 return 0;
3114 }
3115
3116 int dm_resume(struct mapped_device *md)
3117 {
3118 int r;
3119 struct dm_table *map = NULL;
3120
3121 retry:
3122 r = -EINVAL;
3123 mutex_lock_nested(&md->suspend_lock, SINGLE_DEPTH_NESTING);
3124
3125 if (!dm_suspended_md(md))
3126 goto out;
3127
3128 if (dm_suspended_internally_md(md)) {
3129 /* already internally suspended, wait for internal resume */
3130 mutex_unlock(&md->suspend_lock);
3131 r = wait_on_bit(&md->flags, DMF_SUSPENDED_INTERNALLY, TASK_INTERRUPTIBLE);
3132 if (r)
3133 return r;
3134 goto retry;
3135 }
3136
3137 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3138 if (!map || !dm_table_get_size(map))
3139 goto out;
3140
3141 r = __dm_resume(md, map);
3142 if (r)
3143 goto out;
3144
3145 clear_bit(DMF_SUSPENDED, &md->flags);
3146 out:
3147 mutex_unlock(&md->suspend_lock);
3148
3149 return r;
3150 }
3151
3152 /*
3153 * Internal suspend/resume works like userspace-driven suspend. It waits
3154 * until all bios finish and prevents issuing new bios to the target drivers.
3155 * It may be used only from the kernel.
3156 */
3157
3158 static void __dm_internal_suspend(struct mapped_device *md, unsigned int suspend_flags)
3159 {
3160 struct dm_table *map = NULL;
3161
3162 lockdep_assert_held(&md->suspend_lock);
3163
3164 if (md->internal_suspend_count++)
3165 return; /* nested internal suspend */
3166
3167 if (dm_suspended_md(md)) {
3168 set_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3169 return; /* nest suspend */
3170 }
3171
3172 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3173
3174 /*
3175 * Using TASK_UNINTERRUPTIBLE because only NOFLUSH internal suspend is
3176 * supported. Properly supporting a TASK_INTERRUPTIBLE internal suspend
3177 * would require changing .presuspend to return an error -- avoid this
3178 * until there is a need for more elaborate variants of internal suspend.
3179 */
3180 (void) __dm_suspend(md, map, suspend_flags, TASK_UNINTERRUPTIBLE,
3181 DMF_SUSPENDED_INTERNALLY);
3182
3183 set_bit(DMF_POST_SUSPENDING, &md->flags);
3184 dm_table_postsuspend_targets(map);
3185 clear_bit(DMF_POST_SUSPENDING, &md->flags);
3186 }
3187
3188 static void __dm_internal_resume(struct mapped_device *md)
3189 {
3190 int r;
3191 struct dm_table *map;
3192
3193 BUG_ON(!md->internal_suspend_count);
3194
3195 if (--md->internal_suspend_count)
3196 return; /* resume from nested internal suspend */
3197
3198 if (dm_suspended_md(md))
3199 goto done; /* resume from nested suspend */
3200
3201 map = rcu_dereference_protected(md->map, lockdep_is_held(&md->suspend_lock));
3202 r = __dm_resume(md, map);
3203 if (r) {
3204 /*
3205 * If a preresume method of some target failed, we are in a
3206 * tricky situation. We can't return an error to the caller. We
3207 * can't fake success because then the "resume" and
3208 * "postsuspend" methods would not be paired correctly, and it
3209 * would break various targets, for example it would cause list
3210 * corruption in the "origin" target.
3211 *
3212 * So, we fake normal suspend here, to make sure that the
3213 * "resume" and "postsuspend" methods will be paired correctly.
3214 */
3215 DMERR("Preresume method failed: %d", r);
3216 set_bit(DMF_SUSPENDED, &md->flags);
3217 }
3218 done:
3219 clear_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3220 smp_mb__after_atomic();
3221 wake_up_bit(&md->flags, DMF_SUSPENDED_INTERNALLY);
3222 }
3223
3224 void dm_internal_suspend_noflush(struct mapped_device *md)
3225 {
3226 mutex_lock(&md->suspend_lock);
3227 __dm_internal_suspend(md, DM_SUSPEND_NOFLUSH_FLAG);
3228 mutex_unlock(&md->suspend_lock);
3229 }
3230 EXPORT_SYMBOL_GPL(dm_internal_suspend_noflush);
3231
3232 void dm_internal_resume(struct mapped_device *md)
3233 {
3234 mutex_lock(&md->suspend_lock);
3235 __dm_internal_resume(md);
3236 mutex_unlock(&md->suspend_lock);
3237 }
3238 EXPORT_SYMBOL_GPL(dm_internal_resume);
3239
3240 /*
3241 * Fast variants of internal suspend/resume hold md->suspend_lock,
3242 * which prevents interaction with userspace-driven suspend.
3243 */
3244
3245 void dm_internal_suspend_fast(struct mapped_device *md)
3246 {
3247 mutex_lock(&md->suspend_lock);
3248 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3249 return;
3250
3251 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
3252 synchronize_srcu(&md->io_barrier);
3253 flush_workqueue(md->wq);
3254 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
3255 }
3256 EXPORT_SYMBOL_GPL(dm_internal_suspend_fast);
3257
3258 void dm_internal_resume_fast(struct mapped_device *md)
3259 {
3260 if (dm_suspended_md(md) || dm_suspended_internally_md(md))
3261 goto done;
3262
3263 dm_queue_flush(md);
3264
3265 done:
3266 mutex_unlock(&md->suspend_lock);
3267 }
3268 EXPORT_SYMBOL_GPL(dm_internal_resume_fast);
3269
3270 /*
3271 *---------------------------------------------------------------
3272 * Event notification.
3273 *---------------------------------------------------------------
3274 */
3275 int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
3276 unsigned int cookie, bool need_resize_uevent)
3277 {
3278 int r;
3279 unsigned int noio_flag;
3280 char udev_cookie[DM_COOKIE_LENGTH];
3281 char *envp[3] = { NULL, NULL, NULL };
3282 char **envpp = envp;
3283 if (cookie) {
3284 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
3285 DM_COOKIE_ENV_VAR_NAME, cookie);
3286 *envpp++ = udev_cookie;
3287 }
3288 if (need_resize_uevent) {
3289 *envpp++ = "RESIZE=1";
3290 }
3291
3292 noio_flag = memalloc_noio_save();
3293
3294 r = kobject_uevent_env(&disk_to_dev(md->disk)->kobj, action, envp);
3295
3296 memalloc_noio_restore(noio_flag);
3297
3298 return r;
3299 }
3300
3301 uint32_t dm_next_uevent_seq(struct mapped_device *md)
3302 {
3303 return atomic_add_return(1, &md->uevent_seq);
3304 }
3305
3306 uint32_t dm_get_event_nr(struct mapped_device *md)
3307 {
3308 return atomic_read(&md->event_nr);
3309 }
3310
3311 int dm_wait_event(struct mapped_device *md, int event_nr)
3312 {
3313 return wait_event_interruptible(md->eventq,
3314 (event_nr != atomic_read(&md->event_nr)));
3315 }
3316
3317 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
3318 {
3319 unsigned long flags;
3320
3321 spin_lock_irqsave(&md->uevent_lock, flags);
3322 list_add(elist, &md->uevent_list);
3323 spin_unlock_irqrestore(&md->uevent_lock, flags);
3324 }
3325
3326 /*
3327 * The gendisk is only valid as long as you have a reference
3328 * count on 'md'.
3329 */
3330 struct gendisk *dm_disk(struct mapped_device *md)
3331 {
3332 return md->disk;
3333 }
3334 EXPORT_SYMBOL_GPL(dm_disk);
3335
3336 struct kobject *dm_kobject(struct mapped_device *md)
3337 {
3338 return &md->kobj_holder.kobj;
3339 }
3340
3341 struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
3342 {
3343 struct mapped_device *md;
3344
3345 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
3346
3347 spin_lock(&_minor_lock);
3348 if (test_bit(DMF_FREEING, &md->flags) || dm_deleting_md(md)) {
3349 md = NULL;
3350 goto out;
3351 }
3352 dm_get(md);
3353 out:
3354 spin_unlock(&_minor_lock);
3355
3356 return md;
3357 }
3358
3359 int dm_suspended_md(struct mapped_device *md)
3360 {
3361 return test_bit(DMF_SUSPENDED, &md->flags);
3362 }
3363
3364 static int dm_post_suspending_md(struct mapped_device *md)
3365 {
3366 return test_bit(DMF_POST_SUSPENDING, &md->flags);
3367 }
3368
3369 int dm_suspended_internally_md(struct mapped_device *md)
3370 {
3371 return test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
3372 }
3373
3374 int dm_test_deferred_remove_flag(struct mapped_device *md)
3375 {
3376 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
3377 }
3378
3379 int dm_suspended(struct dm_target *ti)
3380 {
3381 return dm_suspended_md(ti->table->md);
3382 }
3383 EXPORT_SYMBOL_GPL(dm_suspended);
3384
3385 int dm_post_suspending(struct dm_target *ti)
3386 {
3387 return dm_post_suspending_md(ti->table->md);
3388 }
3389 EXPORT_SYMBOL_GPL(dm_post_suspending);
3390
3391 int dm_noflush_suspending(struct dm_target *ti)
3392 {
3393 return __noflush_suspending(ti->table->md);
3394 }
3395 EXPORT_SYMBOL_GPL(dm_noflush_suspending);
3396
3397 void dm_free_md_mempools(struct dm_md_mempools *pools)
3398 {
3399 if (!pools)
3400 return;
3401
3402 bioset_exit(&pools->bs);
3403 bioset_exit(&pools->io_bs);
3404
3405 kfree(pools);
3406 }
3407
3408 struct dm_blkdev_id {
3409 u8 *id;
3410 enum blk_unique_id type;
3411 };
3412
3413 static int __dm_get_unique_id(struct dm_target *ti, struct dm_dev *dev,
3414 sector_t start, sector_t len, void *data)
3415 {
3416 struct dm_blkdev_id *dm_id = data;
3417 const struct block_device_operations *fops = dev->bdev->bd_disk->fops;
3418
3419 if (!fops->get_unique_id)
3420 return 0;
3421
3422 return fops->get_unique_id(dev->bdev->bd_disk, dm_id->id, dm_id->type);
3423 }
3424
3425 /*
3426 * Allow access to get_unique_id() for the first device returning a
3427 * non-zero result. Reasonable use expects all devices to have the
3428 * same unique id.
3429 */
3430 static int dm_blk_get_unique_id(struct gendisk *disk, u8 *id,
3431 enum blk_unique_id type)
3432 {
3433 struct mapped_device *md = disk->private_data;
3434 struct dm_table *table;
3435 struct dm_target *ti;
3436 int ret = 0, srcu_idx;
3437
3438 struct dm_blkdev_id dm_id = {
3439 .id = id,
3440 .type = type,
3441 };
3442
3443 table = dm_get_live_table(md, &srcu_idx);
3444 if (!table || !dm_table_get_size(table))
3445 goto out;
3446
3447 /* We only support devices that have a single target */
3448 if (table->num_targets != 1)
3449 goto out;
3450 ti = dm_table_get_target(table, 0);
3451
3452 if (!ti->type->iterate_devices)
3453 goto out;
3454
3455 ret = ti->type->iterate_devices(ti, __dm_get_unique_id, &dm_id);
3456 out:
3457 dm_put_live_table(md, srcu_idx);
3458 return ret;
3459 }
3460
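/*
 * Context passed to the persistent-reservation iterate_devices callbacks
 * below; ->ret carries the per-device result back to the dm_pr_* caller.
 */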
3461 struct dm_pr {
3462 u64 old_key;
3463 u64 new_key;
3464 u32 flags;
3465 bool abort;
3466 bool fail_early;
3467 int ret;
3468 enum pr_type type;
3469 struct pr_keys *read_keys;
3470 struct pr_held_reservation *rsv;
3471 };
3472
3473 static int dm_call_pr(struct block_device *bdev, iterate_devices_callout_fn fn,
3474 struct dm_pr *pr)
3475 {
3476 struct mapped_device *md = bdev->bd_disk->private_data;
3477 struct dm_table *table;
3478 struct dm_target *ti;
3479 int ret = -ENOTTY, srcu_idx;
3480
3481 table = dm_get_live_table(md, &srcu_idx);
3482 if (!table || !dm_table_get_size(table))
3483 goto out;
3484
3485 /* We only support devices that have a single target */
3486 if (table->num_targets != 1)
3487 goto out;
3488 ti = dm_table_get_target(table, 0);
3489
3490 if (dm_suspended_md(md)) {
3491 ret = -EAGAIN;
3492 goto out;
3493 }
3494
3495 ret = -EINVAL;
3496 if (!ti->type->iterate_devices)
3497 goto out;
3498
3499 ti->type->iterate_devices(ti, fn, pr);
3500 ret = 0;
3501 out:
3502 dm_put_live_table(md, srcu_idx);
3503 return ret;
3504 }
3505
3506 /*
3507 * For register / unregister we need to manually call out to every path.
3508 */
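/*
 * Returning -1 from an iterate_devices callback is expected to stop the
 * iteration early; the real error is reported back through pr->ret.
 */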
3509 static int __dm_pr_register(struct dm_target *ti, struct dm_dev *dev,
3510 sector_t start, sector_t len, void *data)
3511 {
3512 struct dm_pr *pr = data;
3513 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3514 int ret;
3515
3516 if (!ops || !ops->pr_register) {
3517 pr->ret = -EOPNOTSUPP;
3518 return -1;
3519 }
3520
3521 ret = ops->pr_register(dev->bdev, pr->old_key, pr->new_key, pr->flags);
3522 if (!ret)
3523 return 0;
3524
3525 if (!pr->ret)
3526 pr->ret = ret;
3527
3528 if (pr->fail_early)
3529 return -1;
3530
3531 return 0;
3532 }
3533
3534 static int dm_pr_register(struct block_device *bdev, u64 old_key, u64 new_key,
3535 u32 flags)
3536 {
3537 struct dm_pr pr = {
3538 .old_key = old_key,
3539 .new_key = new_key,
3540 .flags = flags,
3541 .fail_early = true,
3542 .ret = 0,
3543 };
3544 int ret;
3545
3546 ret = dm_call_pr(bdev, __dm_pr_register, &pr);
3547 if (ret) {
3548 /* Didn't even get to register a path */
3549 return ret;
3550 }
3551
3552 if (!pr.ret)
3553 return 0;
3554 ret = pr.ret;
3555
3556 if (!new_key)
3557 return ret;
3558
3559 /* unregister all paths if we failed to register any path */
3560 pr.old_key = new_key;
3561 pr.new_key = 0;
3562 pr.flags = 0;
3563 pr.fail_early = false;
3564 (void) dm_call_pr(bdev, __dm_pr_register, &pr);
3565 return ret;
3566 }
3567
3568
3569 static int __dm_pr_reserve(struct dm_target *ti, struct dm_dev *dev,
3570 sector_t start, sector_t len, void *data)
3571 {
3572 struct dm_pr *pr = data;
3573 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3574
3575 if (!ops || !ops->pr_reserve) {
3576 pr->ret = -EOPNOTSUPP;
3577 return -1;
3578 }
3579
3580 pr->ret = ops->pr_reserve(dev->bdev, pr->old_key, pr->type, pr->flags);
3581 if (!pr->ret)
3582 return -1;
3583
3584 return 0;
3585 }
3586
3587 static int dm_pr_reserve(struct block_device *bdev, u64 key, enum pr_type type,
3588 u32 flags)
3589 {
3590 struct dm_pr pr = {
3591 .old_key = key,
3592 .flags = flags,
3593 .type = type,
3594 .fail_early = false,
3595 .ret = 0,
3596 };
3597 int ret;
3598
3599 ret = dm_call_pr(bdev, __dm_pr_reserve, &pr);
3600 if (ret)
3601 return ret;
3602
3603 return pr.ret;
3604 }
3605
3606 /*
3607 * If there is a non-All Registrants type of reservation, the release must be
3608 * sent down the holding path. For the cases where there is no reservation or
3609 * the path is not the holder, the device will also return success, so we must
3610 * try each path to make sure we got the correct path.
3611 */
3612 static int __dm_pr_release(struct dm_target *ti, struct dm_dev *dev,
3613 sector_t start, sector_t len, void *data)
3614 {
3615 struct dm_pr *pr = data;
3616 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3617
3618 if (!ops || !ops->pr_release) {
3619 pr->ret = -EOPNOTSUPP;
3620 return -1;
3621 }
3622
3623 pr->ret = ops->pr_release(dev->bdev, pr->old_key, pr->type);
3624 if (pr->ret)
3625 return -1;
3626
3627 return 0;
3628 }
3629
3630 static int dm_pr_release(struct block_device *bdev, u64 key, enum pr_type type)
3631 {
3632 struct dm_pr pr = {
3633 .old_key = key,
3634 .type = type,
3635 .fail_early = false,
3636 };
3637 int ret;
3638
3639 ret = dm_call_pr(bdev, __dm_pr_release, &pr);
3640 if (ret)
3641 return ret;
3642
3643 return pr.ret;
3644 }
3645
3646 static int __dm_pr_preempt(struct dm_target *ti, struct dm_dev *dev,
3647 sector_t start, sector_t len, void *data)
3648 {
3649 struct dm_pr *pr = data;
3650 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3651
3652 if (!ops || !ops->pr_preempt) {
3653 pr->ret = -EOPNOTSUPP;
3654 return -1;
3655 }
3656
3657 pr->ret = ops->pr_preempt(dev->bdev, pr->old_key, pr->new_key, pr->type,
3658 pr->abort);
3659 if (!pr->ret)
3660 return -1;
3661
3662 return 0;
3663 }
3664
3665 static int dm_pr_preempt(struct block_device *bdev, u64 old_key, u64 new_key,
3666 enum pr_type type, bool abort)
3667 {
3668 struct dm_pr pr = {
3669 .new_key = new_key,
3670 .old_key = old_key,
3671 .type = type,
3672 .fail_early = false,
3673 };
3674 int ret;
3675
3676 ret = dm_call_pr(bdev, __dm_pr_preempt, &pr);
3677 if (ret)
3678 return ret;
3679
3680 return pr.ret;
3681 }
3682
3683 static int dm_pr_clear(struct block_device *bdev, u64 key)
3684 {
3685 struct mapped_device *md = bdev->bd_disk->private_data;
3686 const struct pr_ops *ops;
3687 int r, srcu_idx;
3688 bool forward = true;
3689
3690 /* Not a real ioctl, but targets must not interpret non-DM ioctls */
3691 r = dm_prepare_ioctl(md, &srcu_idx, &bdev, 0, 0, &forward);
3692 if (r < 0)
3693 goto out;
3694 WARN_ON_ONCE(!forward);
3695
3696 ops = bdev->bd_disk->fops->pr_ops;
3697 if (ops && ops->pr_clear)
3698 r = ops->pr_clear(bdev, key);
3699 else
3700 r = -EOPNOTSUPP;
3701 out:
3702 dm_unprepare_ioctl(md, srcu_idx);
3703 return r;
3704 }
3705
3706 static int __dm_pr_read_keys(struct dm_target *ti, struct dm_dev *dev,
3707 sector_t start, sector_t len, void *data)
3708 {
3709 struct dm_pr *pr = data;
3710 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3711
3712 if (!ops || !ops->pr_read_keys) {
3713 pr->ret = -EOPNOTSUPP;
3714 return -1;
3715 }
3716
3717 pr->ret = ops->pr_read_keys(dev->bdev, pr->read_keys);
3718 if (!pr->ret)
3719 return -1;
3720
3721 return 0;
3722 }
3723
3724 static int dm_pr_read_keys(struct block_device *bdev, struct pr_keys *keys)
3725 {
3726 struct dm_pr pr = {
3727 .read_keys = keys,
3728 };
3729 int ret;
3730
3731 ret = dm_call_pr(bdev, __dm_pr_read_keys, &pr);
3732 if (ret)
3733 return ret;
3734
3735 return pr.ret;
3736 }
3737
3738 static int __dm_pr_read_reservation(struct dm_target *ti, struct dm_dev *dev,
3739 sector_t start, sector_t len, void *data)
3740 {
3741 struct dm_pr *pr = data;
3742 const struct pr_ops *ops = dev->bdev->bd_disk->fops->pr_ops;
3743
3744 if (!ops || !ops->pr_read_reservation) {
3745 pr->ret = -EOPNOTSUPP;
3746 return -1;
3747 }
3748
3749 pr->ret = ops->pr_read_reservation(dev->bdev, pr->rsv);
3750 if (!pr->ret)
3751 return -1;
3752
3753 return 0;
3754 }
3755
3756 static int dm_pr_read_reservation(struct block_device *bdev,
3757 struct pr_held_reservation *rsv)
3758 {
3759 struct dm_pr pr = {
3760 .rsv = rsv,
3761 };
3762 int ret;
3763
3764 ret = dm_call_pr(bdev, __dm_pr_read_reservation, &pr);
3765 if (ret)
3766 return ret;
3767
3768 return pr.ret;
3769 }
3770
3771 static const struct pr_ops dm_pr_ops = {
3772 .pr_register = dm_pr_register,
3773 .pr_reserve = dm_pr_reserve,
3774 .pr_release = dm_pr_release,
3775 .pr_preempt = dm_pr_preempt,
3776 .pr_clear = dm_pr_clear,
3777 .pr_read_keys = dm_pr_read_keys,
3778 .pr_read_reservation = dm_pr_read_reservation,
3779 };
3780
3781 static const struct block_device_operations dm_blk_dops = {
3782 .submit_bio = dm_submit_bio,
3783 .poll_bio = dm_poll_bio,
3784 .open = dm_blk_open,
3785 .release = dm_blk_close,
3786 .ioctl = dm_blk_ioctl,
3787 .getgeo = dm_blk_getgeo,
3788 .report_zones = dm_blk_report_zones,
3789 .get_unique_id = dm_blk_get_unique_id,
3790 .pr_ops = &dm_pr_ops,
3791 .owner = THIS_MODULE
3792 };
3793
3794 static const struct block_device_operations dm_rq_blk_dops = {
3795 .open = dm_blk_open,
3796 .release = dm_blk_close,
3797 .ioctl = dm_blk_ioctl,
3798 .getgeo = dm_blk_getgeo,
3799 .get_unique_id = dm_blk_get_unique_id,
3800 .pr_ops = &dm_pr_ops,
3801 .owner = THIS_MODULE
3802 };
3803
3804 static const struct dax_operations dm_dax_ops = {
3805 .direct_access = dm_dax_direct_access,
3806 .zero_page_range = dm_dax_zero_page_range,
3807 .recovery_write = dm_dax_recovery_write,
3808 };
3809
3810 /*
3811 * module hooks
3812 */
3813 module_init(dm_init);
3814 module_exit(dm_exit);
3815
3816 module_param(major, uint, 0);
3817 MODULE_PARM_DESC(major, "The major number of the device mapper");
3818
3819 module_param(reserved_bio_based_ios, uint, 0644);
3820 MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
3821
3822 module_param(dm_numa_node, int, 0644);
3823 MODULE_PARM_DESC(dm_numa_node, "NUMA node for DM device memory allocations");
3824
3825 module_param(swap_bios, int, 0644);
3826 MODULE_PARM_DESC(swap_bios, "Maximum allowed inflight swap IOs");
3827
3828 MODULE_DESCRIPTION(DM_NAME " driver");
3829 MODULE_AUTHOR("Joe Thornber <dm-devel@lists.linux.dev>");
3830 MODULE_LICENSE("GPL");
3831