Lines Matching +full:- +full:m (identifier cross-reference hits from drivers/md/dm-mpath.c)

3  * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
8 #include <linux/device-mapper.h>
10 #include "dm-rq.h"
11 #include "dm-bio-record.h"
12 #include "dm-path-selector.h"
13 #include "dm-uevent.h"
28 #include <linux/blk-mq.h>
32 #define DM_PG_INIT_DELAY_DEFAULT ((unsigned) -1)
59 struct multipath *m; /* Owning multipath instance */ member
119 /*-----------------------------------------------
121 *-----------------------------------------------*/
131 static bool mpath_double_check_test_bit(int MPATHF_bit, struct multipath *m) in mpath_double_check_test_bit() argument
133 bool r = test_bit(MPATHF_bit, &m->flags); in mpath_double_check_test_bit()
137 spin_lock_irqsave(&m->lock, flags); in mpath_double_check_test_bit()
138 r = test_bit(MPATHF_bit, &m->flags); in mpath_double_check_test_bit()
139 spin_unlock_irqrestore(&m->lock, flags); in mpath_double_check_test_bit()
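The helper above is the lockless-then-locked double check used throughout this file: test the flag bit without the lock, and only take m->lock to re-test when the cheap read says the bit may be set. A minimal userspace sketch of the same idiom, with a plain mutex and flags word standing in for m->lock and m->flags (the kernel's test_bit() is an atomic read; the unlocked read here is only illustrative):

#include <pthread.h>
#include <stdbool.h>

static unsigned long demo_flags;
static pthread_mutex_t demo_flags_lock = PTHREAD_MUTEX_INITIALIZER;

static bool demo_double_check_test_bit(unsigned long mask)
{
	bool r = (demo_flags & mask) != 0;	/* cheap, unlocked fast path */

	if (r) {
		pthread_mutex_lock(&demo_flags_lock);
		r = (demo_flags & mask) != 0;	/* confirm while holding the lock */
		pthread_mutex_unlock(&demo_flags_lock);
	}
	return r;
}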
145 /*-----------------------------------------------
147 *-----------------------------------------------*/
156 pgpath->is_active = true; in alloc_pgpath()
173 INIT_LIST_HEAD(&pg->pgpaths); in alloc_priority_group()
183 list_del(&pgpath->list); in free_pgpaths()
184 dm_put_device(ti, pgpath->path.dev); in free_pgpaths()
192 struct path_selector *ps = &pg->ps; in free_priority_group()
194 if (ps->type) { in free_priority_group()
195 ps->type->destroy(ps); in free_priority_group()
196 dm_put_path_selector(ps->type); in free_priority_group()
199 free_pgpaths(&pg->pgpaths, ti); in free_priority_group()
205 struct multipath *m; in alloc_multipath() local
207 m = kzalloc(sizeof(*m), GFP_KERNEL); in alloc_multipath()
208 if (m) { in alloc_multipath()
209 INIT_LIST_HEAD(&m->priority_groups); in alloc_multipath()
210 spin_lock_init(&m->lock); in alloc_multipath()
211 atomic_set(&m->nr_valid_paths, 0); in alloc_multipath()
212 INIT_WORK(&m->trigger_event, trigger_event); in alloc_multipath()
213 mutex_init(&m->work_mutex); in alloc_multipath()
215 m->queue_mode = DM_TYPE_NONE; in alloc_multipath()
217 m->ti = ti; in alloc_multipath()
218 ti->private = m; in alloc_multipath()
220 timer_setup(&m->nopath_timer, queue_if_no_path_timeout_work, 0); in alloc_multipath()
223 return m; in alloc_multipath()
226 static int alloc_multipath_stage2(struct dm_target *ti, struct multipath *m) in alloc_multipath_stage2() argument
228 if (m->queue_mode == DM_TYPE_NONE) { in alloc_multipath_stage2()
229 m->queue_mode = DM_TYPE_REQUEST_BASED; in alloc_multipath_stage2()
230 } else if (m->queue_mode == DM_TYPE_BIO_BASED) { in alloc_multipath_stage2()
231 INIT_WORK(&m->process_queued_bios, process_queued_bios); in alloc_multipath_stage2()
233 * bio-based doesn't support any direct scsi_dh management; in alloc_multipath_stage2()
236 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); in alloc_multipath_stage2()
239 dm_table_set_type(ti->table, m->queue_mode); in alloc_multipath_stage2()
243 * - must do this unconditionally (really doesn't hurt non-SCSI uses) in alloc_multipath_stage2()
245 set_bit(MPATHF_QUEUE_IO, &m->flags); in alloc_multipath_stage2()
246 atomic_set(&m->pg_init_in_progress, 0); in alloc_multipath_stage2()
247 atomic_set(&m->pg_init_count, 0); in alloc_multipath_stage2()
248 m->pg_init_delay_msecs = DM_PG_INIT_DELAY_DEFAULT; in alloc_multipath_stage2()
249 init_waitqueue_head(&m->pg_init_wait); in alloc_multipath_stage2()
254 static void free_multipath(struct multipath *m) in free_multipath() argument
258 list_for_each_entry_safe(pg, tmp, &m->priority_groups, list) { in free_multipath()
259 list_del(&pg->list); in free_multipath()
260 free_priority_group(pg, m->ti); in free_multipath()
263 kfree(m->hw_handler_name); in free_multipath()
264 kfree(m->hw_handler_params); in free_multipath()
265 mutex_destroy(&m->work_mutex); in free_multipath()
266 kfree(m); in free_multipath()
271 return info->ptr; in get_mpio()
286 /* dm_bio_details is immediately after the dm_mpath_io in bio's per-bio-data */ in get_bio_details_from_mpio()
296 mpio->nr_bytes = bio->bi_iter.bi_size; in multipath_init_per_bio_data()
297 mpio->pgpath = NULL; in multipath_init_per_bio_data()
303 /*-----------------------------------------------
305 *-----------------------------------------------*/
307 static int __pg_init_all_paths(struct multipath *m) in __pg_init_all_paths() argument
312 lockdep_assert_held(&m->lock); in __pg_init_all_paths()
314 if (atomic_read(&m->pg_init_in_progress) || test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) in __pg_init_all_paths()
317 atomic_inc(&m->pg_init_count); in __pg_init_all_paths()
318 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); in __pg_init_all_paths()
321 if (!m->current_pg) in __pg_init_all_paths()
324 if (test_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags)) in __pg_init_all_paths()
325 pg_init_delay = msecs_to_jiffies(m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT ? in __pg_init_all_paths()
326 m->pg_init_delay_msecs : DM_PG_INIT_DELAY_MSECS); in __pg_init_all_paths()
327 list_for_each_entry(pgpath, &m->current_pg->pgpaths, list) { in __pg_init_all_paths()
329 if (!pgpath->is_active) in __pg_init_all_paths()
331 if (queue_delayed_work(kmpath_handlerd, &pgpath->activate_path, in __pg_init_all_paths()
333 atomic_inc(&m->pg_init_in_progress); in __pg_init_all_paths()
335 return atomic_read(&m->pg_init_in_progress); in __pg_init_all_paths()
338 static int pg_init_all_paths(struct multipath *m) in pg_init_all_paths() argument
343 spin_lock_irqsave(&m->lock, flags); in pg_init_all_paths()
344 ret = __pg_init_all_paths(m); in pg_init_all_paths()
345 spin_unlock_irqrestore(&m->lock, flags); in pg_init_all_paths()
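pg_init_all_paths() is one instance of the locking convention used across the file: the double-underscore helper assumes m->lock is already held (enforced with lockdep_assert_held()), while the plain-named wrapper takes and releases the lock around it. A small sketch of that split, with hypothetical names:

#include <pthread.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_count;

/* Caller must already hold demo_lock (the kernel asserts this with lockdep). */
static int __demo_do_work(void)
{
	return ++demo_count;
}

/* Self-locking wrapper: take the lock, call the __ helper, drop the lock. */
static int demo_do_work(void)
{
	int ret;

	pthread_mutex_lock(&demo_lock);
	ret = __demo_do_work();
	pthread_mutex_unlock(&demo_lock);
	return ret;
}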
350 static void __switch_pg(struct multipath *m, struct priority_group *pg) in __switch_pg() argument
352 lockdep_assert_held(&m->lock); in __switch_pg()
354 m->current_pg = pg; in __switch_pg()
357 if (m->hw_handler_name) { in __switch_pg()
358 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); in __switch_pg()
359 set_bit(MPATHF_QUEUE_IO, &m->flags); in __switch_pg()
361 clear_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); in __switch_pg()
362 clear_bit(MPATHF_QUEUE_IO, &m->flags); in __switch_pg()
365 atomic_set(&m->pg_init_count, 0); in __switch_pg()
368 static struct pgpath *choose_path_in_pg(struct multipath *m, in choose_path_in_pg() argument
376 path = pg->ps.type->select_path(&pg->ps, nr_bytes); in choose_path_in_pg()
378 return ERR_PTR(-ENXIO); in choose_path_in_pg()
382 if (unlikely(READ_ONCE(m->current_pg) != pg)) { in choose_path_in_pg()
384 spin_lock_irqsave(&m->lock, flags); in choose_path_in_pg()
385 m->current_pgpath = pgpath; in choose_path_in_pg()
386 __switch_pg(m, pg); in choose_path_in_pg()
387 spin_unlock_irqrestore(&m->lock, flags); in choose_path_in_pg()
393 static struct pgpath *choose_pgpath(struct multipath *m, size_t nr_bytes) in choose_pgpath() argument
400 if (!atomic_read(&m->nr_valid_paths)) { in choose_pgpath()
401 spin_lock_irqsave(&m->lock, flags); in choose_pgpath()
402 clear_bit(MPATHF_QUEUE_IO, &m->flags); in choose_pgpath()
403 spin_unlock_irqrestore(&m->lock, flags); in choose_pgpath()
408 if (READ_ONCE(m->next_pg)) { in choose_pgpath()
409 spin_lock_irqsave(&m->lock, flags); in choose_pgpath()
410 pg = m->next_pg; in choose_pgpath()
412 spin_unlock_irqrestore(&m->lock, flags); in choose_pgpath()
415 m->next_pg = NULL; in choose_pgpath()
416 spin_unlock_irqrestore(&m->lock, flags); in choose_pgpath()
417 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
424 pg = READ_ONCE(m->current_pg); in choose_pgpath()
426 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
438 list_for_each_entry(pg, &m->priority_groups, list) { in choose_pgpath()
439 if (pg->bypassed == !!bypassed) in choose_pgpath()
441 pgpath = choose_path_in_pg(m, pg, nr_bytes); in choose_pgpath()
444 spin_lock_irqsave(&m->lock, flags); in choose_pgpath()
445 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags); in choose_pgpath()
446 spin_unlock_irqrestore(&m->lock, flags); in choose_pgpath()
451 } while (bypassed--); in choose_pgpath()
454 spin_lock_irqsave(&m->lock, flags); in choose_pgpath()
455 m->current_pgpath = NULL; in choose_pgpath()
456 m->current_pg = NULL; in choose_pgpath()
457 spin_unlock_irqrestore(&m->lock, flags); in choose_pgpath()
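When neither m->next_pg nor m->current_pg yields a usable path, choose_pgpath() falls back to scanning all priority groups twice: the first pass skips groups marked bypassed, and the second pass (driven by the "while (bypassed--)" loop above) tries only the bypassed groups as a last resort. A condensed, userspace-style sketch of that two-pass scan with stand-in types:

#include <stdbool.h>
#include <stddef.h>

struct demo_group {
	bool bypassed;		/* stand-in for pg->bypassed */
	bool has_live_path;	/* stand-in for "selector returned a path" */
};

static struct demo_group *demo_pick_group(struct demo_group *groups, size_t n)
{
	int bypassed = 1;

	do {
		for (size_t i = 0; i < n; i++) {
			/* pass 1 (bypassed == 1): skip bypassed groups;
			 * pass 2 (bypassed == 0): only bypassed groups remain eligible */
			if (groups[i].bypassed == !!bypassed)
				continue;
			if (groups[i].has_live_path)
				return &groups[i];
		}
	} while (bypassed--);

	return NULL;	/* nothing usable in any group */
}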
467 #define dm_report_EIO(m) \ argument
470 dm_table_device_name((m)->ti->table), \
471 test_bit(MPATHF_QUEUE_IF_NO_PATH, &(m)->flags), \
472 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &(m)->flags), \
473 dm_noflush_suspending((m)->ti)); \
477 * Check whether bios must be queued in the device-mapper core rather
480 static bool __must_push_back(struct multipath *m) in __must_push_back() argument
482 return dm_noflush_suspending(m->ti); in __must_push_back()
485 static bool must_push_back_rq(struct multipath *m) in must_push_back_rq() argument
490 spin_lock_irqsave(&m->lock, flags); in must_push_back_rq()
491 ret = (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) || __must_push_back(m)); in must_push_back_rq()
492 spin_unlock_irqrestore(&m->lock, flags); in must_push_back_rq()
498 * Map cloned requests (request-based multipath)
504 struct multipath *m = ti->private; in multipath_clone_and_map() local
513 pgpath = READ_ONCE(m->current_pgpath); in multipath_clone_and_map()
514 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_clone_and_map()
515 pgpath = choose_pgpath(m, nr_bytes); in multipath_clone_and_map()
518 if (must_push_back_rq(m)) in multipath_clone_and_map()
520 dm_report_EIO(m); /* Failed */ in multipath_clone_and_map()
522 } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) || in multipath_clone_and_map()
523 mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) { in multipath_clone_and_map()
524 pg_init_all_paths(m); in multipath_clone_and_map()
528 mpio->pgpath = pgpath; in multipath_clone_and_map()
529 mpio->nr_bytes = nr_bytes; in multipath_clone_and_map()
531 bdev = pgpath->path.dev->bdev; in multipath_clone_and_map()
533 clone = blk_get_request(q, rq->cmd_flags | REQ_NOMERGE, in multipath_clone_and_map()
538 atomic_inc(&m->pg_init_in_progress); in multipath_clone_and_map()
544 * blk-mq's SCHED_RESTART can cover this requeue, so we in multipath_clone_and_map()
546 * we have to return DM_MAPIO_REQUEUE so that blk-mq can in multipath_clone_and_map()
552 clone->bio = clone->biotail = NULL; in multipath_clone_and_map()
553 clone->rq_disk = bdev->bd_disk; in multipath_clone_and_map()
554 clone->cmd_flags |= REQ_FAILFAST_TRANSPORT; in multipath_clone_and_map()
557 if (pgpath->pg->ps.type->start_io) in multipath_clone_and_map()
558 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in multipath_clone_and_map()
559 &pgpath->path, in multipath_clone_and_map()
569 * non-NULL map_context means caller is still map in multipath_release_clone()
573 struct pgpath *pgpath = mpio->pgpath; in multipath_release_clone()
575 if (pgpath && pgpath->pg->ps.type->end_io) in multipath_release_clone()
576 pgpath->pg->ps.type->end_io(&pgpath->pg->ps, in multipath_release_clone()
577 &pgpath->path, in multipath_release_clone()
578 mpio->nr_bytes, in multipath_release_clone()
579 clone->io_start_time_ns); in multipath_release_clone()
586 * Map cloned bios (bio-based multipath)
589 static void __multipath_queue_bio(struct multipath *m, struct bio *bio) in __multipath_queue_bio() argument
592 bio_list_add(&m->queued_bios, bio); in __multipath_queue_bio()
593 if (!test_bit(MPATHF_QUEUE_IO, &m->flags)) in __multipath_queue_bio()
594 queue_work(kmultipathd, &m->process_queued_bios); in __multipath_queue_bio()
597 static void multipath_queue_bio(struct multipath *m, struct bio *bio) in multipath_queue_bio() argument
601 spin_lock_irqsave(&m->lock, flags); in multipath_queue_bio()
602 __multipath_queue_bio(m, bio); in multipath_queue_bio()
603 spin_unlock_irqrestore(&m->lock, flags); in multipath_queue_bio()
606 static struct pgpath *__map_bio(struct multipath *m, struct bio *bio) in __map_bio() argument
612 pgpath = READ_ONCE(m->current_pgpath); in __map_bio()
613 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in __map_bio()
614 pgpath = choose_pgpath(m, bio->bi_iter.bi_size); in __map_bio()
617 spin_lock_irqsave(&m->lock, flags); in __map_bio()
618 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { in __map_bio()
619 __multipath_queue_bio(m, bio); in __map_bio()
620 pgpath = ERR_PTR(-EAGAIN); in __map_bio()
622 spin_unlock_irqrestore(&m->lock, flags); in __map_bio()
624 } else if (mpath_double_check_test_bit(MPATHF_QUEUE_IO, m) || in __map_bio()
625 mpath_double_check_test_bit(MPATHF_PG_INIT_REQUIRED, m)) { in __map_bio()
626 multipath_queue_bio(m, bio); in __map_bio()
627 pg_init_all_paths(m); in __map_bio()
628 return ERR_PTR(-EAGAIN); in __map_bio()
634 static int __multipath_map_bio(struct multipath *m, struct bio *bio, in __multipath_map_bio() argument
637 struct pgpath *pgpath = __map_bio(m, bio); in __multipath_map_bio()
643 if (__must_push_back(m)) in __multipath_map_bio()
645 dm_report_EIO(m); in __multipath_map_bio()
649 mpio->pgpath = pgpath; in __multipath_map_bio()
651 bio->bi_status = 0; in __multipath_map_bio()
652 bio_set_dev(bio, pgpath->path.dev->bdev); in __multipath_map_bio()
653 bio->bi_opf |= REQ_FAILFAST_TRANSPORT; in __multipath_map_bio()
655 if (pgpath->pg->ps.type->start_io) in __multipath_map_bio()
656 pgpath->pg->ps.type->start_io(&pgpath->pg->ps, in __multipath_map_bio()
657 &pgpath->path, in __multipath_map_bio()
658 mpio->nr_bytes); in __multipath_map_bio()
664 struct multipath *m = ti->private; in multipath_map_bio() local
668 return __multipath_map_bio(m, bio, mpio); in multipath_map_bio()
671 static void process_queued_io_list(struct multipath *m) in process_queued_io_list() argument
673 if (m->queue_mode == DM_TYPE_REQUEST_BASED) in process_queued_io_list()
674 dm_mq_kick_requeue_list(dm_table_get_md(m->ti->table)); in process_queued_io_list()
675 else if (m->queue_mode == DM_TYPE_BIO_BASED) in process_queued_io_list()
676 queue_work(kmultipathd, &m->process_queued_bios); in process_queued_io_list()
686 struct multipath *m = in process_queued_bios() local
691 spin_lock_irqsave(&m->lock, flags); in process_queued_bios()
693 if (bio_list_empty(&m->queued_bios)) { in process_queued_bios()
694 spin_unlock_irqrestore(&m->lock, flags); in process_queued_bios()
698 bio_list_merge(&bios, &m->queued_bios); in process_queued_bios()
699 bio_list_init(&m->queued_bios); in process_queued_bios()
701 spin_unlock_irqrestore(&m->lock, flags); in process_queued_bios()
707 r = __multipath_map_bio(m, bio, mpio); in process_queued_bios()
710 bio->bi_status = BLK_STS_IOERR; in process_queued_bios()
714 bio->bi_status = BLK_STS_DM_REQUEUE; in process_queued_bios()
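process_queued_bios() uses a common drain pattern: while holding m->lock it splices m->queued_bios into a local list and reinitialises the shared one, then maps each bio with the lock dropped, setting bi_status to BLK_STS_IOERR or BLK_STS_DM_REQUEUE when mapping fails. A userspace-flavoured sketch of the splice-then-process part, with illustrative types:

#include <pthread.h>
#include <stddef.h>

struct demo_item {
	struct demo_item *next;
};

static struct demo_item *demo_queued;
static pthread_mutex_t demo_queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void demo_process_queued(void (*map_one)(struct demo_item *))
{
	struct demo_item *local;

	pthread_mutex_lock(&demo_queue_lock);
	local = demo_queued;		/* splice the shared list... */
	demo_queued = NULL;		/* ...and leave it reinitialised */
	pthread_mutex_unlock(&demo_queue_lock);

	while (local) {
		struct demo_item *next = local->next;

		map_one(local);		/* mapping happens with the lock dropped */
		local = next;
	}
}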
732 static int queue_if_no_path(struct multipath *m, bool queue_if_no_path, in queue_if_no_path() argument
737 const char *dm_dev_name = dm_table_device_name(m->ti->table); in queue_if_no_path()
742 spin_lock_irqsave(&m->lock, flags); in queue_if_no_path()
744 queue_if_no_path_bit = test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags); in queue_if_no_path()
745 saved_queue_if_no_path_bit = test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags); in queue_if_no_path()
752 assign_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path_bit); in queue_if_no_path()
755 clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags); in queue_if_no_path()
757 assign_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags, queue_if_no_path); in queue_if_no_path()
761 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags), in queue_if_no_path()
762 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags), in queue_if_no_path()
763 dm_noflush_suspending(m->ti)); in queue_if_no_path()
765 spin_unlock_irqrestore(&m->lock, flags); in queue_if_no_path()
768 dm_table_run_md_queue_async(m->ti->table); in queue_if_no_path()
769 process_queued_io_list(m); in queue_if_no_path()
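queue_if_no_path() juggles two bits: MPATHF_QUEUE_IF_NO_PATH is the live setting, and MPATHF_SAVED_QUEUE_IF_NO_PATH parks the previous value when save_old_value is passed (as presuspend does), so that multipath_resume(), further down in this listing, can restore it. A simplified sketch of that save/restore dance, with plain booleans standing in for the flag bits (the real code additionally skips the clear during a noflush suspend):

#include <stdbool.h>

struct demo_flags { bool queue_if_no_path; bool saved; };

static void demo_queue_if_no_path(struct demo_flags *f, bool enable,
				  bool save_old_value)
{
	if (save_old_value)
		f->saved = f->queue_if_no_path;	/* park the current setting */
	else if (!enable)
		f->saved = false;		/* an explicit fail_if_no_path drops it too */
	f->queue_if_no_path = enable;
}

static void demo_resume(struct demo_flags *f)
{
	if (f->saved) {				/* restore what presuspend parked */
		f->queue_if_no_path = true;
		f->saved = false;
	}
}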
781 struct multipath *m = from_timer(m, t, nopath_timer); in queue_if_no_path_timeout_work() local
784 dm_table_device_name(m->ti->table)); in queue_if_no_path_timeout_work()
785 queue_if_no_path(m, false, false, __func__); in queue_if_no_path_timeout_work()
790 * Called with m->lock held.
792 static void enable_nopath_timeout(struct multipath *m) in enable_nopath_timeout() argument
797 lockdep_assert_held(&m->lock); in enable_nopath_timeout()
800 atomic_read(&m->nr_valid_paths) == 0 && in enable_nopath_timeout()
801 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { in enable_nopath_timeout()
802 mod_timer(&m->nopath_timer, in enable_nopath_timeout()
807 static void disable_nopath_timeout(struct multipath *m) in disable_nopath_timeout() argument
809 del_timer_sync(&m->nopath_timer); in disable_nopath_timeout()
818 struct multipath *m = in trigger_event() local
821 dm_table_event(m->ti->table); in trigger_event()
824 /*-----------------------------------------------------------------
831 * <#paths> <#per-path selector args>
833 *---------------------------------------------------------------*/
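For reference, a table line laid out per the format documented above might look like the following (start/length, device numbers and repeat counts are made up): one feature (queue_if_no_path), no hardware handler arguments, two priority groups with group 1 initial, each group using round-robin with no selector arguments, two paths, and one per-path argument (the repeat count):

0 16384000 multipath 1 queue_if_no_path 0 2 1 round-robin 0 2 1 8:16 1 8:32 1 round-robin 0 2 1 8:48 1 8:64 1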
847 ti->error = "unknown path selector type"; in parse_path_selector()
848 return -EINVAL; in parse_path_selector()
851 r = dm_read_arg_group(_args, as, &ps_argc, &ti->error); in parse_path_selector()
854 return -EINVAL; in parse_path_selector()
857 r = pst->create(&pg->ps, ps_argc, as->argv); in parse_path_selector()
860 ti->error = "path selector constructor failed"; in parse_path_selector()
864 pg->ps.type = pst; in parse_path_selector()
870 static int setup_scsi_dh(struct block_device *bdev, struct multipath *m, in setup_scsi_dh() argument
876 if (mpath_double_check_test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, m)) { in setup_scsi_dh()
883 if (m->hw_handler_name && strcmp(*attached_handler_name, m->hw_handler_name)) { in setup_scsi_dh()
884 kfree(m->hw_handler_params); in setup_scsi_dh()
885 m->hw_handler_params = NULL; in setup_scsi_dh()
894 kfree(m->hw_handler_name); in setup_scsi_dh()
895 m->hw_handler_name = *attached_handler_name; in setup_scsi_dh()
900 if (m->hw_handler_name) { in setup_scsi_dh()
901 r = scsi_dh_attach(q, m->hw_handler_name); in setup_scsi_dh()
902 if (r == -EBUSY) { in setup_scsi_dh()
905 printk(KERN_INFO "dm-mpath: retaining handler on device %s\n", in setup_scsi_dh()
914 if (m->hw_handler_params) { in setup_scsi_dh()
915 r = scsi_dh_set_params(q, m->hw_handler_params); in setup_scsi_dh()
931 struct multipath *m = ti->private; in parse_path() local
936 if (as->argc < 1) { in parse_path()
937 ti->error = "no device given"; in parse_path()
938 return ERR_PTR(-EINVAL); in parse_path()
943 return ERR_PTR(-ENOMEM); in parse_path()
945 r = dm_get_device(ti, dm_shift_arg(as), dm_table_get_mode(ti->table), in parse_path()
946 &p->path.dev); in parse_path()
948 ti->error = "error getting device"; in parse_path()
952 q = bdev_get_queue(p->path.dev->bdev); in parse_path()
954 if (attached_handler_name || m->hw_handler_name) { in parse_path()
955 INIT_DELAYED_WORK(&p->activate_path, activate_path_work); in parse_path()
956 r = setup_scsi_dh(p->path.dev->bdev, m, &attached_handler_name, &ti->error); in parse_path()
959 dm_put_device(ti, p->path.dev); in parse_path()
964 r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error); in parse_path()
966 dm_put_device(ti, p->path.dev); in parse_path()
977 struct multipath *m) in parse_priority_group() argument
987 struct dm_target *ti = m->ti; in parse_priority_group()
989 if (as->argc < 2) { in parse_priority_group()
990 as->argc = 0; in parse_priority_group()
991 ti->error = "not enough priority group arguments"; in parse_priority_group()
992 return ERR_PTR(-EINVAL); in parse_priority_group()
997 ti->error = "couldn't allocate priority group"; in parse_priority_group()
998 return ERR_PTR(-ENOMEM); in parse_priority_group()
1000 pg->m = m; in parse_priority_group()
1009 r = dm_read_arg(_args, as, &pg->nr_pgpaths, &ti->error); in parse_priority_group()
1013 r = dm_read_arg(_args + 1, as, &nr_selector_args, &ti->error); in parse_priority_group()
1018 for (i = 0; i < pg->nr_pgpaths; i++) { in parse_priority_group()
1022 if (as->argc < nr_args) { in parse_priority_group()
1023 ti->error = "not enough path parameters"; in parse_priority_group()
1024 r = -EINVAL; in parse_priority_group()
1029 path_args.argv = as->argv; in parse_priority_group()
1031 pgpath = parse_path(&path_args, &pg->ps, ti); in parse_priority_group()
1037 pgpath->pg = pg; in parse_priority_group()
1038 list_add_tail(&pgpath->list, &pg->pgpaths); in parse_priority_group()
1049 static int parse_hw_handler(struct dm_arg_set *as, struct multipath *m) in parse_hw_handler() argument
1053 struct dm_target *ti = m->ti; in parse_hw_handler()
1059 if (dm_read_arg_group(_args, as, &hw_argc, &ti->error)) in parse_hw_handler()
1060 return -EINVAL; in parse_hw_handler()
1065 if (m->queue_mode == DM_TYPE_BIO_BASED) { in parse_hw_handler()
1067 DMERR("bio-based multipath doesn't allow hardware handler args"); in parse_hw_handler()
1071 m->hw_handler_name = kstrdup(dm_shift_arg(as), GFP_KERNEL); in parse_hw_handler()
1072 if (!m->hw_handler_name) in parse_hw_handler()
1073 return -EINVAL; in parse_hw_handler()
1079 for (i = 0; i <= hw_argc - 2; i++) in parse_hw_handler()
1080 len += strlen(as->argv[i]) + 1; in parse_hw_handler()
1081 p = m->hw_handler_params = kzalloc(len, GFP_KERNEL); in parse_hw_handler()
1083 ti->error = "memory allocation failed"; in parse_hw_handler()
1084 ret = -ENOMEM; in parse_hw_handler()
1087 j = sprintf(p, "%d", hw_argc - 1); in parse_hw_handler()
1088 for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1) in parse_hw_handler()
1089 j = sprintf(p, "%s", as->argv[i]); in parse_hw_handler()
1091 dm_consume_args(as, hw_argc - 1); in parse_hw_handler()
1095 kfree(m->hw_handler_name); in parse_hw_handler()
1096 m->hw_handler_name = NULL; in parse_hw_handler()
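The parameter block built in the loop above is a single allocation of NUL-separated tokens: first the argument count, then each argument, which is what scsi_dh_set_params() later receives through m->hw_handler_params. A small userspace sketch of the same packing, with stand-in names (the length accounting here is illustrative, not the kernel's):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Pack "argc" and each argument as NUL-separated tokens: "2\0arg1\0arg2\0". */
static char *demo_pack_params(int argc, char **argv)
{
	size_t len = 12;	/* generous room for the decimal count + NUL */
	char *buf, *p;
	int i, j;

	for (i = 0; i < argc; i++)
		len += strlen(argv[i]) + 1;

	p = buf = calloc(1, len);
	if (!buf)
		return NULL;

	j = sprintf(p, "%d", argc);
	for (i = 0, p += j + 1; i < argc; i++, p += j + 1)
		j = sprintf(p, "%s", argv[i]);

	return buf;
}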
1100 static int parse_features(struct dm_arg_set *as, struct multipath *m) in parse_features() argument
1104 struct dm_target *ti = m->ti; in parse_features()
1113 r = dm_read_arg_group(_args, as, &argc, &ti->error); in parse_features()
1115 return -EINVAL; in parse_features()
1122 argc--; in parse_features()
1125 r = queue_if_no_path(m, true, false, __func__); in parse_features()
1130 set_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags); in parse_features()
1136 r = dm_read_arg(_args + 1, as, &m->pg_init_retries, &ti->error); in parse_features()
1137 argc--; in parse_features()
1143 r = dm_read_arg(_args + 2, as, &m->pg_init_delay_msecs, &ti->error); in parse_features()
1144 argc--; in parse_features()
1153 m->queue_mode = DM_TYPE_BIO_BASED; in parse_features()
1156 m->queue_mode = DM_TYPE_REQUEST_BASED; in parse_features()
1158 ti->error = "Unknown 'queue_mode' requested"; in parse_features()
1159 r = -EINVAL; in parse_features()
1161 argc--; in parse_features()
1165 ti->error = "Unrecognised multipath feature request"; in parse_features()
1166 r = -EINVAL; in parse_features()
1181 struct multipath *m; in multipath_ctr() local
1190 m = alloc_multipath(ti); in multipath_ctr()
1191 if (!m) { in multipath_ctr()
1192 ti->error = "can't allocate multipath"; in multipath_ctr()
1193 return -EINVAL; in multipath_ctr()
1196 r = parse_features(&as, m); in multipath_ctr()
1200 r = alloc_multipath_stage2(ti, m); in multipath_ctr()
1204 r = parse_hw_handler(&as, m); in multipath_ctr()
1208 r = dm_read_arg(_args, &as, &m->nr_priority_groups, &ti->error); in multipath_ctr()
1212 r = dm_read_arg(_args + 1, &as, &next_pg_num, &ti->error); in multipath_ctr()
1216 if ((!m->nr_priority_groups && next_pg_num) || in multipath_ctr()
1217 (m->nr_priority_groups && !next_pg_num)) { in multipath_ctr()
1218 ti->error = "invalid initial priority group"; in multipath_ctr()
1219 r = -EINVAL; in multipath_ctr()
1226 unsigned nr_valid_paths = atomic_read(&m->nr_valid_paths); in multipath_ctr()
1228 pg = parse_priority_group(&as, m); in multipath_ctr()
1234 nr_valid_paths += pg->nr_pgpaths; in multipath_ctr()
1235 atomic_set(&m->nr_valid_paths, nr_valid_paths); in multipath_ctr()
1237 list_add_tail(&pg->list, &m->priority_groups); in multipath_ctr()
1239 pg->pg_num = pg_count; in multipath_ctr()
1240 if (!--next_pg_num) in multipath_ctr()
1241 m->next_pg = pg; in multipath_ctr()
1244 if (pg_count != m->nr_priority_groups) { in multipath_ctr()
1245 ti->error = "priority group count mismatch"; in multipath_ctr()
1246 r = -EINVAL; in multipath_ctr()
1250 spin_lock_irqsave(&m->lock, flags); in multipath_ctr()
1251 enable_nopath_timeout(m); in multipath_ctr()
1252 spin_unlock_irqrestore(&m->lock, flags); in multipath_ctr()
1254 ti->num_flush_bios = 1; in multipath_ctr()
1255 ti->num_discard_bios = 1; in multipath_ctr()
1256 ti->num_write_same_bios = 1; in multipath_ctr()
1257 ti->num_write_zeroes_bios = 1; in multipath_ctr()
1258 if (m->queue_mode == DM_TYPE_BIO_BASED) in multipath_ctr()
1259 ti->per_io_data_size = multipath_per_bio_data_size(); in multipath_ctr()
1261 ti->per_io_data_size = sizeof(struct dm_mpath_io); in multipath_ctr()
1266 free_multipath(m); in multipath_ctr()
1270 static void multipath_wait_for_pg_init_completion(struct multipath *m) in multipath_wait_for_pg_init_completion() argument
1275 prepare_to_wait(&m->pg_init_wait, &wait, TASK_UNINTERRUPTIBLE); in multipath_wait_for_pg_init_completion()
1277 if (!atomic_read(&m->pg_init_in_progress)) in multipath_wait_for_pg_init_completion()
1282 finish_wait(&m->pg_init_wait, &wait); in multipath_wait_for_pg_init_completion()
1285 static void flush_multipath_work(struct multipath *m) in flush_multipath_work() argument
1287 if (m->hw_handler_name) { in flush_multipath_work()
1290 if (!atomic_read(&m->pg_init_in_progress)) in flush_multipath_work()
1293 spin_lock_irqsave(&m->lock, flags); in flush_multipath_work()
1294 if (atomic_read(&m->pg_init_in_progress) && in flush_multipath_work()
1295 !test_and_set_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) { in flush_multipath_work()
1296 spin_unlock_irqrestore(&m->lock, flags); in flush_multipath_work()
1299 multipath_wait_for_pg_init_completion(m); in flush_multipath_work()
1301 spin_lock_irqsave(&m->lock, flags); in flush_multipath_work()
1302 clear_bit(MPATHF_PG_INIT_DISABLED, &m->flags); in flush_multipath_work()
1304 spin_unlock_irqrestore(&m->lock, flags); in flush_multipath_work()
1307 if (m->queue_mode == DM_TYPE_BIO_BASED) in flush_multipath_work()
1308 flush_work(&m->process_queued_bios); in flush_multipath_work()
1309 flush_work(&m->trigger_event); in flush_multipath_work()
1314 struct multipath *m = ti->private; in multipath_dtr() local
1316 disable_nopath_timeout(m); in multipath_dtr()
1317 flush_multipath_work(m); in multipath_dtr()
1318 free_multipath(m); in multipath_dtr()
1327 struct multipath *m = pgpath->pg->m; in fail_path() local
1329 spin_lock_irqsave(&m->lock, flags); in fail_path()
1331 if (!pgpath->is_active) in fail_path()
1335 dm_table_device_name(m->ti->table), in fail_path()
1336 pgpath->path.dev->name); in fail_path()
1338 pgpath->pg->ps.type->fail_path(&pgpath->pg->ps, &pgpath->path); in fail_path()
1339 pgpath->is_active = false; in fail_path()
1340 pgpath->fail_count++; in fail_path()
1342 atomic_dec(&m->nr_valid_paths); in fail_path()
1344 if (pgpath == m->current_pgpath) in fail_path()
1345 m->current_pgpath = NULL; in fail_path()
1347 dm_path_uevent(DM_UEVENT_PATH_FAILED, m->ti, in fail_path()
1348 pgpath->path.dev->name, atomic_read(&m->nr_valid_paths)); in fail_path()
1350 schedule_work(&m->trigger_event); in fail_path()
1352 enable_nopath_timeout(m); in fail_path()
1355 spin_unlock_irqrestore(&m->lock, flags); in fail_path()
1361 * Reinstate a previously-failed path
1367 struct multipath *m = pgpath->pg->m; in reinstate_path() local
1370 spin_lock_irqsave(&m->lock, flags); in reinstate_path()
1372 if (pgpath->is_active) in reinstate_path()
1376 dm_table_device_name(m->ti->table), in reinstate_path()
1377 pgpath->path.dev->name); in reinstate_path()
1379 r = pgpath->pg->ps.type->reinstate_path(&pgpath->pg->ps, &pgpath->path); in reinstate_path()
1383 pgpath->is_active = true; in reinstate_path()
1385 nr_valid_paths = atomic_inc_return(&m->nr_valid_paths); in reinstate_path()
1387 m->current_pgpath = NULL; in reinstate_path()
1389 } else if (m->hw_handler_name && (m->current_pg == pgpath->pg)) { in reinstate_path()
1390 if (queue_work(kmpath_handlerd, &pgpath->activate_path.work)) in reinstate_path()
1391 atomic_inc(&m->pg_init_in_progress); in reinstate_path()
1394 dm_path_uevent(DM_UEVENT_PATH_REINSTATED, m->ti, in reinstate_path()
1395 pgpath->path.dev->name, nr_valid_paths); in reinstate_path()
1397 schedule_work(&m->trigger_event); in reinstate_path()
1400 spin_unlock_irqrestore(&m->lock, flags); in reinstate_path()
1402 dm_table_run_md_queue_async(m->ti->table); in reinstate_path()
1403 process_queued_io_list(m); in reinstate_path()
1406 if (pgpath->is_active) in reinstate_path()
1407 disable_nopath_timeout(m); in reinstate_path()
1415 static int action_dev(struct multipath *m, struct dm_dev *dev, in action_dev() argument
1418 int r = -EINVAL; in action_dev()
1422 list_for_each_entry(pg, &m->priority_groups, list) { in action_dev()
1423 list_for_each_entry(pgpath, &pg->pgpaths, list) { in action_dev()
1424 if (pgpath->path.dev == dev) in action_dev()
1435 static void bypass_pg(struct multipath *m, struct priority_group *pg, in bypass_pg() argument
1440 spin_lock_irqsave(&m->lock, flags); in bypass_pg()
1442 pg->bypassed = bypassed; in bypass_pg()
1443 m->current_pgpath = NULL; in bypass_pg()
1444 m->current_pg = NULL; in bypass_pg()
1446 spin_unlock_irqrestore(&m->lock, flags); in bypass_pg()
1448 schedule_work(&m->trigger_event); in bypass_pg()
1454 static int switch_pg_num(struct multipath *m, const char *pgstr) in switch_pg_num() argument
1462 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) { in switch_pg_num()
1464 return -EINVAL; in switch_pg_num()
1467 spin_lock_irqsave(&m->lock, flags); in switch_pg_num()
1468 list_for_each_entry(pg, &m->priority_groups, list) { in switch_pg_num()
1469 pg->bypassed = false; in switch_pg_num()
1470 if (--pgnum) in switch_pg_num()
1473 m->current_pgpath = NULL; in switch_pg_num()
1474 m->current_pg = NULL; in switch_pg_num()
1475 m->next_pg = pg; in switch_pg_num()
1477 spin_unlock_irqrestore(&m->lock, flags); in switch_pg_num()
1479 schedule_work(&m->trigger_event); in switch_pg_num()
1487 static int bypass_pg_num(struct multipath *m, const char *pgstr, bool bypassed) in bypass_pg_num() argument
1494 !m->nr_priority_groups || (pgnum > m->nr_priority_groups)) { in bypass_pg_num()
1496 return -EINVAL; in bypass_pg_num()
1499 list_for_each_entry(pg, &m->priority_groups, list) { in bypass_pg_num()
1500 if (!--pgnum) in bypass_pg_num()
1504 bypass_pg(m, pg, bypassed); in bypass_pg_num()
1511 static bool pg_init_limit_reached(struct multipath *m, struct pgpath *pgpath) in pg_init_limit_reached() argument
1516 spin_lock_irqsave(&m->lock, flags); in pg_init_limit_reached()
1518 if (atomic_read(&m->pg_init_count) <= m->pg_init_retries && in pg_init_limit_reached()
1519 !test_bit(MPATHF_PG_INIT_DISABLED, &m->flags)) in pg_init_limit_reached()
1520 set_bit(MPATHF_PG_INIT_REQUIRED, &m->flags); in pg_init_limit_reached()
1524 spin_unlock_irqrestore(&m->lock, flags); in pg_init_limit_reached()
1532 struct priority_group *pg = pgpath->pg; in pg_init_done()
1533 struct multipath *m = pg->m; in pg_init_done() local
1542 if (!m->hw_handler_name) { in pg_init_done()
1547 "Error %d.", m->hw_handler_name, errors); in pg_init_done()
1558 bypass_pg(m, pg, true); in pg_init_done()
1566 if (pg_init_limit_reached(m, pgpath)) in pg_init_done()
1580 spin_lock_irqsave(&m->lock, flags); in pg_init_done()
1582 if (pgpath == m->current_pgpath) { in pg_init_done()
1584 m->current_pgpath = NULL; in pg_init_done()
1585 m->current_pg = NULL; in pg_init_done()
1587 } else if (!test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) in pg_init_done()
1588 pg->bypassed = false; in pg_init_done()
1590 if (atomic_dec_return(&m->pg_init_in_progress) > 0) in pg_init_done()
1594 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) { in pg_init_done()
1596 set_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags); in pg_init_done()
1598 clear_bit(MPATHF_PG_INIT_DELAY_RETRY, &m->flags); in pg_init_done()
1600 if (__pg_init_all_paths(m)) in pg_init_done()
1603 clear_bit(MPATHF_QUEUE_IO, &m->flags); in pg_init_done()
1605 process_queued_io_list(m); in pg_init_done()
1610 wake_up(&m->pg_init_wait); in pg_init_done()
1613 spin_unlock_irqrestore(&m->lock, flags); in pg_init_done()
1618 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in activate_or_offline_path()
1620 if (pgpath->is_active && !blk_queue_dying(q)) in activate_or_offline_path()
1638 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io()
1646 * (See drivers/md/dm-rq.c:end_clone_bio() about why the clone requests in multipath_end_io()
1653 struct multipath *m = ti->private; in multipath_end_io() local
1663 if (!atomic_read(&m->nr_valid_paths) && in multipath_end_io()
1664 !must_push_back_rq(m)) { in multipath_end_io()
1666 dm_report_EIO(m); in multipath_end_io()
1673 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io()
1675 if (ps->type->end_io) in multipath_end_io()
1676 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io()
1677 clone->io_start_time_ns); in multipath_end_io()
1686 struct multipath *m = ti->private; in multipath_end_io_bio() local
1688 struct pgpath *pgpath = mpio->pgpath; in multipath_end_io_bio()
1698 if (!atomic_read(&m->nr_valid_paths)) { in multipath_end_io_bio()
1699 spin_lock_irqsave(&m->lock, flags); in multipath_end_io_bio()
1700 if (!test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { in multipath_end_io_bio()
1701 if (__must_push_back(m)) { in multipath_end_io_bio()
1704 dm_report_EIO(m); in multipath_end_io_bio()
1707 spin_unlock_irqrestore(&m->lock, flags); in multipath_end_io_bio()
1710 spin_unlock_irqrestore(&m->lock, flags); in multipath_end_io_bio()
1713 multipath_queue_bio(m, clone); in multipath_end_io_bio()
1717 struct path_selector *ps = &pgpath->pg->ps; in multipath_end_io_bio()
1719 if (ps->type->end_io) in multipath_end_io_bio()
1720 ps->type->end_io(ps, &pgpath->path, mpio->nr_bytes, in multipath_end_io_bio()
1730 * - Note that if the freeze_bdev fails while suspending, the
1731 * queue_if_no_path state is lost - userspace should reset it.
1736 struct multipath *m = ti->private; in multipath_presuspend() local
1738 /* FIXME: bio-based shouldn't need to always disable queue_if_no_path */ in multipath_presuspend()
1739 if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti)) in multipath_presuspend()
1740 queue_if_no_path(m, false, true, __func__); in multipath_presuspend()
1745 struct multipath *m = ti->private; in multipath_postsuspend() local
1747 mutex_lock(&m->work_mutex); in multipath_postsuspend()
1748 flush_multipath_work(m); in multipath_postsuspend()
1749 mutex_unlock(&m->work_mutex); in multipath_postsuspend()
1757 struct multipath *m = ti->private; in multipath_resume() local
1760 spin_lock_irqsave(&m->lock, flags); in multipath_resume()
1761 if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) { in multipath_resume()
1762 set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags); in multipath_resume()
1763 clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags); in multipath_resume()
1767 dm_table_device_name(m->ti->table), __func__, in multipath_resume()
1768 test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags), in multipath_resume()
1769 test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)); in multipath_resume()
1771 spin_unlock_irqrestore(&m->lock, flags); in multipath_resume()
1787 * [priority selector-name num_ps_args [ps_args]*
1795 struct multipath *m = ti->private; in multipath_status() local
1801 spin_lock_irqsave(&m->lock, flags); in multipath_status()
1805 DMEMIT("2 %u %u ", test_bit(MPATHF_QUEUE_IO, &m->flags), in multipath_status()
1806 atomic_read(&m->pg_init_count)); in multipath_status()
1808 DMEMIT("%u ", test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags) + in multipath_status()
1809 (m->pg_init_retries > 0) * 2 + in multipath_status()
1810 (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) * 2 + in multipath_status()
1811 test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags) + in multipath_status()
1812 (m->queue_mode != DM_TYPE_REQUEST_BASED) * 2); in multipath_status()
1814 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) in multipath_status()
1816 if (m->pg_init_retries) in multipath_status()
1817 DMEMIT("pg_init_retries %u ", m->pg_init_retries); in multipath_status()
1818 if (m->pg_init_delay_msecs != DM_PG_INIT_DELAY_DEFAULT) in multipath_status()
1819 DMEMIT("pg_init_delay_msecs %u ", m->pg_init_delay_msecs); in multipath_status()
1820 if (test_bit(MPATHF_RETAIN_ATTACHED_HW_HANDLER, &m->flags)) in multipath_status()
1822 if (m->queue_mode != DM_TYPE_REQUEST_BASED) { in multipath_status()
1823 switch(m->queue_mode) { in multipath_status()
1834 if (!m->hw_handler_name || type == STATUSTYPE_INFO) in multipath_status()
1837 DMEMIT("1 %s ", m->hw_handler_name); in multipath_status()
1839 DMEMIT("%u ", m->nr_priority_groups); in multipath_status()
1841 if (m->next_pg) in multipath_status()
1842 pg_num = m->next_pg->pg_num; in multipath_status()
1843 else if (m->current_pg) in multipath_status()
1844 pg_num = m->current_pg->pg_num; in multipath_status()
1846 pg_num = (m->nr_priority_groups ? 1 : 0); in multipath_status()
1852 list_for_each_entry(pg, &m->priority_groups, list) { in multipath_status()
1853 if (pg->bypassed) in multipath_status()
1855 else if (pg == m->current_pg) in multipath_status()
1862 if (pg->ps.type->status) in multipath_status()
1863 sz += pg->ps.type->status(&pg->ps, NULL, type, in multipath_status()
1865 maxlen - sz); in multipath_status()
1869 DMEMIT("%u %u ", pg->nr_pgpaths, in multipath_status()
1870 pg->ps.type->info_args); in multipath_status()
1872 list_for_each_entry(p, &pg->pgpaths, list) { in multipath_status()
1873 DMEMIT("%s %s %u ", p->path.dev->name, in multipath_status()
1874 p->is_active ? "A" : "F", in multipath_status()
1875 p->fail_count); in multipath_status()
1876 if (pg->ps.type->status) in multipath_status()
1877 sz += pg->ps.type->status(&pg->ps, in multipath_status()
1878 &p->path, type, result + sz, in multipath_status()
1879 maxlen - sz); in multipath_status()
1885 list_for_each_entry(pg, &m->priority_groups, list) { in multipath_status()
1886 DMEMIT("%s ", pg->ps.type->name); in multipath_status()
1888 if (pg->ps.type->status) in multipath_status()
1889 sz += pg->ps.type->status(&pg->ps, NULL, type, in multipath_status()
1891 maxlen - sz); in multipath_status()
1895 DMEMIT("%u %u ", pg->nr_pgpaths, in multipath_status()
1896 pg->ps.type->table_args); in multipath_status()
1898 list_for_each_entry(p, &pg->pgpaths, list) { in multipath_status()
1899 DMEMIT("%s ", p->path.dev->name); in multipath_status()
1900 if (pg->ps.type->status) in multipath_status()
1901 sz += pg->ps.type->status(&pg->ps, in multipath_status()
1902 &p->path, type, result + sz, in multipath_status()
1903 maxlen - sz); in multipath_status()
1909 spin_unlock_irqrestore(&m->lock, flags); in multipath_status()
1915 int r = -EINVAL; in multipath_message()
1917 struct multipath *m = ti->private; in multipath_message() local
1921 mutex_lock(&m->work_mutex); in multipath_message()
1924 r = -EBUSY; in multipath_message()
1930 r = queue_if_no_path(m, true, false, __func__); in multipath_message()
1931 spin_lock_irqsave(&m->lock, flags); in multipath_message()
1932 enable_nopath_timeout(m); in multipath_message()
1933 spin_unlock_irqrestore(&m->lock, flags); in multipath_message()
1936 r = queue_if_no_path(m, false, false, __func__); in multipath_message()
1937 disable_nopath_timeout(m); in multipath_message()
1948 r = bypass_pg_num(m, argv[1], true); in multipath_message()
1951 r = bypass_pg_num(m, argv[1], false); in multipath_message()
1954 r = switch_pg_num(m, argv[1]); in multipath_message()
1965 r = dm_get_device(ti, argv[1], dm_table_get_mode(ti->table), &dev); in multipath_message()
1972 r = action_dev(m, dev, action); in multipath_message()
1977 mutex_unlock(&m->work_mutex); in multipath_message()
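For context, multipath_message() is what backs dmsetup message for this target. With a single argument it toggles queueing (the "queue_if_no_path" / "fail_if_no_path" messages), and with two it operates on a group or path: "switch_group", "enable_group" and "disable_group" take a group number, while "fail_path" and "reinstate_path" take a path device. Illustrative invocations (map name and path device are made up; the leading 0 is the sector argument dmsetup requires):

dmsetup message mpatha 0 fail_path 8:32
dmsetup message mpatha 0 reinstate_path 8:32
dmsetup message mpatha 0 switch_group 2
dmsetup message mpatha 0 queue_if_no_path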
1984 struct multipath *m = ti->private; in multipath_prepare_ioctl() local
1989 pgpath = READ_ONCE(m->current_pgpath); in multipath_prepare_ioctl()
1990 if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) in multipath_prepare_ioctl()
1991 pgpath = choose_pgpath(m, 0); in multipath_prepare_ioctl()
1994 if (!mpath_double_check_test_bit(MPATHF_QUEUE_IO, m)) { in multipath_prepare_ioctl()
1995 *bdev = pgpath->path.dev->bdev; in multipath_prepare_ioctl()
1999 r = -ENOTCONN; in multipath_prepare_ioctl()
2003 r = -EIO; in multipath_prepare_ioctl()
2004 spin_lock_irqsave(&m->lock, flags); in multipath_prepare_ioctl()
2005 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) in multipath_prepare_ioctl()
2006 r = -ENOTCONN; in multipath_prepare_ioctl()
2007 spin_unlock_irqrestore(&m->lock, flags); in multipath_prepare_ioctl()
2010 if (r == -ENOTCONN) { in multipath_prepare_ioctl()
2011 if (!READ_ONCE(m->current_pg)) { in multipath_prepare_ioctl()
2013 (void) choose_pgpath(m, 0); in multipath_prepare_ioctl()
2015 spin_lock_irqsave(&m->lock, flags); in multipath_prepare_ioctl()
2016 if (test_bit(MPATHF_PG_INIT_REQUIRED, &m->flags)) in multipath_prepare_ioctl()
2017 (void) __pg_init_all_paths(m); in multipath_prepare_ioctl()
2018 spin_unlock_irqrestore(&m->lock, flags); in multipath_prepare_ioctl()
2019 dm_table_run_md_queue_async(m->ti->table); in multipath_prepare_ioctl()
2020 process_queued_io_list(m); in multipath_prepare_ioctl()
2026 if (!r && ti->len != i_size_read((*bdev)->bd_inode) >> SECTOR_SHIFT) in multipath_prepare_ioctl()
2034 struct multipath *m = ti->private; in multipath_iterate_devices() local
2039 list_for_each_entry(pg, &m->priority_groups, list) { in multipath_iterate_devices()
2040 list_for_each_entry(p, &pg->pgpaths, list) { in multipath_iterate_devices()
2041 ret = fn(ti, p->path.dev, ti->begin, ti->len, data); in multipath_iterate_devices()
2053 struct request_queue *q = bdev_get_queue(pgpath->path.dev->bdev); in pgpath_busy()
2069 struct multipath *m = ti->private; in multipath_busy() local
2074 if (atomic_read(&m->pg_init_in_progress)) in multipath_busy()
2077 /* no paths available, for blk-mq: rely on IO mapping to delay requeue */ in multipath_busy()
2078 if (!atomic_read(&m->nr_valid_paths)) { in multipath_busy()
2080 spin_lock_irqsave(&m->lock, flags); in multipath_busy()
2081 if (test_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags)) { in multipath_busy()
2082 spin_unlock_irqrestore(&m->lock, flags); in multipath_busy()
2083 return (m->queue_mode != DM_TYPE_REQUEST_BASED); in multipath_busy()
2085 spin_unlock_irqrestore(&m->lock, flags); in multipath_busy()
2089 pg = READ_ONCE(m->current_pg); in multipath_busy()
2090 next_pg = READ_ONCE(m->next_pg); in multipath_busy()
2091 if (unlikely(!READ_ONCE(m->current_pgpath) && next_pg)) in multipath_busy()
2106 * If there is one non-busy active path at least, the path selector in multipath_busy()
2110 list_for_each_entry(pgpath, &pg->pgpaths, list) { in multipath_busy()
2111 if (pgpath->is_active) { in multipath_busy()
2132 /*-----------------------------------------------------------------
2134 *---------------------------------------------------------------*/
2165 r = -ENOMEM; in dm_multipath_init()
2179 r = -ENOMEM; in dm_multipath_init()
2185 DMERR("request-based register failed %d", r); in dm_multipath_init()
2186 r = -EINVAL; in dm_multipath_init()
2216 MODULE_AUTHOR("Sistina Software <dm-devel@redhat.com>");