Lines matching full:pool — whole-word hits for "pool" in the dm thin-pool target source. Each entry shows the source line number, the matching line, and the enclosing function and/or the role of the matched identifier (argument, local, member, struct).
41 * The block size of the device holding pool data must be
191 * A pool device ties together a metadata device and a data device. It
198 * The pool runs in various modes. Ordered in degraded order for comparisons.
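For reference, the mode enumeration that comment describes can be sketched from the comparisons appearing later in this listing (!= PM_WRITE, == PM_OUT_OF_DATA_SPACE, >= PM_OUT_OF_METADATA_SPACE, == PM_FAIL). The ordering below is implied by those comparisons; the per-mode comments are editorial glosses, not quotes from the source.

enum pool_mode {
	PM_WRITE,			/* metadata may be changed */
	PM_OUT_OF_DATA_SPACE,		/* metadata may be changed, but no new data blocks can be allocated */
	PM_OUT_OF_METADATA_SPACE,	/* metadata may not be changed */
	PM_READ_ONLY,			/* metadata may not be changed */
	PM_FAIL,			/* all I/O fails */
};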
229 struct pool { struct
231 struct dm_target *ti; /* Only set if a pool target is bound */ argument
289 static void metadata_operation_failed(struct pool *pool, const char *op, int r); argument
291 static enum pool_mode get_pool_mode(struct pool *pool) in get_pool_mode() argument
293 return pool->pf.mode; in get_pool_mode()
296 static void notify_of_pool_mode_change(struct pool *pool) in notify_of_pool_mode_change() argument
306 enum pool_mode mode = get_pool_mode(pool); in notify_of_pool_mode_change()
309 if (!pool->pf.error_if_no_space) in notify_of_pool_mode_change()
315 dm_table_event(pool->ti->table); in notify_of_pool_mode_change()
316 DMINFO("%s: switching pool to %s%s mode", in notify_of_pool_mode_change()
317 dm_device_name(pool->pool_md), in notify_of_pool_mode_change()
322 * Target context for a pool.
326 struct pool *pool; member
345 struct pool *pool; member
366 static bool block_size_is_power_of_two(struct pool *pool) in block_size_is_power_of_two() argument
368 return pool->sectors_per_block_shift >= 0; in block_size_is_power_of_two()
371 static sector_t block_to_sectors(struct pool *pool, dm_block_t b) in block_to_sectors() argument
373 return block_size_is_power_of_two(pool) ? in block_to_sectors()
374 (b << pool->sectors_per_block_shift) : in block_to_sectors()
375 (b * pool->sectors_per_block); in block_to_sectors()
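A standalone illustration of the fast path above, with hypothetical numbers: when the block size is a power of two the block-to-sector conversion is a shift; otherwise sectors_per_block_shift is stored as -1 and a multiply is used. This is a userspace sketch mirroring the logic, not the kernel function itself.

#include <stdint.h>
#include <stdio.h>

/* Mirrors the block_to_sectors() logic for illustration only. */
static uint64_t to_sectors(uint64_t block, uint32_t sectors_per_block,
			   int sectors_per_block_shift)
{
	return sectors_per_block_shift >= 0 ?
		block << sectors_per_block_shift :	/* power-of-two block size */
		block * sectors_per_block;		/* general case */
}

int main(void)
{
	/* 1 MiB blocks: 2048 sectors, 2048 == 1 << 11 */
	printf("%llu\n", (unsigned long long)to_sectors(5, 2048, 11));	/* 10240 */
	/* 1.5 MiB blocks: 3072 sectors, not a power of two */
	printf("%llu\n", (unsigned long long)to_sectors(5, 3072, -1));	/* 15360 */
	return 0;
}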
400 sector_t s = block_to_sectors(tc->pool, data_b); in issue_discard()
401 sector_t len = block_to_sectors(tc->pool, data_e - data_b); in issue_discard()
436 static void wake_worker(struct pool *pool) in wake_worker() argument
438 queue_work(pool->wq, &pool->worker); in wake_worker()
443 static int bio_detain(struct pool *pool, struct dm_cell_key *key, struct bio *bio, in bio_detain() argument
453 cell_prealloc = dm_bio_prison_alloc_cell(pool->prison, GFP_NOIO); in bio_detain()
455 r = dm_bio_detain(pool->prison, key, bio, cell_prealloc, cell_result); in bio_detain()
461 dm_bio_prison_free_cell(pool->prison, cell_prealloc); in bio_detain()
466 static void cell_release(struct pool *pool, in cell_release() argument
470 dm_cell_release(pool->prison, cell, bios); in cell_release()
471 dm_bio_prison_free_cell(pool->prison, cell); in cell_release()
474 static void cell_visit_release(struct pool *pool, in cell_visit_release() argument
479 dm_cell_visit_release(pool->prison, fn, context, cell); in cell_visit_release()
480 dm_bio_prison_free_cell(pool->prison, cell); in cell_visit_release()
483 static void cell_release_no_holder(struct pool *pool, in cell_release_no_holder() argument
487 dm_cell_release_no_holder(pool->prison, cell, bios); in cell_release_no_holder()
488 dm_bio_prison_free_cell(pool->prison, cell); in cell_release_no_holder()
491 static void cell_error_with_code(struct pool *pool, in cell_error_with_code() argument
494 dm_cell_error(pool->prison, cell, error_code); in cell_error_with_code()
495 dm_bio_prison_free_cell(pool->prison, cell); in cell_error_with_code()
498 static blk_status_t get_pool_io_error_code(struct pool *pool) in get_pool_io_error_code() argument
500 return pool->out_of_data_space ? BLK_STS_NOSPC : BLK_STS_IOERR; in get_pool_io_error_code()
503 static void cell_error(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_error() argument
505 cell_error_with_code(pool, cell, get_pool_io_error_code(pool)); in cell_error()
508 static void cell_success(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_success() argument
510 cell_error_with_code(pool, cell, 0); in cell_success()
513 static void cell_requeue(struct pool *pool, struct dm_bio_prison_cell *cell) in cell_requeue() argument
515 cell_error_with_code(pool, cell, BLK_STS_DM_REQUEUE); in cell_requeue()
539 static void __pool_table_insert(struct pool *pool) in __pool_table_insert() argument
542 list_add(&pool->list, &dm_thin_pool_table.pools); in __pool_table_insert()
545 static void __pool_table_remove(struct pool *pool) in __pool_table_remove() argument
548 list_del(&pool->list); in __pool_table_remove()
551 static struct pool *__pool_table_lookup(struct mapped_device *md) in __pool_table_lookup()
553 struct pool *pool = NULL, *tmp; in __pool_table_lookup() local
559 pool = tmp; in __pool_table_lookup()
564 return pool; in __pool_table_lookup()
567 static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev) in __pool_table_lookup_metadata_dev()
569 struct pool *pool = NULL, *tmp; in __pool_table_lookup_metadata_dev() local
575 pool = tmp; in __pool_table_lookup_metadata_dev()
580 return pool; in __pool_table_lookup_metadata_dev()
626 struct pool *pool = tc->pool; in requeue_deferred_cells() local
637 cell_requeue(pool, cell); in requeue_deferred_cells()
655 static void error_retry_list_with_code(struct pool *pool, blk_status_t error) in error_retry_list_with_code() argument
660 list_for_each_entry_rcu(tc, &pool->active_thins, list) in error_retry_list_with_code()
665 static void error_retry_list(struct pool *pool) in error_retry_list() argument
667 error_retry_list_with_code(pool, get_pool_io_error_code(pool)); in error_retry_list()
672 * Much of the code depends on pool object resources (lists, workqueues, etc)
673 * but most is exclusively called from the thin target rather than the thin-pool
679 struct pool *pool = tc->pool; in get_bio_block() local
682 if (block_size_is_power_of_two(pool)) in get_bio_block()
683 block_nr >>= pool->sectors_per_block_shift; in get_bio_block()
685 (void) sector_div(block_nr, pool->sectors_per_block); in get_bio_block()
696 struct pool *pool = tc->pool; in get_bio_block_range() local
700 b += pool->sectors_per_block - 1ull; /* so we round up */ in get_bio_block_range()
702 if (block_size_is_power_of_two(pool)) { in get_bio_block_range()
703 b >>= pool->sectors_per_block_shift; in get_bio_block_range()
704 e >>= pool->sectors_per_block_shift; in get_bio_block_range()
706 (void) sector_div(b, pool->sectors_per_block); in get_bio_block_range()
707 (void) sector_div(e, pool->sectors_per_block); in get_bio_block_range()
720 struct pool *pool = tc->pool; in remap() local
724 if (block_size_is_power_of_two(pool)) in remap()
726 (block << pool->sectors_per_block_shift) | in remap()
727 (bi_sector & (pool->sectors_per_block - 1)); in remap()
729 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + in remap()
730 sector_div(bi_sector, pool->sectors_per_block); in remap()
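A worked example of the remap arithmetic above, with hypothetical values: for 2048-sector blocks, a bio at virtual sector 6300 sits in virtual block 3 (6300 >> 11) at offset 156 (6300 & 2047); if the metadata maps that virtual block to data block 7, the bio is reissued at data sector (7 << 11) | 156 = 14492. Only the block base changes; the offset within the block is preserved.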
744 static void inc_all_io_entry(struct pool *pool, struct bio *bio) in inc_all_io_entry() argument
752 h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds); in inc_all_io_entry()
757 struct pool *pool = tc->pool; in issue() local
778 spin_lock_irq(&pool->lock); in issue()
779 bio_list_add(&pool->deferred_flush_bios, bio); in issue()
780 spin_unlock_irq(&pool->lock); in issue()
832 struct pool *pool = m->tc->pool; in __complete_mapping_preparation() local
835 list_add_tail(&m->list, &pool->prepared_mappings); in __complete_mapping_preparation()
836 wake_worker(pool); in __complete_mapping_preparation()
843 struct pool *pool = m->tc->pool; in complete_mapping_preparation() local
845 spin_lock_irqsave(&pool->lock, flags); in complete_mapping_preparation()
847 spin_unlock_irqrestore(&pool->lock, flags); in complete_mapping_preparation()
885 struct pool *pool = tc->pool; in cell_defer_no_holder() local
890 cell_release_no_holder(pool, cell, &tc->deferred_bio_list); in cell_defer_no_holder()
895 wake_worker(pool); in cell_defer_no_holder()
916 inc_all_io_entry(info->tc->pool, bio); in __inc_remap_and_issue_cell()
944 cell_visit_release(tc->pool, __inc_remap_and_issue_cell, in inc_remap_and_issue_cell()
956 cell_error(m->tc->pool, m->cell); in process_prepared_mapping_fail()
958 mempool_free(m, &m->tc->pool->mapping_pool); in process_prepared_mapping_fail()
963 struct pool *pool = tc->pool; in complete_overwrite_bio() local
988 spin_lock_irq(&pool->lock); in complete_overwrite_bio()
989 bio_list_add(&pool->deferred_flush_completions, bio); in complete_overwrite_bio()
990 spin_unlock_irq(&pool->lock); in complete_overwrite_bio()
996 struct pool *pool = tc->pool; in process_prepared_mapping() local
1001 cell_error(pool, m->cell); in process_prepared_mapping()
1012 metadata_operation_failed(pool, "dm_thin_insert_block", r); in process_prepared_mapping()
1013 cell_error(pool, m->cell); in process_prepared_mapping()
1027 inc_all_io_entry(tc->pool, m->cell->holder); in process_prepared_mapping()
1034 mempool_free(m, &pool->mapping_pool); in process_prepared_mapping()
1044 mempool_free(m, &tc->pool->mapping_pool); in free_discard_mapping()
1066 metadata_operation_failed(tc->pool, "dm_thin_remove_range", r); in process_prepared_discard_no_passdown()
1072 mempool_free(m, &tc->pool->mapping_pool); in process_prepared_discard_no_passdown()
1087 struct pool *pool = tc->pool; in passdown_double_checking_shared_status() local
1095 r = dm_pool_block_is_shared(pool->pmd, b, &shared); in passdown_double_checking_shared_status()
1108 r = dm_pool_block_is_shared(pool->pmd, e, &shared); in passdown_double_checking_shared_status()
1129 struct pool *pool = m->tc->pool; in queue_passdown_pt2() local
1131 spin_lock_irqsave(&pool->lock, flags); in queue_passdown_pt2()
1132 list_add_tail(&m->list, &pool->prepared_discards_pt2); in queue_passdown_pt2()
1133 spin_unlock_irqrestore(&pool->lock, flags); in queue_passdown_pt2()
1134 wake_worker(pool); in queue_passdown_pt2()
1151 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt1() local
1162 metadata_operation_failed(pool, "dm_thin_remove_range", r); in process_prepared_discard_passdown_pt1()
1165 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1173 r = dm_pool_inc_data_range(pool->pmd, m->data_block, data_end); in process_prepared_discard_passdown_pt1()
1175 metadata_operation_failed(pool, "dm_pool_inc_data_range", r); in process_prepared_discard_passdown_pt1()
1178 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt1()
1185 dm_device_name(tc->pool->pool_md)); in process_prepared_discard_passdown_pt1()
1208 struct pool *pool = tc->pool; in process_prepared_discard_passdown_pt2() local
1214 r = dm_pool_dec_data_range(pool->pmd, m->data_block, in process_prepared_discard_passdown_pt2()
1217 metadata_operation_failed(pool, "dm_pool_dec_data_range", r); in process_prepared_discard_passdown_pt2()
1223 mempool_free(m, &pool->mapping_pool); in process_prepared_discard_passdown_pt2()
1226 static void process_prepared(struct pool *pool, struct list_head *head, in process_prepared() argument
1233 spin_lock_irq(&pool->lock); in process_prepared()
1235 spin_unlock_irq(&pool->lock); in process_prepared()
1244 static int io_overlaps_block(struct pool *pool, struct bio *bio) in io_overlaps_block() argument
1247 (pool->sectors_per_block << SECTOR_SHIFT); in io_overlaps_block()
1250 static int io_overwrites_block(struct pool *pool, struct bio *bio) in io_overwrites_block() argument
1253 io_overlaps_block(pool, bio); in io_overwrites_block()
1263 static int ensure_next_mapping(struct pool *pool) in ensure_next_mapping() argument
1265 if (pool->next_mapping) in ensure_next_mapping()
1268 pool->next_mapping = mempool_alloc(&pool->mapping_pool, GFP_ATOMIC); in ensure_next_mapping()
1270 return pool->next_mapping ? 0 : -ENOMEM; in ensure_next_mapping()
1273 static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool) in get_next_mapping() argument
1275 struct dm_thin_new_mapping *m = pool->next_mapping; in get_next_mapping()
1277 BUG_ON(!pool->next_mapping); in get_next_mapping()
1283 pool->next_mapping = NULL; in get_next_mapping()
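The two helpers above implement a reserve-then-consume pattern: a caller reserves a mapping with ensure_next_mapping() while it can still back out (the GFP_ATOMIC allocation may fail), then takes it with get_next_mapping() at a point where failure is no longer tolerable. A hypothetical caller, for illustration only:

static void example_caller(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m;

	if (ensure_next_mapping(pool))
		return;	/* out of memory: leave the work deferred and retry later */

	m = get_next_mapping(pool);	/* cannot fail after a successful reserve */
	/* ... fill in *m and queue it on one of the prepared lists ... */
}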
1297 dm_kcopyd_zero(tc->pool->copier, 1, &to, 0, copy_complete, m); in ll_zero()
1304 struct pool *pool = tc->pool; in remap_and_issue_overwrite() local
1310 inc_all_io_entry(pool, bio); in remap_and_issue_overwrite()
1323 struct pool *pool = tc->pool; in schedule_copy() local
1324 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_copy()
1339 if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list)) in schedule_copy()
1343 * IO to pool_dev remaps to the pool target's data_dev. in schedule_copy()
1348 if (io_overwrites_block(pool, bio)) in schedule_copy()
1354 from.sector = data_origin * pool->sectors_per_block; in schedule_copy()
1358 to.sector = data_dest * pool->sectors_per_block; in schedule_copy()
1361 dm_kcopyd_copy(pool->copier, &from, 1, &to, in schedule_copy()
1367 if (len < pool->sectors_per_block && pool->pf.zero_new_blocks) { in schedule_copy()
1370 data_dest * pool->sectors_per_block + len, in schedule_copy()
1371 (data_dest + 1) * pool->sectors_per_block); in schedule_copy()
1384 tc->pool->sectors_per_block); in schedule_internal_copy()
1391 struct pool *pool = tc->pool; in schedule_zero() local
1392 struct dm_thin_new_mapping *m = get_next_mapping(pool); in schedule_zero()
1406 if (pool->pf.zero_new_blocks) { in schedule_zero()
1407 if (io_overwrites_block(pool, bio)) in schedule_zero()
1410 ll_zero(tc, m, data_block * pool->sectors_per_block, in schedule_zero()
1411 (data_block + 1) * pool->sectors_per_block); in schedule_zero()
1420 struct pool *pool = tc->pool; in schedule_external_copy() local
1421 sector_t virt_block_begin = virt_block * pool->sectors_per_block; in schedule_external_copy()
1422 sector_t virt_block_end = (virt_block + 1) * pool->sectors_per_block; in schedule_external_copy()
1427 pool->sectors_per_block); in schedule_external_copy()
1438 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode);
1440 static void requeue_bios(struct pool *pool);
1447 static bool is_read_only(struct pool *pool) in is_read_only() argument
1449 return is_read_only_pool_mode(get_pool_mode(pool)); in is_read_only()
1452 static void check_for_metadata_space(struct pool *pool) in check_for_metadata_space() argument
1458 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free); in check_for_metadata_space()
1464 if (ooms_reason && !is_read_only(pool)) { in check_for_metadata_space()
1466 set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE); in check_for_metadata_space()
1470 static void check_for_data_space(struct pool *pool) in check_for_data_space() argument
1475 if (get_pool_mode(pool) != PM_OUT_OF_DATA_SPACE) in check_for_data_space()
1478 r = dm_pool_get_free_block_count(pool->pmd, &nr_free); in check_for_data_space()
1483 set_pool_mode(pool, PM_WRITE); in check_for_data_space()
1484 requeue_bios(pool); in check_for_data_space()
1492 static int commit(struct pool *pool) in commit() argument
1496 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) in commit()
1499 r = dm_pool_commit_metadata(pool->pmd); in commit()
1501 metadata_operation_failed(pool, "dm_pool_commit_metadata", r); in commit()
1503 check_for_metadata_space(pool); in commit()
1504 check_for_data_space(pool); in commit()
1510 static void check_low_water_mark(struct pool *pool, dm_block_t free_blocks) in check_low_water_mark() argument
1512 if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) { in check_low_water_mark()
1514 dm_device_name(pool->pool_md)); in check_low_water_mark()
1515 spin_lock_irq(&pool->lock); in check_low_water_mark()
1516 pool->low_water_triggered = true; in check_low_water_mark()
1517 spin_unlock_irq(&pool->lock); in check_low_water_mark()
1518 dm_table_event(pool->ti->table); in check_low_water_mark()
1526 struct pool *pool = tc->pool; in alloc_data_block() local
1528 if (WARN_ON(get_pool_mode(pool) != PM_WRITE)) in alloc_data_block()
1531 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1533 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1537 check_low_water_mark(pool, free_blocks); in alloc_data_block()
1544 r = commit(pool); in alloc_data_block()
1548 r = dm_pool_get_free_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1550 metadata_operation_failed(pool, "dm_pool_get_free_block_count", r); in alloc_data_block()
1555 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1560 r = dm_pool_alloc_data_block(pool->pmd, result); in alloc_data_block()
1563 set_pool_mode(pool, PM_OUT_OF_DATA_SPACE); in alloc_data_block()
1565 metadata_operation_failed(pool, "dm_pool_alloc_data_block", r); in alloc_data_block()
1569 r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks); in alloc_data_block()
1571 metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r); in alloc_data_block()
1577 r = commit(pool); in alloc_data_block()
1599 static blk_status_t should_error_unserviceable_bio(struct pool *pool) in should_error_unserviceable_bio() argument
1601 enum pool_mode m = get_pool_mode(pool); in should_error_unserviceable_bio()
1606 DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode"); in should_error_unserviceable_bio()
1610 return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0; in should_error_unserviceable_bio()
1618 DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode"); in should_error_unserviceable_bio()
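The three fragments above come from a switch over the pool mode; a reconstruction is sketched below. The PM_WRITE, PM_OUT_OF_DATA_SPACE and unknown-mode branches follow directly from the matched lines; treating the remaining read-only and fail modes as plain BLK_STS_IOERR is an assumption.

static blk_status_t should_error_unserviceable_bio(struct pool *pool)
{
	switch (get_pool_mode(pool)) {
	case PM_WRITE:
		/* Shouldn't get here: a writable pool can service the bio. */
		DMERR_LIMIT("bio unserviceable, yet pool is in PM_WRITE mode");
		return BLK_STS_IOERR;

	case PM_OUT_OF_DATA_SPACE:
		/* 0 means "queue the bio and retry once space is added". */
		return pool->pf.error_if_no_space ? BLK_STS_NOSPC : 0;

	case PM_OUT_OF_METADATA_SPACE:
	case PM_READ_ONLY:
	case PM_FAIL:
		return BLK_STS_IOERR;	/* assumption: degraded modes error the bio */

	default:
		DMERR_LIMIT("bio unserviceable, yet pool has an unknown mode");
		return BLK_STS_IOERR;
	}
}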
1623 static void handle_unserviceable_bio(struct pool *pool, struct bio *bio) in handle_unserviceable_bio() argument
1625 blk_status_t error = should_error_unserviceable_bio(pool); in handle_unserviceable_bio()
1634 static void retry_bios_on_resume(struct pool *pool, struct dm_bio_prison_cell *cell) in retry_bios_on_resume() argument
1640 error = should_error_unserviceable_bio(pool); in retry_bios_on_resume()
1642 cell_error_with_code(pool, cell, error); in retry_bios_on_resume()
1647 cell_release(pool, cell, &bios); in retry_bios_on_resume()
1656 struct pool *pool = tc->pool; in process_discard_cell_no_passdown() local
1657 struct dm_thin_new_mapping *m = get_next_mapping(pool); in process_discard_cell_no_passdown()
1669 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in process_discard_cell_no_passdown()
1670 pool->process_prepared_discard(m); in process_discard_cell_no_passdown()
1676 struct pool *pool = tc->pool; in break_up_discard_bio() local
1686 r = ensure_next_mapping(pool); in break_up_discard_bio()
1701 if (bio_detain(tc->pool, &data_key, NULL, &data_cell)) { in break_up_discard_bio()
1711 m = get_next_mapping(pool); in break_up_discard_bio()
1729 if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) in break_up_discard_bio()
1730 pool->process_prepared_discard(m); in break_up_discard_bio()
1773 if (bio_detain(tc->pool, &virt_key, bio, &virt_cell)) in process_discard_bio()
1783 tc->pool->process_discard_cell(tc, virt_cell); in process_discard_bio()
1793 struct pool *pool = tc->pool; in break_sharing() local
1803 retry_bios_on_resume(pool, cell); in break_sharing()
1809 cell_error(pool, cell); in break_sharing()
1827 h->shared_read_entry = dm_deferred_entry_inc(info->tc->pool->shared_read_ds); in __remap_and_issue_shared_cell()
1828 inc_all_io_entry(info->tc->pool, bio); in __remap_and_issue_shared_cell()
1845 cell_visit_release(tc->pool, __remap_and_issue_shared_cell, in remap_and_issue_shared_cell()
1861 struct pool *pool = tc->pool; in process_shared_bio() local
1869 if (bio_detain(pool, &key, bio, &data_cell)) { in process_shared_bio()
1880 h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds); in process_shared_bio()
1881 inc_all_io_entry(pool, bio); in process_shared_bio()
1894 struct pool *pool = tc->pool; in provision_block() local
1900 inc_all_io_entry(pool, bio); in provision_block()
1927 retry_bios_on_resume(pool, cell); in provision_block()
1933 cell_error(pool, cell); in provision_block()
1941 struct pool *pool = tc->pool; in process_cell() local
1947 cell_requeue(pool, cell); in process_cell()
1957 inc_all_io_entry(pool, bio); in process_cell()
1965 inc_all_io_entry(pool, bio); in process_cell()
1995 struct pool *pool = tc->pool; in process_bio() local
2005 if (bio_detain(pool, &key, bio, &cell)) in process_bio()
2023 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2027 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2038 handle_unserviceable_bio(tc->pool, bio); in __process_bio_read_only()
2043 inc_all_io_entry(tc->pool, bio); in __process_bio_read_only()
2084 cell_success(tc->pool, cell); in process_cell_success()
2089 cell_error(tc->pool, cell); in process_cell_fail()
2096 static int need_commit_due_to_time(struct pool *pool) in need_commit_due_to_time() argument
2098 return !time_in_range(jiffies, pool->last_commit_jiffies, in need_commit_due_to_time()
2099 pool->last_commit_jiffies + COMMIT_PERIOD); in need_commit_due_to_time()
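A userspace sketch of the check above: a commit is due when "now" falls outside the window [last_commit, last_commit + period] (the kernel's time_in_range() also copes with jiffies wrap-around). The millisecond units and period value here are assumptions for illustration; dm-thin expresses COMMIT_PERIOD in jiffies.

#include <stdbool.h>
#include <stdint.h>

#define COMMIT_PERIOD_MS 1000	/* assumed period, for illustration */

static bool need_commit_due_to_time(uint64_t now_ms, uint64_t last_commit_ms)
{
	/* Due when "now" is outside [last_commit, last_commit + period]. */
	return now_ms < last_commit_ms ||
	       now_ms > last_commit_ms + COMMIT_PERIOD_MS;
}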
2168 struct pool *pool = tc->pool; in process_thin_deferred_bios() local
2203 if (ensure_next_mapping(pool)) { in process_thin_deferred_bios()
2212 pool->process_discard(tc, bio); in process_thin_deferred_bios()
2214 pool->process_bio(tc, bio); in process_thin_deferred_bios()
2217 throttle_work_update(&pool->throttle); in process_thin_deferred_bios()
2218 dm_pool_issue_prefetches(pool->pmd); in process_thin_deferred_bios()
2241 static unsigned sort_cells(struct pool *pool, struct list_head *cells) in sort_cells() argument
2250 pool->cell_sort_array[count++] = cell; in sort_cells()
2254 sort(pool->cell_sort_array, count, sizeof(cell), cmp_cells, NULL); in sort_cells()
2261 struct pool *pool = tc->pool; in process_thin_deferred_cells() local
2276 count = sort_cells(tc->pool, &cells); in process_thin_deferred_cells()
2279 cell = pool->cell_sort_array[i]; in process_thin_deferred_cells()
2287 if (ensure_next_mapping(pool)) { in process_thin_deferred_cells()
2289 list_add(&pool->cell_sort_array[j]->user_list, &cells); in process_thin_deferred_cells()
2298 pool->process_discard_cell(tc, cell); in process_thin_deferred_cells()
2300 pool->process_cell(tc, cell); in process_thin_deferred_cells()
2313 static struct thin_c *get_first_thin(struct pool *pool) in get_first_thin() argument
2318 if (!list_empty(&pool->active_thins)) { in get_first_thin()
2319 tc = list_entry_rcu(pool->active_thins.next, struct thin_c, list); in get_first_thin()
2327 static struct thin_c *get_next_thin(struct pool *pool, struct thin_c *tc) in get_next_thin() argument
2332 list_for_each_entry_continue_rcu(tc, &pool->active_thins, list) { in get_next_thin()
2344 static void process_deferred_bios(struct pool *pool) in process_deferred_bios() argument
2350 tc = get_first_thin(pool); in process_deferred_bios()
2354 tc = get_next_thin(pool, tc); in process_deferred_bios()
2364 spin_lock_irq(&pool->lock); in process_deferred_bios()
2365 bio_list_merge(&bios, &pool->deferred_flush_bios); in process_deferred_bios()
2366 bio_list_init(&pool->deferred_flush_bios); in process_deferred_bios()
2368 bio_list_merge(&bio_completions, &pool->deferred_flush_completions); in process_deferred_bios()
2369 bio_list_init(&pool->deferred_flush_completions); in process_deferred_bios()
2370 spin_unlock_irq(&pool->lock); in process_deferred_bios()
2373 !(dm_pool_changed_this_transaction(pool->pmd) && need_commit_due_to_time(pool))) in process_deferred_bios()
2376 if (commit(pool)) { in process_deferred_bios()
2383 pool->last_commit_jiffies = jiffies; in process_deferred_bios()
2402 struct pool *pool = container_of(ws, struct pool, worker); in do_worker() local
2404 throttle_work_start(&pool->throttle); in do_worker()
2405 dm_pool_issue_prefetches(pool->pmd); in do_worker()
2406 throttle_work_update(&pool->throttle); in do_worker()
2407 process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping); in do_worker()
2408 throttle_work_update(&pool->throttle); in do_worker()
2409 process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard); in do_worker()
2410 throttle_work_update(&pool->throttle); in do_worker()
2411 process_prepared(pool, &pool->prepared_discards_pt2, &pool->process_prepared_discard_pt2); in do_worker()
2412 throttle_work_update(&pool->throttle); in do_worker()
2413 process_deferred_bios(pool); in do_worker()
2414 throttle_work_complete(&pool->throttle); in do_worker()
2423 struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker); in do_waker() local
2424 wake_worker(pool); in do_waker()
2425 queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD); in do_waker()
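do_waker() above is a self-rearming delayed work item: each run wakes the worker and re-queues itself one COMMIT_PERIOD in the future, which keeps periodic commits happening even when no new bios arrive. A minimal kernel-style sketch of the same pattern, with hypothetical names (my_ctx, my_waker, MY_PERIOD):

#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define MY_PERIOD HZ	/* once a second, analogous to COMMIT_PERIOD */

struct my_ctx {
	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;
};

static void my_waker(struct work_struct *ws)
{
	struct my_ctx *ctx = container_of(to_delayed_work(ws), struct my_ctx, waker);

	queue_work(ctx->wq, &ctx->worker);			/* kick the worker now */
	queue_delayed_work(ctx->wq, &ctx->waker, MY_PERIOD);	/* and rearm */
}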
2430 * timeout either the pool will have been resized (and thus back in
2435 struct pool *pool = container_of(to_delayed_work(ws), struct pool, in do_no_space_timeout() local
2438 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { in do_no_space_timeout()
2439 pool->pf.error_if_no_space = true; in do_no_space_timeout()
2440 notify_of_pool_mode_change(pool); in do_no_space_timeout()
2441 error_retry_list_with_code(pool, BLK_STS_NOSPC); in do_no_space_timeout()
2462 static void pool_work_wait(struct pool_work *pw, struct pool *pool, in pool_work_wait() argument
2467 queue_work(pool->wq, &pw->worker); in pool_work_wait()
2503 pool_work_wait(&w.pw, tc->pool, fn); in noflush_work()
2513 static void set_discard_callbacks(struct pool *pool) in set_discard_callbacks() argument
2515 struct pool_c *pt = pool->ti->private; in set_discard_callbacks()
2518 pool->process_discard_cell = process_discard_cell_passdown; in set_discard_callbacks()
2519 pool->process_prepared_discard = process_prepared_discard_passdown_pt1; in set_discard_callbacks()
2520 pool->process_prepared_discard_pt2 = process_prepared_discard_passdown_pt2; in set_discard_callbacks()
2522 pool->process_discard_cell = process_discard_cell_no_passdown; in set_discard_callbacks()
2523 pool->process_prepared_discard = process_prepared_discard_no_passdown; in set_discard_callbacks()
2527 static void set_pool_mode(struct pool *pool, enum pool_mode new_mode) in set_pool_mode() argument
2529 struct pool_c *pt = pool->ti->private; in set_pool_mode()
2530 bool needs_check = dm_pool_metadata_needs_check(pool->pmd); in set_pool_mode()
2531 enum pool_mode old_mode = get_pool_mode(pool); in set_pool_mode()
2535 * Never allow the pool to transition to PM_WRITE mode if user in set_pool_mode()
2539 DMERR("%s: unable to switch pool to write mode until repaired.", in set_pool_mode()
2540 dm_device_name(pool->pool_md)); in set_pool_mode()
2549 * pool move out of the old mode. in set_pool_mode()
2556 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2557 pool->process_bio = process_bio_fail; in set_pool_mode()
2558 pool->process_discard = process_bio_fail; in set_pool_mode()
2559 pool->process_cell = process_cell_fail; in set_pool_mode()
2560 pool->process_discard_cell = process_cell_fail; in set_pool_mode()
2561 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2562 pool->process_prepared_discard = process_prepared_discard_fail; in set_pool_mode()
2564 error_retry_list(pool); in set_pool_mode()
2569 dm_pool_metadata_read_only(pool->pmd); in set_pool_mode()
2570 pool->process_bio = process_bio_read_only; in set_pool_mode()
2571 pool->process_discard = process_bio_success; in set_pool_mode()
2572 pool->process_cell = process_cell_read_only; in set_pool_mode()
2573 pool->process_discard_cell = process_cell_success; in set_pool_mode()
2574 pool->process_prepared_mapping = process_prepared_mapping_fail; in set_pool_mode()
2575 pool->process_prepared_discard = process_prepared_discard_success; in set_pool_mode()
2577 error_retry_list(pool); in set_pool_mode()
2583 * would trigger userland to extend the pool before we in set_pool_mode()
2589 pool->out_of_data_space = true; in set_pool_mode()
2590 pool->process_bio = process_bio_read_only; in set_pool_mode()
2591 pool->process_discard = process_discard_bio; in set_pool_mode()
2592 pool->process_cell = process_cell_read_only; in set_pool_mode()
2593 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2594 set_discard_callbacks(pool); in set_pool_mode()
2596 if (!pool->pf.error_if_no_space && no_space_timeout) in set_pool_mode()
2597 queue_delayed_work(pool->wq, &pool->no_space_timeout, no_space_timeout); in set_pool_mode()
2602 cancel_delayed_work_sync(&pool->no_space_timeout); in set_pool_mode()
2603 pool->out_of_data_space = false; in set_pool_mode()
2604 pool->pf.error_if_no_space = pt->requested_pf.error_if_no_space; in set_pool_mode()
2605 dm_pool_metadata_read_write(pool->pmd); in set_pool_mode()
2606 pool->process_bio = process_bio; in set_pool_mode()
2607 pool->process_discard = process_discard_bio; in set_pool_mode()
2608 pool->process_cell = process_cell; in set_pool_mode()
2609 pool->process_prepared_mapping = process_prepared_mapping; in set_pool_mode()
2610 set_discard_callbacks(pool); in set_pool_mode()
2614 pool->pf.mode = new_mode; in set_pool_mode()
2616 * The pool mode may have changed, sync it so bind_control_target() in set_pool_mode()
2622 notify_of_pool_mode_change(pool); in set_pool_mode()
2625 static void abort_transaction(struct pool *pool) in abort_transaction() argument
2627 const char *dev_name = dm_device_name(pool->pool_md); in abort_transaction()
2630 if (dm_pool_abort_metadata(pool->pmd)) { in abort_transaction()
2632 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2635 if (dm_pool_metadata_set_needs_check(pool->pmd)) { in abort_transaction()
2637 set_pool_mode(pool, PM_FAIL); in abort_transaction()
2641 static void metadata_operation_failed(struct pool *pool, const char *op, int r) in metadata_operation_failed() argument
2644 dm_device_name(pool->pool_md), op, r); in metadata_operation_failed()
2646 abort_transaction(pool); in metadata_operation_failed()
2647 set_pool_mode(pool, PM_READ_ONLY); in metadata_operation_failed()
2661 struct pool *pool = tc->pool; in thin_defer_bio() local
2667 wake_worker(pool); in thin_defer_bio()
2672 struct pool *pool = tc->pool; in thin_defer_bio_with_throttle() local
2674 throttle_lock(&pool->throttle); in thin_defer_bio_with_throttle()
2676 throttle_unlock(&pool->throttle); in thin_defer_bio_with_throttle()
2681 struct pool *pool = tc->pool; in thin_defer_cell() local
2683 throttle_lock(&pool->throttle); in thin_defer_cell()
2687 throttle_unlock(&pool->throttle); in thin_defer_cell()
2689 wake_worker(pool); in thin_defer_cell()
2724 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_bio_map()
2739 if (bio_detain(tc->pool, &key, bio, &virt_cell)) in thin_bio_map()
2769 if (bio_detain(tc->pool, &key, bio, &data_cell)) { in thin_bio_map()
2774 inc_all_io_entry(tc->pool, bio); in thin_bio_map()
2790 * pool is switched to fail-io mode. in thin_bio_map()
2798 static void requeue_bios(struct pool *pool) in requeue_bios() argument
2803 list_for_each_entry_rcu(tc, &pool->active_thins, list) { in requeue_bios()
2813 * Binding of control targets to a pool object
2833 struct pool *pool = pt->pool; in disable_passdown_if_not_supported() local
2845 else if (data_limits->max_discard_sectors < pool->sectors_per_block) in disable_passdown_if_not_supported()
2854 static int bind_control_target(struct pool *pool, struct dm_target *ti) in bind_control_target() argument
2859 * We want to make sure that a pool in PM_FAIL mode is never upgraded. in bind_control_target()
2861 enum pool_mode old_mode = get_pool_mode(pool); in bind_control_target()
2865 * Don't change the pool's mode until set_pool_mode() below. in bind_control_target()
2866 * Otherwise the pool's process_* function pointers may in bind_control_target()
2867 * not match the desired pool mode. in bind_control_target()
2871 pool->ti = ti; in bind_control_target()
2872 pool->pf = pt->adjusted_pf; in bind_control_target()
2873 pool->low_water_blocks = pt->low_water_blocks; in bind_control_target()
2875 set_pool_mode(pool, new_mode); in bind_control_target()
2880 static void unbind_control_target(struct pool *pool, struct dm_target *ti) in unbind_control_target() argument
2882 if (pool->ti == ti) in unbind_control_target()
2883 pool->ti = NULL; in unbind_control_target()
2887 * Pool creation
2889 /* Initialize pool features. */
2899 static void __pool_destroy(struct pool *pool) in __pool_destroy() argument
2901 __pool_table_remove(pool); in __pool_destroy()
2903 vfree(pool->cell_sort_array); in __pool_destroy()
2904 if (dm_pool_metadata_close(pool->pmd) < 0) in __pool_destroy()
2907 dm_bio_prison_destroy(pool->prison); in __pool_destroy()
2908 dm_kcopyd_client_destroy(pool->copier); in __pool_destroy()
2910 if (pool->wq) in __pool_destroy()
2911 destroy_workqueue(pool->wq); in __pool_destroy()
2913 if (pool->next_mapping) in __pool_destroy()
2914 mempool_free(pool->next_mapping, &pool->mapping_pool); in __pool_destroy()
2915 mempool_exit(&pool->mapping_pool); in __pool_destroy()
2916 bio_uninit(&pool->flush_bio); in __pool_destroy()
2917 dm_deferred_set_destroy(pool->shared_read_ds); in __pool_destroy()
2918 dm_deferred_set_destroy(pool->all_io_ds); in __pool_destroy()
2919 kfree(pool); in __pool_destroy()
2924 static struct pool *pool_create(struct mapped_device *pool_md, in pool_create()
2932 struct pool *pool; in pool_create() local
2939 return (struct pool *)pmd; in pool_create()
2942 pool = kzalloc(sizeof(*pool), GFP_KERNEL); in pool_create()
2943 if (!pool) { in pool_create()
2944 *error = "Error allocating memory for pool"; in pool_create()
2949 pool->pmd = pmd; in pool_create()
2950 pool->sectors_per_block = block_size; in pool_create()
2952 pool->sectors_per_block_shift = -1; in pool_create()
2954 pool->sectors_per_block_shift = __ffs(block_size); in pool_create()
2955 pool->low_water_blocks = 0; in pool_create()
2956 pool_features_init(&pool->pf); in pool_create()
2957 pool->prison = dm_bio_prison_create(); in pool_create()
2958 if (!pool->prison) { in pool_create()
2959 *error = "Error creating pool's bio prison"; in pool_create()
2964 pool->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle); in pool_create()
2965 if (IS_ERR(pool->copier)) { in pool_create()
2966 r = PTR_ERR(pool->copier); in pool_create()
2967 *error = "Error creating pool's kcopyd client"; in pool_create()
2976 pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM); in pool_create()
2977 if (!pool->wq) { in pool_create()
2978 *error = "Error creating pool's workqueue"; in pool_create()
2983 throttle_init(&pool->throttle); in pool_create()
2984 INIT_WORK(&pool->worker, do_worker); in pool_create()
2985 INIT_DELAYED_WORK(&pool->waker, do_waker); in pool_create()
2986 INIT_DELAYED_WORK(&pool->no_space_timeout, do_no_space_timeout); in pool_create()
2987 spin_lock_init(&pool->lock); in pool_create()
2988 bio_list_init(&pool->deferred_flush_bios); in pool_create()
2989 bio_list_init(&pool->deferred_flush_completions); in pool_create()
2990 INIT_LIST_HEAD(&pool->prepared_mappings); in pool_create()
2991 INIT_LIST_HEAD(&pool->prepared_discards); in pool_create()
2992 INIT_LIST_HEAD(&pool->prepared_discards_pt2); in pool_create()
2993 INIT_LIST_HEAD(&pool->active_thins); in pool_create()
2994 pool->low_water_triggered = false; in pool_create()
2995 pool->suspended = true; in pool_create()
2996 pool->out_of_data_space = false; in pool_create()
2997 bio_init(&pool->flush_bio, NULL, 0); in pool_create()
2999 pool->shared_read_ds = dm_deferred_set_create(); in pool_create()
3000 if (!pool->shared_read_ds) { in pool_create()
3001 *error = "Error creating pool's shared read deferred set"; in pool_create()
3006 pool->all_io_ds = dm_deferred_set_create(); in pool_create()
3007 if (!pool->all_io_ds) { in pool_create()
3008 *error = "Error creating pool's all io deferred set"; in pool_create()
3013 pool->next_mapping = NULL; in pool_create()
3014 r = mempool_init_slab_pool(&pool->mapping_pool, MAPPING_POOL_SIZE, in pool_create()
3017 *error = "Error creating pool's mapping mempool"; in pool_create()
3022 pool->cell_sort_array = in pool_create()
3024 sizeof(*pool->cell_sort_array))); in pool_create()
3025 if (!pool->cell_sort_array) { in pool_create()
3031 pool->ref_count = 1; in pool_create()
3032 pool->last_commit_jiffies = jiffies; in pool_create()
3033 pool->pool_md = pool_md; in pool_create()
3034 pool->md_dev = metadata_dev; in pool_create()
3035 pool->data_dev = data_dev; in pool_create()
3036 __pool_table_insert(pool); in pool_create()
3038 return pool; in pool_create()
3041 mempool_exit(&pool->mapping_pool); in pool_create()
3043 dm_deferred_set_destroy(pool->all_io_ds); in pool_create()
3045 dm_deferred_set_destroy(pool->shared_read_ds); in pool_create()
3047 destroy_workqueue(pool->wq); in pool_create()
3049 dm_kcopyd_client_destroy(pool->copier); in pool_create()
3051 dm_bio_prison_destroy(pool->prison); in pool_create()
3053 kfree(pool); in pool_create()
3061 static void __pool_inc(struct pool *pool) in __pool_inc() argument
3064 pool->ref_count++; in __pool_inc()
3067 static void __pool_dec(struct pool *pool) in __pool_dec() argument
3070 BUG_ON(!pool->ref_count); in __pool_dec()
3071 if (!--pool->ref_count) in __pool_dec()
3072 __pool_destroy(pool); in __pool_dec()
3075 static struct pool *__pool_find(struct mapped_device *pool_md, in __pool_find()
3081 struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev); in __pool_find() local
3083 if (pool) { in __pool_find()
3084 if (pool->pool_md != pool_md) { in __pool_find()
3085 *error = "metadata device already in use by a pool"; in __pool_find()
3088 if (pool->data_dev != data_dev) { in __pool_find()
3089 *error = "data device already in use by a pool"; in __pool_find()
3092 __pool_inc(pool); in __pool_find()
3095 pool = __pool_table_lookup(pool_md); in __pool_find()
3096 if (pool) { in __pool_find()
3097 if (pool->md_dev != metadata_dev || pool->data_dev != data_dev) { in __pool_find()
3098 *error = "different pool cannot replace a pool"; in __pool_find()
3101 __pool_inc(pool); in __pool_find()
3104 pool = pool_create(pool_md, metadata_dev, data_dev, block_size, read_only, error); in __pool_find()
3109 return pool; in __pool_find()
3113 * Pool target methods
3121 unbind_control_target(pt->pool, ti); in pool_dtr()
3122 __pool_dec(pt->pool); in pool_dtr()
3138 {0, 4, "Invalid number of pool feature arguments"}, in parse_pool_features()
3171 ti->error = "Unrecognised pool feature requested"; in parse_pool_features()
3182 struct pool *pool = context; in metadata_low_callback() local
3185 dm_device_name(pool->pool_md)); in metadata_low_callback()
3187 dm_table_event(pool->ti->table); in metadata_low_callback()
3203 struct pool *pool = context; in metadata_pre_commit_callback() local
3204 struct bio *flush_bio = &pool->flush_bio; in metadata_pre_commit_callback()
3207 bio_set_dev(flush_bio, pool->data_dev); in metadata_pre_commit_callback()
3265 * thin-pool <metadata dev> <data dev>
3274 * read_only: Don't allow any changes to be made to the pool metadata.
3281 struct pool *pool; in pool_ctr() local
3312 * Set default pool features. in pool_ctr()
3356 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev, data_dev->bdev, in pool_ctr()
3358 if (IS_ERR(pool)) { in pool_ctr()
3359 r = PTR_ERR(pool); in pool_ctr()
3366 * initial load. This would require a pool reload to trigger thin in pool_ctr()
3369 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) { in pool_ctr()
3375 pt->pool = pool; in pool_ctr()
3384 * Only need to enable discards if the pool should pass in pool_ctr()
3393 * stacking of discard limits (this keeps the pool and in pool_ctr()
3400 r = dm_pool_register_metadata_threshold(pt->pool->pmd, in pool_ctr()
3403 pool); in pool_ctr()
3407 dm_pool_register_pre_commit_callback(pool->pmd, in pool_ctr()
3408 metadata_pre_commit_callback, pool); in pool_ctr()
3415 __pool_dec(pool); in pool_ctr()
3432 struct pool *pool = pt->pool; in pool_map() local
3437 spin_lock_irq(&pool->lock); in pool_map()
3440 spin_unlock_irq(&pool->lock); in pool_map()
3449 struct pool *pool = pt->pool; in maybe_resize_data_dev() local
3455 (void) sector_div(data_size, pool->sectors_per_block); in maybe_resize_data_dev()
3457 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size); in maybe_resize_data_dev()
3460 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3465 DMERR("%s: pool target (%llu blocks) too small: expected %llu", in maybe_resize_data_dev()
3466 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3471 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_data_dev()
3473 dm_device_name(pool->pool_md)); in maybe_resize_data_dev()
3479 dm_device_name(pool->pool_md), in maybe_resize_data_dev()
3481 r = dm_pool_resize_data_dev(pool->pmd, data_size); in maybe_resize_data_dev()
3483 metadata_operation_failed(pool, "dm_pool_resize_data_dev", r); in maybe_resize_data_dev()
3497 struct pool *pool = pt->pool; in maybe_resize_metadata_dev() local
3502 metadata_dev_size = get_metadata_dev_size_in_blocks(pool->md_dev); in maybe_resize_metadata_dev()
3504 r = dm_pool_get_metadata_dev_size(pool->pmd, &sb_metadata_dev_size); in maybe_resize_metadata_dev()
3507 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3513 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3518 if (dm_pool_metadata_needs_check(pool->pmd)) { in maybe_resize_metadata_dev()
3520 dm_device_name(pool->pool_md)); in maybe_resize_metadata_dev()
3524 warn_if_metadata_device_too_big(pool->md_dev); in maybe_resize_metadata_dev()
3526 dm_device_name(pool->pool_md), in maybe_resize_metadata_dev()
3529 if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE) in maybe_resize_metadata_dev()
3530 set_pool_mode(pool, PM_WRITE); in maybe_resize_metadata_dev()
3532 r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size); in maybe_resize_metadata_dev()
3534 metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r); in maybe_resize_metadata_dev()
3560 struct pool *pool = pt->pool; in pool_preresume() local
3563 * Take control of the pool object. in pool_preresume()
3565 r = bind_control_target(pool, ti); in pool_preresume()
3578 (void) commit(pool); in pool_preresume()
3583 static void pool_suspend_active_thins(struct pool *pool) in pool_suspend_active_thins() argument
3588 tc = get_first_thin(pool); in pool_suspend_active_thins()
3591 tc = get_next_thin(pool, tc); in pool_suspend_active_thins()
3595 static void pool_resume_active_thins(struct pool *pool) in pool_resume_active_thins() argument
3600 tc = get_first_thin(pool); in pool_resume_active_thins()
3603 tc = get_next_thin(pool, tc); in pool_resume_active_thins()
3610 struct pool *pool = pt->pool; in pool_resume() local
3616 requeue_bios(pool); in pool_resume()
3617 pool_resume_active_thins(pool); in pool_resume()
3619 spin_lock_irq(&pool->lock); in pool_resume()
3620 pool->low_water_triggered = false; in pool_resume()
3621 pool->suspended = false; in pool_resume()
3622 spin_unlock_irq(&pool->lock); in pool_resume()
3624 do_waker(&pool->waker.work); in pool_resume()
3630 struct pool *pool = pt->pool; in pool_presuspend() local
3632 spin_lock_irq(&pool->lock); in pool_presuspend()
3633 pool->suspended = true; in pool_presuspend()
3634 spin_unlock_irq(&pool->lock); in pool_presuspend()
3636 pool_suspend_active_thins(pool); in pool_presuspend()
3642 struct pool *pool = pt->pool; in pool_presuspend_undo() local
3644 pool_resume_active_thins(pool); in pool_presuspend_undo()
3646 spin_lock_irq(&pool->lock); in pool_presuspend_undo()
3647 pool->suspended = false; in pool_presuspend_undo()
3648 spin_unlock_irq(&pool->lock); in pool_presuspend_undo()
3654 struct pool *pool = pt->pool; in pool_postsuspend() local
3656 cancel_delayed_work_sync(&pool->waker); in pool_postsuspend()
3657 cancel_delayed_work_sync(&pool->no_space_timeout); in pool_postsuspend()
3658 flush_workqueue(pool->wq); in pool_postsuspend()
3659 (void) commit(pool); in pool_postsuspend()
3685 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool) in process_create_thin_mesg() argument
3698 r = dm_pool_create_thin(pool->pmd, dev_id); in process_create_thin_mesg()
3708 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_create_snap_mesg() argument
3726 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id); in process_create_snap_mesg()
3736 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool) in process_delete_mesg() argument
3749 r = dm_pool_delete_thin_device(pool->pmd, dev_id); in process_delete_mesg()
3756 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool) in process_set_transaction_id_mesg() argument
3775 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id); in process_set_transaction_id_mesg()
3785 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_reserve_metadata_snap_mesg() argument
3793 (void) commit(pool); in process_reserve_metadata_snap_mesg()
3795 r = dm_pool_reserve_metadata_snap(pool->pmd); in process_reserve_metadata_snap_mesg()
3802 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool) in process_release_metadata_snap_mesg() argument
3810 r = dm_pool_release_metadata_snap(pool->pmd); in process_release_metadata_snap_mesg()
3831 struct pool *pool = pt->pool; in pool_message() local
3833 if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) { in pool_message()
3834 DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode", in pool_message()
3835 dm_device_name(pool->pool_md)); in pool_message()
3840 r = process_create_thin_mesg(argc, argv, pool); in pool_message()
3843 r = process_create_snap_mesg(argc, argv, pool); in pool_message()
3846 r = process_delete_mesg(argc, argv, pool); in pool_message()
3849 r = process_set_transaction_id_mesg(argc, argv, pool); in pool_message()
3852 r = process_reserve_metadata_snap_mesg(argc, argv, pool); in pool_message()
3855 r = process_release_metadata_snap_mesg(argc, argv, pool); in pool_message()
3858 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); in pool_message()
3861 (void) commit(pool); in pool_message()
3894 * <pool mode> <discard config> <no space config> <needs_check>
3911 struct pool *pool = pt->pool; in pool_status() local
3915 if (get_pool_mode(pool) == PM_FAIL) { in pool_status()
3922 (void) commit(pool); in pool_status()
3924 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id); in pool_status()
3927 dm_device_name(pool->pool_md), r); in pool_status()
3931 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata); in pool_status()
3934 dm_device_name(pool->pool_md), r); in pool_status()
3938 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata); in pool_status()
3941 dm_device_name(pool->pool_md), r); in pool_status()
3945 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data); in pool_status()
3948 dm_device_name(pool->pool_md), r); in pool_status()
3952 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data); in pool_status()
3955 dm_device_name(pool->pool_md), r); in pool_status()
3959 r = dm_pool_get_metadata_snap(pool->pmd, &held_root); in pool_status()
3962 dm_device_name(pool->pool_md), r); in pool_status()
3978 mode = get_pool_mode(pool); in pool_status()
3986 if (!pool->pf.discard_enabled) in pool_status()
3988 else if (pool->pf.discard_passdown) in pool_status()
3993 if (pool->pf.error_if_no_space) in pool_status()
3998 if (dm_pool_metadata_needs_check(pool->pmd)) in pool_status()
4011 (unsigned long)pool->sectors_per_block, in pool_status()
4033 struct pool *pool = pt->pool; in pool_io_hints() local
4037 * If max_sectors is smaller than pool->sectors_per_block adjust it in pool_io_hints()
4038 * to the highest possible power-of-2 factor of pool->sectors_per_block. in pool_io_hints()
4039 * This is especially beneficial when the pool's data device is a RAID in pool_io_hints()
4040 * device that has a full stripe width that matches pool->sectors_per_block in pool_io_hints()
4045 if (limits->max_sectors < pool->sectors_per_block) { in pool_io_hints()
4046 while (!is_factor(pool->sectors_per_block, limits->max_sectors)) { in pool_io_hints()
4055 * pool's blocksize (io_opt is a factor) do not override them. in pool_io_hints()
4057 if (io_opt_sectors < pool->sectors_per_block || in pool_io_hints()
4058 !is_factor(io_opt_sectors, pool->sectors_per_block)) { in pool_io_hints()
4059 if (is_factor(pool->sectors_per_block, limits->max_sectors)) in pool_io_hints()
4062 blk_limits_io_min(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
4063 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT); in pool_io_hints()
4068 * They get transferred to the live pool in bind_control_target() in pool_io_hints()
4074 * block layer will stack them if pool's data device has support. in pool_io_hints()
4085 * The pool uses the same discard limits as the underlying data in pool_io_hints()
4091 .name = "thin-pool",
4128 spin_lock_irq(&tc->pool->lock); in thin_dtr()
4130 spin_unlock_irq(&tc->pool->lock); in thin_dtr()
4138 __pool_dec(tc->pool); in thin_dtr()
4153 * pool_dev: the path to the pool (eg, /dev/mapper/my_pool)
4155 * origin_dev: a device external to the pool that should act as the origin
4157 * If the pool device has discards disabled, they get disabled for the thin
4205 ti->error = "Error opening pool device"; in thin_ctr()
4218 ti->error = "Couldn't get pool mapped device"; in thin_ctr()
4223 tc->pool = __pool_table_lookup(pool_md); in thin_ctr()
4224 if (!tc->pool) { in thin_ctr()
4225 ti->error = "Couldn't find pool object"; in thin_ctr()
4229 __pool_inc(tc->pool); in thin_ctr()
4231 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_ctr()
4232 ti->error = "Couldn't open thin device, Pool is in fail mode"; in thin_ctr()
4237 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td); in thin_ctr()
4243 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block); in thin_ctr()
4251 /* In case the pool supports discards, pass them on. */ in thin_ctr()
4252 if (tc->pool->pf.discard_enabled) { in thin_ctr()
4259 spin_lock_irq(&tc->pool->lock); in thin_ctr()
4260 if (tc->pool->suspended) { in thin_ctr()
4261 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4263 ti->error = "Unable to activate thin device while pool is suspended"; in thin_ctr()
4269 list_add_tail_rcu(&tc->list, &tc->pool->active_thins); in thin_ctr()
4270 spin_unlock_irq(&tc->pool->lock); in thin_ctr()
4286 __pool_dec(tc->pool); in thin_ctr()
4316 struct pool *pool = h->tc->pool; in thin_endio() local
4322 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4327 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4334 spin_lock_irqsave(&pool->lock, flags); in thin_endio()
4336 list_add_tail(&m->list, &pool->prepared_discards); in thin_endio()
4337 spin_unlock_irqrestore(&pool->lock, flags); in thin_endio()
4338 wake_worker(pool); in thin_endio()
4389 if (get_pool_mode(tc->pool) == PM_FAIL) { in thin_status()
4411 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block); in thin_status()
4414 tc->pool->sectors_per_block) - 1); in thin_status()
4440 struct pool *pool = tc->pool; in thin_iterate_devices() local
4444 * we follow a more convoluted path through to the pool's target. in thin_iterate_devices()
4446 if (!pool->ti) in thin_iterate_devices()
4449 blocks = pool->ti->len; in thin_iterate_devices()
4450 (void) sector_div(blocks, pool->sectors_per_block); in thin_iterate_devices()
4452 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data); in thin_iterate_devices()
4460 struct pool *pool = tc->pool; in thin_io_hints() local
4462 if (!pool->pf.discard_enabled) in thin_io_hints()
4465 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT; in thin_io_hints()