Lines matching full:ca — uses of the identifier ca (a struct bch_dev pointer, as a local or argument) in fs/bcachefs/alloc_background.c; each entry shows the source line number, the matching code, and the containing function.
342 struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL; in bch2_alloc_to_text() local
362 if (ca) in bch2_alloc_to_text()
363 prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca)); in bch2_alloc_to_text()
367 bch2_dev_put(ca); in bch2_alloc_to_text()
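
The bch2_alloc_to_text() lines above show a try-get/NULL-safe-put shape: the lookup may return NULL (the key can reference a device that no longer exists), every use is guarded, and the reference is dropped on exit; bch2_dev_put() appears to tolerate NULL, so the put needs no guard. A minimal userspace sketch of that shape, with hypothetical dev/table types rather than the bcachefs API:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct dev {
	atomic_int ref;
	int idx;
};

/* Try to take a reference; returns NULL if the slot is empty. */
static struct dev *dev_tryget(struct dev **table, size_t n, size_t i)
{
	if (i >= n || !table[i])
		return NULL;
	atomic_fetch_add(&table[i]->ref, 1);
	return table[i];
}

/* NULL-safe put, so callers can drop the ref unconditionally. */
static void dev_put(struct dev *d)
{
	if (d)
		atomic_fetch_sub(&d->ref, 1);
}

int main(void)
{
	struct dev d0 = { .idx = 0 };
	struct dev *table[2] = { &d0, NULL };

	struct dev *d = dev_tryget(table, 2, 0);
	if (d)
		printf("dev %d: ref taken\n", d->idx);	/* guarded use */
	dev_put(d);

	d = dev_tryget(table, 2, 1);	/* missing device: NULL */
	if (d)
		printf("dev %d\n", d->idx);
	dev_put(d);			/* safe even when NULL */
	return 0;
}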
595 struct bch_dev *ca = NULL; in bch2_alloc_read() local
607 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
612 if (!ca) { in bch2_alloc_read()
619 for (u64 b = max_t(u64, ca->mi.first_bucket, start); in bch2_alloc_read()
620 b < min_t(u64, ca->mi.nbuckets, end); in bch2_alloc_read()
622 *bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK]; in bch2_alloc_read()
628 ca = bch2_dev_iterate(c, ca, k.k->p.inode); in bch2_alloc_read()
633 if (!ca) { in bch2_alloc_read()
638 if (k.k->p.offset < ca->mi.first_bucket) { in bch2_alloc_read()
639 bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket)); in bch2_alloc_read()
643 if (k.k->p.offset >= ca->mi.nbuckets) { in bch2_alloc_read()
649 *bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen; in bch2_alloc_read()
654 bch2_dev_put(ca); in bch2_alloc_read()
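
bch2_alloc_read() walks keys in (device index, bucket) order; bch2_dev_iterate() swaps the held device reference only when the key's device index changes, and offsets outside [mi.first_bucket, mi.nbuckets) are skipped or clamped. A sketch of that cached-device iteration with invented types (not the bcachefs API, and without the refcounting):

#include <stdio.h>

struct dev { int idx; unsigned long first_bucket, nbuckets; };

/* Re-resolve the device only when the key's device index changes,
 * mirroring the bch2_dev_iterate() usage above. */
static struct dev *dev_iterate(struct dev *devs, int ndev,
			       struct dev *cur, int want_idx)
{
	if (cur && cur->idx == want_idx)
		return cur;
	return want_idx < ndev ? &devs[want_idx] : NULL;
}

int main(void)
{
	struct dev devs[] = {
		{ .idx = 0, .first_bucket = 16, .nbuckets = 100 },
		{ .idx = 1, .first_bucket = 16, .nbuckets = 50  },
	};
	struct { int dev; unsigned long bucket; } keys[] =
		{ { 0, 20 }, { 0, 21 }, { 1, 18 }, { 2, 5 } };
	struct dev *ca = NULL;

	for (int i = 0; i < 4; i++) {
		ca = dev_iterate(devs, 2, ca, keys[i].dev);
		if (!ca)
			continue;	/* key for a device we don't have */
		if (keys[i].bucket < ca->first_bucket ||
		    keys[i].bucket >= ca->nbuckets)
			continue;	/* outside the valid bucket range */
		printf("dev %d bucket %lu ok\n", ca->idx, keys[i].bucket);
	}
	return 0;
}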
698 struct bch_dev *ca, in bch2_bucket_do_index() argument
729 need_discard_or_freespace_err_on(ca->mi.freespace_initialized && in bch2_bucket_do_index()
776 static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca, in bch2_dev_data_type_accounting_mod() argument
786 .dev = ca->dev_idx, in bch2_dev_data_type_accounting_mod()
790 int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca, in bch2_alloc_key_to_dev_counters() argument
798 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
799 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?: in bch2_alloc_key_to_dev_counters()
800 bch2_dev_data_type_accounting_mod(trans, ca, old->data_type, in bch2_alloc_key_to_dev_counters()
801 -1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
805 int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type, in bch2_alloc_key_to_dev_counters()
808 bch2_bucket_sectors_fragmented(ca, *new) - in bch2_alloc_key_to_dev_counters()
809 bch2_bucket_sectors_fragmented(ca, *old), flags); in bch2_alloc_key_to_dev_counters()
817 int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped, in bch2_alloc_key_to_dev_counters()
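
bch2_alloc_key_to_dev_counters() books deltas against per-device, per-data-type counters: when the bucket's data type changes, it charges +1 bucket and the new sector counts to the new type and -1 and the old counts to the old type; a same-type update is a single entry carrying the differences. A toy version of that bookkeeping (struct, types, and numbers invented for illustration):

#include <stdio.h>

struct usage { long buckets, sectors, fragmented; };

/* Apply a (buckets, sectors, fragmented) delta to one data type's
 * counters -- a stand-in for the accounting mod shown above. */
static void acct_mod(struct usage *u, long db, long ds, long df)
{
	u->buckets += db;
	u->sectors += ds;
	u->fragmented += df;
}

int main(void)
{
	enum { USER, CACHED, NR };
	struct usage acct[NR] = { 0 };

	/* Bucket moves from CACHED (30 sectors) to USER (100 sectors):
	 * one +1 mod for the new type, one -1 mod for the old type. */
	acct_mod(&acct[USER],    1, 100, 0);
	acct_mod(&acct[CACHED], -1, -30, 0);

	/* Same type, sector count grows 100 -> 120: one delta mod. */
	acct_mod(&acct[USER], 0, 120 - 100, 0);

	printf("user: %ld buckets, %ld sectors\n",
	       acct[USER].buckets, acct[USER].sectors);
	return 0;
}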
838 struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p); in bch2_trigger_alloc() local
839 if (!ca) in bch2_trigger_alloc()
885 ret = bch2_bucket_do_index(trans, ca, old, old_a, false) ?: in bch2_trigger_alloc()
886 bch2_bucket_do_index(trans, ca, new.s_c, new_a, true); in bch2_trigger_alloc()
905 alloc_lru_idx_fragmentation(*old_a, ca), in bch2_trigger_alloc()
906 alloc_lru_idx_fragmentation(*new_a, ca)); in bch2_trigger_alloc()
918 ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx, in bch2_trigger_alloc()
925 ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags); in bch2_trigger_alloc()
984 u8 *gen = bucket_gen(ca, new.k->p.offset); in bch2_trigger_alloc()
1004 bch2_discard_one_bucket_fast(ca, new.k->p.offset); in bch2_trigger_alloc()
1008 should_invalidate_buckets(ca, bch2_dev_usage_read(ca))) in bch2_trigger_alloc()
1009 bch2_dev_do_invalidates(ca); in bch2_trigger_alloc()
1017 struct bucket *g = gc_bucket(ca, new.k->p.offset); in bch2_trigger_alloc()
1029 bch2_dev_put(ca); in bch2_trigger_alloc()
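
Several lines above (in bch2_trigger_alloc() and bch2_alloc_key_to_dev_counters()) chain transaction steps with ret = f() ?: g(), the GNU ?: extension: g() runs only when f() returned 0, so the first error short-circuits the chain. The portable equivalent:

#include <stdio.h>

static int f(void) { return 0; }	/* succeeds */
static int g(void) { return -5; }	/* fails */

int main(void)
{
	/* Same effect as "ret = f() ?: g();" */
	int ret = f();
	if (!ret)
		ret = g();
	printf("ret = %d\n", ret);	/* -5: g()'s error propagates */
	return 0;
}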
1085 static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket) in next_bucket() argument
1087 if (*ca) { in next_bucket()
1088 if (bucket->offset < (*ca)->mi.first_bucket) in next_bucket()
1089 bucket->offset = (*ca)->mi.first_bucket; in next_bucket()
1091 if (bucket->offset < (*ca)->mi.nbuckets) in next_bucket()
1094 bch2_dev_put(*ca); in next_bucket()
1095 *ca = NULL; in next_bucket()
1101 *ca = __bch2_next_dev_idx(c, bucket->inode, NULL); in next_bucket()
1102 if (*ca) { in next_bucket()
1103 *bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket); in next_bucket()
1104 bch2_dev_get(*ca); in next_bucket()
1108 return *ca != NULL; in next_bucket()
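
next_bucket() clamps the cursor into the current device's valid bucket range, or drops the device and steps to the next one. The same control flow in standalone form, with the refcounting elided and an explicit device index standing in for bucket->inode (illustrative, not the kernel helper):

#include <stdbool.h>
#include <stdio.h>

struct dev { int idx; unsigned long first_bucket, nbuckets; };

static bool next_bucket(struct dev *devs, int ndev, struct dev **ca,
			int *dev_idx, unsigned long *bucket)
{
	if (*ca) {
		if (*bucket < (*ca)->first_bucket)
			*bucket = (*ca)->first_bucket;
		if (*bucket < (*ca)->nbuckets)
			return true;	/* cursor already valid */
		*ca = NULL;
		(*dev_idx)++;		/* fall through to next device */
	}
	if (*dev_idx < ndev) {
		*ca = &devs[*dev_idx];
		*bucket = (*ca)->first_bucket;
		return true;
	}
	return false;			/* no devices left */
}

int main(void)
{
	struct dev devs[] = { { 0, 16, 32 }, { 1, 16, 64 } };
	struct dev *ca = &devs[0];
	int dev_idx = 0;
	unsigned long bucket = 40;	/* past dev 0's last bucket */

	while (next_bucket(devs, 2, &ca, &dev_idx, &bucket)) {
		printf("dev %d bucket %lu\n", ca->idx, bucket);
		bucket = ca->nbuckets;	/* force the next device */
	}
	return 0;
}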
1113 struct bch_dev **ca, struct bkey *hole) in bch2_get_key_or_real_bucket_hole() argument
1122 *ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode); in bch2_get_key_or_real_bucket_hole()
1127 if (!*ca || !bucket_valid(*ca, hole_start.offset)) { in bch2_get_key_or_real_bucket_hole()
1128 if (!next_bucket(c, ca, &hole_start)) in bch2_get_key_or_real_bucket_hole()
1135 if (k.k->p.offset > (*ca)->mi.nbuckets) in bch2_get_key_or_real_bucket_hole()
1136 bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset); in bch2_get_key_or_real_bucket_hole()
1158 struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p); in bch2_check_alloc_key() local
1159 if (fsck_err_on(!ca, in bch2_check_alloc_key()
1164 if (!ca) in bch2_check_alloc_key()
1167 if (!ca->mi.freespace_initialized) in bch2_check_alloc_key()
1235 bch2_dev_put(ca); in bch2_check_alloc_key()
1242 struct bch_dev *ca, in bch2_check_alloc_hole_freespace() argument
1251 if (!ca->mi.freespace_initialized) in bch2_check_alloc_hole_freespace()
1503 struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode); in bch2_check_bucket_gens_key() local
1504 if (!ca) { in bch2_check_bucket_gens_key()
1512 if (fsck_err_on(end <= ca->mi.first_bucket || in bch2_check_bucket_gens_key()
1513 start >= ca->mi.nbuckets, in bch2_check_bucket_gens_key()
1521 for (b = start; b < ca->mi.first_bucket; b++) in bch2_check_bucket_gens_key()
1529 for (b = ca->mi.nbuckets; b < end; b++) in bch2_check_bucket_gens_key()
1549 bch2_dev_put(ca); in bch2_check_bucket_gens_key()
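
bch2_check_bucket_gens_key() first rejects a gens key lying entirely outside [mi.first_bucket, mi.nbuckets), then zeroes the generations that spill past either end of the valid range. The same two clamping loops over a flat buffer (the per-key gen count here is invented, not the real constant):

#include <stdio.h>
#include <string.h>

#define GENS_PER_KEY 64

int main(void)
{
	unsigned char gens[GENS_PER_KEY];
	unsigned long start = 0, end = start + GENS_PER_KEY;
	unsigned long first_bucket = 16, nbuckets = 48;

	memset(gens, 1, sizeof(gens));	/* pretend all gens are set */

	for (unsigned long b = start; b < first_bucket; b++)
		gens[b - start] = 0;	/* before the first bucket */
	for (unsigned long b = nbuckets; b < end; b++)
		gens[b - start] = 0;	/* past the last bucket */

	printf("gens[15]=%u gens[16]=%u gens[47]=%u gens[48]=%u\n",
	       gens[15], gens[16], gens[47], gens[48]);	/* 0 1 1 0 */
	return 0;
}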
1558 struct bch_dev *ca = NULL; in bch2_check_alloc_info() local
1577 k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole); in bch2_check_alloc_info()
1598 ret = bch2_check_alloc_hole_freespace(trans, ca, in bch2_check_alloc_info()
1626 bch2_dev_put(ca); in bch2_check_alloc_info()
1627 ca = NULL; in bch2_check_alloc_info()
1698 struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode); in bch2_check_alloc_to_lru_ref() local
1699 if (!ca) in bch2_check_alloc_to_lru_ref()
1704 u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca); in bch2_check_alloc_to_lru_ref()
1744 bch2_dev_put(ca); in bch2_check_alloc_to_lru_ref()
1768 static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress) in discard_in_flight_add() argument
1772 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1773 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_add()
1779 ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) { in discard_in_flight_add()
1784 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_add()
1788 static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket) in discard_in_flight_remove() argument
1790 mutex_lock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
1791 darray_for_each(ca->discard_buckets_in_flight, i) in discard_in_flight_remove()
1794 darray_remove_item(&ca->discard_buckets_in_flight, i); in discard_in_flight_remove()
1799 mutex_unlock(&ca->discard_buckets_in_flight_lock); in discard_in_flight_remove()
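
discard_in_flight_add()/remove() keep a small mutex-protected darray of buckets with discards outstanding, refusing duplicates; from the call sites above, a nonzero return from add apparently means the bucket was not added. A toy userspace version with a pthread mutex and a fixed array, the return convention simplified to a bool:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long in_flight[16];
static int nr;

static bool in_flight_add(unsigned long bucket)
{
	bool added = false;

	pthread_mutex_lock(&lock);
	for (int i = 0; i < nr; i++)
		if (in_flight[i] == bucket)
			goto out;	/* already queued: refuse */
	if (nr < 16) {
		in_flight[nr++] = bucket;
		added = true;
	}
out:
	pthread_mutex_unlock(&lock);
	return added;
}

static void in_flight_remove(unsigned long bucket)
{
	pthread_mutex_lock(&lock);
	for (int i = 0; i < nr; i++)
		if (in_flight[i] == bucket) {
			in_flight[i] = in_flight[--nr];	/* swap-remove */
			break;
		}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	printf("add 7: %d\n", in_flight_add(7));	/* 1 */
	printf("add 7 again: %d\n", in_flight_add(7));	/* 0: duplicate */
	in_flight_remove(7);
	printf("add 7 after remove: %d\n", in_flight_add(7));	/* 1 */
	return 0;
}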
1815 static inline bool discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca) in discard_opt_enabled() argument
1819 : ca->mi.discard; in discard_opt_enabled()
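
discard_opt_enabled() picks between the filesystem-wide option and the per-member flag; from the two visible lines, some fs-level condition (presumably the discard option having been set explicitly at mount time) selects c->opts.discard, and ca->mi.discard applies otherwise. A sketch of that option layering under that assumption:

#include <stdbool.h>
#include <stdio.h>

struct fs  { bool discard_opt_set, discard; };
struct dev { bool discard; };

/* An explicitly-set fs-wide option wins; otherwise the per-device
 * member flag decides. */
static bool discard_enabled(const struct fs *c, const struct dev *ca)
{
	return c->discard_opt_set ? c->discard : ca->discard;
}

int main(void)
{
	struct fs  c  = { .discard_opt_set = true, .discard = false };
	struct dev ca = { .discard = true };

	/* The explicit fs option overrides the member flag: prints 0. */
	printf("%d\n", discard_enabled(&c, &ca));
	return 0;
}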
1823 struct bch_dev *ca, in bch2_discard_one_bucket() argument
1875 if (discard_in_flight_add(ca, iter.pos.offset, true)) in bch2_discard_one_bucket()
1885 if (discard_opt_enabled(c, ca) && !c->opts.nochanges) { in bch2_discard_one_bucket()
1891 blkdev_issue_discard(ca->disk_sb.bdev, in bch2_discard_one_bucket()
1892 k.k->p.offset * ca->mi.bucket_size, in bch2_discard_one_bucket()
1893 ca->mi.bucket_size, in bch2_discard_one_bucket()
1921 discard_in_flight_remove(ca, iter.pos.offset); in bch2_discard_one_bucket()
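
The blkdev_issue_discard() call shows the bucket-to-sector arithmetic: a bucket is mi.bucket_size sectors long, so bucket k starts at sector k * bucket_size and the discard covers exactly one bucket. Worked numbers (bucket size invented):

#include <stdio.h>

int main(void)
{
	unsigned long bucket = 37, bucket_size = 1024;	/* sectors */
	unsigned long start_sector = bucket * bucket_size;

	/* Matches the (offset * bucket_size, bucket_size) pair above. */
	printf("discard sectors [%lu, %lu)\n",
	       start_sector, start_sector + bucket_size);
	return 0;
}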
1931 struct bch_dev *ca = container_of(work, struct bch_dev, discard_work); in bch2_do_discards_work() local
1932 struct bch_fs *c = ca->fs; in bch2_do_discards_work()
1945 POS(ca->dev_idx, 0), in bch2_do_discards_work()
1946 POS(ca->dev_idx, U64_MAX), 0, k, in bch2_do_discards_work()
1947 bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false))); in bch2_do_discards_work()
1949 if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal)) in bch2_do_discards_work()
1955 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_do_discards_work()
1959 void bch2_dev_do_discards(struct bch_dev *ca) in bch2_dev_do_discards() argument
1961 struct bch_fs *c = ca->fs; in bch2_dev_do_discards()
1966 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_discards()
1969 if (queue_work(c->write_ref_wq, &ca->discard_work)) in bch2_dev_do_discards()
1972 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_dev_do_discards()
1979 for_each_member_device(c, ca) in bch2_do_discards()
1980 bch2_dev_do_discards(ca); in bch2_do_discards()
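
bch2_dev_do_discards() shows the kick pattern used throughout this file: take a per-device io ref that travels with the queued work, and drop it again when queue_work() reports the work was already pending. A userspace model with atomics standing in for the percpu ref and workqueue (toy code, single pending flag):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int io_ref = 1;
static atomic_bool work_pending;

/* queue_work()-like helper: true only for the caller that actually
 * queued the work. */
static bool queue_work_once(void)
{
	bool expected = false;
	return atomic_compare_exchange_strong(&work_pending, &expected, true);
}

static void kick_work(void)
{
	atomic_fetch_add(&io_ref, 1);	/* ref travels with the work */
	if (queue_work_once())
		return;			/* worker will drop the ref */
	atomic_fetch_sub(&io_ref, 1);	/* already queued: undo */
}

int main(void)
{
	kick_work();
	kick_work();			/* second call is a no-op */
	printf("io_ref = %d\n", atomic_load(&io_ref));	/* 2: base + worker */
	return 0;
}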
1984 struct bch_dev *ca, in bch2_do_discards_fast_one() argument
1991 BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0); in bch2_do_discards_fast_one()
1999 ca->dev_idx, bucket)) in bch2_do_discards_fast_one()
2002 ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true); in bch2_do_discards_fast_one()
2011 struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work); in bch2_do_discards_fast_work() local
2012 struct bch_fs *c = ca->fs; in bch2_do_discards_fast_work()
2022 mutex_lock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2023 darray_for_each(ca->discard_buckets_in_flight, i) { in bch2_do_discards_fast_work()
2032 mutex_unlock(&ca->discard_buckets_in_flight_lock); in bch2_do_discards_fast_work()
2038 bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s)); in bch2_do_discards_fast_work()
2041 discard_in_flight_remove(ca, bucket); in bch2_do_discards_fast_work()
2050 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_do_discards_fast_work()
2054 static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket) in bch2_discard_one_bucket_fast() argument
2056 struct bch_fs *c = ca->fs; in bch2_discard_one_bucket_fast()
2058 if (discard_in_flight_add(ca, bucket, false)) in bch2_discard_one_bucket_fast()
2064 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_discard_one_bucket_fast()
2067 if (queue_work(c->write_ref_wq, &ca->discard_fast_work)) in bch2_discard_one_bucket_fast()
2070 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_discard_one_bucket_fast()
2076 struct bch_dev *ca, in invalidate_one_bp() argument
2097 bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx); in invalidate_one_bp()
2104 struct bch_dev *ca, in invalidate_one_bucket_by_bps() argument
2109 struct bpos bp_start = bucket_pos_to_bp_start(ca, bucket); in invalidate_one_bucket_by_bps()
2110 struct bpos bp_end = bucket_pos_to_bp_end(ca, bucket); in invalidate_one_bucket_by_bps()
2127 invalidate_one_bp(trans, ca, bp, last_flushed); in invalidate_one_bucket_by_bps()
2133 struct bch_dev *ca, in invalidate_one_bucket() argument
2189 ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed); in invalidate_one_bucket()
2203 struct bch_dev *ca, bool *wrapped) in next_lru_key() argument
2207 k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX)); in next_lru_key()
2209 bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0)); in next_lru_key()
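
next_lru_key() peeks forward from the iterator position up to the device's maximum LRU position and, on hitting the end, rewinds to lru_pos(ca->dev_idx, 0, 0) at most once (the wrapped flag). The same scan-with-one-wrap over a toy array, where consuming an entry stands in for invalidating the bucket so the scan terminates:

#include <stdbool.h>
#include <stdio.h>

static int next_entry(int *lru, int n, int *pos, bool *wrapped)
{
	for (;;) {
		while (*pos < n) {
			if (lru[*pos])
				return lru[(*pos)++];
			(*pos)++;
		}
		if (*wrapped)
			return 0;	/* second end-of-list: done */
		*pos = 0;		/* rewind exactly once */
		*wrapped = true;
	}
}

int main(void)
{
	int lru[5] = { 3, 0, 0, 7, 0 };
	int pos = 2;			/* start mid-list */
	bool wrapped = false;
	int v;

	while ((v = next_entry(lru, 5, &pos, &wrapped))) {
		printf("entry %d\n", v);	/* prints 7, then 3 */
		lru[pos - 1] = 0;	/* consume, like invalidating */
	}
	return 0;
}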
2219 struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work); in bch2_do_invalidates_work() local
2220 struct bch_fs *c = ca->fs; in bch2_do_invalidates_work()
2233 should_invalidate_buckets(ca, bch2_dev_usage_read(ca)); in bch2_do_invalidates_work()
2238 lru_pos(ca->dev_idx, 0, in bch2_do_invalidates_work()
2245 struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped); in bch2_do_invalidates_work()
2252 ret = invalidate_one_bucket(trans, ca, &iter, k, &last_flushed, &nr_to_invalidate); in bch2_do_invalidates_work()
2264 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_do_invalidates_work()
2269 void bch2_dev_do_invalidates(struct bch_dev *ca) in bch2_dev_do_invalidates() argument
2271 struct bch_fs *c = ca->fs; in bch2_dev_do_invalidates()
2276 if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE)) in bch2_dev_do_invalidates()
2279 if (queue_work(c->write_ref_wq, &ca->invalidate_work)) in bch2_dev_do_invalidates()
2282 percpu_ref_put(&ca->io_ref[WRITE]); in bch2_dev_do_invalidates()
2289 for_each_member_device(c, ca) in bch2_do_invalidates()
2290 bch2_dev_do_invalidates(ca); in bch2_do_invalidates()
2293 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca, in bch2_dev_freespace_init() argument
2300 struct bpos end = POS(ca->dev_idx, bucket_end); in bch2_dev_freespace_init()
2306 BUG_ON(bucket_end > ca->mi.nbuckets); in bch2_dev_freespace_init()
2309 POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)), in bch2_dev_freespace_init()
2312 * Scan the alloc btree for every bucket on @ca, and add buckets to the in bch2_dev_freespace_init()
2317 bch_info(ca, "%s: currently at %llu/%llu", in bch2_dev_freespace_init()
2318 __func__, iter.pos.offset, ca->mi.nbuckets); in bch2_dev_freespace_init()
2342 ret = bch2_bucket_do_index(trans, ca, k, a, true) ?: in bch2_dev_freespace_init()
2381 bch_err_msg(ca, ret, "initializing free space"); in bch2_dev_freespace_init()
2386 m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx); in bch2_dev_freespace_init()
2403 for_each_member_device(c, ca) { in bch2_fs_freespace_init()
2404 if (ca->mi.freespace_initialized) in bch2_fs_freespace_init()
2412 ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets); in bch2_fs_freespace_init()
2414 bch2_dev_put(ca); in bch2_fs_freespace_init()
2432 int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_remove_alloc() argument
2434 struct bpos start = POS(ca->dev_idx, 0); in bch2_dev_remove_alloc()
2435 struct bpos end = POS(ca->dev_idx, U64_MAX); in bch2_dev_remove_alloc()
2442 ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?: in bch2_dev_remove_alloc()
2455 bch2_dev_usage_remove(c, ca->dev_idx); in bch2_dev_remove_alloc()
2456 bch_err_msg(ca, ret, "removing dev alloc info"); in bch2_dev_remove_alloc()
2506 for_each_online_member(c, ca) { in bch2_recalc_capacity()
2507 struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi; in bch2_recalc_capacity()
2514 __for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) { in bch2_recalc_capacity()
2534 dev_reserve += ca->nr_btree_reserve * 2; in bch2_recalc_capacity()
2535 dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */ in bch2_recalc_capacity()
2541 dev_reserve *= ca->mi.bucket_size; in bch2_recalc_capacity()
2543 capacity += bucket_to_sector(ca, ca->mi.nbuckets - in bch2_recalc_capacity()
2544 ca->mi.first_bucket); in bch2_recalc_capacity()
2549 ca->mi.bucket_size); in bch2_recalc_capacity()
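
bch2_recalc_capacity() sums per-device reserves (twice the btree-node reserve plus nbuckets/64 for copygc), converts buckets to sectors, and counts usable capacity from first_bucket to nbuckets. The arithmetic with made-up numbers (a sketch of the visible terms only; the real function has more inputs):

#include <stdio.h>

int main(void)
{
	unsigned long nbuckets = 1UL << 20, first_bucket = 16;
	unsigned long bucket_size = 1024;	/* sectors */
	unsigned long nr_btree_reserve = 7;

	unsigned long dev_reserve = 0;
	dev_reserve += nr_btree_reserve * 2;
	dev_reserve += nbuckets >> 6;		/* copygc reserve */
	dev_reserve *= bucket_size;		/* buckets -> sectors */

	unsigned long capacity = (nbuckets - first_bucket) * bucket_size;

	printf("capacity %lu sectors, reserved %lu sectors (%.1f%%)\n",
	       capacity, dev_reserve, 100.0 * dev_reserve / capacity);
	return 0;
}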
2573 for_each_rw_member(c, ca) in bch2_min_rw_member_capacity()
2574 ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size); in bch2_min_rw_member_capacity()
2578 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_has_open_write_point() argument
2588 ob->dev == ca->dev_idx) in bch2_dev_has_open_write_point()
2597 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_remove() argument
2604 clear_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_remove()
2613 bch2_open_buckets_stop(c, ca, false); in bch2_dev_allocator_remove()
2630 !bch2_dev_has_open_write_point(c, ca)); in bch2_dev_allocator_remove()
2634 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca) in bch2_dev_allocator_add() argument
2639 if (ca->mi.data_allowed & (1 << i)) in bch2_dev_allocator_add()
2640 set_bit(ca->dev_idx, c->rw_devs[i].d); in bch2_dev_allocator_add()
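
bch2_dev_allocator_add() translates the member's data_allowed bitmask into per-data-type rw_devs device bitmaps, so allocation for each data type only considers devices that permit it. A toy version with an invented, abbreviated data-type enum (the real one has more entries):

#include <stdio.h>

enum { BCH_DATA_btree, BCH_DATA_user, BCH_DATA_cached, BCH_DATA_NR };

int main(void)
{
	unsigned long rw_devs[BCH_DATA_NR] = { 0 };
	unsigned dev_idx = 3;
	unsigned data_allowed = (1 << BCH_DATA_btree) | (1 << BCH_DATA_user);

	/* For each data type the member allows, set this device's bit
	 * in that type's device bitmap. */
	for (int i = 0; i < BCH_DATA_NR; i++)
		if (data_allowed & (1 << i))
			rw_devs[i] |= 1UL << dev_idx;

	printf("btree devs %#lx, cached devs %#lx\n",
	       rw_devs[BCH_DATA_btree], rw_devs[BCH_DATA_cached]);
	return 0;
}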
2645 void bch2_dev_allocator_background_exit(struct bch_dev *ca) in bch2_dev_allocator_background_exit() argument
2647 darray_exit(&ca->discard_buckets_in_flight); in bch2_dev_allocator_background_exit()
2650 void bch2_dev_allocator_background_init(struct bch_dev *ca) in bch2_dev_allocator_background_init() argument
2652 mutex_init(&ca->discard_buckets_in_flight_lock); in bch2_dev_allocator_background_init()
2653 INIT_WORK(&ca->discard_work, bch2_do_discards_work); in bch2_dev_allocator_background_init()
2654 INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work); in bch2_dev_allocator_background_init()
2655 INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work); in bch2_dev_allocator_background_init()