14e4d5207SChristoph Hellwig // SPDX-License-Identifier: GPL-2.0 24e4d5207SChristoph Hellwig /* 34e4d5207SChristoph Hellwig * Copyright (c) 2023-2025 Christoph Hellwig. 44e4d5207SChristoph Hellwig * Copyright (c) 2024-2025, Western Digital Corporation or its affiliates. 54e4d5207SChristoph Hellwig */ 64e4d5207SChristoph Hellwig #include "xfs.h" 74e4d5207SChristoph Hellwig #include "xfs_shared.h" 84e4d5207SChristoph Hellwig #include "xfs_format.h" 94e4d5207SChristoph Hellwig #include "xfs_log_format.h" 104e4d5207SChristoph Hellwig #include "xfs_error.h" 114e4d5207SChristoph Hellwig #include "xfs_trans_resv.h" 124e4d5207SChristoph Hellwig #include "xfs_mount.h" 134e4d5207SChristoph Hellwig #include "xfs_inode.h" 144e4d5207SChristoph Hellwig #include "xfs_iomap.h" 154e4d5207SChristoph Hellwig #include "xfs_trans.h" 164e4d5207SChristoph Hellwig #include "xfs_alloc.h" 174e4d5207SChristoph Hellwig #include "xfs_bmap.h" 184e4d5207SChristoph Hellwig #include "xfs_bmap_btree.h" 194e4d5207SChristoph Hellwig #include "xfs_trans_space.h" 204e4d5207SChristoph Hellwig #include "xfs_refcount.h" 214e4d5207SChristoph Hellwig #include "xfs_rtbitmap.h" 224e4d5207SChristoph Hellwig #include "xfs_rtrmap_btree.h" 234e4d5207SChristoph Hellwig #include "xfs_zone_alloc.h" 244e4d5207SChristoph Hellwig #include "xfs_zone_priv.h" 254e4d5207SChristoph Hellwig #include "xfs_zones.h" 264e4d5207SChristoph Hellwig #include "xfs_trace.h" 274e4d5207SChristoph Hellwig 284e4d5207SChristoph Hellwig void 294e4d5207SChristoph Hellwig xfs_open_zone_put( 304e4d5207SChristoph Hellwig struct xfs_open_zone *oz) 314e4d5207SChristoph Hellwig { 324e4d5207SChristoph Hellwig if (atomic_dec_and_test(&oz->oz_ref)) { 334e4d5207SChristoph Hellwig xfs_rtgroup_rele(oz->oz_rtg); 344e4d5207SChristoph Hellwig kfree(oz); 354e4d5207SChristoph Hellwig } 364e4d5207SChristoph Hellwig } 374e4d5207SChristoph Hellwig 38080d01c4SChristoph Hellwig static inline uint32_t 39080d01c4SChristoph Hellwig xfs_zone_bucket( 40080d01c4SChristoph Hellwig struct xfs_mount *mp, 41080d01c4SChristoph Hellwig uint32_t used_blocks) 42080d01c4SChristoph Hellwig { 43080d01c4SChristoph Hellwig return XFS_ZONE_USED_BUCKETS * used_blocks / 44080d01c4SChristoph Hellwig mp->m_groups[XG_TYPE_RTG].blocks; 45080d01c4SChristoph Hellwig } 46080d01c4SChristoph Hellwig 47080d01c4SChristoph Hellwig static inline void 48080d01c4SChristoph Hellwig xfs_zone_add_to_bucket( 49080d01c4SChristoph Hellwig struct xfs_zone_info *zi, 50080d01c4SChristoph Hellwig xfs_rgnumber_t rgno, 51080d01c4SChristoph Hellwig uint32_t to_bucket) 52080d01c4SChristoph Hellwig { 53080d01c4SChristoph Hellwig __set_bit(rgno, zi->zi_used_bucket_bitmap[to_bucket]); 54080d01c4SChristoph Hellwig zi->zi_used_bucket_entries[to_bucket]++; 55080d01c4SChristoph Hellwig } 56080d01c4SChristoph Hellwig 57080d01c4SChristoph Hellwig static inline void 58080d01c4SChristoph Hellwig xfs_zone_remove_from_bucket( 59080d01c4SChristoph Hellwig struct xfs_zone_info *zi, 60080d01c4SChristoph Hellwig xfs_rgnumber_t rgno, 61080d01c4SChristoph Hellwig uint32_t from_bucket) 62080d01c4SChristoph Hellwig { 63080d01c4SChristoph Hellwig __clear_bit(rgno, zi->zi_used_bucket_bitmap[from_bucket]); 64080d01c4SChristoph Hellwig zi->zi_used_bucket_entries[from_bucket]--; 65080d01c4SChristoph Hellwig } 66080d01c4SChristoph Hellwig 67080d01c4SChristoph Hellwig static void 68080d01c4SChristoph Hellwig xfs_zone_account_reclaimable( 69080d01c4SChristoph Hellwig struct xfs_rtgroup *rtg, 70080d01c4SChristoph Hellwig uint32_t freed) 71080d01c4SChristoph Hellwig { 72080d01c4SChristoph Hellwig struct xfs_group *xg = &rtg->rtg_group; 73080d01c4SChristoph Hellwig struct xfs_mount *mp = rtg_mount(rtg); 74080d01c4SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 75080d01c4SChristoph Hellwig uint32_t used = rtg_rmap(rtg)->i_used_blocks; 76080d01c4SChristoph Hellwig xfs_rgnumber_t rgno = rtg_rgno(rtg); 77080d01c4SChristoph Hellwig uint32_t from_bucket = xfs_zone_bucket(mp, used + freed); 78080d01c4SChristoph Hellwig uint32_t to_bucket = xfs_zone_bucket(mp, used); 79080d01c4SChristoph Hellwig bool was_full = (used + freed == rtg_blocks(rtg)); 80080d01c4SChristoph Hellwig 81080d01c4SChristoph Hellwig /* 82080d01c4SChristoph Hellwig * This can be called from log recovery, where the zone_info structure 83080d01c4SChristoph Hellwig * hasn't been allocated yet. Skip all work as xfs_mount_zones will 84080d01c4SChristoph Hellwig * add the zones to the right buckets before the file systems becomes 85080d01c4SChristoph Hellwig * active. 86080d01c4SChristoph Hellwig */ 87080d01c4SChristoph Hellwig if (!zi) 88080d01c4SChristoph Hellwig return; 89080d01c4SChristoph Hellwig 90080d01c4SChristoph Hellwig if (!used) { 91080d01c4SChristoph Hellwig /* 92080d01c4SChristoph Hellwig * The zone is now empty, remove it from the bottom bucket and 93080d01c4SChristoph Hellwig * trigger a reset. 94080d01c4SChristoph Hellwig */ 95080d01c4SChristoph Hellwig trace_xfs_zone_emptied(rtg); 96080d01c4SChristoph Hellwig 97080d01c4SChristoph Hellwig if (!was_full) 98080d01c4SChristoph Hellwig xfs_group_clear_mark(xg, XFS_RTG_RECLAIMABLE); 99080d01c4SChristoph Hellwig 100080d01c4SChristoph Hellwig spin_lock(&zi->zi_used_buckets_lock); 101080d01c4SChristoph Hellwig if (!was_full) 102080d01c4SChristoph Hellwig xfs_zone_remove_from_bucket(zi, rgno, from_bucket); 103080d01c4SChristoph Hellwig spin_unlock(&zi->zi_used_buckets_lock); 104080d01c4SChristoph Hellwig 105080d01c4SChristoph Hellwig spin_lock(&zi->zi_reset_list_lock); 106080d01c4SChristoph Hellwig xg->xg_next_reset = zi->zi_reset_list; 107080d01c4SChristoph Hellwig zi->zi_reset_list = xg; 108080d01c4SChristoph Hellwig spin_unlock(&zi->zi_reset_list_lock); 109080d01c4SChristoph Hellwig 110080d01c4SChristoph Hellwig if (zi->zi_gc_thread) 111080d01c4SChristoph Hellwig wake_up_process(zi->zi_gc_thread); 112080d01c4SChristoph Hellwig } else if (was_full) { 113080d01c4SChristoph Hellwig /* 114080d01c4SChristoph Hellwig * The zone transitioned from full, mark it up as reclaimable 115080d01c4SChristoph Hellwig * and wake up GC which might be waiting for zones to reclaim. 116080d01c4SChristoph Hellwig */ 117080d01c4SChristoph Hellwig spin_lock(&zi->zi_used_buckets_lock); 118080d01c4SChristoph Hellwig xfs_zone_add_to_bucket(zi, rgno, to_bucket); 119080d01c4SChristoph Hellwig spin_unlock(&zi->zi_used_buckets_lock); 120080d01c4SChristoph Hellwig 121080d01c4SChristoph Hellwig xfs_group_set_mark(xg, XFS_RTG_RECLAIMABLE); 122080d01c4SChristoph Hellwig if (zi->zi_gc_thread && xfs_zoned_need_gc(mp)) 123080d01c4SChristoph Hellwig wake_up_process(zi->zi_gc_thread); 124080d01c4SChristoph Hellwig } else if (to_bucket != from_bucket) { 125080d01c4SChristoph Hellwig /* 126080d01c4SChristoph Hellwig * Move the zone to a new bucket if it dropped below the 127080d01c4SChristoph Hellwig * threshold. 128080d01c4SChristoph Hellwig */ 129080d01c4SChristoph Hellwig spin_lock(&zi->zi_used_buckets_lock); 130080d01c4SChristoph Hellwig xfs_zone_add_to_bucket(zi, rgno, to_bucket); 131080d01c4SChristoph Hellwig xfs_zone_remove_from_bucket(zi, rgno, from_bucket); 132080d01c4SChristoph Hellwig spin_unlock(&zi->zi_used_buckets_lock); 133080d01c4SChristoph Hellwig } 134080d01c4SChristoph Hellwig } 135080d01c4SChristoph Hellwig 1364e4d5207SChristoph Hellwig static void 1374e4d5207SChristoph Hellwig xfs_open_zone_mark_full( 1384e4d5207SChristoph Hellwig struct xfs_open_zone *oz) 1394e4d5207SChristoph Hellwig { 1404e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg = oz->oz_rtg; 1414e4d5207SChristoph Hellwig struct xfs_mount *mp = rtg_mount(rtg); 1424e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 143080d01c4SChristoph Hellwig uint32_t used = rtg_rmap(rtg)->i_used_blocks; 1444e4d5207SChristoph Hellwig 1454e4d5207SChristoph Hellwig trace_xfs_zone_full(rtg); 1464e4d5207SChristoph Hellwig 1474e4d5207SChristoph Hellwig WRITE_ONCE(rtg->rtg_open_zone, NULL); 1484e4d5207SChristoph Hellwig 1494e4d5207SChristoph Hellwig spin_lock(&zi->zi_open_zones_lock); 1504e4d5207SChristoph Hellwig if (oz->oz_is_gc) { 1514e4d5207SChristoph Hellwig ASSERT(current == zi->zi_gc_thread); 1524e4d5207SChristoph Hellwig zi->zi_open_gc_zone = NULL; 1534e4d5207SChristoph Hellwig } else { 1544e4d5207SChristoph Hellwig zi->zi_nr_open_zones--; 1554e4d5207SChristoph Hellwig list_del_init(&oz->oz_entry); 1564e4d5207SChristoph Hellwig } 1574e4d5207SChristoph Hellwig spin_unlock(&zi->zi_open_zones_lock); 1584e4d5207SChristoph Hellwig xfs_open_zone_put(oz); 1594e4d5207SChristoph Hellwig 1604e4d5207SChristoph Hellwig wake_up_all(&zi->zi_zone_wait); 161080d01c4SChristoph Hellwig if (used < rtg_blocks(rtg)) 162080d01c4SChristoph Hellwig xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used); 1634e4d5207SChristoph Hellwig } 1644e4d5207SChristoph Hellwig 1654e4d5207SChristoph Hellwig static void 1664e4d5207SChristoph Hellwig xfs_zone_record_blocks( 1674e4d5207SChristoph Hellwig struct xfs_trans *tp, 1684e4d5207SChristoph Hellwig xfs_fsblock_t fsbno, 1694e4d5207SChristoph Hellwig xfs_filblks_t len, 1704e4d5207SChristoph Hellwig struct xfs_open_zone *oz, 1714e4d5207SChristoph Hellwig bool used) 1724e4d5207SChristoph Hellwig { 1734e4d5207SChristoph Hellwig struct xfs_mount *mp = tp->t_mountp; 1744e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg = oz->oz_rtg; 1754e4d5207SChristoph Hellwig struct xfs_inode *rmapip = rtg_rmap(rtg); 1764e4d5207SChristoph Hellwig 1774e4d5207SChristoph Hellwig trace_xfs_zone_record_blocks(oz, xfs_rtb_to_rgbno(mp, fsbno), len); 1784e4d5207SChristoph Hellwig 1794e4d5207SChristoph Hellwig xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); 1804e4d5207SChristoph Hellwig xfs_rtgroup_trans_join(tp, rtg, XFS_RTGLOCK_RMAP); 1814e4d5207SChristoph Hellwig if (used) { 1824e4d5207SChristoph Hellwig rmapip->i_used_blocks += len; 1834e4d5207SChristoph Hellwig ASSERT(rmapip->i_used_blocks <= rtg_blocks(rtg)); 1844e4d5207SChristoph Hellwig } else { 1854e4d5207SChristoph Hellwig xfs_add_frextents(mp, len); 1864e4d5207SChristoph Hellwig } 1874e4d5207SChristoph Hellwig oz->oz_written += len; 1884e4d5207SChristoph Hellwig if (oz->oz_written == rtg_blocks(rtg)) 1894e4d5207SChristoph Hellwig xfs_open_zone_mark_full(oz); 1904e4d5207SChristoph Hellwig xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE); 1914e4d5207SChristoph Hellwig } 1924e4d5207SChristoph Hellwig 1934e4d5207SChristoph Hellwig static int 1944e4d5207SChristoph Hellwig xfs_zoned_map_extent( 1954e4d5207SChristoph Hellwig struct xfs_trans *tp, 1964e4d5207SChristoph Hellwig struct xfs_inode *ip, 1974e4d5207SChristoph Hellwig struct xfs_bmbt_irec *new, 1984e4d5207SChristoph Hellwig struct xfs_open_zone *oz, 1994e4d5207SChristoph Hellwig xfs_fsblock_t old_startblock) 2004e4d5207SChristoph Hellwig { 2014e4d5207SChristoph Hellwig struct xfs_bmbt_irec data; 2024e4d5207SChristoph Hellwig int nmaps = 1; 2034e4d5207SChristoph Hellwig int error; 2044e4d5207SChristoph Hellwig 2054e4d5207SChristoph Hellwig /* Grab the corresponding mapping in the data fork. */ 2064e4d5207SChristoph Hellwig error = xfs_bmapi_read(ip, new->br_startoff, new->br_blockcount, &data, 2074e4d5207SChristoph Hellwig &nmaps, 0); 2084e4d5207SChristoph Hellwig if (error) 2094e4d5207SChristoph Hellwig return error; 2104e4d5207SChristoph Hellwig 2114e4d5207SChristoph Hellwig /* 2124e4d5207SChristoph Hellwig * Cap the update to the existing extent in the data fork because we can 2134e4d5207SChristoph Hellwig * only overwrite one extent at a time. 2144e4d5207SChristoph Hellwig */ 2154e4d5207SChristoph Hellwig ASSERT(new->br_blockcount >= data.br_blockcount); 2164e4d5207SChristoph Hellwig new->br_blockcount = data.br_blockcount; 2174e4d5207SChristoph Hellwig 2184e4d5207SChristoph Hellwig /* 2194e4d5207SChristoph Hellwig * If a data write raced with this GC write, keep the existing data in 2204e4d5207SChristoph Hellwig * the data fork, mark our newly written GC extent as reclaimable, then 2214e4d5207SChristoph Hellwig * move on to the next extent. 2224e4d5207SChristoph Hellwig */ 2234e4d5207SChristoph Hellwig if (old_startblock != NULLFSBLOCK && 2244e4d5207SChristoph Hellwig old_startblock != data.br_startblock) 2254e4d5207SChristoph Hellwig goto skip; 2264e4d5207SChristoph Hellwig 2274e4d5207SChristoph Hellwig trace_xfs_reflink_cow_remap_from(ip, new); 2284e4d5207SChristoph Hellwig trace_xfs_reflink_cow_remap_to(ip, &data); 2294e4d5207SChristoph Hellwig 2304e4d5207SChristoph Hellwig error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, 2314e4d5207SChristoph Hellwig XFS_IEXT_REFLINK_END_COW_CNT); 2324e4d5207SChristoph Hellwig if (error) 2334e4d5207SChristoph Hellwig return error; 2344e4d5207SChristoph Hellwig 2354e4d5207SChristoph Hellwig if (data.br_startblock != HOLESTARTBLOCK) { 2364e4d5207SChristoph Hellwig ASSERT(data.br_startblock != DELAYSTARTBLOCK); 2374e4d5207SChristoph Hellwig ASSERT(!isnullstartblock(data.br_startblock)); 2384e4d5207SChristoph Hellwig 2394e4d5207SChristoph Hellwig xfs_bmap_unmap_extent(tp, ip, XFS_DATA_FORK, &data); 2404e4d5207SChristoph Hellwig if (xfs_is_reflink_inode(ip)) { 2414e4d5207SChristoph Hellwig xfs_refcount_decrease_extent(tp, true, &data); 2424e4d5207SChristoph Hellwig } else { 2434e4d5207SChristoph Hellwig error = xfs_free_extent_later(tp, data.br_startblock, 2444e4d5207SChristoph Hellwig data.br_blockcount, NULL, 2454e4d5207SChristoph Hellwig XFS_AG_RESV_NONE, 2464e4d5207SChristoph Hellwig XFS_FREE_EXTENT_REALTIME); 2474e4d5207SChristoph Hellwig if (error) 2484e4d5207SChristoph Hellwig return error; 2494e4d5207SChristoph Hellwig } 2504e4d5207SChristoph Hellwig } 2514e4d5207SChristoph Hellwig 2524e4d5207SChristoph Hellwig xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz, 2534e4d5207SChristoph Hellwig true); 2544e4d5207SChristoph Hellwig 2554e4d5207SChristoph Hellwig /* Map the new blocks into the data fork. */ 2564e4d5207SChristoph Hellwig xfs_bmap_map_extent(tp, ip, XFS_DATA_FORK, new); 2574e4d5207SChristoph Hellwig return 0; 2584e4d5207SChristoph Hellwig 2594e4d5207SChristoph Hellwig skip: 2604e4d5207SChristoph Hellwig trace_xfs_reflink_cow_remap_skip(ip, new); 2614e4d5207SChristoph Hellwig xfs_zone_record_blocks(tp, new->br_startblock, new->br_blockcount, oz, 2624e4d5207SChristoph Hellwig false); 2634e4d5207SChristoph Hellwig return 0; 2644e4d5207SChristoph Hellwig } 2654e4d5207SChristoph Hellwig 2664e4d5207SChristoph Hellwig int 2674e4d5207SChristoph Hellwig xfs_zoned_end_io( 2684e4d5207SChristoph Hellwig struct xfs_inode *ip, 2694e4d5207SChristoph Hellwig xfs_off_t offset, 2704e4d5207SChristoph Hellwig xfs_off_t count, 2714e4d5207SChristoph Hellwig xfs_daddr_t daddr, 2724e4d5207SChristoph Hellwig struct xfs_open_zone *oz, 2734e4d5207SChristoph Hellwig xfs_fsblock_t old_startblock) 2744e4d5207SChristoph Hellwig { 2754e4d5207SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 2764e4d5207SChristoph Hellwig xfs_fileoff_t end_fsb = XFS_B_TO_FSB(mp, offset + count); 2774e4d5207SChristoph Hellwig struct xfs_bmbt_irec new = { 2784e4d5207SChristoph Hellwig .br_startoff = XFS_B_TO_FSBT(mp, offset), 2794e4d5207SChristoph Hellwig .br_startblock = xfs_daddr_to_rtb(mp, daddr), 2804e4d5207SChristoph Hellwig .br_state = XFS_EXT_NORM, 2814e4d5207SChristoph Hellwig }; 2824e4d5207SChristoph Hellwig unsigned int resblks = 2834e4d5207SChristoph Hellwig XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK); 2844e4d5207SChristoph Hellwig struct xfs_trans *tp; 2854e4d5207SChristoph Hellwig int error; 2864e4d5207SChristoph Hellwig 2874e4d5207SChristoph Hellwig if (xfs_is_shutdown(mp)) 2884e4d5207SChristoph Hellwig return -EIO; 2894e4d5207SChristoph Hellwig 2904e4d5207SChristoph Hellwig while (new.br_startoff < end_fsb) { 2914e4d5207SChristoph Hellwig new.br_blockcount = end_fsb - new.br_startoff; 2924e4d5207SChristoph Hellwig 2934e4d5207SChristoph Hellwig error = xfs_trans_alloc(mp, &M_RES(mp)->tr_write, resblks, 0, 2944e4d5207SChristoph Hellwig XFS_TRANS_RESERVE | XFS_TRANS_RES_FDBLKS, &tp); 2954e4d5207SChristoph Hellwig if (error) 2964e4d5207SChristoph Hellwig return error; 2974e4d5207SChristoph Hellwig xfs_ilock(ip, XFS_ILOCK_EXCL); 2984e4d5207SChristoph Hellwig xfs_trans_ijoin(tp, ip, 0); 2994e4d5207SChristoph Hellwig 3004e4d5207SChristoph Hellwig error = xfs_zoned_map_extent(tp, ip, &new, oz, old_startblock); 3014e4d5207SChristoph Hellwig if (error) 3024e4d5207SChristoph Hellwig xfs_trans_cancel(tp); 3034e4d5207SChristoph Hellwig else 3044e4d5207SChristoph Hellwig error = xfs_trans_commit(tp); 3054e4d5207SChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_EXCL); 3064e4d5207SChristoph Hellwig if (error) 3074e4d5207SChristoph Hellwig return error; 3084e4d5207SChristoph Hellwig 3094e4d5207SChristoph Hellwig new.br_startoff += new.br_blockcount; 3104e4d5207SChristoph Hellwig new.br_startblock += new.br_blockcount; 3114e4d5207SChristoph Hellwig if (old_startblock != NULLFSBLOCK) 3124e4d5207SChristoph Hellwig old_startblock += new.br_blockcount; 3134e4d5207SChristoph Hellwig } 3144e4d5207SChristoph Hellwig 3154e4d5207SChristoph Hellwig return 0; 3164e4d5207SChristoph Hellwig } 3174e4d5207SChristoph Hellwig 3184e4d5207SChristoph Hellwig /* 3194e4d5207SChristoph Hellwig * "Free" blocks allocated in a zone. 3204e4d5207SChristoph Hellwig * 3214e4d5207SChristoph Hellwig * Just decrement the used blocks counter and report the space as freed. 3224e4d5207SChristoph Hellwig */ 3234e4d5207SChristoph Hellwig int 3244e4d5207SChristoph Hellwig xfs_zone_free_blocks( 3254e4d5207SChristoph Hellwig struct xfs_trans *tp, 3264e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg, 3274e4d5207SChristoph Hellwig xfs_fsblock_t fsbno, 3284e4d5207SChristoph Hellwig xfs_filblks_t len) 3294e4d5207SChristoph Hellwig { 3304e4d5207SChristoph Hellwig struct xfs_mount *mp = tp->t_mountp; 3314e4d5207SChristoph Hellwig struct xfs_inode *rmapip = rtg_rmap(rtg); 3324e4d5207SChristoph Hellwig 3334e4d5207SChristoph Hellwig xfs_assert_ilocked(rmapip, XFS_ILOCK_EXCL); 3344e4d5207SChristoph Hellwig 3354e4d5207SChristoph Hellwig if (len > rmapip->i_used_blocks) { 3364e4d5207SChristoph Hellwig xfs_err(mp, 3374e4d5207SChristoph Hellwig "trying to free more blocks (%lld) than used counter (%u).", 3384e4d5207SChristoph Hellwig len, rmapip->i_used_blocks); 3394e4d5207SChristoph Hellwig ASSERT(len <= rmapip->i_used_blocks); 3404e4d5207SChristoph Hellwig xfs_rtginode_mark_sick(rtg, XFS_RTGI_RMAP); 3414e4d5207SChristoph Hellwig xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 3424e4d5207SChristoph Hellwig return -EFSCORRUPTED; 3434e4d5207SChristoph Hellwig } 3444e4d5207SChristoph Hellwig 3454e4d5207SChristoph Hellwig trace_xfs_zone_free_blocks(rtg, xfs_rtb_to_rgbno(mp, fsbno), len); 3464e4d5207SChristoph Hellwig 3474e4d5207SChristoph Hellwig rmapip->i_used_blocks -= len; 348080d01c4SChristoph Hellwig /* 349080d01c4SChristoph Hellwig * Don't add open zones to the reclaimable buckets. The I/O completion 350080d01c4SChristoph Hellwig * for writing the last block will take care of accounting for already 351080d01c4SChristoph Hellwig * unused blocks instead. 352080d01c4SChristoph Hellwig */ 353080d01c4SChristoph Hellwig if (!READ_ONCE(rtg->rtg_open_zone)) 354080d01c4SChristoph Hellwig xfs_zone_account_reclaimable(rtg, len); 3554e4d5207SChristoph Hellwig xfs_add_frextents(mp, len); 3564e4d5207SChristoph Hellwig xfs_trans_log_inode(tp, rmapip, XFS_ILOG_CORE); 3574e4d5207SChristoph Hellwig return 0; 3584e4d5207SChristoph Hellwig } 3594e4d5207SChristoph Hellwig 3604e4d5207SChristoph Hellwig /* 3614e4d5207SChristoph Hellwig * Check if the zone containing the data just before the offset we are 3624e4d5207SChristoph Hellwig * writing to is still open and has space. 3634e4d5207SChristoph Hellwig */ 3644e4d5207SChristoph Hellwig static struct xfs_open_zone * 3654e4d5207SChristoph Hellwig xfs_last_used_zone( 3664e4d5207SChristoph Hellwig struct iomap_ioend *ioend) 3674e4d5207SChristoph Hellwig { 3684e4d5207SChristoph Hellwig struct xfs_inode *ip = XFS_I(ioend->io_inode); 3694e4d5207SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 3704e4d5207SChristoph Hellwig xfs_fileoff_t offset_fsb = XFS_B_TO_FSB(mp, ioend->io_offset); 3714e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg = NULL; 3724e4d5207SChristoph Hellwig struct xfs_open_zone *oz = NULL; 3734e4d5207SChristoph Hellwig struct xfs_iext_cursor icur; 3744e4d5207SChristoph Hellwig struct xfs_bmbt_irec got; 3754e4d5207SChristoph Hellwig 3764e4d5207SChristoph Hellwig xfs_ilock(ip, XFS_ILOCK_SHARED); 3774e4d5207SChristoph Hellwig if (!xfs_iext_lookup_extent_before(ip, &ip->i_df, &offset_fsb, 3784e4d5207SChristoph Hellwig &icur, &got)) { 3794e4d5207SChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_SHARED); 3804e4d5207SChristoph Hellwig return NULL; 3814e4d5207SChristoph Hellwig } 3824e4d5207SChristoph Hellwig xfs_iunlock(ip, XFS_ILOCK_SHARED); 3834e4d5207SChristoph Hellwig 3844e4d5207SChristoph Hellwig rtg = xfs_rtgroup_grab(mp, xfs_rtb_to_rgno(mp, got.br_startblock)); 3854e4d5207SChristoph Hellwig if (!rtg) 3864e4d5207SChristoph Hellwig return NULL; 3874e4d5207SChristoph Hellwig 3884e4d5207SChristoph Hellwig xfs_ilock(rtg_rmap(rtg), XFS_ILOCK_SHARED); 3894e4d5207SChristoph Hellwig oz = READ_ONCE(rtg->rtg_open_zone); 3904e4d5207SChristoph Hellwig if (oz && (oz->oz_is_gc || !atomic_inc_not_zero(&oz->oz_ref))) 3914e4d5207SChristoph Hellwig oz = NULL; 3924e4d5207SChristoph Hellwig xfs_iunlock(rtg_rmap(rtg), XFS_ILOCK_SHARED); 3934e4d5207SChristoph Hellwig 3944e4d5207SChristoph Hellwig xfs_rtgroup_rele(rtg); 3954e4d5207SChristoph Hellwig return oz; 3964e4d5207SChristoph Hellwig } 3974e4d5207SChristoph Hellwig 3984e4d5207SChristoph Hellwig static struct xfs_group * 3994e4d5207SChristoph Hellwig xfs_find_free_zone( 4004e4d5207SChristoph Hellwig struct xfs_mount *mp, 4014e4d5207SChristoph Hellwig unsigned long start, 4024e4d5207SChristoph Hellwig unsigned long end) 4034e4d5207SChristoph Hellwig { 4044e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 4054e4d5207SChristoph Hellwig XA_STATE (xas, &mp->m_groups[XG_TYPE_RTG].xa, start); 4064e4d5207SChristoph Hellwig struct xfs_group *xg; 4074e4d5207SChristoph Hellwig 4084e4d5207SChristoph Hellwig xas_lock(&xas); 4094e4d5207SChristoph Hellwig xas_for_each_marked(&xas, xg, end, XFS_RTG_FREE) 4104e4d5207SChristoph Hellwig if (atomic_inc_not_zero(&xg->xg_active_ref)) 4114e4d5207SChristoph Hellwig goto found; 4124e4d5207SChristoph Hellwig xas_unlock(&xas); 4134e4d5207SChristoph Hellwig return NULL; 4144e4d5207SChristoph Hellwig 4154e4d5207SChristoph Hellwig found: 4164e4d5207SChristoph Hellwig xas_clear_mark(&xas, XFS_RTG_FREE); 4174e4d5207SChristoph Hellwig atomic_dec(&zi->zi_nr_free_zones); 4184e4d5207SChristoph Hellwig zi->zi_free_zone_cursor = xg->xg_gno; 4194e4d5207SChristoph Hellwig xas_unlock(&xas); 4204e4d5207SChristoph Hellwig return xg; 4214e4d5207SChristoph Hellwig } 4224e4d5207SChristoph Hellwig 4234e4d5207SChristoph Hellwig static struct xfs_open_zone * 4244e4d5207SChristoph Hellwig xfs_init_open_zone( 4254e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg, 4264e4d5207SChristoph Hellwig xfs_rgblock_t write_pointer, 42764d03611SHans Holmberg enum rw_hint write_hint, 4284e4d5207SChristoph Hellwig bool is_gc) 4294e4d5207SChristoph Hellwig { 4304e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 4314e4d5207SChristoph Hellwig 4324e4d5207SChristoph Hellwig oz = kzalloc(sizeof(*oz), GFP_NOFS | __GFP_NOFAIL); 4334e4d5207SChristoph Hellwig spin_lock_init(&oz->oz_alloc_lock); 4344e4d5207SChristoph Hellwig atomic_set(&oz->oz_ref, 1); 4354e4d5207SChristoph Hellwig oz->oz_rtg = rtg; 4364e4d5207SChristoph Hellwig oz->oz_write_pointer = write_pointer; 4374e4d5207SChristoph Hellwig oz->oz_written = write_pointer; 43864d03611SHans Holmberg oz->oz_write_hint = write_hint; 4394e4d5207SChristoph Hellwig oz->oz_is_gc = is_gc; 4404e4d5207SChristoph Hellwig 4414e4d5207SChristoph Hellwig /* 4424e4d5207SChristoph Hellwig * All dereferences of rtg->rtg_open_zone hold the ILOCK for the rmap 4434e4d5207SChristoph Hellwig * inode, but we don't really want to take that here because we are 4444e4d5207SChristoph Hellwig * under the zone_list_lock. Ensure the pointer is only set for a fully 4454e4d5207SChristoph Hellwig * initialized open zone structure so that a racy lookup finding it is 4464e4d5207SChristoph Hellwig * fine. 4474e4d5207SChristoph Hellwig */ 4484e4d5207SChristoph Hellwig WRITE_ONCE(rtg->rtg_open_zone, oz); 4494e4d5207SChristoph Hellwig return oz; 4504e4d5207SChristoph Hellwig } 4514e4d5207SChristoph Hellwig 4524e4d5207SChristoph Hellwig /* 4534e4d5207SChristoph Hellwig * Find a completely free zone, open it, and return a reference. 4544e4d5207SChristoph Hellwig */ 4554e4d5207SChristoph Hellwig struct xfs_open_zone * 4564e4d5207SChristoph Hellwig xfs_open_zone( 4574e4d5207SChristoph Hellwig struct xfs_mount *mp, 45864d03611SHans Holmberg enum rw_hint write_hint, 4594e4d5207SChristoph Hellwig bool is_gc) 4604e4d5207SChristoph Hellwig { 4614e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 4624e4d5207SChristoph Hellwig struct xfs_group *xg; 4634e4d5207SChristoph Hellwig 4644e4d5207SChristoph Hellwig xg = xfs_find_free_zone(mp, zi->zi_free_zone_cursor, ULONG_MAX); 4654e4d5207SChristoph Hellwig if (!xg) 4664e4d5207SChristoph Hellwig xg = xfs_find_free_zone(mp, 0, zi->zi_free_zone_cursor); 4674e4d5207SChristoph Hellwig if (!xg) 4684e4d5207SChristoph Hellwig return NULL; 4694e4d5207SChristoph Hellwig 4704e4d5207SChristoph Hellwig set_current_state(TASK_RUNNING); 47164d03611SHans Holmberg return xfs_init_open_zone(to_rtg(xg), 0, write_hint, is_gc); 4724e4d5207SChristoph Hellwig } 4734e4d5207SChristoph Hellwig 4744e4d5207SChristoph Hellwig static struct xfs_open_zone * 4754e4d5207SChristoph Hellwig xfs_try_open_zone( 47664d03611SHans Holmberg struct xfs_mount *mp, 47764d03611SHans Holmberg enum rw_hint write_hint) 4784e4d5207SChristoph Hellwig { 4794e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 4804e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 4814e4d5207SChristoph Hellwig 4824e4d5207SChristoph Hellwig if (zi->zi_nr_open_zones >= mp->m_max_open_zones - XFS_OPEN_GC_ZONES) 4834e4d5207SChristoph Hellwig return NULL; 4844e4d5207SChristoph Hellwig if (atomic_read(&zi->zi_nr_free_zones) < 4854e4d5207SChristoph Hellwig XFS_GC_ZONES - XFS_OPEN_GC_ZONES) 4864e4d5207SChristoph Hellwig return NULL; 4874e4d5207SChristoph Hellwig 4884e4d5207SChristoph Hellwig /* 4894e4d5207SChristoph Hellwig * Increment the open zone count to reserve our slot before dropping 4904e4d5207SChristoph Hellwig * zi_open_zones_lock. 4914e4d5207SChristoph Hellwig */ 4924e4d5207SChristoph Hellwig zi->zi_nr_open_zones++; 4934e4d5207SChristoph Hellwig spin_unlock(&zi->zi_open_zones_lock); 49464d03611SHans Holmberg oz = xfs_open_zone(mp, write_hint, false); 4954e4d5207SChristoph Hellwig spin_lock(&zi->zi_open_zones_lock); 4964e4d5207SChristoph Hellwig if (!oz) { 4974e4d5207SChristoph Hellwig zi->zi_nr_open_zones--; 4984e4d5207SChristoph Hellwig return NULL; 4994e4d5207SChristoph Hellwig } 5004e4d5207SChristoph Hellwig 5014e4d5207SChristoph Hellwig atomic_inc(&oz->oz_ref); 5024e4d5207SChristoph Hellwig list_add_tail(&oz->oz_entry, &zi->zi_open_zones); 5034e4d5207SChristoph Hellwig 5044e4d5207SChristoph Hellwig /* 5054e4d5207SChristoph Hellwig * If this was the last free zone, other waiters might be waiting 5064e4d5207SChristoph Hellwig * on us to write to it as well. 5074e4d5207SChristoph Hellwig */ 5084e4d5207SChristoph Hellwig wake_up_all(&zi->zi_zone_wait); 5094e4d5207SChristoph Hellwig 510080d01c4SChristoph Hellwig if (xfs_zoned_need_gc(mp)) 511080d01c4SChristoph Hellwig wake_up_process(zi->zi_gc_thread); 512080d01c4SChristoph Hellwig 5134e4d5207SChristoph Hellwig trace_xfs_zone_opened(oz->oz_rtg); 5144e4d5207SChristoph Hellwig return oz; 5154e4d5207SChristoph Hellwig } 5164e4d5207SChristoph Hellwig 51764d03611SHans Holmberg /* 51864d03611SHans Holmberg * For data with short or medium lifetime, try to colocated it into an 51964d03611SHans Holmberg * already open zone with a matching temperature. 52064d03611SHans Holmberg */ 52164d03611SHans Holmberg static bool 52264d03611SHans Holmberg xfs_colocate_eagerly( 52364d03611SHans Holmberg enum rw_hint file_hint) 52464d03611SHans Holmberg { 52564d03611SHans Holmberg switch (file_hint) { 52664d03611SHans Holmberg case WRITE_LIFE_MEDIUM: 52764d03611SHans Holmberg case WRITE_LIFE_SHORT: 52864d03611SHans Holmberg case WRITE_LIFE_NONE: 52964d03611SHans Holmberg return true; 53064d03611SHans Holmberg default: 53164d03611SHans Holmberg return false; 53264d03611SHans Holmberg } 53364d03611SHans Holmberg } 53464d03611SHans Holmberg 53564d03611SHans Holmberg static bool 53664d03611SHans Holmberg xfs_good_hint_match( 53764d03611SHans Holmberg struct xfs_open_zone *oz, 53864d03611SHans Holmberg enum rw_hint file_hint) 53964d03611SHans Holmberg { 54064d03611SHans Holmberg switch (oz->oz_write_hint) { 54164d03611SHans Holmberg case WRITE_LIFE_LONG: 54264d03611SHans Holmberg case WRITE_LIFE_EXTREME: 54364d03611SHans Holmberg /* colocate long and extreme */ 54464d03611SHans Holmberg if (file_hint == WRITE_LIFE_LONG || 54564d03611SHans Holmberg file_hint == WRITE_LIFE_EXTREME) 54664d03611SHans Holmberg return true; 54764d03611SHans Holmberg break; 54864d03611SHans Holmberg case WRITE_LIFE_MEDIUM: 54964d03611SHans Holmberg /* colocate medium with medium */ 55064d03611SHans Holmberg if (file_hint == WRITE_LIFE_MEDIUM) 55164d03611SHans Holmberg return true; 55264d03611SHans Holmberg break; 55364d03611SHans Holmberg case WRITE_LIFE_SHORT: 55464d03611SHans Holmberg case WRITE_LIFE_NONE: 55564d03611SHans Holmberg case WRITE_LIFE_NOT_SET: 55664d03611SHans Holmberg /* colocate short and none */ 55764d03611SHans Holmberg if (file_hint <= WRITE_LIFE_SHORT) 55864d03611SHans Holmberg return true; 55964d03611SHans Holmberg break; 56064d03611SHans Holmberg } 56164d03611SHans Holmberg return false; 56264d03611SHans Holmberg } 56364d03611SHans Holmberg 5644e4d5207SChristoph Hellwig static bool 5654e4d5207SChristoph Hellwig xfs_try_use_zone( 5664e4d5207SChristoph Hellwig struct xfs_zone_info *zi, 56764d03611SHans Holmberg enum rw_hint file_hint, 56864d03611SHans Holmberg struct xfs_open_zone *oz, 56964d03611SHans Holmberg bool lowspace) 5704e4d5207SChristoph Hellwig { 5714e4d5207SChristoph Hellwig if (oz->oz_write_pointer == rtg_blocks(oz->oz_rtg)) 5724e4d5207SChristoph Hellwig return false; 57364d03611SHans Holmberg if (!lowspace && !xfs_good_hint_match(oz, file_hint)) 57464d03611SHans Holmberg return false; 5754e4d5207SChristoph Hellwig if (!atomic_inc_not_zero(&oz->oz_ref)) 5764e4d5207SChristoph Hellwig return false; 5774e4d5207SChristoph Hellwig 5784e4d5207SChristoph Hellwig /* 57964d03611SHans Holmberg * If we have a hint set for the data, use that for the zone even if 58064d03611SHans Holmberg * some data was written already without any hint set, but don't change 58164d03611SHans Holmberg * the temperature after that as that would make little sense without 58264d03611SHans Holmberg * tracking per-temperature class written block counts, which is 58364d03611SHans Holmberg * probably overkill anyway. 58464d03611SHans Holmberg */ 58564d03611SHans Holmberg if (file_hint != WRITE_LIFE_NOT_SET && 58664d03611SHans Holmberg oz->oz_write_hint == WRITE_LIFE_NOT_SET) 58764d03611SHans Holmberg oz->oz_write_hint = file_hint; 58864d03611SHans Holmberg 58964d03611SHans Holmberg /* 5904e4d5207SChristoph Hellwig * If we couldn't match by inode or life time we just pick the first 5914e4d5207SChristoph Hellwig * zone with enough space above. For that we want the least busy zone 5924e4d5207SChristoph Hellwig * for some definition of "least" busy. For now this simple LRU 5934e4d5207SChristoph Hellwig * algorithm that rotates every zone to the end of the list will do it, 5944e4d5207SChristoph Hellwig * even if it isn't exactly cache friendly. 5954e4d5207SChristoph Hellwig */ 5964e4d5207SChristoph Hellwig if (!list_is_last(&oz->oz_entry, &zi->zi_open_zones)) 5974e4d5207SChristoph Hellwig list_move_tail(&oz->oz_entry, &zi->zi_open_zones); 5984e4d5207SChristoph Hellwig return true; 5994e4d5207SChristoph Hellwig } 6004e4d5207SChristoph Hellwig 6014e4d5207SChristoph Hellwig static struct xfs_open_zone * 6024e4d5207SChristoph Hellwig xfs_select_open_zone_lru( 60364d03611SHans Holmberg struct xfs_zone_info *zi, 60464d03611SHans Holmberg enum rw_hint file_hint, 60564d03611SHans Holmberg bool lowspace) 6064e4d5207SChristoph Hellwig { 6074e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 6084e4d5207SChristoph Hellwig 6094e4d5207SChristoph Hellwig lockdep_assert_held(&zi->zi_open_zones_lock); 6104e4d5207SChristoph Hellwig 6114e4d5207SChristoph Hellwig list_for_each_entry(oz, &zi->zi_open_zones, oz_entry) 61264d03611SHans Holmberg if (xfs_try_use_zone(zi, file_hint, oz, lowspace)) 6134e4d5207SChristoph Hellwig return oz; 6144e4d5207SChristoph Hellwig 6154e4d5207SChristoph Hellwig cond_resched_lock(&zi->zi_open_zones_lock); 6164e4d5207SChristoph Hellwig return NULL; 6174e4d5207SChristoph Hellwig } 6184e4d5207SChristoph Hellwig 6194e4d5207SChristoph Hellwig static struct xfs_open_zone * 6204e4d5207SChristoph Hellwig xfs_select_open_zone_mru( 62164d03611SHans Holmberg struct xfs_zone_info *zi, 62264d03611SHans Holmberg enum rw_hint file_hint) 6234e4d5207SChristoph Hellwig { 6244e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 6254e4d5207SChristoph Hellwig 6264e4d5207SChristoph Hellwig lockdep_assert_held(&zi->zi_open_zones_lock); 6274e4d5207SChristoph Hellwig 6284e4d5207SChristoph Hellwig list_for_each_entry_reverse(oz, &zi->zi_open_zones, oz_entry) 62964d03611SHans Holmberg if (xfs_try_use_zone(zi, file_hint, oz, false)) 6304e4d5207SChristoph Hellwig return oz; 6314e4d5207SChristoph Hellwig 6324e4d5207SChristoph Hellwig cond_resched_lock(&zi->zi_open_zones_lock); 6334e4d5207SChristoph Hellwig return NULL; 6344e4d5207SChristoph Hellwig } 6354e4d5207SChristoph Hellwig 63664d03611SHans Holmberg static inline enum rw_hint xfs_inode_write_hint(struct xfs_inode *ip) 63764d03611SHans Holmberg { 63864d03611SHans Holmberg if (xfs_has_nolifetime(ip->i_mount)) 63964d03611SHans Holmberg return WRITE_LIFE_NOT_SET; 64064d03611SHans Holmberg return VFS_I(ip)->i_write_hint; 64164d03611SHans Holmberg } 64264d03611SHans Holmberg 6434e4d5207SChristoph Hellwig /* 6444e4d5207SChristoph Hellwig * Try to pack inodes that are written back after they were closed tight instead 6454e4d5207SChristoph Hellwig * of trying to open new zones for them or spread them to the least recently 6464e4d5207SChristoph Hellwig * used zone. This optimizes the data layout for workloads that untar or copy 6474e4d5207SChristoph Hellwig * a lot of small files. Right now this does not separate multiple such 6484e4d5207SChristoph Hellwig * streams. 6494e4d5207SChristoph Hellwig */ 6504e4d5207SChristoph Hellwig static inline bool xfs_zoned_pack_tight(struct xfs_inode *ip) 6514e4d5207SChristoph Hellwig { 6524e4d5207SChristoph Hellwig return !inode_is_open_for_write(VFS_I(ip)) && 6534e4d5207SChristoph Hellwig !(ip->i_diflags & XFS_DIFLAG_APPEND); 6544e4d5207SChristoph Hellwig } 6554e4d5207SChristoph Hellwig 6564e4d5207SChristoph Hellwig /* 6574e4d5207SChristoph Hellwig * Pick a new zone for writes. 6584e4d5207SChristoph Hellwig * 6594e4d5207SChristoph Hellwig * If we aren't using up our budget of open zones just open a new one from the 6604e4d5207SChristoph Hellwig * freelist. Else try to find one that matches the expected data lifetime. If 6614e4d5207SChristoph Hellwig * we don't find one that is good pick any zone that is available. 6624e4d5207SChristoph Hellwig */ 6634e4d5207SChristoph Hellwig static struct xfs_open_zone * 6644e4d5207SChristoph Hellwig xfs_select_zone_nowait( 6654e4d5207SChristoph Hellwig struct xfs_mount *mp, 66664d03611SHans Holmberg enum rw_hint write_hint, 6674e4d5207SChristoph Hellwig bool pack_tight) 6684e4d5207SChristoph Hellwig { 6694e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 6704e4d5207SChristoph Hellwig struct xfs_open_zone *oz = NULL; 6714e4d5207SChristoph Hellwig 6724e4d5207SChristoph Hellwig if (xfs_is_shutdown(mp)) 6734e4d5207SChristoph Hellwig return NULL; 6744e4d5207SChristoph Hellwig 67564d03611SHans Holmberg /* 67664d03611SHans Holmberg * Try to fill up open zones with matching temperature if available. It 67764d03611SHans Holmberg * is better to try to co-locate data when this is favorable, so we can 67864d03611SHans Holmberg * activate empty zones when it is statistically better to separate 67964d03611SHans Holmberg * data. 68064d03611SHans Holmberg */ 6814e4d5207SChristoph Hellwig spin_lock(&zi->zi_open_zones_lock); 68264d03611SHans Holmberg if (xfs_colocate_eagerly(write_hint)) 68364d03611SHans Holmberg oz = xfs_select_open_zone_lru(zi, write_hint, false); 68464d03611SHans Holmberg else if (pack_tight) 68564d03611SHans Holmberg oz = xfs_select_open_zone_mru(zi, write_hint); 6864e4d5207SChristoph Hellwig if (oz) 6874e4d5207SChristoph Hellwig goto out_unlock; 6884e4d5207SChristoph Hellwig 6894e4d5207SChristoph Hellwig /* 6904e4d5207SChristoph Hellwig * See if we can open a new zone and use that. 6914e4d5207SChristoph Hellwig */ 69264d03611SHans Holmberg oz = xfs_try_open_zone(mp, write_hint); 6934e4d5207SChristoph Hellwig if (oz) 6944e4d5207SChristoph Hellwig goto out_unlock; 6954e4d5207SChristoph Hellwig 69664d03611SHans Holmberg /* 69764d03611SHans Holmberg * Try to colocate cold data with other cold data if we failed to open a 69864d03611SHans Holmberg * new zone for it. 69964d03611SHans Holmberg */ 70064d03611SHans Holmberg if (write_hint != WRITE_LIFE_NOT_SET && 70164d03611SHans Holmberg !xfs_colocate_eagerly(write_hint)) 70264d03611SHans Holmberg oz = xfs_select_open_zone_lru(zi, write_hint, false); 70364d03611SHans Holmberg if (!oz) 70464d03611SHans Holmberg oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, false); 70564d03611SHans Holmberg if (!oz) 70664d03611SHans Holmberg oz = xfs_select_open_zone_lru(zi, WRITE_LIFE_NOT_SET, true); 7074e4d5207SChristoph Hellwig out_unlock: 7084e4d5207SChristoph Hellwig spin_unlock(&zi->zi_open_zones_lock); 7094e4d5207SChristoph Hellwig return oz; 7104e4d5207SChristoph Hellwig } 7114e4d5207SChristoph Hellwig 7124e4d5207SChristoph Hellwig static struct xfs_open_zone * 7134e4d5207SChristoph Hellwig xfs_select_zone( 7144e4d5207SChristoph Hellwig struct xfs_mount *mp, 71564d03611SHans Holmberg enum rw_hint write_hint, 7164e4d5207SChristoph Hellwig bool pack_tight) 7174e4d5207SChristoph Hellwig { 7184e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 7194e4d5207SChristoph Hellwig DEFINE_WAIT (wait); 7204e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 7214e4d5207SChristoph Hellwig 72264d03611SHans Holmberg oz = xfs_select_zone_nowait(mp, write_hint, pack_tight); 7234e4d5207SChristoph Hellwig if (oz) 7244e4d5207SChristoph Hellwig return oz; 7254e4d5207SChristoph Hellwig 7264e4d5207SChristoph Hellwig for (;;) { 7274e4d5207SChristoph Hellwig prepare_to_wait(&zi->zi_zone_wait, &wait, TASK_UNINTERRUPTIBLE); 72864d03611SHans Holmberg oz = xfs_select_zone_nowait(mp, write_hint, pack_tight); 7294e4d5207SChristoph Hellwig if (oz) 7304e4d5207SChristoph Hellwig break; 7314e4d5207SChristoph Hellwig schedule(); 7324e4d5207SChristoph Hellwig } 7334e4d5207SChristoph Hellwig finish_wait(&zi->zi_zone_wait, &wait); 7344e4d5207SChristoph Hellwig return oz; 7354e4d5207SChristoph Hellwig } 7364e4d5207SChristoph Hellwig 7374e4d5207SChristoph Hellwig static unsigned int 7384e4d5207SChristoph Hellwig xfs_zone_alloc_blocks( 7394e4d5207SChristoph Hellwig struct xfs_open_zone *oz, 7404e4d5207SChristoph Hellwig xfs_filblks_t count_fsb, 7414e4d5207SChristoph Hellwig sector_t *sector, 7424e4d5207SChristoph Hellwig bool *is_seq) 7434e4d5207SChristoph Hellwig { 7444e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg = oz->oz_rtg; 7454e4d5207SChristoph Hellwig struct xfs_mount *mp = rtg_mount(rtg); 7464e4d5207SChristoph Hellwig xfs_rgblock_t rgbno; 7474e4d5207SChristoph Hellwig 7484e4d5207SChristoph Hellwig spin_lock(&oz->oz_alloc_lock); 7494e4d5207SChristoph Hellwig count_fsb = min3(count_fsb, XFS_MAX_BMBT_EXTLEN, 7504e4d5207SChristoph Hellwig (xfs_filblks_t)rtg_blocks(rtg) - oz->oz_write_pointer); 7514e4d5207SChristoph Hellwig if (!count_fsb) { 7524e4d5207SChristoph Hellwig spin_unlock(&oz->oz_alloc_lock); 7534e4d5207SChristoph Hellwig return 0; 7544e4d5207SChristoph Hellwig } 7554e4d5207SChristoph Hellwig rgbno = oz->oz_write_pointer; 7564e4d5207SChristoph Hellwig oz->oz_write_pointer += count_fsb; 7574e4d5207SChristoph Hellwig spin_unlock(&oz->oz_alloc_lock); 7584e4d5207SChristoph Hellwig 7594e4d5207SChristoph Hellwig trace_xfs_zone_alloc_blocks(oz, rgbno, count_fsb); 7604e4d5207SChristoph Hellwig 7614e4d5207SChristoph Hellwig *sector = xfs_gbno_to_daddr(&rtg->rtg_group, 0); 7624e4d5207SChristoph Hellwig *is_seq = bdev_zone_is_seq(mp->m_rtdev_targp->bt_bdev, *sector); 7634e4d5207SChristoph Hellwig if (!*is_seq) 7644e4d5207SChristoph Hellwig *sector += XFS_FSB_TO_BB(mp, rgbno); 7654e4d5207SChristoph Hellwig return XFS_FSB_TO_B(mp, count_fsb); 7664e4d5207SChristoph Hellwig } 7674e4d5207SChristoph Hellwig 7684e4d5207SChristoph Hellwig void 7694e4d5207SChristoph Hellwig xfs_mark_rtg_boundary( 7704e4d5207SChristoph Hellwig struct iomap_ioend *ioend) 7714e4d5207SChristoph Hellwig { 7724e4d5207SChristoph Hellwig struct xfs_mount *mp = XFS_I(ioend->io_inode)->i_mount; 7734e4d5207SChristoph Hellwig sector_t sector = ioend->io_bio.bi_iter.bi_sector; 7744e4d5207SChristoph Hellwig 7754e4d5207SChristoph Hellwig if (xfs_rtb_to_rgbno(mp, xfs_daddr_to_rtb(mp, sector)) == 0) 7764e4d5207SChristoph Hellwig ioend->io_flags |= IOMAP_IOEND_BOUNDARY; 7774e4d5207SChristoph Hellwig } 7784e4d5207SChristoph Hellwig 7794e4d5207SChristoph Hellwig static void 7804e4d5207SChristoph Hellwig xfs_submit_zoned_bio( 7814e4d5207SChristoph Hellwig struct iomap_ioend *ioend, 7824e4d5207SChristoph Hellwig struct xfs_open_zone *oz, 7834e4d5207SChristoph Hellwig bool is_seq) 7844e4d5207SChristoph Hellwig { 7854e4d5207SChristoph Hellwig ioend->io_bio.bi_iter.bi_sector = ioend->io_sector; 7864e4d5207SChristoph Hellwig ioend->io_private = oz; 7874e4d5207SChristoph Hellwig atomic_inc(&oz->oz_ref); /* for xfs_zoned_end_io */ 7884e4d5207SChristoph Hellwig 7894e4d5207SChristoph Hellwig if (is_seq) { 7904e4d5207SChristoph Hellwig ioend->io_bio.bi_opf &= ~REQ_OP_WRITE; 7914e4d5207SChristoph Hellwig ioend->io_bio.bi_opf |= REQ_OP_ZONE_APPEND; 7924e4d5207SChristoph Hellwig } else { 7934e4d5207SChristoph Hellwig xfs_mark_rtg_boundary(ioend); 7944e4d5207SChristoph Hellwig } 7954e4d5207SChristoph Hellwig 7964e4d5207SChristoph Hellwig submit_bio(&ioend->io_bio); 7974e4d5207SChristoph Hellwig } 7984e4d5207SChristoph Hellwig 7994e4d5207SChristoph Hellwig void 8004e4d5207SChristoph Hellwig xfs_zone_alloc_and_submit( 8014e4d5207SChristoph Hellwig struct iomap_ioend *ioend, 8024e4d5207SChristoph Hellwig struct xfs_open_zone **oz) 8034e4d5207SChristoph Hellwig { 8044e4d5207SChristoph Hellwig struct xfs_inode *ip = XFS_I(ioend->io_inode); 8054e4d5207SChristoph Hellwig struct xfs_mount *mp = ip->i_mount; 80664d03611SHans Holmberg enum rw_hint write_hint = xfs_inode_write_hint(ip); 8074e4d5207SChristoph Hellwig bool pack_tight = xfs_zoned_pack_tight(ip); 8084e4d5207SChristoph Hellwig unsigned int alloc_len; 8094e4d5207SChristoph Hellwig struct iomap_ioend *split; 8104e4d5207SChristoph Hellwig bool is_seq; 8114e4d5207SChristoph Hellwig 8124e4d5207SChristoph Hellwig if (xfs_is_shutdown(mp)) 8134e4d5207SChristoph Hellwig goto out_error; 8144e4d5207SChristoph Hellwig 8154e4d5207SChristoph Hellwig /* 8164e4d5207SChristoph Hellwig * If we don't have a cached zone in this write context, see if the 8174e4d5207SChristoph Hellwig * last extent before the one we are writing to points to an active 8184e4d5207SChristoph Hellwig * zone. If so, just continue writing to it. 8194e4d5207SChristoph Hellwig */ 8204e4d5207SChristoph Hellwig if (!*oz && ioend->io_offset) 8214e4d5207SChristoph Hellwig *oz = xfs_last_used_zone(ioend); 8224e4d5207SChristoph Hellwig if (!*oz) { 8234e4d5207SChristoph Hellwig select_zone: 82464d03611SHans Holmberg *oz = xfs_select_zone(mp, write_hint, pack_tight); 8254e4d5207SChristoph Hellwig if (!*oz) 8264e4d5207SChristoph Hellwig goto out_error; 8274e4d5207SChristoph Hellwig } 8284e4d5207SChristoph Hellwig 8294e4d5207SChristoph Hellwig alloc_len = xfs_zone_alloc_blocks(*oz, XFS_B_TO_FSB(mp, ioend->io_size), 8304e4d5207SChristoph Hellwig &ioend->io_sector, &is_seq); 8314e4d5207SChristoph Hellwig if (!alloc_len) { 8324e4d5207SChristoph Hellwig xfs_open_zone_put(*oz); 8334e4d5207SChristoph Hellwig goto select_zone; 8344e4d5207SChristoph Hellwig } 8354e4d5207SChristoph Hellwig 8364e4d5207SChristoph Hellwig while ((split = iomap_split_ioend(ioend, alloc_len, is_seq))) { 8374e4d5207SChristoph Hellwig if (IS_ERR(split)) 8384e4d5207SChristoph Hellwig goto out_split_error; 8394e4d5207SChristoph Hellwig alloc_len -= split->io_bio.bi_iter.bi_size; 8404e4d5207SChristoph Hellwig xfs_submit_zoned_bio(split, *oz, is_seq); 8414e4d5207SChristoph Hellwig if (!alloc_len) { 8424e4d5207SChristoph Hellwig xfs_open_zone_put(*oz); 8434e4d5207SChristoph Hellwig goto select_zone; 8444e4d5207SChristoph Hellwig } 8454e4d5207SChristoph Hellwig } 8464e4d5207SChristoph Hellwig 8474e4d5207SChristoph Hellwig xfs_submit_zoned_bio(ioend, *oz, is_seq); 8484e4d5207SChristoph Hellwig return; 8494e4d5207SChristoph Hellwig 8504e4d5207SChristoph Hellwig out_split_error: 8514e4d5207SChristoph Hellwig ioend->io_bio.bi_status = errno_to_blk_status(PTR_ERR(split)); 8524e4d5207SChristoph Hellwig out_error: 8534e4d5207SChristoph Hellwig bio_io_error(&ioend->io_bio); 8544e4d5207SChristoph Hellwig } 8554e4d5207SChristoph Hellwig 856f56f73ebSDarrick J. Wong /* 857f56f73ebSDarrick J. Wong * Wake up all threads waiting for a zoned space allocation when the file system 858f56f73ebSDarrick J. Wong * is shut down. 859f56f73ebSDarrick J. Wong */ 8604e4d5207SChristoph Hellwig void 8614e4d5207SChristoph Hellwig xfs_zoned_wake_all( 8624e4d5207SChristoph Hellwig struct xfs_mount *mp) 8634e4d5207SChristoph Hellwig { 864f56f73ebSDarrick J. Wong /* 865f56f73ebSDarrick J. Wong * Don't wake up if there is no m_zone_info. This is complicated by the 866f56f73ebSDarrick J. Wong * fact that unmount can't atomically clear m_zone_info and thus we need 867f56f73ebSDarrick J. Wong * to check SB_ACTIVE for that, but mount temporarily enables SB_ACTIVE 868f56f73ebSDarrick J. Wong * during log recovery so we can't entirely rely on that either. 869f56f73ebSDarrick J. Wong */ 870f56f73ebSDarrick J. Wong if ((mp->m_super->s_flags & SB_ACTIVE) && mp->m_zone_info) 8714e4d5207SChristoph Hellwig wake_up_all(&mp->m_zone_info->zi_zone_wait); 8724e4d5207SChristoph Hellwig } 8734e4d5207SChristoph Hellwig 8744e4d5207SChristoph Hellwig /* 8754e4d5207SChristoph Hellwig * Check if @rgbno in @rgb is a potentially valid block. It might still be 8764e4d5207SChristoph Hellwig * unused, but that information is only found in the rmap. 8774e4d5207SChristoph Hellwig */ 8784e4d5207SChristoph Hellwig bool 8794e4d5207SChristoph Hellwig xfs_zone_rgbno_is_valid( 8804e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg, 8814e4d5207SChristoph Hellwig xfs_rgnumber_t rgbno) 8824e4d5207SChristoph Hellwig { 8834e4d5207SChristoph Hellwig lockdep_assert_held(&rtg_rmap(rtg)->i_lock); 8844e4d5207SChristoph Hellwig 8854e4d5207SChristoph Hellwig if (rtg->rtg_open_zone) 8864e4d5207SChristoph Hellwig return rgbno < rtg->rtg_open_zone->oz_write_pointer; 8874e4d5207SChristoph Hellwig return !xa_get_mark(&rtg_mount(rtg)->m_groups[XG_TYPE_RTG].xa, 8884e4d5207SChristoph Hellwig rtg_rgno(rtg), XFS_RTG_FREE); 8894e4d5207SChristoph Hellwig } 8904e4d5207SChristoph Hellwig 8914e4d5207SChristoph Hellwig static void 8924e4d5207SChristoph Hellwig xfs_free_open_zones( 8934e4d5207SChristoph Hellwig struct xfs_zone_info *zi) 8944e4d5207SChristoph Hellwig { 8954e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 8964e4d5207SChristoph Hellwig 8974e4d5207SChristoph Hellwig spin_lock(&zi->zi_open_zones_lock); 8984e4d5207SChristoph Hellwig while ((oz = list_first_entry_or_null(&zi->zi_open_zones, 8994e4d5207SChristoph Hellwig struct xfs_open_zone, oz_entry))) { 9004e4d5207SChristoph Hellwig list_del(&oz->oz_entry); 9014e4d5207SChristoph Hellwig xfs_open_zone_put(oz); 9024e4d5207SChristoph Hellwig } 9034e4d5207SChristoph Hellwig spin_unlock(&zi->zi_open_zones_lock); 9044e4d5207SChristoph Hellwig } 9054e4d5207SChristoph Hellwig 9064e4d5207SChristoph Hellwig struct xfs_init_zones { 9074e4d5207SChristoph Hellwig struct xfs_mount *mp; 9084e4d5207SChristoph Hellwig uint64_t available; 9094e4d5207SChristoph Hellwig uint64_t reclaimable; 9104e4d5207SChristoph Hellwig }; 9114e4d5207SChristoph Hellwig 9124e4d5207SChristoph Hellwig static int 9134e4d5207SChristoph Hellwig xfs_init_zone( 9144e4d5207SChristoph Hellwig struct xfs_init_zones *iz, 9154e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg, 9164e4d5207SChristoph Hellwig struct blk_zone *zone) 9174e4d5207SChristoph Hellwig { 9184e4d5207SChristoph Hellwig struct xfs_mount *mp = rtg_mount(rtg); 9194e4d5207SChristoph Hellwig struct xfs_zone_info *zi = mp->m_zone_info; 9204e4d5207SChristoph Hellwig uint64_t used = rtg_rmap(rtg)->i_used_blocks; 9214e4d5207SChristoph Hellwig xfs_rgblock_t write_pointer, highest_rgbno; 922080d01c4SChristoph Hellwig int error; 9234e4d5207SChristoph Hellwig 9244e4d5207SChristoph Hellwig if (zone && !xfs_zone_validate(zone, rtg, &write_pointer)) 9254e4d5207SChristoph Hellwig return -EFSCORRUPTED; 9264e4d5207SChristoph Hellwig 9274e4d5207SChristoph Hellwig /* 9284e4d5207SChristoph Hellwig * For sequential write required zones we retrieved the hardware write 9294e4d5207SChristoph Hellwig * pointer above. 9304e4d5207SChristoph Hellwig * 9314e4d5207SChristoph Hellwig * For conventional zones or conventional devices we don't have that 9324e4d5207SChristoph Hellwig * luxury. Instead query the rmap to find the highest recorded block 9334e4d5207SChristoph Hellwig * and set the write pointer to the block after that. In case of a 9344e4d5207SChristoph Hellwig * power loss this misses blocks where the data I/O has completed but 9354e4d5207SChristoph Hellwig * not recorded in the rmap yet, and it also rewrites blocks if the most 9364e4d5207SChristoph Hellwig * recently written ones got deleted again before unmount, but this is 9374e4d5207SChristoph Hellwig * the best we can do without hardware support. 9384e4d5207SChristoph Hellwig */ 9394e4d5207SChristoph Hellwig if (!zone || zone->cond == BLK_ZONE_COND_NOT_WP) { 9404e4d5207SChristoph Hellwig xfs_rtgroup_lock(rtg, XFS_RTGLOCK_RMAP); 9414e4d5207SChristoph Hellwig highest_rgbno = xfs_rtrmap_highest_rgbno(rtg); 9424e4d5207SChristoph Hellwig if (highest_rgbno == NULLRGBLOCK) 9434e4d5207SChristoph Hellwig write_pointer = 0; 9444e4d5207SChristoph Hellwig else 9454e4d5207SChristoph Hellwig write_pointer = highest_rgbno + 1; 9464e4d5207SChristoph Hellwig xfs_rtgroup_unlock(rtg, XFS_RTGLOCK_RMAP); 9474e4d5207SChristoph Hellwig } 9484e4d5207SChristoph Hellwig 949080d01c4SChristoph Hellwig /* 950080d01c4SChristoph Hellwig * If there are no used blocks, but the zone is not in empty state yet 951080d01c4SChristoph Hellwig * we lost power before the zoned reset. In that case finish the work 952080d01c4SChristoph Hellwig * here. 953080d01c4SChristoph Hellwig */ 954080d01c4SChristoph Hellwig if (write_pointer == rtg_blocks(rtg) && used == 0) { 955080d01c4SChristoph Hellwig error = xfs_zone_gc_reset_sync(rtg); 956080d01c4SChristoph Hellwig if (error) 957080d01c4SChristoph Hellwig return error; 958080d01c4SChristoph Hellwig write_pointer = 0; 959080d01c4SChristoph Hellwig } 960080d01c4SChristoph Hellwig 9614e4d5207SChristoph Hellwig if (write_pointer == 0) { 9624e4d5207SChristoph Hellwig /* zone is empty */ 9634e4d5207SChristoph Hellwig atomic_inc(&zi->zi_nr_free_zones); 9644e4d5207SChristoph Hellwig xfs_group_set_mark(&rtg->rtg_group, XFS_RTG_FREE); 9654e4d5207SChristoph Hellwig iz->available += rtg_blocks(rtg); 9664e4d5207SChristoph Hellwig } else if (write_pointer < rtg_blocks(rtg)) { 9674e4d5207SChristoph Hellwig /* zone is open */ 9684e4d5207SChristoph Hellwig struct xfs_open_zone *oz; 9694e4d5207SChristoph Hellwig 9704e4d5207SChristoph Hellwig atomic_inc(&rtg_group(rtg)->xg_active_ref); 97164d03611SHans Holmberg oz = xfs_init_open_zone(rtg, write_pointer, WRITE_LIFE_NOT_SET, 97264d03611SHans Holmberg false); 9734e4d5207SChristoph Hellwig list_add_tail(&oz->oz_entry, &zi->zi_open_zones); 9744e4d5207SChristoph Hellwig zi->zi_nr_open_zones++; 9754e4d5207SChristoph Hellwig 9764e4d5207SChristoph Hellwig iz->available += (rtg_blocks(rtg) - write_pointer); 9774e4d5207SChristoph Hellwig iz->reclaimable += write_pointer - used; 9784e4d5207SChristoph Hellwig } else if (used < rtg_blocks(rtg)) { 9794e4d5207SChristoph Hellwig /* zone fully written, but has freed blocks */ 980080d01c4SChristoph Hellwig xfs_zone_account_reclaimable(rtg, rtg_blocks(rtg) - used); 9814e4d5207SChristoph Hellwig iz->reclaimable += (rtg_blocks(rtg) - used); 9824e4d5207SChristoph Hellwig } 9834e4d5207SChristoph Hellwig 9844e4d5207SChristoph Hellwig return 0; 9854e4d5207SChristoph Hellwig } 9864e4d5207SChristoph Hellwig 9874e4d5207SChristoph Hellwig static int 9884e4d5207SChristoph Hellwig xfs_get_zone_info_cb( 9894e4d5207SChristoph Hellwig struct blk_zone *zone, 9904e4d5207SChristoph Hellwig unsigned int idx, 9914e4d5207SChristoph Hellwig void *data) 9924e4d5207SChristoph Hellwig { 9934e4d5207SChristoph Hellwig struct xfs_init_zones *iz = data; 9944e4d5207SChristoph Hellwig struct xfs_mount *mp = iz->mp; 9954e4d5207SChristoph Hellwig xfs_fsblock_t zsbno = xfs_daddr_to_rtb(mp, zone->start); 9964e4d5207SChristoph Hellwig xfs_rgnumber_t rgno; 9974e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg; 9984e4d5207SChristoph Hellwig int error; 9994e4d5207SChristoph Hellwig 10004e4d5207SChristoph Hellwig if (xfs_rtb_to_rgbno(mp, zsbno) != 0) { 10014e4d5207SChristoph Hellwig xfs_warn(mp, "mismatched zone start 0x%llx.", zsbno); 10024e4d5207SChristoph Hellwig return -EFSCORRUPTED; 10034e4d5207SChristoph Hellwig } 10044e4d5207SChristoph Hellwig 10054e4d5207SChristoph Hellwig rgno = xfs_rtb_to_rgno(mp, zsbno); 10064e4d5207SChristoph Hellwig rtg = xfs_rtgroup_grab(mp, rgno); 10074e4d5207SChristoph Hellwig if (!rtg) { 10084e4d5207SChristoph Hellwig xfs_warn(mp, "realtime group not found for zone %u.", rgno); 10094e4d5207SChristoph Hellwig return -EFSCORRUPTED; 10104e4d5207SChristoph Hellwig } 10114e4d5207SChristoph Hellwig error = xfs_init_zone(iz, rtg, zone); 10124e4d5207SChristoph Hellwig xfs_rtgroup_rele(rtg); 10134e4d5207SChristoph Hellwig return error; 10144e4d5207SChristoph Hellwig } 10154e4d5207SChristoph Hellwig 10164e4d5207SChristoph Hellwig /* 10174e4d5207SChristoph Hellwig * Calculate the max open zone limit based on the of number of 10184e4d5207SChristoph Hellwig * backing zones available 10194e4d5207SChristoph Hellwig */ 10204e4d5207SChristoph Hellwig static inline uint32_t 10214e4d5207SChristoph Hellwig xfs_max_open_zones( 10224e4d5207SChristoph Hellwig struct xfs_mount *mp) 10234e4d5207SChristoph Hellwig { 10244e4d5207SChristoph Hellwig unsigned int max_open, max_open_data_zones; 10254e4d5207SChristoph Hellwig /* 10264e4d5207SChristoph Hellwig * We need two zones for every open data zone, 10274e4d5207SChristoph Hellwig * one in reserve as we don't reclaim open zones. One data zone 10284e4d5207SChristoph Hellwig * and its spare is included in XFS_MIN_ZONES. 10294e4d5207SChristoph Hellwig */ 10304e4d5207SChristoph Hellwig max_open_data_zones = (mp->m_sb.sb_rgcount - XFS_MIN_ZONES) / 2 + 1; 10314e4d5207SChristoph Hellwig max_open = max_open_data_zones + XFS_OPEN_GC_ZONES; 10324e4d5207SChristoph Hellwig 10334e4d5207SChristoph Hellwig /* 10344e4d5207SChristoph Hellwig * Cap the max open limit to 1/4 of available space 10354e4d5207SChristoph Hellwig */ 10364e4d5207SChristoph Hellwig max_open = min(max_open, mp->m_sb.sb_rgcount / 4); 10374e4d5207SChristoph Hellwig 10384e4d5207SChristoph Hellwig return max(XFS_MIN_OPEN_ZONES, max_open); 10394e4d5207SChristoph Hellwig } 10404e4d5207SChristoph Hellwig 10414e4d5207SChristoph Hellwig /* 10424e4d5207SChristoph Hellwig * Normally we use the open zone limit that the device reports. If there is 10434e4d5207SChristoph Hellwig * none let the user pick one from the command line. 10444e4d5207SChristoph Hellwig * 10454e4d5207SChristoph Hellwig * If the device doesn't report an open zone limit and there is no override, 10464e4d5207SChristoph Hellwig * allow to hold about a quarter of the zones open. In theory we could allow 10474e4d5207SChristoph Hellwig * all to be open, but at that point we run into GC deadlocks because we can't 10484e4d5207SChristoph Hellwig * reclaim open zones. 10494e4d5207SChristoph Hellwig * 10504e4d5207SChristoph Hellwig * When used on conventional SSDs a lower open limit is advisable as we'll 10514e4d5207SChristoph Hellwig * otherwise overwhelm the FTL just as much as a conventional block allocator. 10524e4d5207SChristoph Hellwig * 10534e4d5207SChristoph Hellwig * Note: To debug the open zone management code, force max_open to 1 here. 10544e4d5207SChristoph Hellwig */ 10554e4d5207SChristoph Hellwig static int 10564e4d5207SChristoph Hellwig xfs_calc_open_zones( 10574e4d5207SChristoph Hellwig struct xfs_mount *mp) 10584e4d5207SChristoph Hellwig { 10594e4d5207SChristoph Hellwig struct block_device *bdev = mp->m_rtdev_targp->bt_bdev; 10604e4d5207SChristoph Hellwig unsigned int bdev_open_zones = bdev_max_open_zones(bdev); 10614e4d5207SChristoph Hellwig 10624e4d5207SChristoph Hellwig if (!mp->m_max_open_zones) { 10634e4d5207SChristoph Hellwig if (bdev_open_zones) 10644e4d5207SChristoph Hellwig mp->m_max_open_zones = bdev_open_zones; 10654e4d5207SChristoph Hellwig else 10664e4d5207SChristoph Hellwig mp->m_max_open_zones = xfs_max_open_zones(mp); 10674e4d5207SChristoph Hellwig } 10684e4d5207SChristoph Hellwig 10694e4d5207SChristoph Hellwig if (mp->m_max_open_zones < XFS_MIN_OPEN_ZONES) { 10704e4d5207SChristoph Hellwig xfs_notice(mp, "need at least %u open zones.", 10714e4d5207SChristoph Hellwig XFS_MIN_OPEN_ZONES); 10724e4d5207SChristoph Hellwig return -EIO; 10734e4d5207SChristoph Hellwig } 10744e4d5207SChristoph Hellwig 10754e4d5207SChristoph Hellwig if (bdev_open_zones && bdev_open_zones < mp->m_max_open_zones) { 10764e4d5207SChristoph Hellwig mp->m_max_open_zones = bdev_open_zones; 10774e4d5207SChristoph Hellwig xfs_info(mp, "limiting open zones to %u due to hardware limit.\n", 10784e4d5207SChristoph Hellwig bdev_open_zones); 10794e4d5207SChristoph Hellwig } 10804e4d5207SChristoph Hellwig 10814e4d5207SChristoph Hellwig if (mp->m_max_open_zones > xfs_max_open_zones(mp)) { 10824e4d5207SChristoph Hellwig mp->m_max_open_zones = xfs_max_open_zones(mp); 10834e4d5207SChristoph Hellwig xfs_info(mp, 10844e4d5207SChristoph Hellwig "limiting open zones to %u due to total zone count (%u)", 10854e4d5207SChristoph Hellwig mp->m_max_open_zones, mp->m_sb.sb_rgcount); 10864e4d5207SChristoph Hellwig } 10874e4d5207SChristoph Hellwig 10884e4d5207SChristoph Hellwig return 0; 10894e4d5207SChristoph Hellwig } 10904e4d5207SChristoph Hellwig 1091080d01c4SChristoph Hellwig static unsigned long * 1092080d01c4SChristoph Hellwig xfs_alloc_bucket_bitmap( 1093080d01c4SChristoph Hellwig struct xfs_mount *mp) 1094080d01c4SChristoph Hellwig { 1095080d01c4SChristoph Hellwig return kvmalloc_array(BITS_TO_LONGS(mp->m_sb.sb_rgcount), 1096080d01c4SChristoph Hellwig sizeof(unsigned long), GFP_KERNEL | __GFP_ZERO); 1097080d01c4SChristoph Hellwig } 1098080d01c4SChristoph Hellwig 10994e4d5207SChristoph Hellwig static struct xfs_zone_info * 11004e4d5207SChristoph Hellwig xfs_alloc_zone_info( 11014e4d5207SChristoph Hellwig struct xfs_mount *mp) 11024e4d5207SChristoph Hellwig { 11034e4d5207SChristoph Hellwig struct xfs_zone_info *zi; 1104080d01c4SChristoph Hellwig int i; 11054e4d5207SChristoph Hellwig 11064e4d5207SChristoph Hellwig zi = kzalloc(sizeof(*zi), GFP_KERNEL); 11074e4d5207SChristoph Hellwig if (!zi) 11084e4d5207SChristoph Hellwig return NULL; 11094e4d5207SChristoph Hellwig INIT_LIST_HEAD(&zi->zi_open_zones); 11104e4d5207SChristoph Hellwig INIT_LIST_HEAD(&zi->zi_reclaim_reservations); 11114e4d5207SChristoph Hellwig spin_lock_init(&zi->zi_reset_list_lock); 11124e4d5207SChristoph Hellwig spin_lock_init(&zi->zi_open_zones_lock); 11134e4d5207SChristoph Hellwig spin_lock_init(&zi->zi_reservation_lock); 11144e4d5207SChristoph Hellwig init_waitqueue_head(&zi->zi_zone_wait); 1115080d01c4SChristoph Hellwig spin_lock_init(&zi->zi_used_buckets_lock); 1116080d01c4SChristoph Hellwig for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) { 1117080d01c4SChristoph Hellwig zi->zi_used_bucket_bitmap[i] = xfs_alloc_bucket_bitmap(mp); 1118080d01c4SChristoph Hellwig if (!zi->zi_used_bucket_bitmap[i]) 1119080d01c4SChristoph Hellwig goto out_free_bitmaps; 1120080d01c4SChristoph Hellwig } 11214e4d5207SChristoph Hellwig return zi; 1122080d01c4SChristoph Hellwig 1123080d01c4SChristoph Hellwig out_free_bitmaps: 1124080d01c4SChristoph Hellwig while (--i > 0) 1125080d01c4SChristoph Hellwig kvfree(zi->zi_used_bucket_bitmap[i]); 1126080d01c4SChristoph Hellwig kfree(zi); 1127080d01c4SChristoph Hellwig return NULL; 11284e4d5207SChristoph Hellwig } 11294e4d5207SChristoph Hellwig 11304e4d5207SChristoph Hellwig static void 11314e4d5207SChristoph Hellwig xfs_free_zone_info( 11324e4d5207SChristoph Hellwig struct xfs_zone_info *zi) 11334e4d5207SChristoph Hellwig { 1134080d01c4SChristoph Hellwig int i; 1135080d01c4SChristoph Hellwig 11364e4d5207SChristoph Hellwig xfs_free_open_zones(zi); 1137080d01c4SChristoph Hellwig for (i = 0; i < XFS_ZONE_USED_BUCKETS; i++) 1138080d01c4SChristoph Hellwig kvfree(zi->zi_used_bucket_bitmap[i]); 11394e4d5207SChristoph Hellwig kfree(zi); 11404e4d5207SChristoph Hellwig } 11414e4d5207SChristoph Hellwig 11424e4d5207SChristoph Hellwig int 11434e4d5207SChristoph Hellwig xfs_mount_zones( 11444e4d5207SChristoph Hellwig struct xfs_mount *mp) 11454e4d5207SChristoph Hellwig { 11464e4d5207SChristoph Hellwig struct xfs_init_zones iz = { 11474e4d5207SChristoph Hellwig .mp = mp, 11484e4d5207SChristoph Hellwig }; 11494e4d5207SChristoph Hellwig struct xfs_buftarg *bt = mp->m_rtdev_targp; 11504e4d5207SChristoph Hellwig int error; 11514e4d5207SChristoph Hellwig 11524e4d5207SChristoph Hellwig if (!bt) { 11534e4d5207SChristoph Hellwig xfs_notice(mp, "RT device missing."); 11544e4d5207SChristoph Hellwig return -EINVAL; 11554e4d5207SChristoph Hellwig } 11564e4d5207SChristoph Hellwig 11574e4d5207SChristoph Hellwig if (!xfs_has_rtgroups(mp) || !xfs_has_rmapbt(mp)) { 11584e4d5207SChristoph Hellwig xfs_notice(mp, "invalid flag combination."); 11594e4d5207SChristoph Hellwig return -EFSCORRUPTED; 11604e4d5207SChristoph Hellwig } 11614e4d5207SChristoph Hellwig if (mp->m_sb.sb_rextsize != 1) { 11624e4d5207SChristoph Hellwig xfs_notice(mp, "zoned file systems do not support rextsize."); 11634e4d5207SChristoph Hellwig return -EFSCORRUPTED; 11644e4d5207SChristoph Hellwig } 11654e4d5207SChristoph Hellwig if (mp->m_sb.sb_rgcount < XFS_MIN_ZONES) { 11664e4d5207SChristoph Hellwig xfs_notice(mp, 11674e4d5207SChristoph Hellwig "zoned file systems need to have at least %u zones.", XFS_MIN_ZONES); 11684e4d5207SChristoph Hellwig return -EFSCORRUPTED; 11694e4d5207SChristoph Hellwig } 11704e4d5207SChristoph Hellwig 11714e4d5207SChristoph Hellwig error = xfs_calc_open_zones(mp); 11724e4d5207SChristoph Hellwig if (error) 11734e4d5207SChristoph Hellwig return error; 11744e4d5207SChristoph Hellwig 11754e4d5207SChristoph Hellwig mp->m_zone_info = xfs_alloc_zone_info(mp); 11764e4d5207SChristoph Hellwig if (!mp->m_zone_info) 11774e4d5207SChristoph Hellwig return -ENOMEM; 11784e4d5207SChristoph Hellwig 11794e4d5207SChristoph Hellwig xfs_info(mp, "%u zones of %u blocks size (%u max open)", 11804e4d5207SChristoph Hellwig mp->m_sb.sb_rgcount, mp->m_groups[XG_TYPE_RTG].blocks, 11814e4d5207SChristoph Hellwig mp->m_max_open_zones); 11820bb21930SChristoph Hellwig trace_xfs_zones_mount(mp); 11834e4d5207SChristoph Hellwig 11844e4d5207SChristoph Hellwig if (bdev_is_zoned(bt->bt_bdev)) { 11854e4d5207SChristoph Hellwig error = blkdev_report_zones(bt->bt_bdev, 11864e4d5207SChristoph Hellwig XFS_FSB_TO_BB(mp, mp->m_sb.sb_rtstart), 11874e4d5207SChristoph Hellwig mp->m_sb.sb_rgcount, xfs_get_zone_info_cb, &iz); 11884e4d5207SChristoph Hellwig if (error < 0) 11894e4d5207SChristoph Hellwig goto out_free_zone_info; 11904e4d5207SChristoph Hellwig } else { 11914e4d5207SChristoph Hellwig struct xfs_rtgroup *rtg = NULL; 11924e4d5207SChristoph Hellwig 11934e4d5207SChristoph Hellwig while ((rtg = xfs_rtgroup_next(mp, rtg))) { 11944e4d5207SChristoph Hellwig error = xfs_init_zone(&iz, rtg, NULL); 11954e4d5207SChristoph Hellwig if (error) 11964e4d5207SChristoph Hellwig goto out_free_zone_info; 11974e4d5207SChristoph Hellwig } 11984e4d5207SChristoph Hellwig } 11994e4d5207SChristoph Hellwig 12000bb21930SChristoph Hellwig xfs_set_freecounter(mp, XC_FREE_RTAVAILABLE, iz.available); 12014e4d5207SChristoph Hellwig xfs_set_freecounter(mp, XC_FREE_RTEXTENTS, 12024e4d5207SChristoph Hellwig iz.available + iz.reclaimable); 1203080d01c4SChristoph Hellwig 1204*845abeb1SHans Holmberg /* 1205*845abeb1SHans Holmberg * The user may configure GC to free up a percentage of unused blocks. 1206*845abeb1SHans Holmberg * By default this is 0. GC will always trigger at the minimum level 1207*845abeb1SHans Holmberg * for keeping max_open_zones available for data placement. 1208*845abeb1SHans Holmberg */ 1209*845abeb1SHans Holmberg mp->m_zonegc_low_space = 0; 1210*845abeb1SHans Holmberg 1211080d01c4SChristoph Hellwig error = xfs_zone_gc_mount(mp); 1212080d01c4SChristoph Hellwig if (error) 1213080d01c4SChristoph Hellwig goto out_free_zone_info; 12144e4d5207SChristoph Hellwig return 0; 12154e4d5207SChristoph Hellwig 12164e4d5207SChristoph Hellwig out_free_zone_info: 12174e4d5207SChristoph Hellwig xfs_free_zone_info(mp->m_zone_info); 12184e4d5207SChristoph Hellwig return error; 12194e4d5207SChristoph Hellwig } 12204e4d5207SChristoph Hellwig 12214e4d5207SChristoph Hellwig void 12224e4d5207SChristoph Hellwig xfs_unmount_zones( 12234e4d5207SChristoph Hellwig struct xfs_mount *mp) 12244e4d5207SChristoph Hellwig { 1225080d01c4SChristoph Hellwig xfs_zone_gc_unmount(mp); 12264e4d5207SChristoph Hellwig xfs_free_zone_info(mp->m_zone_info); 12274e4d5207SChristoph Hellwig } 1228