Lines matching full:chunk

72 * Chunk that is read and written for each GC operation.
74 * Note that for writes to actual zoned devices, the chunk can be split when
97 * GC chunk is operating on.
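
The two comment fragments above come from the description of struct xfs_gc_bio (the sentence at line 74 continues on an unmatched line, and line 97 is the tail of a per-field comment cut off by the match filter). For orientation, here is a sketch of the structure assembled only from the members that the matches below actually dereference; field types, field order, and the names of the scratch and state types are assumptions, not a verbatim copy of the source.

    /* Sketch only: member set taken from the matches below, everything else assumed. */
    struct xfs_gc_bio {
            struct xfs_zone_gc_data *data;          /* per-mount GC state (chunk->data) */

            struct list_head        entry;          /* linkage on data->reading/writing/resetting */
            enum xfs_gc_bio_state   state;          /* XFS_GC_BIO_NEW -> XFS_GC_BIO_DONE */

            struct xfs_inode        *ip;            /* inode owning the range being moved */
            loff_t                  offset;         /* byte offset of that range in the file */
            unsigned int            len;            /* byte length of that range */

            xfs_rtblock_t           old_startblock; /* source RT block being evacuated */
            xfs_daddr_t             new_daddr;      /* destination daddr in the GC target zone */
            struct xfs_zone_scratch *scratch;       /* bounce-buffer slot (->folio/->offset/->freed) */
            bool                    is_seq;         /* writing to a sequential-write-required zone? */
            struct xfs_open_zone    *oz;            /* open zone the data is rewritten into */

            struct bio              bio;            /* embedded bio, recovered via container_of() */
    };
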
618 struct xfs_gc_bio *chunk = in xfs_zone_gc_end_io() local
620 struct xfs_zone_gc_data *data = chunk->data; in xfs_zone_gc_end_io()
622 WRITE_ONCE(chunk->state, XFS_GC_BIO_DONE); in xfs_zone_gc_end_io()
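
The three matches above are the shared bio completion handler: the embedded bio is turned back into its xfs_gc_bio with container_of(), and flipping the state is what the polling loops at the end of this listing wait for. A minimal hedged sketch; the data local at line 620 is presumably used to wake the GC worker, which is not shown in the matches and is omitted here.

    static void
    xfs_zone_gc_end_io(
            struct bio              *bio)
    {
            struct xfs_gc_bio       *chunk =
                    container_of(bio, struct xfs_gc_bio, bio);

            /* Publish completion; xfs_zone_gc_handle_work() polls for this state. */
            WRITE_ONCE(chunk->state, XFS_GC_BIO_DONE);
            /* assumed: the GC worker behind chunk->data is woken here */
    }
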
681 struct xfs_gc_bio *chunk; in xfs_zone_gc_start_chunk() local
701 chunk = container_of(bio, struct xfs_gc_bio, bio); in xfs_zone_gc_start_chunk()
702 chunk->ip = ip; in xfs_zone_gc_start_chunk()
703 chunk->offset = XFS_FSB_TO_B(mp, irec.rm_offset); in xfs_zone_gc_start_chunk()
704 chunk->len = XFS_FSB_TO_B(mp, irec.rm_blockcount); in xfs_zone_gc_start_chunk()
705 chunk->old_startblock = in xfs_zone_gc_start_chunk()
707 chunk->new_daddr = daddr; in xfs_zone_gc_start_chunk()
708 chunk->is_seq = is_seq; in xfs_zone_gc_start_chunk()
709 chunk->scratch = &data->scratch[data->scratch_idx]; in xfs_zone_gc_start_chunk()
710 chunk->data = data; in xfs_zone_gc_start_chunk()
711 chunk->oz = oz; in xfs_zone_gc_start_chunk()
713 bio->bi_iter.bi_sector = xfs_rtb_to_daddr(mp, chunk->old_startblock); in xfs_zone_gc_start_chunk()
715 bio_add_folio_nofail(bio, chunk->scratch->folio, chunk->len, in xfs_zone_gc_start_chunk()
716 chunk->scratch->offset); in xfs_zone_gc_start_chunk()
717 chunk->scratch->offset += chunk->len; in xfs_zone_gc_start_chunk()
718 if (chunk->scratch->offset == XFS_GC_CHUNK_SIZE) { in xfs_zone_gc_start_chunk()
722 WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); in xfs_zone_gc_start_chunk()
723 list_add_tail(&chunk->entry, &data->reading); in xfs_zone_gc_start_chunk()
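
The scratch lines here and the ->freed accounting in xfs_zone_gc_finish_chunk() further down (file lines 849-852) belong to one scheme: GC reads land in a scratch folio, ->offset tracks how far the current slot has been filled, and ->freed tracks how much of that fill has since been written out, so the slot can be recycled once the two meet. A hedged sketch of that bookkeeping; the slot-rotation line is an assumption, the rest is lifted from the matches.

    /* Fill side, xfs_zone_gc_start_chunk(): carve the next chunk out of the slot. */
    chunk->scratch = &data->scratch[data->scratch_idx];
    bio_add_folio_nofail(bio, chunk->scratch->folio, chunk->len,
                    chunk->scratch->offset);
    chunk->scratch->offset += chunk->len;
    if (chunk->scratch->offset == XFS_GC_CHUNK_SIZE)
            data->scratch_idx ^= 1;         /* assumed: rotate to the other scratch slot */

    /* Drain side, xfs_zone_gc_finish_chunk(): recycle the slot once fully written back. */
    chunk->scratch->freed += chunk->len;
    if (chunk->scratch->freed == chunk->scratch->offset) {
            chunk->scratch->offset = 0;
            chunk->scratch->freed = 0;
    }
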
732 struct xfs_gc_bio *chunk) in xfs_zone_gc_free_chunk() argument
734 list_del(&chunk->entry); in xfs_zone_gc_free_chunk()
735 xfs_open_zone_put(chunk->oz); in xfs_zone_gc_free_chunk()
736 xfs_irele(chunk->ip); in xfs_zone_gc_free_chunk()
737 bio_put(&chunk->bio); in xfs_zone_gc_free_chunk()
743 struct xfs_gc_bio *chunk) in xfs_zone_gc_submit_write() argument
745 if (chunk->is_seq) { in xfs_zone_gc_submit_write()
746 chunk->bio.bi_opf &= ~REQ_OP_WRITE; in xfs_zone_gc_submit_write()
747 chunk->bio.bi_opf |= REQ_OP_ZONE_APPEND; in xfs_zone_gc_submit_write()
749 chunk->bio.bi_iter.bi_sector = chunk->new_daddr; in xfs_zone_gc_submit_write()
750 chunk->bio.bi_end_io = xfs_zone_gc_end_io; in xfs_zone_gc_submit_write()
751 submit_bio(&chunk->bio); in xfs_zone_gc_submit_write()
757 struct xfs_gc_bio *chunk) in xfs_zone_gc_split_write() argument
760 &bdev_get_queue(chunk->bio.bi_bdev)->limits; in xfs_zone_gc_split_write()
767 if (!chunk->is_seq) in xfs_zone_gc_split_write()
770 split_sectors = bio_split_rw_at(&chunk->bio, lim, &nsegs, in xfs_zone_gc_split_write()
775 /* ensure the split chunk is still block size aligned */ in xfs_zone_gc_split_write()
780 split = bio_split(&chunk->bio, split_sectors, GFP_NOFS, &data->bio_set); in xfs_zone_gc_split_write()
783 ihold(VFS_I(chunk->ip)); in xfs_zone_gc_split_write()
784 split_chunk->ip = chunk->ip; in xfs_zone_gc_split_write()
785 split_chunk->is_seq = chunk->is_seq; in xfs_zone_gc_split_write()
786 split_chunk->scratch = chunk->scratch; in xfs_zone_gc_split_write()
787 split_chunk->offset = chunk->offset; in xfs_zone_gc_split_write()
789 split_chunk->old_startblock = chunk->old_startblock; in xfs_zone_gc_split_write()
790 split_chunk->new_daddr = chunk->new_daddr; in xfs_zone_gc_split_write()
791 split_chunk->oz = chunk->oz; in xfs_zone_gc_split_write()
792 atomic_inc(&chunk->oz->oz_ref); in xfs_zone_gc_split_write()
794 chunk->offset += split_len; in xfs_zone_gc_split_write()
795 chunk->len -= split_len; in xfs_zone_gc_split_write()
796 chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len); in xfs_zone_gc_split_write()
798 /* add right before the original chunk */ in xfs_zone_gc_split_write()
800 list_add_tail(&split_chunk->entry, &chunk->entry); in xfs_zone_gc_split_write()
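
Read line by line, the xfs_zone_gc_split_write() matches are hard to follow; the point of the function is that a zone-append write must not exceed the device's maximum zone-append size, so an oversized GC write is split at a filesystem-block-aligned boundary and the front piece gets its own xfs_gc_bio with its own inode and open-zone references. A hedged reconstruction of the core; the limit expression, the alignment helper, and the field copies not shown in the matches are assumptions.

    if (!chunk->is_seq)
            return NULL;                    /* only zone appends are size-capped here */

    split_sectors = bio_split_rw_at(&chunk->bio, lim, &nsegs,
                    lim->max_zone_append_sectors << SECTOR_SHIFT);  /* limit assumed */
    if (split_sectors <= 0)
            return NULL;                    /* fits in a single zone append */

    /* ensure the split chunk is still block size aligned */
    split_sectors = rounddown(split_sectors,
                    data->mp->m_sb.sb_blocksize >> SECTOR_SHIFT);   /* helper assumed */
    split_len = split_sectors << SECTOR_SHIFT;

    split = bio_split(&chunk->bio, split_sectors, GFP_NOFS, &data->bio_set);
    split_chunk = container_of(split, struct xfs_gc_bio, bio);

    ihold(VFS_I(chunk->ip));                /* the split piece pins the inode ... */
    atomic_inc(&chunk->oz->oz_ref);         /* ... and the open zone on its own */
    /* copy ip, is_seq, scratch, offset, old_startblock, new_daddr, oz as in the
       matches above, plus split_chunk->len = split_len (assumed) */

    /* The original chunk now covers only the tail of the byte range. */
    chunk->offset += split_len;
    chunk->len -= split_len;
    chunk->old_startblock += XFS_B_TO_FSB(data->mp, split_len);

    /*
     * "add right before the original chunk": list_add_tail() on a non-head
     * entry inserts in front of it, so completions stay in file order.
     */
    list_add_tail(&split_chunk->entry, &chunk->entry);
    return split_chunk;
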
806 struct xfs_gc_bio *chunk) in xfs_zone_gc_write_chunk() argument
808 struct xfs_zone_gc_data *data = chunk->data; in xfs_zone_gc_write_chunk()
809 struct xfs_mount *mp = chunk->ip->i_mount; in xfs_zone_gc_write_chunk()
811 bvec_phys(bio_first_bvec_all(&chunk->bio)); in xfs_zone_gc_write_chunk()
814 if (chunk->bio.bi_status) in xfs_zone_gc_write_chunk()
817 xfs_zone_gc_free_chunk(chunk); in xfs_zone_gc_write_chunk()
821 WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); in xfs_zone_gc_write_chunk()
822 list_move_tail(&chunk->entry, &data->writing); in xfs_zone_gc_write_chunk()
824 bio_reset(&chunk->bio, mp->m_rtdev_targp->bt_bdev, REQ_OP_WRITE); in xfs_zone_gc_write_chunk()
825 bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len, in xfs_zone_gc_write_chunk()
826 offset_in_folio(chunk->scratch->folio, bvec_paddr)); in xfs_zone_gc_write_chunk()
828 while ((split_chunk = xfs_zone_gc_split_write(data, chunk))) in xfs_zone_gc_write_chunk()
830 xfs_zone_gc_submit_write(data, chunk); in xfs_zone_gc_write_chunk()
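
The non-obvious part of xfs_zone_gc_write_chunk() is that the read bio is reused for the write: the data already sits in the scratch folio, so the physical address of the first read bvec tells the function where inside the folio to point the write. A hedged sketch of the happy path; error handling is elided and the submit of each split piece is assumed.

    /* Where in the scratch folio did the read put the data? */
    bvec_paddr = bvec_phys(bio_first_bvec_all(&chunk->bio));

    WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
    list_move_tail(&chunk->entry, &data->writing);

    /* Recycle the embedded bio as a write to the RT device. */
    bio_reset(&chunk->bio, mp->m_rtdev_targp->bt_bdev, REQ_OP_WRITE);
    bio_add_folio_nofail(&chunk->bio, chunk->scratch->folio, chunk->len,
                    offset_in_folio(chunk->scratch->folio, bvec_paddr));

    /* Peel off zone-append sized pieces, then submit what is left. */
    while ((split_chunk = xfs_zone_gc_split_write(data, chunk)))
            xfs_zone_gc_submit_write(data, split_chunk);    /* assumed */
    xfs_zone_gc_submit_write(data, chunk);
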
835 struct xfs_gc_bio *chunk) in xfs_zone_gc_finish_chunk() argument
838 struct xfs_inode *ip = chunk->ip; in xfs_zone_gc_finish_chunk()
842 if (chunk->bio.bi_status) in xfs_zone_gc_finish_chunk()
845 xfs_zone_gc_free_chunk(chunk); in xfs_zone_gc_finish_chunk()
849 chunk->scratch->freed += chunk->len; in xfs_zone_gc_finish_chunk()
850 if (chunk->scratch->freed == chunk->scratch->offset) { in xfs_zone_gc_finish_chunk()
851 chunk->scratch->offset = 0; in xfs_zone_gc_finish_chunk()
852 chunk->scratch->freed = 0; in xfs_zone_gc_finish_chunk()
871 if (chunk->is_seq) in xfs_zone_gc_finish_chunk()
872 chunk->new_daddr = chunk->bio.bi_iter.bi_sector; in xfs_zone_gc_finish_chunk()
873 error = xfs_zoned_end_io(ip, chunk->offset, chunk->len, in xfs_zone_gc_finish_chunk()
874 chunk->new_daddr, chunk->oz, chunk->old_startblock); in xfs_zone_gc_finish_chunk()
878 xfs_zone_gc_free_chunk(chunk); in xfs_zone_gc_finish_chunk()
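
The last two steps close the loop: for zone-append writes the device chooses the actual write position and the block layer reports it back in bi_iter.bi_sector, so new_daddr is refreshed from there before xfs_zoned_end_io() remaps the file range from the old RT block to the new location. A hedged sketch with locking and error handling elided.

    if (chunk->is_seq)
            /* zone append: the device reports the sector it actually wrote to */
            chunk->new_daddr = chunk->bio.bi_iter.bi_sector;

    error = xfs_zoned_end_io(ip, chunk->offset, chunk->len,
                    chunk->new_daddr, chunk->oz, chunk->old_startblock);
    /* ... error handling elided ... */

    /* Drops the list entry, the open-zone and inode references, and the bio. */
    xfs_zone_gc_free_chunk(chunk);
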
883 struct xfs_gc_bio *chunk) in xfs_zone_gc_finish_reset() argument
885 struct xfs_rtgroup *rtg = chunk->bio.bi_private; in xfs_zone_gc_finish_reset()
889 if (chunk->bio.bi_status) { in xfs_zone_gc_finish_reset()
901 list_del(&chunk->entry); in xfs_zone_gc_finish_reset()
902 bio_put(&chunk->bio); in xfs_zone_gc_finish_reset()
955 struct xfs_gc_bio *chunk; in xfs_zone_gc_reset_zones() local
968 chunk = container_of(bio, struct xfs_gc_bio, bio); in xfs_zone_gc_reset_zones()
969 chunk->data = data; in xfs_zone_gc_reset_zones()
970 WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW); in xfs_zone_gc_reset_zones()
971 list_add_tail(&chunk->entry, &data->resetting); in xfs_zone_gc_reset_zones()
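
Zone resets reuse the same tracking machinery: the reset bio is embedded in an xfs_gc_bio, parked on data->resetting, and completed through the shared end_io handler, with xfs_zone_gc_finish_reset() above recovering the rtgroup from bi_private. A hedged sketch; the bi_private, bi_end_io, and submit_bio() lines are assumptions consistent with those matches.

    chunk = container_of(bio, struct xfs_gc_bio, bio);
    chunk->data = data;
    chunk->bio.bi_private = rtg;                /* assumed: read back in xfs_zone_gc_finish_reset() */
    chunk->bio.bi_end_io = xfs_zone_gc_end_io;  /* assumed: shared completion handler */

    WRITE_ONCE(chunk->state, XFS_GC_BIO_NEW);
    list_add_tail(&chunk->entry, &data->resetting);
    submit_bio(bio);                            /* assumed */
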
996 struct xfs_gc_bio *chunk, *next; in xfs_zone_gc_handle_work() local
1020 list_for_each_entry_safe(chunk, next, &data->resetting, entry) { in xfs_zone_gc_handle_work()
1021 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) in xfs_zone_gc_handle_work()
1023 xfs_zone_gc_finish_reset(chunk); in xfs_zone_gc_handle_work()
1026 list_for_each_entry_safe(chunk, next, &data->writing, entry) { in xfs_zone_gc_handle_work()
1027 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) in xfs_zone_gc_handle_work()
1029 xfs_zone_gc_finish_chunk(chunk); in xfs_zone_gc_handle_work()
1033 list_for_each_entry_safe(chunk, next, &data->reading, entry) { in xfs_zone_gc_handle_work()
1034 if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE) in xfs_zone_gc_handle_work()
1036 xfs_zone_gc_write_chunk(chunk); in xfs_zone_gc_handle_work()
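
Taken together, the three loops above are the heart of the GC worker: every chunk sits on exactly one of resetting, writing, or reading, its end_io handler flips the state to XFS_GC_BIO_DONE, and each pass of xfs_zone_gc_handle_work() advances the completed chunks to their next stage. A hedged reconstruction of one pass; the break on the first still-pending chunk is an assumption, made so completions are handled in submission order.

    /* Finished zone resets first, then finished writes, then finished reads. */
    list_for_each_entry_safe(chunk, next, &data->resetting, entry) {
            if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
                    break;                          /* assumed: stop at first pending chunk */
            xfs_zone_gc_finish_reset(chunk);
    }

    list_for_each_entry_safe(chunk, next, &data->writing, entry) {
            if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
                    break;
            xfs_zone_gc_finish_chunk(chunk);        /* remap the file range to the new zone */
    }

    list_for_each_entry_safe(chunk, next, &data->reading, entry) {
            if (READ_ONCE(chunk->state) != XFS_GC_BIO_DONE)
                    break;
            xfs_zone_gc_write_chunk(chunk);         /* turn the finished read into a write */
    }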