Lines matching "left" and "shift"

1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
4 * Copyright (c) 2016-2018 Christoph Hellwig.
37 (((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)
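This fragment is the body of XFS_ALLOC_ALIGN (used further down in this listing): shifting right and then left by m_allocsize_log rounds an offset down to the allocation-unit boundary. A minimal standalone sketch of the same arithmetic; alloc_align() is a hypothetical stand-in for the macro, not the kernel definition:

/* Round-down-by-power-of-two, mirroring (((off) >> log) << log). */
#include <assert.h>
#include <stdint.h>

static uint64_t alloc_align(uint64_t off, unsigned int log)
{
	/* Shifting right then left clears the low 'log' bits. */
	return (off >> log) << log;
}

int main(void)
{
	/* With a 64k allocation unit (log = 16), offset 200000 rounds
	 * down to 196608 (3 * 65536); same as masking off the low bits. */
	assert(alloc_align(200000, 16) == 196608);
	assert(alloc_align(200000, 16) == (200000u & ~((1u << 16) - 1)));
	return 0;
}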
44 xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO, in xfs_alert_fsblock_zero()
47 "blkcnt: %llx extent-state: %x", in xfs_alert_fsblock_zero()
48 (unsigned long long)ip->i_ino, in xfs_alert_fsblock_zero()
49 (unsigned long long)imap->br_startblock, in xfs_alert_fsblock_zero()
50 (unsigned long long)imap->br_startoff, in xfs_alert_fsblock_zero()
51 (unsigned long long)imap->br_blockcount, in xfs_alert_fsblock_zero()
52 imap->br_state); in xfs_alert_fsblock_zero()
54 return -EFSCORRUPTED; in xfs_alert_fsblock_zero()
65 return READ_ONCE(ip->i_af.if_seq); in xfs_iomap_inode_sequence()
66 if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp) in xfs_iomap_inode_sequence()
67 cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32; in xfs_iomap_inode_sequence()
68 return cookie | READ_ONCE(ip->i_df.if_seq); in xfs_iomap_inode_sequence()
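xfs_iomap_inode_sequence() builds a 64-bit validity cookie from two 32-bit fork sequence counters: the COW fork sequence fills the high half (when IOMAP_F_SHARED is set) and the data fork sequence the low half; xfs_iomap_valid() below simply recomputes the cookie and compares. A minimal sketch of the pack-and-compare idea, with illustrative names rather than the kernel's:

/* Two 32-bit sequence counters packed into one u64 validity cookie. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t pack_cookie(uint32_t cow_seq, uint32_t data_seq)
{
	return ((uint64_t)cow_seq << 32) | data_seq;
}

static bool mapping_still_valid(uint64_t cookie, uint32_t cow_seq,
				uint32_t data_seq)
{
	/* Any fork modification bumps its counter, so a cached mapping
	 * with an old cookie no longer matches. */
	return cookie == pack_cookie(cow_seq, data_seq);
}

int main(void)
{
	uint64_t c = pack_cookie(7, 42);

	printf("%d\n", mapping_still_valid(c, 7, 42));	/* 1: unchanged */
	printf("%d\n", mapping_still_valid(c, 8, 42));	/* 0: COW fork changed */
	return 0;
}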
82 if (iomap->validity_cookie != in xfs_iomap_valid()
83 xfs_iomap_inode_sequence(ip, iomap->flags)) { in xfs_iomap_valid()
88 XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS); in xfs_iomap_valid()
105 struct xfs_mount *mp = ip->i_mount; in xfs_bmbt_to_iomap()
108 if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) { in xfs_bmbt_to_iomap()
113 if (imap->br_startblock == HOLESTARTBLOCK) { in xfs_bmbt_to_iomap()
114 iomap->addr = IOMAP_NULL_ADDR; in xfs_bmbt_to_iomap()
115 iomap->type = IOMAP_HOLE; in xfs_bmbt_to_iomap()
116 } else if (imap->br_startblock == DELAYSTARTBLOCK || in xfs_bmbt_to_iomap()
117 isnullstartblock(imap->br_startblock)) { in xfs_bmbt_to_iomap()
118 iomap->addr = IOMAP_NULL_ADDR; in xfs_bmbt_to_iomap()
119 iomap->type = IOMAP_DELALLOC; in xfs_bmbt_to_iomap()
121 xfs_daddr_t daddr = xfs_fsb_to_db(ip, imap->br_startblock); in xfs_bmbt_to_iomap()
123 iomap->addr = BBTOB(daddr); in xfs_bmbt_to_iomap()
125 iomap->addr += target->bt_dax_part_off; in xfs_bmbt_to_iomap()
127 if (imap->br_state == XFS_EXT_UNWRITTEN) in xfs_bmbt_to_iomap()
128 iomap->type = IOMAP_UNWRITTEN; in xfs_bmbt_to_iomap()
130 iomap->type = IOMAP_MAPPED; in xfs_bmbt_to_iomap()
138 xfs_rtbno_is_group_start(mp, imap->br_startblock)) in xfs_bmbt_to_iomap()
139 iomap->flags |= IOMAP_F_BOUNDARY; in xfs_bmbt_to_iomap()
141 iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff); in xfs_bmbt_to_iomap()
142 iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount); in xfs_bmbt_to_iomap()
144 iomap->dax_dev = target->bt_daxdev; in xfs_bmbt_to_iomap()
146 iomap->bdev = target->bt_bdev; in xfs_bmbt_to_iomap()
147 iomap->flags = iomap_flags; in xfs_bmbt_to_iomap()
150 (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP)) in xfs_bmbt_to_iomap()
151 iomap->flags |= IOMAP_F_DIRTY; in xfs_bmbt_to_iomap()
153 iomap->validity_cookie = sequence_cookie; in xfs_bmbt_to_iomap()
154 iomap->folio_ops = &xfs_iomap_folio_ops; in xfs_bmbt_to_iomap()
167 iomap->addr = IOMAP_NULL_ADDR; in xfs_hole_to_iomap()
168 iomap->type = IOMAP_HOLE; in xfs_hole_to_iomap()
169 iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb); in xfs_hole_to_iomap()
170 iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb); in xfs_hole_to_iomap()
171 iomap->bdev = target->bt_bdev; in xfs_hole_to_iomap()
172 iomap->dax_dev = target->bt_daxdev; in xfs_hole_to_iomap()
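xfs_bmbt_to_iomap() translates an extent record into an iomap with a fixed precedence: the hole sentinel maps to IOMAP_HOLE, the delayed-allocation sentinel to IOMAP_DELALLOC, and real blocks to IOMAP_UNWRITTEN or IOMAP_MAPPED depending on extent state. A simplified sketch of that precedence; the sentinel values, types, and names here are stand-ins for illustration, not the kernel's definitions:

/* Startblock -> iomap type precedence, as in xfs_bmbt_to_iomap(). */
#include <assert.h>
#include <stdbool.h>

enum iomap_type { T_HOLE, T_DELALLOC, T_UNWRITTEN, T_MAPPED };

#define HOLE_SENTINEL	((long long)-1)	/* illustrative, not HOLESTARTBLOCK */
#define DELAY_SENTINEL	((long long)-2)	/* illustrative, not DELAYSTARTBLOCK */

static enum iomap_type classify(long long startblock, bool unwritten)
{
	if (startblock == HOLE_SENTINEL)
		return T_HOLE;		/* no block assigned at all */
	if (startblock == DELAY_SENTINEL)
		return T_DELALLOC;	/* space reserved, not yet allocated */
	if (unwritten)
		return T_UNWRITTEN;	/* allocated, but reads see zeroes */
	return T_MAPPED;		/* real, written blocks */
}

int main(void)
{
	assert(classify(HOLE_SENTINEL, false) == T_HOLE);
	assert(classify(DELAY_SENTINEL, false) == T_DELALLOC);
	assert(classify(12345, true) == T_UNWRITTEN);
	assert(classify(12345, false) == T_MAPPED);
	return 0;
}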
181 ASSERT(offset <= mp->m_super->s_maxbytes); in xfs_iomap_end_fsb()
183 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); in xfs_iomap_end_fsb()
190 struct xfs_mount *mp = ip->i_mount; in xfs_eof_alignment()
199 * If mounted with the "-o swalloc" option the alignment is in xfs_eof_alignment()
202 if (mp->m_swidth && xfs_has_swalloc(mp)) in xfs_eof_alignment()
203 align = mp->m_swidth; in xfs_eof_alignment()
204 else if (mp->m_dalign) in xfs_eof_alignment()
205 align = mp->m_dalign; in xfs_eof_alignment()
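xfs_eof_alignment() picks the alignment for allocations at EOF: the full stripe width when the filesystem is mounted with "-o swalloc", otherwise the stripe unit. A small sketch of that policy, with plain integers standing in for the mount fields:

/* EOF-allocation alignment policy, cf. xfs_eof_alignment(). */
#include <stdio.h>

static unsigned int eof_alignment(unsigned int swidth, unsigned int dalign,
				  int swalloc)
{
	if (swidth && swalloc)
		return swidth;	/* align to the whole stripe width */
	if (dalign)
		return dalign;	/* otherwise to the stripe unit */
	return 0;		/* no alignment constraint */
}

int main(void)
{
	/* Stripe unit of 16 blocks, stripe width of 64 blocks. */
	printf("%u\n", eof_alignment(64, 16, 1));	/* 64: swalloc set */
	printf("%u\n", eof_alignment(64, 16, 0));	/* 16: default */
	return 0;
}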
262 struct xfs_mount *mp = ip->i_mount; in xfs_iomap_write_direct()
299 * left but we need to do unwritten extent conversion. in xfs_iomap_write_direct()
303 if (imap->br_state == XFS_EXT_UNWRITTEN) { in xfs_iomap_write_direct()
310 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, in xfs_iomap_write_direct()
336 if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) { in xfs_iomap_write_direct()
361 if (!dq || !xfs_this_quota_on(ip->i_mount, type)) in xfs_quota_need_throttle()
365 res = &dq->q_rtb; in xfs_quota_need_throttle()
366 pre = &dq->q_rtb_prealloc; in xfs_quota_need_throttle()
368 res = &dq->q_blk; in xfs_quota_need_throttle()
369 pre = &dq->q_blk_prealloc; in xfs_quota_need_throttle()
373 if (!pre->q_prealloc_hi_wmark) in xfs_quota_need_throttle()
377 if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark) in xfs_quota_need_throttle()
395 int shift = 0; in xfs_quota_calc_throttle() local
401 res = &dq->q_rtb; in xfs_quota_calc_throttle()
402 pre = &dq->q_rtb_prealloc; in xfs_quota_calc_throttle()
404 res = &dq->q_blk; in xfs_quota_calc_throttle()
405 pre = &dq->q_blk_prealloc; in xfs_quota_calc_throttle()
409 if (!res || res->reserved >= pre->q_prealloc_hi_wmark) { in xfs_quota_calc_throttle()
415 freesp = pre->q_prealloc_hi_wmark - res->reserved; in xfs_quota_calc_throttle()
416 if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) { in xfs_quota_calc_throttle()
417 shift = 2; in xfs_quota_calc_throttle()
418 if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT]) in xfs_quota_calc_throttle()
419 shift += 2; in xfs_quota_calc_throttle()
420 if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT]) in xfs_quota_calc_throttle()
421 shift += 2; in xfs_quota_calc_throttle()
428 if ((freesp >> shift) < (*qblocks >> *qshift)) { in xfs_quota_calc_throttle()
430 *qshift = shift; in xfs_quota_calc_throttle()
439 int *shift) in xfs_iomap_freesp() argument
445 *shift = 2; in xfs_iomap_freesp()
447 (*shift)++; in xfs_iomap_freesp()
449 (*shift)++; in xfs_iomap_freesp()
451 (*shift)++; in xfs_iomap_freesp()
453 (*shift)++; in xfs_iomap_freesp()
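Both xfs_quota_calc_throttle() and xfs_iomap_freesp() build a throttle shift from low-space watermarks: each threshold that free space falls below increases the shift, and xfs_iomap_prealloc_size() later applies alloc_blocks >>= shift, dividing the speculative preallocation by a power of two. A worked sketch of the quota-style ladder (the global free-space variant steps the shift by one per threshold rather than two); the watermark numbers are illustrative:

/* Throttle-shift ladder over low-space watermarks. */
#include <stdint.h>
#include <stdio.h>

static int throttle_shift(uint64_t freesp, const uint64_t low_space[3])
{
	int shift = 0;

	if (freesp < low_space[0]) {		/* below the 5% watermark */
		shift = 2;
		if (freesp < low_space[1])	/* below 3% */
			shift += 2;
		if (freesp < low_space[2])	/* below 1% */
			shift += 2;
	}
	return shift;
}

int main(void)
{
	/* A pool of 1000 blocks with 5%/3%/1% watermarks. */
	const uint64_t low[3] = { 50, 30, 10 };
	uint64_t prealloc = 256;

	/* 8 free blocks is under all three watermarks, so shift = 6 and
	 * a 256-block prealloc collapses to 256 >> 6 = 4 blocks (it would
	 * then be clamped up to the minimum, m_allocsize_blocks). */
	printf("%llu\n",
	       (unsigned long long)(prealloc >> throttle_shift(8, low)));
	return 0;
}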
474 struct xfs_mount *mp = ip->i_mount; in xfs_iomap_prealloc_size()
481 int shift = 0; in xfs_iomap_prealloc_size() local
490 if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks)) in xfs_iomap_prealloc_size()
497 if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) || in xfs_iomap_prealloc_size()
500 return mp->m_allocsize_blocks; in xfs_iomap_prealloc_size()
543 mp->m_low_rtexts, &shift)); in xfs_iomap_prealloc_size()
545 freesp = xfs_iomap_freesp(mp, XC_FREE_BLOCKS, mp->m_low_space, in xfs_iomap_prealloc_size()
546 &shift); in xfs_iomap_prealloc_size()
549 * Check each quota to cap the prealloc size, provide a shift value to in xfs_iomap_prealloc_size()
566 * The shift throttle value is set to the maximum value as determined by in xfs_iomap_prealloc_size()
567 * the global low free space values and per-quota low free space values. in xfs_iomap_prealloc_size()
570 shift = max(shift, qshift); in xfs_iomap_prealloc_size()
572 if (shift) in xfs_iomap_prealloc_size()
573 alloc_blocks >>= shift; in xfs_iomap_prealloc_size()
591 if (alloc_blocks < mp->m_allocsize_blocks) in xfs_iomap_prealloc_size()
592 alloc_blocks = mp->m_allocsize_blocks; in xfs_iomap_prealloc_size()
593 trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift, in xfs_iomap_prealloc_size()
594 mp->m_allocsize_blocks); in xfs_iomap_prealloc_size()
605 struct xfs_mount *mp = ip->i_mount; in xfs_iomap_write_unwritten()
621 count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb); in xfs_iomap_write_unwritten()
650 error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks, in xfs_iomap_write_unwritten()
682 ip->i_disk_size = i_size; in xfs_iomap_write_unwritten()
705 count_fsb -= numblks_fsb; in xfs_iomap_write_unwritten()
727 imap->br_startblock == HOLESTARTBLOCK || in imap_needs_alloc()
728 imap->br_startblock == DELAYSTARTBLOCK) in imap_needs_alloc()
731 if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN) in imap_needs_alloc()
749 imap->br_startblock == HOLESTARTBLOCK || in imap_needs_cow()
750 imap->br_state == XFS_EXT_UNWRITTEN) in imap_needs_cow()
771 if (xfs_need_iread_extents(&ip->i_df)) in xfs_ilock_for_iomap()
772 return -EAGAIN; in xfs_ilock_for_iomap()
774 return -EAGAIN; in xfs_ilock_for_iomap()
776 if (xfs_need_iread_extents(&ip->i_df)) in xfs_ilock_for_iomap()
794 if (imap->br_startoff > offset_fsb) in imap_spans_range()
796 if (imap->br_startoff + imap->br_blockcount < end_fsb) in imap_spans_range()
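imap_spans_range() is a simple containment test: the mapping must start at or before the first requested block and end at or after the last. A compile-ready restatement with plain integers standing in for the file-offset types:

/* Does the mapping [br_startoff, br_startoff + br_blockcount) cover
 * the requested range [offset_fsb, end_fsb)? Cf. imap_spans_range(). */
#include <assert.h>
#include <stdbool.h>

static bool spans_range(long long br_startoff, long long br_blockcount,
			long long offset_fsb, long long end_fsb)
{
	if (br_startoff > offset_fsb)			/* starts too late */
		return false;
	if (br_startoff + br_blockcount < end_fsb)	/* ends too early */
		return false;
	return true;
}

int main(void)
{
	/* Extent [100, 150) vs request [110, 140): fully covered. */
	assert(spans_range(100, 50, 110, 140));
	/* Request [110, 160) runs past the extent's end. */
	assert(!spans_range(100, 50, 110, 160));
	return 0;
}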
811 struct xfs_mount *mp = ip->i_mount; in xfs_direct_write_iomap_begin()
824 return -EIO; in xfs_direct_write_iomap_begin()
834 /* HW-offload atomics are always used in this path */ in xfs_direct_write_iomap_begin()
862 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, in xfs_direct_write_iomap_begin()
868 error = -EAGAIN; in xfs_direct_write_iomap_begin()
872 /* may drop and re-acquire the ilock */ in xfs_direct_write_iomap_begin()
881 length = XFS_FSB_TO_B(mp, end_fsb) - offset; in xfs_direct_write_iomap_begin()
894 error = -EAGAIN; in xfs_direct_write_iomap_begin()
901 * requiring sub-block zeroing. This can only be done under an in xfs_direct_write_iomap_begin()
902 * exclusive IOLOCK, hence return -EAGAIN if this is not a written in xfs_direct_write_iomap_begin()
906 error = -EAGAIN; in xfs_direct_write_iomap_begin()
908 ((offset | length) & mp->m_blockmask)) in xfs_direct_write_iomap_begin()
918 error = -EAGAIN; in xfs_direct_write_iomap_begin()
928 * Note that the values need to be less than 32 bits wide until the in xfs_direct_write_iomap_begin()
940 error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb, in xfs_direct_write_iomap_begin()
951 trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap); in xfs_direct_write_iomap_begin()
997 return -EAGAIN; in xfs_zoned_direct_write_iomap_begin()
1003 if (xfs_need_iread_extents(&ip->i_df)) { in xfs_zoned_direct_write_iomap_begin()
1011 iomap->type = IOMAP_MAPPED; in xfs_zoned_direct_write_iomap_begin()
1012 iomap->flags = IOMAP_F_DIRTY; in xfs_zoned_direct_write_iomap_begin()
1013 iomap->bdev = ip->i_mount->m_rtdev_targp->bt_bdev; in xfs_zoned_direct_write_iomap_begin()
1014 iomap->offset = offset; in xfs_zoned_direct_write_iomap_begin()
1015 iomap->length = length; in xfs_zoned_direct_write_iomap_begin()
1016 iomap->flags |= IOMAP_F_ANON_WRITE; in xfs_zoned_direct_write_iomap_begin()
1061 struct xfs_bmbt_irec left; /* left neighbor extent entry */ in xfs_bmap_add_extent_hole_delay() local
1069 ASSERT(isnullstartblock(new->br_startblock)); in xfs_bmap_add_extent_hole_delay()
1072 * Check and set flags if this segment has a left neighbor in xfs_bmap_add_extent_hole_delay()
1074 if (xfs_iext_peek_prev_extent(ifp, icur, &left)) { in xfs_bmap_add_extent_hole_delay()
1076 if (isnullstartblock(left.br_startblock)) in xfs_bmap_add_extent_hole_delay()
1082 * If it doesn't exist, we're converting the hole at end-of-file. in xfs_bmap_add_extent_hole_delay()
1091 * Set contiguity flags on the left and right neighbors. in xfs_bmap_add_extent_hole_delay()
1095 left.br_startoff + left.br_blockcount == new->br_startoff && in xfs_bmap_add_extent_hole_delay()
1096 left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN) in xfs_bmap_add_extent_hole_delay()
1100 new->br_startoff + new->br_blockcount == right.br_startoff && in xfs_bmap_add_extent_hole_delay()
1101 new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN && in xfs_bmap_add_extent_hole_delay()
1103 (left.br_blockcount + new->br_blockcount + in xfs_bmap_add_extent_hole_delay()
1114 * on the left and on the right. in xfs_bmap_add_extent_hole_delay()
1117 temp = left.br_blockcount + new->br_blockcount + in xfs_bmap_add_extent_hole_delay()
1120 oldlen = startblockval(left.br_startblock) + in xfs_bmap_add_extent_hole_delay()
1121 startblockval(new->br_startblock) + in xfs_bmap_add_extent_hole_delay()
1125 left.br_startblock = nullstartblock(newlen); in xfs_bmap_add_extent_hole_delay()
1126 left.br_blockcount = temp; in xfs_bmap_add_extent_hole_delay()
1130 xfs_iext_update_extent(ip, state, icur, &left); in xfs_bmap_add_extent_hole_delay()
1136 * on the left. in xfs_bmap_add_extent_hole_delay()
1137 * Merge the new allocation with the left neighbor. in xfs_bmap_add_extent_hole_delay()
1139 temp = left.br_blockcount + new->br_blockcount; in xfs_bmap_add_extent_hole_delay()
1141 oldlen = startblockval(left.br_startblock) + in xfs_bmap_add_extent_hole_delay()
1142 startblockval(new->br_startblock); in xfs_bmap_add_extent_hole_delay()
1145 left.br_blockcount = temp; in xfs_bmap_add_extent_hole_delay()
1146 left.br_startblock = nullstartblock(newlen); in xfs_bmap_add_extent_hole_delay()
1149 xfs_iext_update_extent(ip, state, icur, &left); in xfs_bmap_add_extent_hole_delay()
1158 temp = new->br_blockcount + right.br_blockcount; in xfs_bmap_add_extent_hole_delay()
1159 oldlen = startblockval(new->br_startblock) + in xfs_bmap_add_extent_hole_delay()
1163 right.br_startoff = new->br_startoff; in xfs_bmap_add_extent_hole_delay()
1181 xfs_add_fdblocks(ip->i_mount, oldlen - newlen); in xfs_bmap_add_extent_hole_delay()
1186 xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen); in xfs_bmap_add_extent_hole_delay()
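The oldlen/newlen logic above recomputes the worst-case indirect-block reservation when delalloc extents merge: one reservation for the combined extent never exceeds the sum of the separate ones, and the surplus is handed back through xfs_add_fdblocks(). A toy model of that accounting; worst_indlen() is a hypothetical stand-in for xfs_bmap_worst_indlen() and its cost model is invented purely for illustration:

/* Indirect-reservation accounting across a delalloc extent merge. */
#include <stdint.h>
#include <stdio.h>

static uint64_t worst_indlen(uint64_t blockcount)
{
	/* Invented model: one indirect block per 1024 data blocks,
	 * rounded up. The kernel's real estimate walks btree levels. */
	return (blockcount + 1023) / 1024;
}

int main(void)
{
	uint64_t left = 100, middle = 100, right = 100;	/* delalloc sizes */

	/* Separate reservations, made when each extent was created. */
	uint64_t oldlen = worst_indlen(left) + worst_indlen(middle) +
			  worst_indlen(right);			/* 3 */

	/* One reservation for the merged extent, capped at oldlen. */
	uint64_t newlen = worst_indlen(left + middle + right);	/* 1 */
	if (newlen > oldlen)
		newlen = oldlen;

	/* The surplus returns to the free-block pool, as in
	 * xfs_add_fdblocks(mp, oldlen - newlen) above. */
	printf("returned %llu blocks\n",
	       (unsigned long long)(oldlen - newlen));
	return 0;
}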
1192 * global pool and the extent inserted into the inode in-core extent tree.
1214 struct xfs_mount *mp = ip->i_mount; in xfs_bmapi_reserve_delalloc()
1232 alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff); in xfs_bmapi_reserve_delalloc()
1234 prealloc = alen - len; in xfs_bmapi_reserve_delalloc()
1258 * Make a transaction-less quota reservation for delayed allocation in xfs_bmapi_reserve_delalloc()
1287 ip->i_delayed_blks += alen; in xfs_bmapi_reserve_delalloc()
1290 got->br_startoff = aoff; in xfs_bmapi_reserve_delalloc()
1291 got->br_startblock = nullstartblock(indlen); in xfs_bmapi_reserve_delalloc()
1292 got->br_blockcount = alen; in xfs_bmapi_reserve_delalloc()
1293 got->br_state = XFS_EXT_NORM; in xfs_bmapi_reserve_delalloc()
1316 if (error == -ENOSPC || error == -EDQUOT) { in xfs_bmapi_reserve_delalloc()
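A delayed-allocation extent has no real start block yet, so nullstartblock(indlen) stores the indirect-block reservation count in br_startblock with the high bits set as a marker, and startblockval()/isnullstartblock() recover it later. A sketch of that encoding; the 17-bit value field follows the kernel's STARTBLOCKVALBITS, but treat the exact width as an assumption:

/* Delalloc startblock encoding: all-ones high bits flag "no real
 * block", low bits carry the indirect-block reservation count. */
#include <assert.h>
#include <stdint.h>

#define VALBITS		17	/* assumed to mirror STARTBLOCKVALBITS */
#define STARTBLOCKMASK	(~(uint64_t)0 << VALBITS)

static uint64_t nullstartblock(unsigned int indlen)
{
	return STARTBLOCKMASK | indlen;
}

static int isnullstartblock(uint64_t sb)
{
	return (sb & STARTBLOCKMASK) == STARTBLOCKMASK;
}

static unsigned int startblockval(uint64_t sb)
{
	return (unsigned int)(sb & ~STARTBLOCKMASK);
}

int main(void)
{
	uint64_t sb = nullstartblock(5);	/* 5 blocks reserved */

	assert(isnullstartblock(sb));
	assert(startblockval(sb) == 5);
	return 0;
}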
1340 struct xfs_zone_alloc_ctx *ac = iter->private; in xfs_zoned_buffered_write_iomap_begin()
1342 struct xfs_mount *mp = ip->i_mount; in xfs_zoned_buffered_write_iomap_begin()
1358 return -EIO; in xfs_zoned_buffered_write_iomap_begin()
1368 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || in xfs_zoned_buffered_write_iomap_begin()
1371 error = -EFSCORRUPTED; in xfs_zoned_buffered_write_iomap_begin()
1386 * read-modify-write of the whole block in the page cache. in xfs_zoned_buffered_write_iomap_begin()
1391 if (!IS_ALIGNED(offset, mp->m_sb.sb_blocksize) || in xfs_zoned_buffered_write_iomap_begin()
1392 !IS_ALIGNED(offset + count, mp->m_sb.sb_blocksize) || in xfs_zoned_buffered_write_iomap_begin()
1397 if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &scur, in xfs_zoned_buffered_write_iomap_begin()
1414 end_fsb - offset_fsb); in xfs_zoned_buffered_write_iomap_begin()
1422 if (!ip->i_cowfp) in xfs_zoned_buffered_write_iomap_begin()
1425 if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got)) in xfs_zoned_buffered_write_iomap_begin()
1437 count_fsb = min3(end_fsb - offset_fsb, XFS_MAX_BMBT_EXTLEN, in xfs_zoned_buffered_write_iomap_begin()
1447 * ->page_mkwrite in range this thread writes to, using up the in xfs_zoned_buffered_write_iomap_begin()
1452 * iteration short, causing the new call to ->iomap_begin that gets in xfs_zoned_buffered_write_iomap_begin()
1459 if (count_fsb > ac->reserved_blocks) { in xfs_zoned_buffered_write_iomap_begin()
1461 "Short write on ino 0x%llx comm %.20s due to three-way race with write fault and direct I/O", in xfs_zoned_buffered_write_iomap_begin()
1462 ip->i_ino, current->comm); in xfs_zoned_buffered_write_iomap_begin()
1463 count_fsb = ac->reserved_blocks; in xfs_zoned_buffered_write_iomap_begin()
1465 error = -EIO; in xfs_zoned_buffered_write_iomap_begin()
1478 ip->i_delayed_blks += count_fsb; in xfs_zoned_buffered_write_iomap_begin()
1486 ac->reserved_blocks -= count_fsb; in xfs_zoned_buffered_write_iomap_begin()
1509 struct xfs_mount *mp = ip->i_mount; in xfs_buffered_write_iomap_begin()
1523 return -EIO; in xfs_buffered_write_iomap_begin()
1542 if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) || in xfs_buffered_write_iomap_begin()
1545 error = -EFSCORRUPTED; in xfs_buffered_write_iomap_begin()
1559 * perform read-modify-write cycles for unaligned writes. in xfs_buffered_write_iomap_begin()
1561 eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap); in xfs_buffered_write_iomap_begin()
1586 end_fsb - offset_fsb); in xfs_buffered_write_iomap_begin()
1599 if (!ip->i_cowfp) { in xfs_buffered_write_iomap_begin()
1603 cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, in xfs_buffered_write_iomap_begin()
1624 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); in xfs_buffered_write_iomap_begin()
1651 * Note that the values need to be less than 32 bits wide until in xfs_buffered_write_iomap_begin()
1667 prealloc_blocks = mp->m_allocsize_blocks; in xfs_buffered_write_iomap_begin()
1679 end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1); in xfs_buffered_write_iomap_begin()
1688 XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes)); in xfs_buffered_write_iomap_begin()
1690 prealloc_blocks = p_end_fsb - end_fsb; in xfs_buffered_write_iomap_begin()
1701 end_fsb - offset_fsb, prealloc_blocks, &cmap, in xfs_buffered_write_iomap_begin()
1711 end_fsb - offset_fsb, prealloc_blocks, &imap, &icur, in xfs_buffered_write_iomap_begin()
1741 imap.br_startoff - offset_fsb); in xfs_buffered_write_iomap_begin()
1765 (iomap->flags & IOMAP_F_SHARED) ? in xfs_buffered_write_delalloc_punch()
1767 offset, offset + length, iter->private); in xfs_buffered_write_delalloc_punch()
1782 if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW)) in xfs_buffered_write_iomap_end()
1801 rwsem_assert_held_write(&inode->i_mapping->invalidate_lock); in xfs_buffered_write_iomap_end()
1805 filemap_invalidate_lock(inode->i_mapping); in xfs_buffered_write_iomap_end()
1808 filemap_invalidate_unlock(inode->i_mapping); in xfs_buffered_write_iomap_end()
1829 struct xfs_mount *mp = ip->i_mount; in xfs_read_iomap_begin()
1841 return -EIO; in xfs_read_iomap_begin()
1846 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, in xfs_read_iomap_begin()
1874 struct xfs_mount *mp = ip->i_mount; in xfs_seek_iomap_begin()
1885 return -EIO; in xfs_seek_iomap_begin()
1892 if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) { in xfs_seek_iomap_begin()
1907 * If a COW fork extent covers the hole, report it - capped to the next in xfs_seek_iomap_begin()
1911 xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap)) in xfs_seek_iomap_begin()
1916 xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb); in xfs_seek_iomap_begin()
1925 iomap->type = IOMAP_UNWRITTEN; in xfs_seek_iomap_begin()
1933 imap.br_blockcount = cow_fsb - offset_fsb; in xfs_seek_iomap_begin()
1935 imap.br_blockcount = data_fsb - offset_fsb; in xfs_seek_iomap_begin()
1941 xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb); in xfs_seek_iomap_begin()
1962 struct xfs_mount *mp = ip->i_mount; in xfs_xattr_iomap_begin()
1971 return -EIO; in xfs_xattr_iomap_begin()
1976 if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) { in xfs_xattr_iomap_begin()
1977 error = -ENOENT; in xfs_xattr_iomap_begin()
1981 ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL); in xfs_xattr_iomap_begin()
1982 error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap, in xfs_xattr_iomap_begin()