// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
 * Copyright (c) 2016-2018 Christoph Hellwig.
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_inode.h"
#include "xfs_btree.h"
#include "xfs_bmap_btree.h"
#include "xfs_bmap.h"
#include "xfs_bmap_util.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_trans.h"
#include "xfs_trans_space.h"
#include "xfs_inode_item.h"
#include "xfs_iomap.h"
#include "xfs_trace.h"
#include "xfs_quota.h"
#include "xfs_rtgroup.h"
#include "xfs_dquot_item.h"
#include "xfs_dquot.h"
#include "xfs_reflink.h"
#include "xfs_health.h"
#include "xfs_rtbitmap.h"
#include "xfs_icache.h"
#include "xfs_zone_alloc.h"

#define XFS_ALLOC_ALIGN(mp, off) \
	(((off) >> mp->m_allocsize_log) << mp->m_allocsize_log)

static int
xfs_alert_fsblock_zero(
	xfs_inode_t	*ip,
	xfs_bmbt_irec_t	*imap)
{
	xfs_alert_tag(ip->i_mount, XFS_PTAG_FSBLOCK_ZERO,
			"Access to block zero in inode %llu "
			"start_block: %llx start_off: %llx "
			"blkcnt: %llx extent-state: %x",
		(unsigned long long)ip->i_ino,
		(unsigned long long)imap->br_startblock,
		(unsigned long long)imap->br_startoff,
		(unsigned long long)imap->br_blockcount,
		imap->br_state);
	xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
	return -EFSCORRUPTED;
}

u64
xfs_iomap_inode_sequence(
	struct xfs_inode	*ip,
	u16			iomap_flags)
{
	u64			cookie = 0;

	if (iomap_flags & IOMAP_F_XATTR)
		return READ_ONCE(ip->i_af.if_seq);
	if ((iomap_flags & IOMAP_F_SHARED) && ip->i_cowfp)
		cookie = (u64)READ_ONCE(ip->i_cowfp->if_seq) << 32;
	return cookie | READ_ONCE(ip->i_df.if_seq);
}
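/*
 * Illustrative example (not part of the original source): with
 * ip->i_df.if_seq == 5 and ip->i_cowfp->if_seq == 2, a mapping created with
 * IOMAP_F_SHARED carries the cookie 0x0000000200000005, while a plain data
 * fork mapping carries 0x0000000000000005.  Bumping either fork's if_seq
 * changes the cookie, which is how xfs_iomap_valid() below detects that a
 * previously handed out iomap has gone stale.
 */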

/*
 * Check that the iomap passed to us is still valid for the given offset and
 * length.
 */
static bool
xfs_iomap_valid(
	struct inode		*inode,
	const struct iomap	*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (iomap->validity_cookie !=
			xfs_iomap_inode_sequence(ip, iomap->flags)) {
		trace_xfs_iomap_invalid(ip, iomap);
		return false;
	}

	XFS_ERRORTAG_DELAY(ip->i_mount, XFS_ERRTAG_WRITE_DELAY_MS);
	return true;
}

static const struct iomap_folio_ops xfs_iomap_folio_ops = {
	.iomap_valid		= xfs_iomap_valid,
};

int
xfs_bmbt_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	struct xfs_bmbt_irec	*imap,
	unsigned int		mapping_flags,
	u16			iomap_flags,
	u64			sequence_cookie)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		return xfs_alert_fsblock_zero(ip, imap);
	}

	if (imap->br_startblock == HOLESTARTBLOCK) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_HOLE;
	} else if (imap->br_startblock == DELAYSTARTBLOCK ||
		   isnullstartblock(imap->br_startblock)) {
		iomap->addr = IOMAP_NULL_ADDR;
		iomap->type = IOMAP_DELALLOC;
	} else {
		xfs_daddr_t	daddr = xfs_fsb_to_db(ip, imap->br_startblock);

		iomap->addr = BBTOB(daddr);
		if (mapping_flags & IOMAP_DAX)
			iomap->addr += target->bt_dax_part_off;

		if (imap->br_state == XFS_EXT_UNWRITTEN)
			iomap->type = IOMAP_UNWRITTEN;
		else
			iomap->type = IOMAP_MAPPED;

		/*
		 * Mark iomaps starting at the first sector of a RTG as merge
		 * boundary so that each I/O completion is contained to a
		 * single RTG.
		 */
		if (XFS_IS_REALTIME_INODE(ip) && xfs_has_rtgroups(mp) &&
		    xfs_rtbno_is_group_start(mp, imap->br_startblock))
			iomap->flags |= IOMAP_F_BOUNDARY;
	}
	iomap->offset = XFS_FSB_TO_B(mp, imap->br_startoff);
	iomap->length = XFS_FSB_TO_B(mp, imap->br_blockcount);
	if (mapping_flags & IOMAP_DAX)
		iomap->dax_dev = target->bt_daxdev;
	else
		iomap->bdev = target->bt_bdev;
	iomap->flags = iomap_flags;

	if (xfs_ipincount(ip) &&
	    (ip->i_itemp->ili_fsync_fields & ~XFS_ILOG_TIMESTAMP))
		iomap->flags |= IOMAP_F_DIRTY;

	iomap->validity_cookie = sequence_cookie;
	iomap->folio_ops = &xfs_iomap_folio_ops;
	return 0;
}
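/*
 * Worked example (illustrative only): on a filesystem with 4k blocks, a
 * written data fork extent with br_startoff == 8 and br_blockcount == 4
 * becomes an iomap with offset == 32768 and length == 16384, type
 * IOMAP_MAPPED, and addr set to the byte address of the extent's first
 * disk block (BBTOB() of the daddr computed above).
 */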

static void
xfs_hole_to_iomap(
	struct xfs_inode	*ip,
	struct iomap		*iomap,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_buftarg	*target = xfs_inode_buftarg(ip);

	iomap->addr = IOMAP_NULL_ADDR;
	iomap->type = IOMAP_HOLE;
	iomap->offset = XFS_FSB_TO_B(ip->i_mount, offset_fsb);
	iomap->length = XFS_FSB_TO_B(ip->i_mount, end_fsb - offset_fsb);
	iomap->bdev = target->bt_bdev;
	iomap->dax_dev = target->bt_daxdev;
}

static inline xfs_fileoff_t
xfs_iomap_end_fsb(
	struct xfs_mount	*mp,
	loff_t			offset,
	loff_t			count)
{
	ASSERT(offset <= mp->m_super->s_maxbytes);
	return min(XFS_B_TO_FSB(mp, offset + count),
		   XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
}

static xfs_extlen_t
xfs_eof_alignment(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		align = 0;

	if (!XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Round up the allocation request to a stripe unit
		 * (m_dalign) boundary if the file size is >= stripe unit
		 * size, and we are allocating past the allocation eof.
		 *
		 * If mounted with the "-o swalloc" option the alignment is
		 * increased from the stripe unit size to the stripe width.
		 */
		if (mp->m_swidth && xfs_has_swalloc(mp))
			align = mp->m_swidth;
		else if (mp->m_dalign)
			align = mp->m_dalign;

		if (align && XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, align))
			align = 0;
	}

	return align;
}

/*
 * Check if end_fsb is outside the last extent, and if so grow it to the next
 * stripe unit boundary.
 */
xfs_fileoff_t
xfs_iomap_eof_align_last_fsb(
	struct xfs_inode	*ip,
	xfs_fileoff_t		end_fsb)
{
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, XFS_DATA_FORK);
	xfs_extlen_t		extsz = xfs_get_extsz_hint(ip);
	xfs_extlen_t		align = xfs_eof_alignment(ip);
	struct xfs_bmbt_irec	irec;
	struct xfs_iext_cursor	icur;

	ASSERT(!xfs_need_iread_extents(ifp));

	/*
	 * Always round up the allocation request to the extent hint boundary.
	 */
	if (extsz) {
		if (align)
			align = roundup_64(align, extsz);
		else
			align = extsz;
	}

	if (align) {
		xfs_fileoff_t	aligned_end_fsb = roundup_64(end_fsb, align);

		xfs_iext_last(ifp, &icur);
		if (!xfs_iext_get_extent(ifp, &icur, &irec) ||
		    aligned_end_fsb >= irec.br_startoff + irec.br_blockcount)
			return aligned_end_fsb;
	}

	return end_fsb;
}

int
xfs_iomap_write_direct(
	struct xfs_inode	*ip,
	xfs_fileoff_t		offset_fsb,
	xfs_fileoff_t		count_fsb,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	u64			*seq)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	xfs_filblks_t		resaligned;
	int			nimaps;
	unsigned int		dblocks, rblocks;
	bool			force = false;
	int			error;
	int			bmapi_flags = XFS_BMAPI_PREALLOC;
	int			nr_exts = XFS_IEXT_ADD_NOSPLIT_CNT;

	ASSERT(count_fsb > 0);

	resaligned = xfs_aligned_fsb_count(offset_fsb, count_fsb,
					   xfs_get_extsz_hint(ip));
	if (unlikely(XFS_IS_REALTIME_INODE(ip))) {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0);
		rblocks = resaligned;
	} else {
		dblocks = XFS_DIOSTRAT_SPACE_RES(mp, resaligned);
		rblocks = 0;
	}

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	/*
	 * For DAX, we do not allocate unwritten extents, but instead we zero
	 * the block before we commit the transaction.  Ideally we'd like to do
	 * this outside the transaction context, but if we commit and then crash
	 * we may not have zeroed the blocks and this will be exposed on
	 * recovery of the allocation.  Hence we must zero before commit.
	 *
	 * Further, if we are mapping unwritten extents here, we need to zero
	 * and convert them to written so that we don't need an unwritten extent
	 * callback for DAX. This also means that we need to be able to dip into
	 * the reserve block pool for bmbt block allocation if there is no space
	 * left but we need to do unwritten extent conversion.
	 */
	if (flags & IOMAP_DAX) {
		bmapi_flags = XFS_BMAPI_CONVERT | XFS_BMAPI_ZERO;
		if (imap->br_state == XFS_EXT_UNWRITTEN) {
			force = true;
			nr_exts = XFS_IEXT_WRITE_UNWRITTEN_CNT;
			dblocks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;
		}
	}

	error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks,
			rblocks, force, &tp);
	if (error)
		return error;

	error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK, nr_exts);
	if (error)
		goto out_trans_cancel;

	/*
	 * From this point onwards we overwrite the imap pointer that the
	 * caller gave to us.
	 */
	nimaps = 1;
	error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb, bmapi_flags, 0,
				imap, &nimaps);
	if (error)
		goto out_trans_cancel;

	/*
	 * Complete the transaction
	 */
	error = xfs_trans_commit(tp);
	if (error)
		goto out_unlock;

	if (unlikely(!xfs_valid_startblock(ip, imap->br_startblock))) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = xfs_alert_fsblock_zero(ip, imap);
	}

out_unlock:
	*seq = xfs_iomap_inode_sequence(ip, 0);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;

out_trans_cancel:
	xfs_trans_cancel(tp);
	goto out_unlock;
}

STATIC bool
xfs_quota_need_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		alloc_blocks)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;

	if (!dq || !xfs_this_quota_on(ip->i_mount, type))
		return false;

	if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dq->q_rtb;
		pre = &dq->q_rtb_prealloc;
	} else {
		res = &dq->q_blk;
		pre = &dq->q_blk_prealloc;
	}

	/* no hi watermark, no throttle */
	if (!pre->q_prealloc_hi_wmark)
		return false;

	/* under the lo watermark, no throttle */
	if (res->reserved + alloc_blocks < pre->q_prealloc_lo_wmark)
		return false;

	return true;
}

STATIC void
xfs_quota_calc_throttle(
	struct xfs_inode	*ip,
	xfs_dqtype_t		type,
	xfs_fsblock_t		*qblocks,
	int			*qshift,
	int64_t			*qfreesp)
{
	struct xfs_dquot	*dq = xfs_inode_dquot(ip, type);
	struct xfs_dquot_res	*res;
	struct xfs_dquot_pre	*pre;
	int64_t			freesp;
	int			shift = 0;

	if (!dq) {
		res = NULL;
		pre = NULL;
	} else if (XFS_IS_REALTIME_INODE(ip)) {
		res = &dq->q_rtb;
		pre = &dq->q_rtb_prealloc;
	} else {
		res = &dq->q_blk;
		pre = &dq->q_blk_prealloc;
	}

	/* no dq, or over hi wmark, squash the prealloc completely */
	if (!res || res->reserved >= pre->q_prealloc_hi_wmark) {
		*qblocks = 0;
		*qfreesp = 0;
		return;
	}

	freesp = pre->q_prealloc_hi_wmark - res->reserved;
	if (freesp < pre->q_low_space[XFS_QLOWSP_5_PCNT]) {
		shift = 2;
		if (freesp < pre->q_low_space[XFS_QLOWSP_3_PCNT])
			shift += 2;
		if (freesp < pre->q_low_space[XFS_QLOWSP_1_PCNT])
			shift += 2;
	}

	if (freesp < *qfreesp)
		*qfreesp = freesp;

	/* only overwrite the throttle values if we are more aggressive */
	if ((freesp >> shift) < (*qblocks >> *qshift)) {
		*qblocks = freesp;
		*qshift = shift;
	}
}
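/*
 * Illustrative note on the shift based throttling used above and in
 * xfs_iomap_freesp() below: the caller shifts its candidate preallocation
 * right by the returned value, so a shift of 2 quarters the preallocation
 * and the most aggressive value of 6 cuts it to a sixty-fourth of its
 * original size.
 */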

static int64_t
xfs_iomap_freesp(
	struct xfs_mount	*mp,
	unsigned int		idx,
	uint64_t		low_space[XFS_LOWSP_MAX],
	int			*shift)
{
	int64_t			freesp;

	freesp = xfs_estimate_freecounter(mp, idx);
	if (freesp < low_space[XFS_LOWSP_5_PCNT]) {
		*shift = 2;
		if (freesp < low_space[XFS_LOWSP_4_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_3_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_2_PCNT])
			(*shift)++;
		if (freesp < low_space[XFS_LOWSP_1_PCNT])
			(*shift)++;
	}
	return freesp;
}

/*
 * If we don't have a user specified preallocation size, dynamically increase
 * the preallocation size as the size of the file grows.  Cap the maximum size
 * at a single extent or less if the filesystem is near full. The closer the
 * filesystem is to being full, the smaller the maximum preallocation.
 */
STATIC xfs_fsblock_t
xfs_iomap_prealloc_size(
	struct xfs_inode	*ip,
	int			whichfork,
	loff_t			offset,
	loff_t			count,
	struct xfs_iext_cursor	*icur)
{
	struct xfs_iext_cursor	ncur = *icur;
	struct xfs_bmbt_irec	prev, got;
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	int64_t			freesp;
	xfs_fsblock_t		qblocks;
	xfs_fsblock_t		alloc_blocks = 0;
	xfs_extlen_t		plen;
	int			shift = 0;
	int			qshift = 0;

	/*
	 * As an exception we don't do any preallocation at all if the file is
	 * smaller than the minimum preallocation and we are using the default
	 * dynamic preallocation scheme, as it is likely this is the only write
	 * to the file that is going to be done.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_allocsize_blocks))
		return 0;

	/*
	 * Use the minimum preallocation size for small files or if we are
	 * writing right after a hole.
	 */
	if (XFS_ISIZE(ip) < XFS_FSB_TO_B(mp, mp->m_dalign) ||
	    !xfs_iext_prev_extent(ifp, &ncur, &prev) ||
	    prev.br_startoff + prev.br_blockcount < offset_fsb)
		return mp->m_allocsize_blocks;

	/*
	 * Take the size of the preceding data extents as the basis for the
	 * preallocation size. Note that we don't care if the previous extents
	 * are written or not.
	 */
	plen = prev.br_blockcount;
	while (xfs_iext_prev_extent(ifp, &ncur, &got)) {
		if (plen > XFS_MAX_BMBT_EXTLEN / 2 ||
		    isnullstartblock(got.br_startblock) ||
		    got.br_startoff + got.br_blockcount != prev.br_startoff ||
		    got.br_startblock + got.br_blockcount != prev.br_startblock)
			break;
		plen += got.br_blockcount;
		prev = got;
	}

	/*
	 * If the size of the extents is greater than half the maximum extent
	 * length, then use the current offset as the basis.  This ensures that
	 * for large files the preallocation size always extends to
	 * XFS_MAX_BMBT_EXTLEN rather than falling short due to things like
	 * stripe unit/width alignment of real extents.
	 */
	alloc_blocks = plen * 2;
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_B_TO_FSB(mp, offset);
	qblocks = alloc_blocks;

	/*
	 * XFS_MAX_BMBT_EXTLEN is not a power of two value but we round the
	 * prealloc down to the nearest power of two value after throttling.
	 * To prevent the round down from unconditionally reducing the maximum
	 * supported prealloc size, we round up first, apply appropriate
	 * throttling, round down and cap the value to XFS_MAX_BMBT_EXTLEN.
	 */
	alloc_blocks = XFS_FILEOFF_MIN(roundup_pow_of_two(XFS_MAX_BMBT_EXTLEN),
				       alloc_blocks);

	if (unlikely(XFS_IS_REALTIME_INODE(ip)))
		freesp = xfs_rtbxlen_to_blen(mp,
				xfs_iomap_freesp(mp, XC_FREE_RTEXTENTS,
					mp->m_low_rtexts, &shift));
	else
		freesp = xfs_iomap_freesp(mp, XC_FREE_BLOCKS, mp->m_low_space,
				&shift);

	/*
	 * Check each quota to cap the prealloc size, provide a shift value to
	 * throttle with and adjust amount of available space.
	 */
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_USER, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_USER, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_GROUP, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_GROUP, &qblocks, &qshift,
					&freesp);
	if (xfs_quota_need_throttle(ip, XFS_DQTYPE_PROJ, alloc_blocks))
		xfs_quota_calc_throttle(ip, XFS_DQTYPE_PROJ, &qblocks, &qshift,
					&freesp);

	/*
	 * The final prealloc size is set to the minimum of free space available
	 * in each of the quotas and the overall filesystem.
	 *
	 * The shift throttle value is set to the maximum value as determined by
	 * the global low free space values and per-quota low free space values.
	 */
	alloc_blocks = min(alloc_blocks, qblocks);
	shift = max(shift, qshift);

	if (shift)
		alloc_blocks >>= shift;
	/*
	 * rounddown_pow_of_two() returns an undefined result if we pass in
	 * alloc_blocks = 0.
	 */
	if (alloc_blocks)
		alloc_blocks = rounddown_pow_of_two(alloc_blocks);
	if (alloc_blocks > XFS_MAX_BMBT_EXTLEN)
		alloc_blocks = XFS_MAX_BMBT_EXTLEN;

	/*
	 * If we are still trying to allocate more space than is
	 * available, squash the prealloc hard. This can happen if we
	 * have a large file on a small filesystem and the above
	 * lowspace thresholds are smaller than XFS_MAX_BMBT_EXTLEN.
	 */
	while (alloc_blocks && alloc_blocks >= freesp)
		alloc_blocks >>= 4;
	if (alloc_blocks < mp->m_allocsize_blocks)
		alloc_blocks = mp->m_allocsize_blocks;
	trace_xfs_iomap_prealloc_size(ip, alloc_blocks, shift,
				      mp->m_allocsize_blocks);
	return alloc_blocks;
}
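/*
 * Worked example (illustrative only): a contiguous run of 200 previously
 * written blocks doubles to a 400 block candidate; a throttle shift of 2
 * cuts that to 100 blocks and rounddown_pow_of_two() turns it into 64
 * blocks.  The result is then squashed further while it still exceeds the
 * free space estimate and is finally never allowed to drop below
 * mp->m_allocsize_blocks.
 */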

int
xfs_iomap_write_unwritten(
	xfs_inode_t	*ip,
	xfs_off_t	offset,
	xfs_off_t	count,
	bool		update_isize)
{
	xfs_mount_t	*mp = ip->i_mount;
	xfs_fileoff_t	offset_fsb;
	xfs_filblks_t	count_fsb;
	xfs_filblks_t	numblks_fsb;
	int		nimaps;
	xfs_trans_t	*tp;
	xfs_bmbt_irec_t	imap;
	struct inode	*inode = VFS_I(ip);
	xfs_fsize_t	i_size;
	uint		resblks;
	int		error;

	trace_xfs_unwritten_convert(ip, offset, count);

	offset_fsb = XFS_B_TO_FSBT(mp, offset);
	count_fsb = XFS_B_TO_FSB(mp, (xfs_ufsize_t)offset + count);
	count_fsb = (xfs_filblks_t)(count_fsb - offset_fsb);

	/*
	 * Reserve enough blocks in this transaction for two complete extent
	 * btree splits.  We may be converting the middle part of an unwritten
	 * extent and in this case we will insert two new extents in the btree
	 * each of which could cause a full split.
	 *
	 * This reservation amount will be used in the first call to
	 * xfs_bmbt_split() to select an AG with enough space to satisfy the
	 * rest of the operation.
	 */
	resblks = XFS_DIOSTRAT_SPACE_RES(mp, 0) << 1;

	/* Attach dquots so that bmbt splits are accounted correctly. */
	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	do {
		/*
		 * Set up a transaction to convert the range of extents
		 * from unwritten to real. Do allocations in a loop until
		 * we have covered the range passed in.
		 *
		 * Note that we can't risk recursing back into the filesystem
		 * here as we might be asked to write out the same inode that we
		 * complete here and might deadlock on the iolock.
		 */
		error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, resblks,
				0, true, &tp);
		if (error)
			return error;

		error = xfs_iext_count_extend(tp, ip, XFS_DATA_FORK,
				XFS_IEXT_WRITE_UNWRITTEN_CNT);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Modify the unwritten extent state of the buffer.
		 */
		nimaps = 1;
		error = xfs_bmapi_write(tp, ip, offset_fsb, count_fsb,
					XFS_BMAPI_CONVERT, resblks, &imap,
					&nimaps);
		if (error)
			goto error_on_bmapi_transaction;

		/*
		 * Log the updated inode size as we go.  We have to be careful
		 * to only log it up to the actual write offset if it is
		 * halfway into a block.
		 */
		i_size = XFS_FSB_TO_B(mp, offset_fsb + count_fsb);
		if (i_size > offset + count)
			i_size = offset + count;
		if (update_isize && i_size > i_size_read(inode))
			i_size_write(inode, i_size);
		i_size = xfs_new_eof(ip, i_size);
		if (i_size) {
			ip->i_disk_size = i_size;
			xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
		}

		error = xfs_trans_commit(tp);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;

		if (unlikely(!xfs_valid_startblock(ip, imap.br_startblock))) {
			xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
			return xfs_alert_fsblock_zero(ip, &imap);
		}

		if ((numblks_fsb = imap.br_blockcount) == 0) {
			/*
			 * The numblks_fsb value should always get
			 * smaller, otherwise the loop is stuck.
			 */
			ASSERT(imap.br_blockcount);
			break;
		}
		offset_fsb += numblks_fsb;
		count_fsb -= numblks_fsb;
	} while (count_fsb > 0);

	return 0;

error_on_bmapi_transaction:
	xfs_trans_cancel(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	return error;
}

static inline bool
imap_needs_alloc(
	struct inode		*inode,
	unsigned		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	/* don't allocate blocks when just zeroing */
	if (flags & IOMAP_ZERO)
		return false;
	if (!nimaps ||
	    imap->br_startblock == HOLESTARTBLOCK ||
	    imap->br_startblock == DELAYSTARTBLOCK)
		return true;
	/* we convert unwritten extents before copying the data for DAX */
	if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)
		return true;
	return false;
}

static inline bool
imap_needs_cow(
	struct xfs_inode	*ip,
	unsigned int		flags,
	struct xfs_bmbt_irec	*imap,
	int			nimaps)
{
	if (!xfs_is_cow_inode(ip))
		return false;

	/* when zeroing we don't have to COW holes or unwritten extents */
	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
		if (!nimaps ||
		    imap->br_startblock == HOLESTARTBLOCK ||
		    imap->br_state == XFS_EXT_UNWRITTEN)
			return false;
	}

	return true;
}

/*
 * Extents that are not yet cached require exclusive access, so don't block
 * for IOMAP_NOWAIT.
 *
 * This is basically an opencoded xfs_ilock_data_map_shared() call, but with
 * support for IOMAP_NOWAIT.
763 */ 764 static int 765 xfs_ilock_for_iomap( 766 struct xfs_inode *ip, 767 unsigned flags, 768 unsigned *lockmode) 769 { 770 if (flags & IOMAP_NOWAIT) { 771 if (xfs_need_iread_extents(&ip->i_df)) 772 return -EAGAIN; 773 if (!xfs_ilock_nowait(ip, *lockmode)) 774 return -EAGAIN; 775 } else { 776 if (xfs_need_iread_extents(&ip->i_df)) 777 *lockmode = XFS_ILOCK_EXCL; 778 xfs_ilock(ip, *lockmode); 779 } 780 781 return 0; 782 } 783 784 /* 785 * Check that the imap we are going to return to the caller spans the entire 786 * range that the caller requested for the IO. 787 */ 788 static bool 789 imap_spans_range( 790 struct xfs_bmbt_irec *imap, 791 xfs_fileoff_t offset_fsb, 792 xfs_fileoff_t end_fsb) 793 { 794 if (imap->br_startoff > offset_fsb) 795 return false; 796 if (imap->br_startoff + imap->br_blockcount < end_fsb) 797 return false; 798 return true; 799 } 800 801 static int 802 xfs_direct_write_iomap_begin( 803 struct inode *inode, 804 loff_t offset, 805 loff_t length, 806 unsigned flags, 807 struct iomap *iomap, 808 struct iomap *srcmap) 809 { 810 struct xfs_inode *ip = XFS_I(inode); 811 struct xfs_mount *mp = ip->i_mount; 812 struct xfs_bmbt_irec imap, cmap; 813 xfs_fileoff_t offset_fsb = XFS_B_TO_FSBT(mp, offset); 814 xfs_fileoff_t end_fsb = xfs_iomap_end_fsb(mp, offset, length); 815 int nimaps = 1, error = 0; 816 bool shared = false; 817 u16 iomap_flags = 0; 818 unsigned int lockmode; 819 u64 seq; 820 821 ASSERT(flags & (IOMAP_WRITE | IOMAP_ZERO)); 822 823 if (xfs_is_shutdown(mp)) 824 return -EIO; 825 826 /* 827 * Writes that span EOF might trigger an IO size update on completion, 828 * so consider them to be dirty for the purposes of O_DSYNC even if 829 * there is no other metadata changes pending or have been made here. 830 */ 831 if (offset + length > i_size_read(inode)) 832 iomap_flags |= IOMAP_F_DIRTY; 833 834 /* HW-offload atomics are always used in this path */ 835 if (flags & IOMAP_ATOMIC) 836 iomap_flags |= IOMAP_F_ATOMIC_BIO; 837 838 /* 839 * COW writes may allocate delalloc space or convert unwritten COW 840 * extents, so we need to make sure to take the lock exclusively here. 841 */ 842 if (xfs_is_cow_inode(ip)) 843 lockmode = XFS_ILOCK_EXCL; 844 else 845 lockmode = XFS_ILOCK_SHARED; 846 847 relock: 848 error = xfs_ilock_for_iomap(ip, flags, &lockmode); 849 if (error) 850 return error; 851 852 /* 853 * The reflink iflag could have changed since the earlier unlocked 854 * check, check if it again and relock if needed. 
	 */
	if (xfs_is_cow_inode(ip) && lockmode == XFS_ILOCK_SHARED) {
		xfs_iunlock(ip, lockmode);
		lockmode = XFS_ILOCK_EXCL;
		goto relock;
	}

	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (error)
		goto out_unlock;

	if (imap_needs_cow(ip, flags, &imap, nimaps)) {
		error = -EAGAIN;
		if (flags & IOMAP_NOWAIT)
			goto out_unlock;

		/* may drop and re-acquire the ilock */
		error = xfs_reflink_allocate_cow(ip, &imap, &cmap, &shared,
				&lockmode,
				(flags & IOMAP_DIRECT) || IS_DAX(inode));
		if (error)
			goto out_unlock;
		if (shared)
			goto out_found_cow;
		end_fsb = imap.br_startoff + imap.br_blockcount;
		length = XFS_FSB_TO_B(mp, end_fsb) - offset;
	}

	if (imap_needs_alloc(inode, flags, &imap, nimaps))
		goto allocate_blocks;

	/*
	 * NOWAIT and OVERWRITE I/O need to span the entire requested I/O with
	 * a single map so that we avoid partial IO failures due to the rest of
	 * the I/O range not covered by this map triggering an EAGAIN condition
	 * when it is subsequently mapped and aborting the I/O.
	 */
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY)) {
		error = -EAGAIN;
		if (!imap_spans_range(&imap, offset_fsb, end_fsb))
			goto out_unlock;
	}

	/*
	 * For overwrite only I/O, we cannot convert unwritten extents without
	 * requiring sub-block zeroing.  This can only be done under an
	 * exclusive IOLOCK, hence return -EAGAIN if this is not a written
	 * extent to tell the caller to try again.
	 */
	if (flags & IOMAP_OVERWRITE_ONLY) {
		error = -EAGAIN;
		if (imap.br_state != XFS_EXT_NORM &&
		    ((offset | length) & mp->m_blockmask))
			goto out_unlock;
	}

	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

allocate_blocks:
	error = -EAGAIN;
	if (flags & (IOMAP_NOWAIT | IOMAP_OVERWRITE_ONLY))
		goto out_unlock;

	/*
	 * We cap the maximum length we map to a sane size to keep the chunks
	 * of work done here somewhat symmetric with the work writeback does.
	 * This is a completely arbitrary number pulled out of thin air as a
	 * best guess for initial testing.
	 *
	 * Note that the value needs to be less than 32 bits wide until the
	 * lower level functions are updated.
	 */
	length = min_t(loff_t, length, 1024 * PAGE_SIZE);
	end_fsb = xfs_iomap_end_fsb(mp, offset, length);

	if (offset + length > XFS_ISIZE(ip))
		end_fsb = xfs_iomap_eof_align_last_fsb(ip, end_fsb);
	else if (nimaps && imap.br_startblock == HOLESTARTBLOCK)
		end_fsb = min(end_fsb, imap.br_startoff + imap.br_blockcount);
	xfs_iunlock(ip, lockmode);

	error = xfs_iomap_write_direct(ip, offset_fsb, end_fsb - offset_fsb,
			flags, &imap, &seq);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 iomap_flags | IOMAP_F_NEW, seq);

out_found_cow:
	length = XFS_FSB_TO_B(mp, cmap.br_startoff + cmap.br_blockcount);
	trace_xfs_iomap_found(ip, offset, length - offset, XFS_COW_FORK, &cmap);
	if (imap.br_startblock != HOLESTARTBLOCK) {
		seq = xfs_iomap_inode_sequence(ip, 0);
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0, seq);
		if (error)
			goto out_unlock;
	}
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, IOMAP_F_SHARED, seq);

out_unlock:
	if (lockmode)
		xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_direct_write_iomap_ops = {
	.iomap_begin		= xfs_direct_write_iomap_begin,
};
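/*
 * For reference: this is the ops table the XFS direct I/O write paths hand
 * to the generic iomap direct I/O code (see xfs_file.c), and
 * xfs_dax_write_iomap_ops further down reuses the same ->iomap_begin for
 * DAX writes.
 */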

#ifdef CONFIG_XFS_RT
/*
 * This is really simple.  The space has already been reserved before taking
 * the IOLOCK; the actual block allocation is done just before submitting the
 * bio and only recorded in the extent map on I/O completion.
 */
static int
xfs_zoned_direct_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	int			error;

	ASSERT(!(flags & IOMAP_OVERWRITE_ONLY));

	/*
	 * Needs to be pushed down into the allocator so that only writes into
	 * a single zone can be supported.
	 */
	if (flags & IOMAP_NOWAIT)
		return -EAGAIN;

	/*
	 * Ensure the extent list is in memory so that we don't have to read it
	 * from the I/O completion handler.
	 */
	if (xfs_need_iread_extents(&ip->i_df)) {
		xfs_ilock(ip, XFS_ILOCK_EXCL);
		error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		if (error)
			return error;
	}

	iomap->type = IOMAP_MAPPED;
	iomap->flags = IOMAP_F_DIRTY;
	iomap->bdev = ip->i_mount->m_rtdev_targp->bt_bdev;
	iomap->offset = offset;
	iomap->length = length;
	iomap->flags |= IOMAP_F_ANON_WRITE;
	return 0;
}

const struct iomap_ops xfs_zoned_direct_write_iomap_ops = {
	.iomap_begin		= xfs_zoned_direct_write_iomap_begin,
};
#endif /* CONFIG_XFS_RT */

static int
xfs_dax_write_iomap_end(
	struct inode		*inode,
	loff_t			pos,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	struct xfs_inode	*ip = XFS_I(inode);

	if (!xfs_is_cow_inode(ip))
		return 0;

	if (!written)
		return xfs_reflink_cancel_cow_range(ip, pos, length, true);

	return xfs_reflink_end_cow(ip, pos, written);
}

const struct iomap_ops xfs_dax_write_iomap_ops = {
	.iomap_begin	= xfs_direct_write_iomap_begin,
	.iomap_end	= xfs_dax_write_iomap_end,
};

/*
 * Convert a hole to a delayed allocation.
 */
static void
xfs_bmap_add_extent_hole_delay(
	struct xfs_inode	*ip,	/* incore inode pointer */
	int			whichfork,
	struct xfs_iext_cursor	*icur,
	struct xfs_bmbt_irec	*new)	/* new data to add to file extents */
{
	struct xfs_ifork	*ifp;	/* inode fork pointer */
	xfs_bmbt_irec_t		left;	/* left neighbor extent entry */
	xfs_filblks_t		newlen = 0;	/* new indirect size */
	xfs_filblks_t		oldlen = 0;	/* old indirect size */
	xfs_bmbt_irec_t		right;	/* right neighbor extent entry */
	uint32_t		state = xfs_bmap_fork_to_state(whichfork);
	xfs_filblks_t		temp;	/* temp for indirect calculations */

	ifp = xfs_ifork_ptr(ip, whichfork);
	ASSERT(isnullstartblock(new->br_startblock));

	/*
	 * Check and set flags if this segment has a left neighbor
	 */
	if (xfs_iext_peek_prev_extent(ifp, icur, &left)) {
		state |= BMAP_LEFT_VALID;
		if (isnullstartblock(left.br_startblock))
			state |= BMAP_LEFT_DELAY;
	}

	/*
	 * Check and set flags if the current (right) segment exists.
	 * If it doesn't exist, we're converting the hole at end-of-file.
	 */
	if (xfs_iext_get_extent(ifp, icur, &right)) {
		state |= BMAP_RIGHT_VALID;
		if (isnullstartblock(right.br_startblock))
			state |= BMAP_RIGHT_DELAY;
	}

	/*
	 * Set contiguity flags on the left and right neighbors.
	 * Don't let extents get too large, even if the pieces are contiguous.
	 */
	if ((state & BMAP_LEFT_VALID) && (state & BMAP_LEFT_DELAY) &&
	    left.br_startoff + left.br_blockcount == new->br_startoff &&
	    left.br_blockcount + new->br_blockcount <= XFS_MAX_BMBT_EXTLEN)
		state |= BMAP_LEFT_CONTIG;

	if ((state & BMAP_RIGHT_VALID) && (state & BMAP_RIGHT_DELAY) &&
	    new->br_startoff + new->br_blockcount == right.br_startoff &&
	    new->br_blockcount + right.br_blockcount <= XFS_MAX_BMBT_EXTLEN &&
	    (!(state & BMAP_LEFT_CONTIG) ||
	     (left.br_blockcount + new->br_blockcount +
	      right.br_blockcount <= XFS_MAX_BMBT_EXTLEN)))
		state |= BMAP_RIGHT_CONTIG;

	/*
	 * Switch out based on the contiguity flags.
	 */
	switch (state & (BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG)) {
	case BMAP_LEFT_CONTIG | BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with delayed allocations
		 * on the left and on the right.
		 * Merge all three into a single extent record.
		 */
		temp = left.br_blockcount + new->br_blockcount +
			right.br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_startblock = nullstartblock(newlen);
		left.br_blockcount = temp;

		xfs_iext_remove(ip, icur, state);
		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_LEFT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the left.
		 * Merge the new allocation with the left neighbor.
		 */
		temp = left.br_blockcount + new->br_blockcount;

		oldlen = startblockval(left.br_startblock) +
			startblockval(new->br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		left.br_blockcount = temp;
		left.br_startblock = nullstartblock(newlen);

		xfs_iext_prev(ifp, icur);
		xfs_iext_update_extent(ip, state, icur, &left);
		break;

	case BMAP_RIGHT_CONTIG:
		/*
		 * New allocation is contiguous with a delayed allocation
		 * on the right.
		 * Merge the new allocation with the right neighbor.
		 */
		temp = new->br_blockcount + right.br_blockcount;
		oldlen = startblockval(new->br_startblock) +
			startblockval(right.br_startblock);
		newlen = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
					 oldlen);
		right.br_startoff = new->br_startoff;
		right.br_startblock = nullstartblock(newlen);
		right.br_blockcount = temp;
		xfs_iext_update_extent(ip, state, icur, &right);
		break;

	case 0:
		/*
		 * New allocation is not contiguous with another
		 * delayed allocation.
		 * Insert a new entry.
		 */
		oldlen = newlen = 0;
		xfs_iext_insert(ip, icur, new, state);
		break;
	}
	if (oldlen != newlen) {
		ASSERT(oldlen > newlen);
		xfs_add_fdblocks(ip->i_mount, oldlen - newlen);

		/*
		 * Nothing to do for disk quota accounting here.
		 */
		xfs_mod_delalloc(ip, 0, (int64_t)newlen - oldlen);
	}
}
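/*
 * Illustrative note: when a merge shrinks the worst-case indirect block
 * reservation, the difference between the old and new indlen is returned to
 * the free block counter.  For instance, if the extents being merged had
 * reserved 8 blocks in total for bmbt growth and the merged extent's worst
 * case is 5 blocks, the 3 block difference goes back via xfs_add_fdblocks()
 * and the delalloc accounting is reduced to match.
 */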

/*
 * Add a delayed allocation extent to an inode. Blocks are reserved from the
 * global pool and the extent inserted into the inode in-core extent tree.
 *
 * On entry, got refers to the first extent beyond the offset of the extent to
 * allocate or eof is specified if no such extent exists. On return, got refers
 * to the extent record that was inserted to the inode fork.
 *
 * Note that the allocated extent may have been merged with contiguous extents
 * during insertion into the inode fork. Thus, got does not reflect the current
 * state of the inode fork on return. If necessary, the caller can use lastx to
 * look up the updated record in the inode fork.
 */
static int
xfs_bmapi_reserve_delalloc(
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fileoff_t		off,
	xfs_filblks_t		len,
	xfs_filblks_t		prealloc,
	struct xfs_bmbt_irec	*got,
	struct xfs_iext_cursor	*icur,
	int			eof)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_ifork	*ifp = xfs_ifork_ptr(ip, whichfork);
	xfs_extlen_t		alen;
	xfs_extlen_t		indlen;
	uint64_t		fdblocks;
	int			error;
	xfs_fileoff_t		aoff;
	bool			use_cowextszhint =
					whichfork == XFS_COW_FORK && !prealloc;

retry:
	/*
	 * Cap the alloc length. Keep track of prealloc so we know whether to
	 * tag the inode before we return.
	 */
	aoff = off;
	alen = XFS_FILBLKS_MIN(len + prealloc, XFS_MAX_BMBT_EXTLEN);
	if (!eof)
		alen = XFS_FILBLKS_MIN(alen, got->br_startoff - aoff);
	if (prealloc && alen >= len)
		prealloc = alen - len;

	/*
	 * If we're targeting the COW fork but aren't creating a speculative
	 * posteof preallocation, try to expand the reservation to align with
	 * the COW extent size hint if there's sufficient free space.
	 *
	 * Unlike the data fork, the CoW cancellation functions will free all
	 * the reservations at inactivation, so we don't require that every
	 * delalloc reservation have a dirty pagecache.
	 */
	if (use_cowextszhint) {
		struct xfs_bmbt_irec	prev;
		xfs_extlen_t		extsz = xfs_get_cowextsz_hint(ip);

		if (!xfs_iext_peek_prev_extent(ifp, icur, &prev))
			prev.br_startoff = NULLFILEOFF;

		error = xfs_bmap_extsize_align(mp, got, &prev, extsz, 0, eof,
					       1, 0, &aoff, &alen);
		ASSERT(!error);
	}

	/*
	 * Make a transaction-less quota reservation for delayed allocation
	 * blocks.  This number gets adjusted later.  We return if we haven't
	 * allocated blocks already inside this loop.
	 */
	error = xfs_quota_reserve_blkres(ip, alen);
	if (error)
		goto out;

	/*
	 * Split changing sb for alen and indlen since they could be coming
	 * from different places.
	 */
	indlen = (xfs_extlen_t)xfs_bmap_worst_indlen(ip, alen);
	ASSERT(indlen > 0);

	fdblocks = indlen;
	if (XFS_IS_REALTIME_INODE(ip)) {
		ASSERT(!xfs_is_zoned_inode(ip));
		error = xfs_dec_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
		if (error)
			goto out_unreserve_quota;
	} else {
		fdblocks += alen;
	}

	error = xfs_dec_fdblocks(mp, fdblocks, false);
	if (error)
		goto out_unreserve_frextents;

	ip->i_delayed_blks += alen;
	xfs_mod_delalloc(ip, alen, indlen);

	got->br_startoff = aoff;
	got->br_startblock = nullstartblock(indlen);
	got->br_blockcount = alen;
	got->br_state = XFS_EXT_NORM;

	xfs_bmap_add_extent_hole_delay(ip, whichfork, icur, got);

	/*
	 * Tag the inode if blocks were preallocated. Note that COW fork
	 * preallocation can occur at the start or end of the extent, even when
	 * prealloc == 0, so we must also check the aligned offset and length.
	 */
	if (whichfork == XFS_DATA_FORK && prealloc)
		xfs_inode_set_eofblocks_tag(ip);
	if (whichfork == XFS_COW_FORK && (prealloc || aoff < off || alen > len))
		xfs_inode_set_cowblocks_tag(ip);

	return 0;

out_unreserve_frextents:
	if (XFS_IS_REALTIME_INODE(ip))
		xfs_add_frextents(mp, xfs_blen_to_rtbxlen(mp, alen));
out_unreserve_quota:
	if (XFS_IS_QUOTA_ON(mp))
		xfs_quota_unreserve_blkres(ip, alen);
out:
	if (error == -ENOSPC || error == -EDQUOT) {
		trace_xfs_delalloc_enospc(ip, off, len);

		if (prealloc || use_cowextszhint) {
			/* retry without any preallocation */
			use_cowextszhint = false;
			prealloc = 0;
			goto retry;
		}
	}
	return error;
}

static int
xfs_zoned_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct iomap_iter	*iter =
		container_of(iomap, struct iomap_iter, iomap);
	struct xfs_zone_alloc_ctx *ac = iter->private;
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	u16			iomap_flags = IOMAP_F_SHARED;
	unsigned int		lockmode = XFS_ILOCK_EXCL;
	xfs_filblks_t		count_fsb;
	xfs_extlen_t		indlen;
	struct xfs_bmbt_irec	got;
	struct xfs_iext_cursor	icur;
	int			error = 0;

	ASSERT(!xfs_get_extsz_hint(ip));
	ASSERT(!(flags & IOMAP_UNSHARE));
	ASSERT(ac);

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * For zeroing operations check if there is any data to zero first.
	 *
	 * For regular writes we always need to allocate new blocks, but need to
	 * provide the source mapping when the range is unaligned to support
	 * read-modify-write of the whole block in the page cache.
	 *
	 * In either case we need to limit the reported range to the boundaries
	 * of the source map in the data fork.
	 */
	if (!IS_ALIGNED(offset, mp->m_sb.sb_blocksize) ||
	    !IS_ALIGNED(offset + count, mp->m_sb.sb_blocksize) ||
	    (flags & IOMAP_ZERO)) {
		struct xfs_bmbt_irec	smap;
		struct xfs_iext_cursor	scur;

		if (!xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &scur,
				&smap))
			smap.br_startoff = end_fsb; /* fake hole until EOF */
		if (smap.br_startoff > offset_fsb) {
			/*
			 * We never need to allocate blocks for zeroing a hole.
			 */
			if (flags & IOMAP_ZERO) {
				xfs_hole_to_iomap(ip, iomap, offset_fsb,
						smap.br_startoff);
				goto out_unlock;
			}
			end_fsb = min(end_fsb, smap.br_startoff);
		} else {
			end_fsb = min(end_fsb,
				smap.br_startoff + smap.br_blockcount);
			xfs_trim_extent(&smap, offset_fsb,
					end_fsb - offset_fsb);
			error = xfs_bmbt_to_iomap(ip, srcmap, &smap, flags, 0,
					xfs_iomap_inode_sequence(ip, 0));
			if (error)
				goto out_unlock;
		}
	}

	if (!ip->i_cowfp)
		xfs_ifork_init_cow(ip);

	if (!xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &got))
		got.br_startoff = end_fsb;
	if (got.br_startoff <= offset_fsb) {
		trace_xfs_reflink_cow_found(ip, &got);
		goto done;
	}

	/*
	 * Cap the maximum length to keep the chunks of work done here somewhat
	 * symmetric with the work writeback does.
	 */
	end_fsb = min(end_fsb, got.br_startoff);
	count_fsb = min3(end_fsb - offset_fsb, XFS_MAX_BMBT_EXTLEN,
			 XFS_B_TO_FSB(mp, 1024 * PAGE_SIZE));

	/*
	 * The block reservation is supposed to cover all blocks that the
	 * operation could possibly write, but there is a nasty corner case
	 * where blocks could be stolen from underneath us:
	 *
	 *  1) while this thread iterates over a larger buffered write,
	 *  2) another thread is causing a write fault that calls into
	 *     ->page_mkwrite in the range this thread writes to, using up the
	 *     delalloc reservation created by a previous call to this function.
	 *  3) another thread does direct I/O on the range that the write fault
	 *     happened on, which causes writeback of the dirty data.
	 *  4) this then sets the stale flag, which cuts the current iomap
	 *     iteration short, causing the new call to ->iomap_begin that gets
	 *     us here again, but now without a sufficient reservation.
	 *
	 * This is a very unusual I/O pattern, and nothing but generic/095 is
	 * known to hit it. There's not really much we can do here, so turn this
	 * into a short write.
	 */
	if (count_fsb > ac->reserved_blocks) {
		xfs_warn_ratelimited(mp,
"Short write on ino 0x%llx comm %.20s due to three-way race with write fault and direct I/O",
			ip->i_ino, current->comm);
		count_fsb = ac->reserved_blocks;
		if (!count_fsb) {
			error = -EIO;
			goto out_unlock;
		}
	}

	error = xfs_quota_reserve_blkres(ip, count_fsb);
	if (error)
		goto out_unlock;

	indlen = xfs_bmap_worst_indlen(ip, count_fsb);
	error = xfs_dec_fdblocks(mp, indlen, false);
	if (error)
		goto out_unlock;
	ip->i_delayed_blks += count_fsb;
	xfs_mod_delalloc(ip, count_fsb, indlen);

	got.br_startoff = offset_fsb;
	got.br_startblock = nullstartblock(indlen);
	got.br_blockcount = count_fsb;
	got.br_state = XFS_EXT_NORM;
	xfs_bmap_add_extent_hole_delay(ip, XFS_COW_FORK, &icur, &got);
	ac->reserved_blocks -= count_fsb;
	iomap_flags |= IOMAP_F_NEW;

	trace_xfs_iomap_alloc(ip, offset, XFS_FSB_TO_B(mp, count_fsb),
			XFS_COW_FORK, &got);
done:
	error = xfs_bmbt_to_iomap(ip, iomap, &got, flags, iomap_flags,
			xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED));
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static int
xfs_buffered_write_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			count,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, count);
	struct xfs_bmbt_irec	imap, cmap;
	struct xfs_iext_cursor	icur, ccur;
	xfs_fsblock_t		prealloc_blocks = 0;
	bool			eof = false, cow_eof = false, shared = false;
	int			allocfork = XFS_DATA_FORK;
	int			error = 0;
	unsigned int		lockmode = XFS_ILOCK_EXCL;
	unsigned int		iomap_flags = 0;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	if (xfs_is_zoned_inode(ip))
		return xfs_zoned_buffered_write_iomap_begin(inode, offset,
				count, flags, iomap, srcmap);

	/* we can't use delayed allocations when using extent size hints */
	if (xfs_get_extsz_hint(ip))
		return xfs_direct_write_iomap_begin(inode, offset, count,
				flags, iomap, srcmap);

	error = xfs_qm_dqattach(ip);
	if (error)
		return error;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;

	if (XFS_IS_CORRUPT(mp, !xfs_ifork_has_extents(&ip->i_df)) ||
	    XFS_TEST_ERROR(false, mp, XFS_ERRTAG_BMAPIFORMAT)) {
		xfs_bmap_mark_sick(ip, XFS_DATA_FORK);
		error = -EFSCORRUPTED;
		goto out_unlock;
	}

	XFS_STATS_INC(mp, xs_blk_mapw);

	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	/*
	 * Search the data fork first to look up our source mapping.  We
	 * always need the data fork map, as we have to return it to the
	 * iomap code so that the higher level write code can read data in to
	 * perform read-modify-write cycles for unaligned writes.
	 */
	eof = !xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap);
	if (eof)
		imap.br_startoff = end_fsb; /* fake hole until the end */

	/* We never need to allocate blocks for zeroing or unsharing a hole. */
	if ((flags & (IOMAP_UNSHARE | IOMAP_ZERO)) &&
	    imap.br_startoff > offset_fsb) {
		xfs_hole_to_iomap(ip, iomap, offset_fsb, imap.br_startoff);
		goto out_unlock;
	}

	/*
	 * For zeroing, trim a delalloc extent that extends beyond the EOF
	 * block.  If it starts beyond the EOF block, convert it to an
	 * unwritten extent.
	 */
	if ((flags & IOMAP_ZERO) && imap.br_startoff <= offset_fsb &&
	    isnullstartblock(imap.br_startblock)) {
		xfs_fileoff_t	eof_fsb = XFS_B_TO_FSB(mp, XFS_ISIZE(ip));

		if (offset_fsb >= eof_fsb)
			goto convert_delay;
		if (end_fsb > eof_fsb) {
			end_fsb = eof_fsb;
			xfs_trim_extent(&imap, offset_fsb,
					end_fsb - offset_fsb);
		}
	}

	/*
	 * Search the COW fork extent list even if we did not find a data fork
	 * extent.  This serves two purposes: first this implements the
	 * speculative preallocation using cowextsize, so that we also unshare
	 * blocks adjacent to shared blocks instead of just the shared blocks
	 * themselves.  Second the lookup in the extent list is generally faster
	 * than going out to the shared extent tree.
	 */
	if (xfs_is_cow_inode(ip)) {
		if (!ip->i_cowfp) {
			ASSERT(!xfs_is_reflink_inode(ip));
			xfs_ifork_init_cow(ip);
		}
		cow_eof = !xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb,
				&ccur, &cmap);
		if (!cow_eof && cmap.br_startoff <= offset_fsb) {
			trace_xfs_reflink_cow_found(ip, &cmap);
			goto found_cow;
		}
	}

	if (imap.br_startoff <= offset_fsb) {
		/*
		 * For reflink files we may need a delalloc reservation when
		 * overwriting shared extents.  This includes zeroing of
		 * existing extents that contain data.
		 */
		if (!xfs_is_cow_inode(ip) ||
		    ((flags & IOMAP_ZERO) && imap.br_state != XFS_EXT_NORM)) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);

		/* Trim the mapping to the nearest shared extent boundary. */
		error = xfs_bmap_trim_cow(ip, &imap, &shared);
		if (error)
			goto out_unlock;

		/* Not shared?  Just report the (potentially capped) extent. */
		if (!shared) {
			trace_xfs_iomap_found(ip, offset, count, XFS_DATA_FORK,
					&imap);
			goto found_imap;
		}

		/*
		 * Fork all the shared blocks from our write offset until the
		 * end of the extent.
		 */
		allocfork = XFS_COW_FORK;
		end_fsb = imap.br_startoff + imap.br_blockcount;
	} else {
		/*
		 * We cap the maximum length we map here to MAX_WRITEBACK_PAGES
		 * pages to keep the chunks of work done here somewhat
		 * symmetric with the work writeback does.  This is a completely
		 * arbitrary number pulled out of thin air.
		 *
		 * Note that the value needs to be less than 32 bits wide until
		 * the lower level functions are updated.
		 */
		count = min_t(loff_t, count, 1024 * PAGE_SIZE);
		end_fsb = xfs_iomap_end_fsb(mp, offset, count);

		if (xfs_is_always_cow_inode(ip))
			allocfork = XFS_COW_FORK;
	}

	if (eof && offset + count > XFS_ISIZE(ip)) {
		/*
		 * Determine the initial size of the preallocation.
		 * We clean up any extra preallocation when the file is closed.
		 */
		if (xfs_has_allocsize(mp))
			prealloc_blocks = mp->m_allocsize_blocks;
		else if (allocfork == XFS_DATA_FORK)
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &icur);
		else
			prealloc_blocks = xfs_iomap_prealloc_size(ip, allocfork,
						offset, count, &ccur);
		if (prealloc_blocks) {
			xfs_extlen_t	align;
			xfs_off_t	end_offset;
			xfs_fileoff_t	p_end_fsb;

			end_offset = XFS_ALLOC_ALIGN(mp, offset + count - 1);
			p_end_fsb = XFS_B_TO_FSBT(mp, end_offset) +
					prealloc_blocks;

			align = xfs_eof_alignment(ip);
			if (align)
				p_end_fsb = roundup_64(p_end_fsb, align);

			p_end_fsb = min(p_end_fsb,
				XFS_B_TO_FSB(mp, mp->m_super->s_maxbytes));
			ASSERT(p_end_fsb > offset_fsb);
			prealloc_blocks = p_end_fsb - end_fsb;
		}
	}

	/*
	 * Flag newly allocated delalloc blocks with IOMAP_F_NEW so we punch
	 * them out if the write happens to fail.
	 */
	iomap_flags |= IOMAP_F_NEW;
	if (allocfork == XFS_COW_FORK) {
		error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
				end_fsb - offset_fsb, prealloc_blocks, &cmap,
				&ccur, cow_eof);
		if (error)
			goto out_unlock;

		trace_xfs_iomap_alloc(ip, offset, count, allocfork, &cmap);
		goto found_cow;
	}

	error = xfs_bmapi_reserve_delalloc(ip, allocfork, offset_fsb,
			end_fsb - offset_fsb, prealloc_blocks, &imap, &icur,
			eof);
	if (error)
		goto out_unlock;

	trace_xfs_iomap_alloc(ip, offset, count, allocfork, &imap);
found_imap:
	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, iomap_flags, seq);

convert_delay:
	xfs_iunlock(ip, lockmode);
	truncate_pagecache(inode, offset);
	error = xfs_bmapi_convert_delalloc(ip, XFS_DATA_FORK, offset,
					   iomap, NULL);
	if (error)
		return error;

	trace_xfs_iomap_alloc(ip, offset, count, XFS_DATA_FORK, &imap);
	return 0;

found_cow:
	if (imap.br_startoff <= offset_fsb) {
		error = xfs_bmbt_to_iomap(ip, srcmap, &imap, flags, 0,
				xfs_iomap_inode_sequence(ip, 0));
		if (error)
			goto out_unlock;
	} else {
		xfs_trim_extent(&cmap, offset_fsb,
				imap.br_startoff - offset_fsb);
	}

	iomap_flags |= IOMAP_F_SHARED;
	seq = xfs_iomap_inode_sequence(ip, iomap_flags);
	xfs_iunlock(ip, lockmode);
	return xfs_bmbt_to_iomap(ip, iomap, &cmap, flags, iomap_flags, seq);

out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

static void
xfs_buffered_write_delalloc_punch(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	struct iomap		*iomap)
{
	struct iomap_iter	*iter =
		container_of(iomap, struct iomap_iter, iomap);

	xfs_bmap_punch_delalloc_range(XFS_I(inode),
			(iomap->flags & IOMAP_F_SHARED) ?
				XFS_COW_FORK : XFS_DATA_FORK,
			offset, offset + length, iter->private);
}

static int
xfs_buffered_write_iomap_end(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	ssize_t			written,
	unsigned		flags,
	struct iomap		*iomap)
{
	loff_t			start_byte, end_byte;

	/* If we didn't reserve the blocks, we're not allowed to punch them. */
	if (iomap->type != IOMAP_DELALLOC || !(iomap->flags & IOMAP_F_NEW))
		return 0;

	/*
	 * iomap_page_mkwrite() will never fail in a way that requires delalloc
	 * extents that it allocated to be revoked.  Hence never try to release
	 * them here.
	 */
	if (flags & IOMAP_FAULT)
		return 0;

	/* Nothing to do if we've written the entire delalloc extent */
	start_byte = iomap_last_written_block(inode, offset, written);
	end_byte = round_up(offset + length, i_blocksize(inode));
	if (start_byte >= end_byte)
		return 0;

	/* For zeroing operations the callers already hold invalidate_lock. */
	if (flags & (IOMAP_UNSHARE | IOMAP_ZERO)) {
		rwsem_assert_held_write(&inode->i_mapping->invalidate_lock);
		iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
				iomap, xfs_buffered_write_delalloc_punch);
	} else {
		filemap_invalidate_lock(inode->i_mapping);
		iomap_write_delalloc_release(inode, start_byte, end_byte, flags,
				iomap, xfs_buffered_write_delalloc_punch);
		filemap_invalidate_unlock(inode->i_mapping);
	}

	return 0;
}

const struct iomap_ops xfs_buffered_write_iomap_ops = {
	.iomap_begin		= xfs_buffered_write_iomap_begin,
	.iomap_end		= xfs_buffered_write_iomap_end,
};
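/*
 * For reference: this is the ops table the XFS buffered write and zeroing
 * paths hand to the generic iomap code; xfs_zero_range() and
 * xfs_truncate_page() at the bottom of this file pass it to
 * iomap_zero_range() and iomap_truncate_page() respectively.
 */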

static int
xfs_read_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_bmbt_irec	imap;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = xfs_iomap_end_fsb(mp, offset, length);
	int			nimaps = 1, error = 0;
	bool			shared = false;
	unsigned int		lockmode = XFS_ILOCK_SHARED;
	u64			seq;

	ASSERT(!(flags & (IOMAP_WRITE | IOMAP_ZERO)));

	if (xfs_is_shutdown(mp))
		return -EIO;

	error = xfs_ilock_for_iomap(ip, flags, &lockmode);
	if (error)
		return error;
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, 0);
	if (!error && ((flags & IOMAP_REPORT) || IS_DAX(inode)))
		error = xfs_reflink_trim_around_shared(ip, &imap, &shared);
	seq = xfs_iomap_inode_sequence(ip, shared ? IOMAP_F_SHARED : 0);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	trace_xfs_iomap_found(ip, offset, length, XFS_DATA_FORK, &imap);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags,
				 shared ? IOMAP_F_SHARED : 0, seq);
}

const struct iomap_ops xfs_read_iomap_ops = {
	.iomap_begin		= xfs_read_iomap_begin,
};

static int
xfs_seek_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	xfs_fileoff_t		cow_fsb = NULLFILEOFF, data_fsb = NULLFILEOFF;
	struct xfs_iext_cursor	icur;
	struct xfs_bmbt_irec	imap, cmap;
	int			error = 0;
	unsigned		lockmode;
	u64			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_data_map_shared(ip);
	error = xfs_iread_extents(NULL, ip, XFS_DATA_FORK);
	if (error)
		goto out_unlock;

	if (xfs_iext_lookup_extent(ip, &ip->i_df, offset_fsb, &icur, &imap)) {
		/*
		 * If we found a data extent we are done.
		 */
		if (imap.br_startoff <= offset_fsb)
			goto done;
		data_fsb = imap.br_startoff;
	} else {
		/*
		 * Fake a hole until the end of the file.
		 */
		data_fsb = xfs_iomap_end_fsb(mp, offset, length);
	}

	/*
	 * If a COW fork extent covers the hole, report it - capped to the next
	 * data fork extent:
	 */
	if (xfs_inode_has_cow_data(ip) &&
	    xfs_iext_lookup_extent(ip, ip->i_cowfp, offset_fsb, &icur, &cmap))
		cow_fsb = cmap.br_startoff;
	if (cow_fsb != NULLFILEOFF && cow_fsb <= offset_fsb) {
		if (data_fsb < cow_fsb + cmap.br_blockcount)
			end_fsb = min(end_fsb, data_fsb);
		xfs_trim_extent(&cmap, offset_fsb, end_fsb - offset_fsb);
		seq = xfs_iomap_inode_sequence(ip, IOMAP_F_SHARED);
		error = xfs_bmbt_to_iomap(ip, iomap, &cmap, flags,
				IOMAP_F_SHARED, seq);
		/*
		 * This is a COW extent, so we must probe the page cache
		 * because there could be dirty page cache being backed
		 * by this extent.
		 */
		iomap->type = IOMAP_UNWRITTEN;
		goto out_unlock;
	}

	/*
	 * Else report a hole, capped to the next found data or COW extent.
	 */
	if (cow_fsb != NULLFILEOFF && cow_fsb < data_fsb)
		imap.br_blockcount = cow_fsb - offset_fsb;
	else
		imap.br_blockcount = data_fsb - offset_fsb;
	imap.br_startoff = offset_fsb;
	imap.br_startblock = HOLESTARTBLOCK;
	imap.br_state = XFS_EXT_NORM;
done:
	seq = xfs_iomap_inode_sequence(ip, 0);
	xfs_trim_extent(&imap, offset_fsb, end_fsb - offset_fsb);
	error = xfs_bmbt_to_iomap(ip, iomap, &imap, flags, 0, seq);
out_unlock:
	xfs_iunlock(ip, lockmode);
	return error;
}

const struct iomap_ops xfs_seek_iomap_ops = {
	.iomap_begin		= xfs_seek_iomap_begin,
};

static int
xfs_xattr_iomap_begin(
	struct inode		*inode,
	loff_t			offset,
	loff_t			length,
	unsigned		flags,
	struct iomap		*iomap,
	struct iomap		*srcmap)
{
	struct xfs_inode	*ip = XFS_I(inode);
	struct xfs_mount	*mp = ip->i_mount;
	xfs_fileoff_t		offset_fsb = XFS_B_TO_FSBT(mp, offset);
	xfs_fileoff_t		end_fsb = XFS_B_TO_FSB(mp, offset + length);
	struct xfs_bmbt_irec	imap;
	int			nimaps = 1, error = 0;
	unsigned		lockmode;
	int			seq;

	if (xfs_is_shutdown(mp))
		return -EIO;

	lockmode = xfs_ilock_attr_map_shared(ip);

	/* if there is no attribute fork or no extents, return ENOENT */
	if (!xfs_inode_has_attr_fork(ip) || !ip->i_af.if_nextents) {
		error = -ENOENT;
		goto out_unlock;
	}

	ASSERT(ip->i_af.if_format != XFS_DINODE_FMT_LOCAL);
	error = xfs_bmapi_read(ip, offset_fsb, end_fsb - offset_fsb, &imap,
			       &nimaps, XFS_BMAPI_ATTRFORK);
out_unlock:
	seq = xfs_iomap_inode_sequence(ip, IOMAP_F_XATTR);
	xfs_iunlock(ip, lockmode);

	if (error)
		return error;
	ASSERT(nimaps);
	return xfs_bmbt_to_iomap(ip, iomap, &imap, flags, IOMAP_F_XATTR, seq);
}

const struct iomap_ops xfs_xattr_iomap_ops = {
	.iomap_begin		= xfs_xattr_iomap_begin,
};

int
xfs_zero_range(
	struct xfs_inode	*ip,
	loff_t			pos,
	loff_t			len,
	struct xfs_zone_alloc_ctx *ac,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	xfs_assert_ilocked(ip, XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL);

	if (IS_DAX(inode))
		return dax_zero_range(inode, pos, len, did_zero,
				      &xfs_dax_write_iomap_ops);
	return iomap_zero_range(inode, pos, len, did_zero,
				&xfs_buffered_write_iomap_ops, ac);
}

int
xfs_truncate_page(
	struct xfs_inode	*ip,
	loff_t			pos,
	struct xfs_zone_alloc_ctx *ac,
	bool			*did_zero)
{
	struct inode		*inode = VFS_I(ip);

	if (IS_DAX(inode))
		return dax_truncate_page(inode, pos, did_zero,
					 &xfs_dax_write_iomap_ops);
	return iomap_truncate_page(inode, pos, did_zero,
				   &xfs_buffered_write_iomap_ops, ac);
}