1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (c) 2000-2005 Silicon Graphics, Inc. 4 * All Rights Reserved. 5 */ 6 #include "xfs.h" 7 #include "xfs_fs.h" 8 #include "xfs_shared.h" 9 #include "xfs_format.h" 10 #include "xfs_log_format.h" 11 #include "xfs_trans_resv.h" 12 #include "xfs_sb.h" 13 #include "xfs_mount.h" 14 #include "xfs_trans.h" 15 #include "xfs_error.h" 16 #include "xfs_alloc.h" 17 #include "xfs_fsops.h" 18 #include "xfs_trans_space.h" 19 #include "xfs_log.h" 20 #include "xfs_log_priv.h" 21 #include "xfs_ag.h" 22 #include "xfs_ag_resv.h" 23 #include "xfs_trace.h" 24 #include "xfs_rtalloc.h" 25 #include "xfs_rtrmap_btree.h" 26 #include "xfs_rtrefcount_btree.h" 27 #include "xfs_metafile.h" 28 29 /* 30 * Write new AG headers to disk. Non-transactional, but need to be 31 * written and completed prior to the growfs transaction being logged. 32 * To do this, we use a delayed write buffer list and wait for 33 * submission and IO completion of the list as a whole. This allows the 34 * IO subsystem to merge all the AG headers in a single AG into a single 35 * IO and hide most of the latency of the IO from us. 36 * 37 * This also means that if we get an error whilst building the buffer 38 * list to write, we can cancel the entire list without having written 39 * anything. 40 */ 41 static int 42 xfs_resizefs_init_new_ags( 43 struct xfs_trans *tp, 44 struct aghdr_init_data *id, 45 xfs_agnumber_t oagcount, 46 xfs_agnumber_t nagcount, 47 xfs_rfsblock_t delta, 48 struct xfs_perag *last_pag, 49 bool *lastag_extended) 50 { 51 struct xfs_mount *mp = tp->t_mountp; 52 xfs_rfsblock_t nb = mp->m_sb.sb_dblocks + delta; 53 int error; 54 55 *lastag_extended = false; 56 57 INIT_LIST_HEAD(&id->buffer_list); 58 for (id->agno = nagcount - 1; 59 id->agno >= oagcount; 60 id->agno--, delta -= id->agsize) { 61 62 if (id->agno == nagcount - 1) 63 id->agsize = nb - (id->agno * 64 (xfs_rfsblock_t)mp->m_sb.sb_agblocks); 65 else 66 id->agsize = mp->m_sb.sb_agblocks; 67 68 error = xfs_ag_init_headers(mp, id); 69 if (error) { 70 xfs_buf_delwri_cancel(&id->buffer_list); 71 return error; 72 } 73 } 74 75 error = xfs_buf_delwri_submit(&id->buffer_list); 76 if (error) 77 return error; 78 79 if (delta) { 80 *lastag_extended = true; 81 error = xfs_ag_extend_space(last_pag, tp, delta); 82 } 83 return error; 84 } 85 86 /* 87 * growfs operations 88 */ 89 static int 90 xfs_growfs_data_private( 91 struct xfs_mount *mp, /* mount point for filesystem */ 92 struct xfs_growfs_data *in) /* growfs data input struct */ 93 { 94 xfs_agnumber_t oagcount = mp->m_sb.sb_agcount; 95 struct xfs_buf *bp; 96 int error; 97 xfs_agnumber_t nagcount; 98 xfs_agnumber_t nagimax = 0; 99 xfs_rfsblock_t nb, nb_div, nb_mod; 100 int64_t delta; 101 bool lastag_extended = false; 102 struct xfs_trans *tp; 103 struct aghdr_init_data id = {}; 104 struct xfs_perag *last_pag; 105 106 nb = in->newblocks; 107 error = xfs_sb_validate_fsb_count(&mp->m_sb, nb); 108 if (error) 109 return error; 110 111 if (nb > mp->m_sb.sb_dblocks) { 112 error = xfs_buf_read_uncached(mp->m_ddev_targp, 113 XFS_FSB_TO_BB(mp, nb) - XFS_FSS_TO_BB(mp, 1), 114 XFS_FSS_TO_BB(mp, 1), &bp, NULL); 115 if (error) 116 return error; 117 xfs_buf_relse(bp); 118 } 119 120 /* Make sure the new fs size won't cause problems with the log. */ 121 error = xfs_growfs_check_rtgeom(mp, nb, mp->m_sb.sb_rblocks, 122 mp->m_sb.sb_rextsize); 123 if (error) 124 return error; 125 126 nb_div = nb; 127 nb_mod = do_div(nb_div, mp->m_sb.sb_agblocks); 128 if (nb_mod && nb_mod >= XFS_MIN_AG_BLOCKS) 129 nb_div++; 130 else if (nb_mod) 131 nb = nb_div * mp->m_sb.sb_agblocks; 132 133 if (nb_div > XFS_MAX_AGNUMBER + 1) { 134 nb_div = XFS_MAX_AGNUMBER + 1; 135 nb = nb_div * mp->m_sb.sb_agblocks; 136 } 137 nagcount = nb_div; 138 delta = nb - mp->m_sb.sb_dblocks; 139 /* 140 * Reject filesystems with a single AG because they are not 141 * supported, and reject a shrink operation that would cause a 142 * filesystem to become unsupported. 143 */ 144 if (delta < 0 && nagcount < 2) 145 return -EINVAL; 146 147 /* No work to do */ 148 if (delta == 0) 149 return 0; 150 151 /* TODO: shrinking the entire AGs hasn't yet completed */ 152 if (nagcount < oagcount) 153 return -EINVAL; 154 155 /* allocate the new per-ag structures */ 156 error = xfs_initialize_perag(mp, oagcount, nagcount, nb, &nagimax); 157 if (error) 158 return error; 159 160 if (delta > 0) 161 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, 162 XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, 163 &tp); 164 else 165 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, -delta, 0, 166 0, &tp); 167 if (error) 168 goto out_free_unused_perag; 169 170 last_pag = xfs_perag_get(mp, oagcount - 1); 171 if (delta > 0) { 172 error = xfs_resizefs_init_new_ags(tp, &id, oagcount, nagcount, 173 delta, last_pag, &lastag_extended); 174 } else { 175 xfs_warn_experimental(mp, XFS_EXPERIMENTAL_SHRINK); 176 error = xfs_ag_shrink_space(last_pag, &tp, -delta); 177 } 178 xfs_perag_put(last_pag); 179 if (error) 180 goto out_trans_cancel; 181 182 /* 183 * Update changed superblock fields transactionally. These are not 184 * seen by the rest of the world until the transaction commit applies 185 * them atomically to the superblock. 186 */ 187 if (nagcount > oagcount) 188 xfs_trans_mod_sb(tp, XFS_TRANS_SB_AGCOUNT, nagcount - oagcount); 189 if (delta) 190 xfs_trans_mod_sb(tp, XFS_TRANS_SB_DBLOCKS, delta); 191 if (id.nfree) 192 xfs_trans_mod_sb(tp, XFS_TRANS_SB_FDBLOCKS, id.nfree); 193 194 /* 195 * Sync sb counters now to reflect the updated values. This is 196 * particularly important for shrink because the write verifier 197 * will fail if sb_fdblocks is ever larger than sb_dblocks. 198 */ 199 if (xfs_has_lazysbcount(mp)) 200 xfs_log_sb(tp); 201 202 xfs_trans_set_sync(tp); 203 error = xfs_trans_commit(tp); 204 if (error) 205 return error; 206 207 /* New allocation groups fully initialized, so update mount struct */ 208 if (nagimax) 209 mp->m_maxagi = nagimax; 210 xfs_set_low_space_thresholds(mp); 211 mp->m_alloc_set_aside = xfs_alloc_set_aside(mp); 212 213 if (delta > 0) { 214 /* 215 * If we expanded the last AG, free the per-AG reservation 216 * so we can reinitialize it with the new size. 217 */ 218 if (lastag_extended) { 219 struct xfs_perag *pag; 220 221 pag = xfs_perag_get(mp, id.agno); 222 xfs_ag_resv_free(pag); 223 xfs_perag_put(pag); 224 } 225 /* 226 * Reserve AG metadata blocks. ENOSPC here does not mean there 227 * was a growfs failure, just that there still isn't space for 228 * new user data after the grow has been run. 229 */ 230 error = xfs_fs_reserve_ag_blocks(mp); 231 if (error == -ENOSPC) 232 error = 0; 233 234 /* Compute new maxlevels for rt btrees. */ 235 xfs_rtrmapbt_compute_maxlevels(mp); 236 xfs_rtrefcountbt_compute_maxlevels(mp); 237 } 238 239 return error; 240 241 out_trans_cancel: 242 xfs_trans_cancel(tp); 243 out_free_unused_perag: 244 if (nagcount > oagcount) 245 xfs_free_perag_range(mp, oagcount, nagcount); 246 return error; 247 } 248 249 static int 250 xfs_growfs_log_private( 251 struct xfs_mount *mp, /* mount point for filesystem */ 252 struct xfs_growfs_log *in) /* growfs log input struct */ 253 { 254 xfs_extlen_t nb; 255 256 nb = in->newblocks; 257 if (nb < XFS_MIN_LOG_BLOCKS || nb < XFS_B_TO_FSB(mp, XFS_MIN_LOG_BYTES)) 258 return -EINVAL; 259 if (nb == mp->m_sb.sb_logblocks && 260 in->isint == (mp->m_sb.sb_logstart != 0)) 261 return -EINVAL; 262 /* 263 * Moving the log is hard, need new interfaces to sync 264 * the log first, hold off all activity while moving it. 265 * Can have shorter or longer log in the same space, 266 * or transform internal to external log or vice versa. 267 */ 268 return -ENOSYS; 269 } 270 271 static int 272 xfs_growfs_imaxpct( 273 struct xfs_mount *mp, 274 __u32 imaxpct) 275 { 276 struct xfs_trans *tp; 277 int dpct; 278 int error; 279 280 if (imaxpct > 100) 281 return -EINVAL; 282 283 error = xfs_trans_alloc(mp, &M_RES(mp)->tr_growdata, 284 XFS_GROWFS_SPACE_RES(mp), 0, XFS_TRANS_RESERVE, &tp); 285 if (error) 286 return error; 287 288 dpct = imaxpct - mp->m_sb.sb_imax_pct; 289 xfs_trans_mod_sb(tp, XFS_TRANS_SB_IMAXPCT, dpct); 290 xfs_trans_set_sync(tp); 291 return xfs_trans_commit(tp); 292 } 293 294 /* 295 * protected versions of growfs function acquire and release locks on the mount 296 * point - exported through ioctls: XFS_IOC_FSGROWFSDATA, XFS_IOC_FSGROWFSLOG, 297 * XFS_IOC_FSGROWFSRT 298 */ 299 int 300 xfs_growfs_data( 301 struct xfs_mount *mp, 302 struct xfs_growfs_data *in) 303 { 304 int error; 305 306 if (!capable(CAP_SYS_ADMIN)) 307 return -EPERM; 308 if (!mutex_trylock(&mp->m_growlock)) 309 return -EWOULDBLOCK; 310 311 /* we can't grow the data section when an internal RT section exists */ 312 if (in->newblocks != mp->m_sb.sb_dblocks && mp->m_sb.sb_rtstart) { 313 error = -EINVAL; 314 goto out_unlock; 315 } 316 317 /* update imaxpct separately to the physical grow of the filesystem */ 318 if (in->imaxpct != mp->m_sb.sb_imax_pct) { 319 error = xfs_growfs_imaxpct(mp, in->imaxpct); 320 if (error) 321 goto out_unlock; 322 } 323 324 if (in->newblocks != mp->m_sb.sb_dblocks) { 325 error = xfs_growfs_data_private(mp, in); 326 if (error) 327 goto out_unlock; 328 } 329 330 /* Post growfs calculations needed to reflect new state in operations */ 331 if (mp->m_sb.sb_imax_pct) { 332 uint64_t icount = mp->m_sb.sb_dblocks * mp->m_sb.sb_imax_pct; 333 do_div(icount, 100); 334 M_IGEO(mp)->maxicount = XFS_FSB_TO_INO(mp, icount); 335 } else 336 M_IGEO(mp)->maxicount = 0; 337 338 /* Update secondary superblocks now the physical grow has completed */ 339 error = xfs_update_secondary_sbs(mp); 340 341 /* 342 * Increment the generation unconditionally, after trying to update the 343 * secondary superblocks, as the new size is live already at this point. 344 */ 345 mp->m_generation++; 346 out_unlock: 347 mutex_unlock(&mp->m_growlock); 348 return error; 349 } 350 351 int 352 xfs_growfs_log( 353 xfs_mount_t *mp, 354 struct xfs_growfs_log *in) 355 { 356 int error; 357 358 if (!capable(CAP_SYS_ADMIN)) 359 return -EPERM; 360 if (!mutex_trylock(&mp->m_growlock)) 361 return -EWOULDBLOCK; 362 error = xfs_growfs_log_private(mp, in); 363 mutex_unlock(&mp->m_growlock); 364 return error; 365 } 366 367 /* 368 * Reserve the requested number of blocks if available. Otherwise return 369 * as many as possible to satisfy the request. The actual number 370 * reserved are returned in outval. 371 */ 372 int 373 xfs_reserve_blocks( 374 struct xfs_mount *mp, 375 enum xfs_free_counter ctr, 376 uint64_t request) 377 { 378 int64_t lcounter, delta; 379 int64_t fdblks_delta = 0; 380 int64_t free; 381 int error = 0; 382 383 ASSERT(ctr < XC_FREE_NR); 384 385 /* 386 * With per-cpu counters, this becomes an interesting problem. we need 387 * to work out if we are freeing or allocation blocks first, then we can 388 * do the modification as necessary. 389 * 390 * We do this under the m_sb_lock so that if we are near ENOSPC, we will 391 * hold out any changes while we work out what to do. This means that 392 * the amount of free space can change while we do this, so we need to 393 * retry if we end up trying to reserve more space than is available. 394 */ 395 spin_lock(&mp->m_sb_lock); 396 397 /* 398 * If our previous reservation was larger than the current value, 399 * then move any unused blocks back to the free pool. Modify the resblks 400 * counters directly since we shouldn't have any problems unreserving 401 * space. 402 */ 403 if (mp->m_free[ctr].res_total > request) { 404 lcounter = mp->m_free[ctr].res_avail - request; 405 if (lcounter > 0) { /* release unused blocks */ 406 fdblks_delta = lcounter; 407 mp->m_free[ctr].res_avail -= lcounter; 408 } 409 mp->m_free[ctr].res_total = request; 410 if (fdblks_delta) { 411 spin_unlock(&mp->m_sb_lock); 412 xfs_add_freecounter(mp, ctr, fdblks_delta); 413 spin_lock(&mp->m_sb_lock); 414 } 415 416 goto out; 417 } 418 419 /* 420 * If the request is larger than the current reservation, reserve the 421 * blocks before we update the reserve counters. Sample m_free and 422 * perform a partial reservation if the request exceeds free space. 423 * 424 * The code below estimates how many blocks it can request from 425 * fdblocks to stash in the reserve pool. This is a classic TOCTOU 426 * race since fdblocks updates are not always coordinated via 427 * m_sb_lock. Set the reserve size even if there's not enough free 428 * space to fill it because mod_fdblocks will refill an undersized 429 * reserve when it can. 430 */ 431 free = xfs_sum_freecounter_raw(mp, ctr) - 432 xfs_freecounter_unavailable(mp, ctr); 433 delta = request - mp->m_free[ctr].res_total; 434 mp->m_free[ctr].res_total = request; 435 if (delta > 0 && free > 0) { 436 /* 437 * We'll either succeed in getting space from the free block 438 * count or we'll get an ENOSPC. Don't set the reserved flag 439 * here - we don't want to reserve the extra reserve blocks 440 * from the reserve. 441 * 442 * The desired reserve size can change after we drop the lock. 443 * Use mod_fdblocks to put the space into the reserve or into 444 * fdblocks as appropriate. 445 */ 446 fdblks_delta = min(free, delta); 447 spin_unlock(&mp->m_sb_lock); 448 error = xfs_dec_freecounter(mp, ctr, fdblks_delta, 0); 449 if (!error) 450 xfs_add_freecounter(mp, ctr, fdblks_delta); 451 spin_lock(&mp->m_sb_lock); 452 } 453 out: 454 spin_unlock(&mp->m_sb_lock); 455 return error; 456 } 457 458 int 459 xfs_fs_goingdown( 460 xfs_mount_t *mp, 461 uint32_t inflags) 462 { 463 switch (inflags) { 464 case XFS_FSOP_GOING_FLAGS_DEFAULT: { 465 if (!bdev_freeze(mp->m_super->s_bdev)) { 466 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT); 467 bdev_thaw(mp->m_super->s_bdev); 468 } 469 break; 470 } 471 case XFS_FSOP_GOING_FLAGS_LOGFLUSH: 472 xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT); 473 break; 474 case XFS_FSOP_GOING_FLAGS_NOLOGFLUSH: 475 xfs_force_shutdown(mp, 476 SHUTDOWN_FORCE_UMOUNT | SHUTDOWN_LOG_IO_ERROR); 477 break; 478 default: 479 return -EINVAL; 480 } 481 482 return 0; 483 } 484 485 /* 486 * Force a shutdown of the filesystem instantly while keeping the filesystem 487 * consistent. We don't do an unmount here; just shutdown the shop, make sure 488 * that absolutely nothing persistent happens to this filesystem after this 489 * point. 490 * 491 * The shutdown state change is atomic, resulting in the first and only the 492 * first shutdown call processing the shutdown. This means we only shutdown the 493 * log once as it requires, and we don't spam the logs when multiple concurrent 494 * shutdowns race to set the shutdown flags. 495 */ 496 void 497 xfs_do_force_shutdown( 498 struct xfs_mount *mp, 499 uint32_t flags, 500 char *fname, 501 int lnnum) 502 { 503 int tag; 504 const char *why; 505 506 507 if (xfs_set_shutdown(mp)) { 508 xlog_shutdown_wait(mp->m_log); 509 return; 510 } 511 if (mp->m_sb_bp) 512 mp->m_sb_bp->b_flags |= XBF_DONE; 513 514 if (flags & SHUTDOWN_FORCE_UMOUNT) 515 xfs_alert(mp, "User initiated shutdown received."); 516 517 if (xlog_force_shutdown(mp->m_log, flags)) { 518 tag = XFS_PTAG_SHUTDOWN_LOGERROR; 519 why = "Log I/O Error"; 520 } else if (flags & SHUTDOWN_CORRUPT_INCORE) { 521 tag = XFS_PTAG_SHUTDOWN_CORRUPT; 522 why = "Corruption of in-memory data"; 523 } else if (flags & SHUTDOWN_CORRUPT_ONDISK) { 524 tag = XFS_PTAG_SHUTDOWN_CORRUPT; 525 why = "Corruption of on-disk metadata"; 526 } else if (flags & SHUTDOWN_DEVICE_REMOVED) { 527 tag = XFS_PTAG_SHUTDOWN_IOERROR; 528 why = "Block device removal"; 529 } else { 530 tag = XFS_PTAG_SHUTDOWN_IOERROR; 531 why = "Metadata I/O Error"; 532 } 533 534 trace_xfs_force_shutdown(mp, tag, flags, fname, lnnum); 535 536 xfs_alert_tag(mp, tag, 537 "%s (0x%x) detected at %pS (%s:%d). Shutting down filesystem.", 538 why, flags, __return_address, fname, lnnum); 539 xfs_alert(mp, 540 "Please unmount the filesystem and rectify the problem(s)"); 541 if (xfs_error_level >= XFS_ERRLEVEL_HIGH) 542 xfs_stack_trace(); 543 } 544 545 /* 546 * Reserve free space for per-AG metadata. 547 */ 548 int 549 xfs_fs_reserve_ag_blocks( 550 struct xfs_mount *mp) 551 { 552 struct xfs_perag *pag = NULL; 553 int error = 0; 554 int err2; 555 556 mp->m_finobt_nores = false; 557 while ((pag = xfs_perag_next(mp, pag))) { 558 err2 = xfs_ag_resv_init(pag, NULL); 559 if (err2 && !error) 560 error = err2; 561 } 562 563 if (error && error != -ENOSPC) { 564 xfs_warn(mp, 565 "Error %d reserving per-AG metadata reserve pool.", error); 566 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 567 return error; 568 } 569 570 err2 = xfs_metafile_resv_init(mp); 571 if (err2 && err2 != -ENOSPC) { 572 xfs_warn(mp, 573 "Error %d reserving realtime metadata reserve pool.", err2); 574 xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE); 575 576 if (!error) 577 error = err2; 578 } 579 580 return error; 581 } 582 583 /* 584 * Free space reserved for per-AG metadata. 585 */ 586 void 587 xfs_fs_unreserve_ag_blocks( 588 struct xfs_mount *mp) 589 { 590 struct xfs_perag *pag = NULL; 591 592 xfs_metafile_resv_free(mp); 593 while ((pag = xfs_perag_next(mp, pag))) 594 xfs_ag_resv_free(pag); 595 } 596