// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2016 Oracle. All Rights Reserved.
 * Author: Darrick J. Wong <darrick.wong@oracle.com>
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_bit.h"
#include "xfs_shared.h"
#include "xfs_mount.h"
#include "xfs_defer.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_refcount_item.h"
#include "xfs_log.h"
#include "xfs_refcount.h"
#include "xfs_error.h"
#include "xfs_log_priv.h"
#include "xfs_log_recover.h"
#include "xfs_ag.h"
#include "xfs_btree.h"
#include "xfs_trace.h"
#include "xfs_rtgroup.h"

struct kmem_cache	*xfs_cui_cache;
struct kmem_cache	*xfs_cud_cache;

static const struct xfs_item_ops xfs_cui_item_ops;

static inline struct xfs_cui_log_item *CUI_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cui_log_item, cui_item);
}

STATIC void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	kvfree(cuip->cui_item.li_lv_shadow);
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kfree(cuip);
	else
		kmem_cache_free(xfs_cui_cache, cuip);
}

/*
 * Freeing the CUI requires that we remove it from the AIL if it has already
 * been placed there. However, the CUI may not yet have been placed in the AIL
 * when called by xfs_cui_release() from CUD processing due to the ordering of
 * committed vs unpin operations in bulk insert operations. Hence the reference
 * count to ensure only the last caller frees the CUI.
 */
STATIC void
xfs_cui_release(
	struct xfs_cui_log_item	*cuip)
{
	ASSERT(atomic_read(&cuip->cui_refcount) > 0);
	if (!atomic_dec_and_test(&cuip->cui_refcount))
		return;

	xfs_trans_ail_delete(&cuip->cui_item, 0);
	xfs_cui_item_free(cuip);
}


STATIC void
xfs_cui_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	*nvecs += 1;
	*nbytes += xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents);
}

unsigned int xfs_cui_log_space(unsigned int nr)
{
	return xlog_item_space(1, xfs_cui_log_format_sizeof(nr));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cui log item. We use only 1 iovec, and we point that
 * at the cui_log_format structure embedded in the cui item.
 * It is at this point that we assert that all of the extent
 * slots in the cui item have been filled.
 */
STATIC void
xfs_cui_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(atomic_read(&cuip->cui_next_extent) ==
			cuip->cui_format.cui_nextents);
	ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

	cuip->cui_format.cui_type = lip->li_type;
	cuip->cui_format.cui_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUI_FORMAT, &cuip->cui_format,
			xfs_cui_log_format_sizeof(cuip->cui_format.cui_nextents));
}

/*
 * The unpin operation is the last place a CUI is manipulated in the log. It is
 * either inserted in the AIL or aborted in the event of a log I/O error. In
 * either case, the CUI transaction has been successfully committed to make it
 * this far. Therefore, we expect whoever committed the CUI to either construct
 * and commit the CUD or drop the CUD's reference in the event of error. Simply
 * drop the log's CUI reference now that the log is done with it.
 */
STATIC void
xfs_cui_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_cui_log_item	*cuip = CUI_ITEM(lip);

	xfs_cui_release(cuip);
}

/*
 * The CUI has been either committed or aborted if the transaction has been
 * cancelled. If the transaction was cancelled, a CUD isn't going to be
 * constructed and thus we free the CUI here directly.
 */
STATIC void
xfs_cui_item_release(
	struct xfs_log_item	*lip)
{
	xfs_cui_release(CUI_ITEM(lip));
}

/*
 * Allocate and initialize a cui item with the given number of extents.
 */
STATIC struct xfs_cui_log_item *
xfs_cui_init(
	struct xfs_mount		*mp,
	unsigned short			item_type,
	uint				nextents)
{
	struct xfs_cui_log_item		*cuip;

	ASSERT(nextents > 0);
	ASSERT(item_type == XFS_LI_CUI || item_type == XFS_LI_CUI_RT);

	if (nextents > XFS_CUI_MAX_FAST_EXTENTS)
		cuip = kzalloc(xfs_cui_log_item_sizeof(nextents),
				GFP_KERNEL | __GFP_NOFAIL);
	else
		cuip = kmem_cache_zalloc(xfs_cui_cache,
				GFP_KERNEL | __GFP_NOFAIL);

	xfs_log_item_init(mp, &cuip->cui_item, item_type, &xfs_cui_item_ops);
	cuip->cui_format.cui_nextents = nextents;
	cuip->cui_format.cui_id = (uintptr_t)(void *)cuip;
	atomic_set(&cuip->cui_next_extent, 0);
	atomic_set(&cuip->cui_refcount, 2);

	return cuip;
}

static inline struct xfs_cud_log_item *CUD_ITEM(struct xfs_log_item *lip)
{
	return container_of(lip, struct xfs_cud_log_item, cud_item);
}

STATIC void
xfs_cud_item_size(
	struct xfs_log_item	*lip,
	int			*nvecs,
	int			*nbytes)
{
	*nvecs += 1;
	*nbytes += sizeof(struct xfs_cud_log_format);
}

unsigned int xfs_cud_log_space(void)
{
	return xlog_item_space(1, sizeof(struct xfs_cud_log_format));
}

/*
 * This is called to fill in the vector of log iovecs for the
 * given cud log item. We use only 1 iovec, and we point that
 * at the cud_log_format structure embedded in the cud item.
 */
STATIC void
xfs_cud_item_format(
	struct xfs_log_item	*lip,
	struct xfs_log_vec	*lv)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);
	struct xfs_log_iovec	*vecp = NULL;

	ASSERT(lip->li_type == XFS_LI_CUD || lip->li_type == XFS_LI_CUD_RT);

	cudp->cud_format.cud_type = lip->li_type;
	cudp->cud_format.cud_size = 1;

	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_CUD_FORMAT, &cudp->cud_format,
			sizeof(struct xfs_cud_log_format));
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_release(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	xfs_cui_release(cudp->cud_cuip);
	kvfree(cudp->cud_item.li_lv_shadow);
	kmem_cache_free(xfs_cud_cache, cudp);
}

static struct xfs_log_item *
xfs_cud_item_intent(
	struct xfs_log_item	*lip)
{
	return &CUD_ITEM(lip)->cud_cuip->cui_item;
}

static const struct xfs_item_ops xfs_cud_item_ops = {
	.flags		= XFS_ITEM_RELEASE_WHEN_COMMITTED |
			  XFS_ITEM_INTENT_DONE,
	.iop_size	= xfs_cud_item_size,
	.iop_format	= xfs_cud_item_format,
	.iop_release	= xfs_cud_item_release,
	.iop_intent	= xfs_cud_item_intent,
};

static inline struct xfs_refcount_intent *ci_entry(const struct list_head *e)
{
	return list_entry(e, struct xfs_refcount_intent, ri_list);
}

static inline bool
xfs_cui_item_isrt(const struct xfs_log_item *lip)
{
	ASSERT(lip->li_type == XFS_LI_CUI || lip->li_type == XFS_LI_CUI_RT);

	return lip->li_type == XFS_LI_CUI_RT;
}

/* Sort refcount intents by AG. */
static int
xfs_refcount_update_diff_items(
	void				*priv,
	const struct list_head		*a,
	const struct list_head		*b)
{
	struct xfs_refcount_intent	*ra = ci_entry(a);
	struct xfs_refcount_intent	*rb = ci_entry(b);

	return ra->ri_group->xg_gno - rb->ri_group->xg_gno;
}

/* Log refcount updates in the intent item. */
STATIC void
xfs_refcount_update_log_item(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip,
	struct xfs_refcount_intent	*ri)
{
	uint				next_extent;
	struct xfs_phys_extent		*pmap;

	/*
	 * atomic_inc_return gives us the value after the increment;
	 * we want to use it as an array index so we need to subtract 1 from
	 * it.
	 */
	next_extent = atomic_inc_return(&cuip->cui_next_extent) - 1;
	ASSERT(next_extent < cuip->cui_format.cui_nextents);
	pmap = &cuip->cui_format.cui_extents[next_extent];
	pmap->pe_startblock = ri->ri_startblock;
	pmap->pe_len = ri->ri_blockcount;

	pmap->pe_flags = 0;
	switch (ri->ri_type) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		pmap->pe_flags |= ri->ri_type;
		break;
	default:
		ASSERT(0);
	}
}

static struct xfs_log_item *
__xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort,
	unsigned short			item_type)
{
	struct xfs_mount		*mp = tp->t_mountp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_refcount_intent	*ri;

	ASSERT(count > 0);

	cuip = xfs_cui_init(mp, item_type, count);
	if (sort)
		list_sort(mp, items, xfs_refcount_update_diff_items);
	list_for_each_entry(ri, items, ri_list)
		xfs_refcount_update_log_item(tp, cuip, ri);
	return &cuip->cui_item;
}

static struct xfs_log_item *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_refcount_update_create_intent(tp, items, count, sort,
			XFS_LI_CUI);
}

static inline unsigned short
xfs_cud_type_from_cui(const struct xfs_cui_log_item *cuip)
{
	return xfs_cui_item_isrt(&cuip->cui_item) ? XFS_LI_CUD_RT : XFS_LI_CUD;
}

/* Get a CUD so we can process all the deferred refcount updates. */
static struct xfs_log_item *
xfs_refcount_update_create_done(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	unsigned int			count)
{
	struct xfs_cui_log_item		*cuip = CUI_ITEM(intent);
	struct xfs_cud_log_item		*cudp;

	cudp = kmem_cache_zalloc(xfs_cud_cache, GFP_KERNEL | __GFP_NOFAIL);
	xfs_log_item_init(tp->t_mountp, &cudp->cud_item,
			xfs_cud_type_from_cui(cuip), &xfs_cud_item_ops);
	cudp->cud_cuip = cuip;
	cudp->cud_format.cud_cui_id = cuip->cui_format.cui_id;

	return &cudp->cud_item;
}

/* Add this deferred CUI to the transaction. */
void
xfs_refcount_defer_add(
	struct xfs_trans		*tp,
	struct xfs_refcount_intent	*ri)
{
	struct xfs_mount		*mp = tp->t_mountp;

	/*
	 * Deferred refcount updates for the realtime and data sections must
	 * use separate transactions to finish deferred work because updates to
	 * realtime metadata files can lock AGFs to allocate btree blocks and
	 * we don't want that mixing with the AGF locks taken to finish data
	 * section updates.
	 */
	ri->ri_group = xfs_group_intent_get(mp, ri->ri_startblock,
			ri->ri_realtime ? XG_TYPE_RTG : XG_TYPE_AG);

	trace_xfs_refcount_defer(mp, ri);
	xfs_defer_add(tp, &ri->ri_list, ri->ri_realtime ?
			&xfs_rtrefcount_update_defer_type :
			&xfs_refcount_update_defer_type);
}

/* Cancel a deferred refcount update. */
STATIC void
xfs_refcount_update_cancel_item(
	struct list_head		*item)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);

	xfs_group_intent_put(ri->ri_group);
	kmem_cache_free(xfs_refcount_intent_cache, ri);
}

/* Process a deferred refcount update. */
STATIC int
xfs_refcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);
	int				error;

	/* Did we run out of reservation? Requeue what we didn't finish. */
	error = xfs_refcount_finish_one(tp, ri, state);
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_refcount_finish_one. */
STATIC void
xfs_refcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	struct xfs_buf		*agbp;

	if (rcur == NULL)
		return;
	agbp = rcur->bc_ag.agbp;
	xfs_btree_del_cursor(rcur, error);
	if (error && agbp)
		xfs_trans_brelse(tp, agbp);
}

/* Abort all pending CUIs. */
STATIC void
xfs_refcount_update_abort_intent(
	struct xfs_log_item		*intent)
{
	xfs_cui_release(CUI_ITEM(intent));
}

/* Is this recovered CUI ok? */
static inline bool
xfs_cui_validate_phys(
	struct xfs_mount		*mp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
	if (!xfs_has_reflink(mp))
		return false;

	if (pmap->pe_flags & ~XFS_REFCOUNT_EXTENT_FLAGS)
		return false;

	switch (pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK) {
	case XFS_REFCOUNT_INCREASE:
	case XFS_REFCOUNT_DECREASE:
	case XFS_REFCOUNT_ALLOC_COW:
	case XFS_REFCOUNT_FREE_COW:
		break;
	default:
		return false;
	}

	if (isrt)
		return xfs_verify_rtbext(mp, pmap->pe_startblock, pmap->pe_len);

	return xfs_verify_fsbext(mp, pmap->pe_startblock, pmap->pe_len);
}

static inline void
xfs_cui_recover_work(
	struct xfs_mount		*mp,
	struct xfs_defer_pending	*dfp,
	bool				isrt,
	struct xfs_phys_extent		*pmap)
{
	struct xfs_refcount_intent	*ri;

	ri = kmem_cache_alloc(xfs_refcount_intent_cache,
			GFP_KERNEL | __GFP_NOFAIL);
	ri->ri_type = pmap->pe_flags & XFS_REFCOUNT_EXTENT_TYPE_MASK;
	ri->ri_startblock = pmap->pe_startblock;
	ri->ri_blockcount = pmap->pe_len;
	ri->ri_group = xfs_group_intent_get(mp, pmap->pe_startblock,
			isrt ? XG_TYPE_RTG : XG_TYPE_AG);
	ri->ri_realtime = isrt;

	xfs_defer_add_item(dfp, &ri->ri_list);
}

/*
 * Process a refcount update intent item that was recovered from the log.
 * We need to update the refcountbt.
 */
STATIC int
xfs_refcount_recover_work(
	struct xfs_defer_pending	*dfp,
	struct list_head		*capture_list)
{
	struct xfs_trans_res		resv;
	struct xfs_log_item		*lip = dfp->dfp_intent;
	struct xfs_cui_log_item		*cuip = CUI_ITEM(lip);
	struct xfs_trans		*tp;
	struct xfs_mount		*mp = lip->li_log->l_mp;
	bool				isrt = xfs_cui_item_isrt(lip);
	int				i;
	int				error = 0;

	/*
	 * First check the validity of the extents described by the
	 * CUI. If any are bad, then assume that all are bad and
	 * just toss the CUI.
	 */
	for (i = 0; i < cuip->cui_format.cui_nextents; i++) {
		if (!xfs_cui_validate_phys(mp, isrt,
					&cuip->cui_format.cui_extents[i])) {
			XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
					&cuip->cui_format,
					sizeof(cuip->cui_format));
			return -EFSCORRUPTED;
		}

		xfs_cui_recover_work(mp, dfp, isrt,
				&cuip->cui_format.cui_extents[i]);
	}

	/*
	 * Under normal operation, refcount updates are deferred, so we
	 * wouldn't be adding them directly to a transaction. All
	 * refcount updates manage reservation usage internally and
	 * dynamically by deferring work that won't fit in the
	 * transaction. Normally, any work that needs to be deferred
	 * gets attached to the same defer_ops that scheduled the
	 * refcount update. However, we're in log recovery here, so we
	 * use the passed in defer_ops to finish up any work that
	 * doesn't fit. We need to reserve enough blocks to handle a
	 * full btree split on either end of the refcount range.
	 */
	resv = xlog_recover_resv(&M_RES(mp)->tr_itruncate);
	error = xfs_trans_alloc(mp, &resv, mp->m_refc_maxlevels * 2, 0,
			XFS_TRANS_RESERVE, &tp);
	if (error)
		return error;

	error = xlog_recover_finish_intent(tp, dfp);
	if (error == -EFSCORRUPTED)
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				&cuip->cui_format,
				sizeof(cuip->cui_format));
	if (error)
		goto abort_error;

	return xfs_defer_ops_capture_and_commit(tp, capture_list);

abort_error:
	xfs_trans_cancel(tp);
	return error;
}

/* Relog an intent item to push the log tail forward. */
static struct xfs_log_item *
xfs_refcount_relog_intent(
	struct xfs_trans		*tp,
	struct xfs_log_item		*intent,
	struct xfs_log_item		*done_item)
{
	struct xfs_cui_log_item		*cuip;
	struct xfs_phys_extent		*pmap;
	unsigned int			count;

	ASSERT(intent->li_type == XFS_LI_CUI ||
	       intent->li_type == XFS_LI_CUI_RT);

	count = CUI_ITEM(intent)->cui_format.cui_nextents;
	pmap = CUI_ITEM(intent)->cui_format.cui_extents;

	cuip = xfs_cui_init(tp->t_mountp, intent->li_type, count);
	memcpy(cuip->cui_format.cui_extents, pmap, count * sizeof(*pmap));
	atomic_set(&cuip->cui_next_extent, count);

	return &cuip->cui_item;
}

const struct xfs_defer_op_type xfs_refcount_update_defer_type = {
	.name		= "refcount",
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_refcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_refcount_update_finish_item,
	.finish_cleanup = xfs_refcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
	.recover_work	= xfs_refcount_recover_work,
	.relog_intent	= xfs_refcount_relog_intent,
};

#ifdef CONFIG_XFS_RT
static struct xfs_log_item *
xfs_rtrefcount_update_create_intent(
	struct xfs_trans		*tp,
	struct list_head		*items,
	unsigned int			count,
	bool				sort)
{
	return __xfs_refcount_update_create_intent(tp, items, count, sort,
			XFS_LI_CUI_RT);
}

/* Process a deferred realtime refcount update. */
STATIC int
xfs_rtrefcount_update_finish_item(
	struct xfs_trans		*tp,
	struct xfs_log_item		*done,
	struct list_head		*item,
	struct xfs_btree_cur		**state)
{
	struct xfs_refcount_intent	*ri = ci_entry(item);
	int				error;

	error = xfs_rtrefcount_finish_one(tp, ri, state);

	/* Did we run out of reservation? Requeue what we didn't finish. */
	if (!error && ri->ri_blockcount > 0) {
		ASSERT(ri->ri_type == XFS_REFCOUNT_INCREASE ||
		       ri->ri_type == XFS_REFCOUNT_DECREASE);
		return -EAGAIN;
	}

	xfs_refcount_update_cancel_item(item);
	return error;
}

/* Clean up after calling xfs_rtrefcount_finish_one. */
STATIC void
xfs_rtrefcount_finish_one_cleanup(
	struct xfs_trans	*tp,
	struct xfs_btree_cur	*rcur,
	int			error)
{
	if (rcur)
		xfs_btree_del_cursor(rcur, error);
}

const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
	.name		= "rtrefcount",
	.max_items	= XFS_CUI_MAX_FAST_EXTENTS,
	.create_intent	= xfs_rtrefcount_update_create_intent,
	.abort_intent	= xfs_refcount_update_abort_intent,
	.create_done	= xfs_refcount_update_create_done,
	.finish_item	= xfs_rtrefcount_update_finish_item,
	.finish_cleanup = xfs_rtrefcount_finish_one_cleanup,
	.cancel_item	= xfs_refcount_update_cancel_item,
	.recover_work	= xfs_refcount_recover_work,
	.relog_intent	= xfs_refcount_relog_intent,
};
#else
const struct xfs_defer_op_type xfs_rtrefcount_update_defer_type = {
	.name		= "rtrefcount",
};
#endif /* CONFIG_XFS_RT */

STATIC bool
xfs_cui_item_match(
	struct xfs_log_item	*lip,
	uint64_t		intent_id)
{
	return CUI_ITEM(lip)->cui_format.cui_id == intent_id;
}

static const struct xfs_item_ops xfs_cui_item_ops = {
	.flags		= XFS_ITEM_INTENT,
	.iop_size	= xfs_cui_item_size,
	.iop_format	= xfs_cui_item_format,
	.iop_unpin	= xfs_cui_item_unpin,
	.iop_release	= xfs_cui_item_release,
	.iop_match	= xfs_cui_item_match,
};

static inline void
xfs_cui_copy_format(
	struct xfs_cui_log_format	*dst,
	const struct xfs_cui_log_format	*src)
{
	unsigned int			i;

	memcpy(dst, src, offsetof(struct xfs_cui_log_format, cui_extents));

	for (i = 0; i < src->cui_nextents; i++)
		memcpy(&dst->cui_extents[i], &src->cui_extents[i],
				sizeof(struct xfs_phys_extent));
}

/*
 * This routine is called to create an in-core extent refcount update
 * item from the cui format structure which was logged on disk.
 * It allocates an in-core cui, copies the extents from the format
 * structure into it, and adds the cui to the AIL with the given
 * LSN.
 */
STATIC int
xlog_recover_cui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	xlog_recover_intent_item(log, &cuip->cui_item, lsn,
			&xfs_refcount_update_defer_type);
	return 0;
}

const struct xlog_recover_item_ops xlog_cui_item_ops = {
	.item_type		= XFS_LI_CUI,
	.commit_pass2		= xlog_recover_cui_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtcui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_mount		*mp = log->l_mp;
	struct xfs_cui_log_item		*cuip;
	struct xfs_cui_log_format	*cui_formatp;
	size_t				len;

	cui_formatp = item->ri_buf[0].i_addr;

	if (item->ri_buf[0].i_len < xfs_cui_log_format_sizeof(0)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	len = xfs_cui_log_format_sizeof(cui_formatp->cui_nextents);
	if (item->ri_buf[0].i_len != len) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	cuip = xfs_cui_init(mp, ITEM_TYPE(item), cui_formatp->cui_nextents);
	xfs_cui_copy_format(&cuip->cui_format, cui_formatp);
	atomic_set(&cuip->cui_next_extent, cui_formatp->cui_nextents);

	xlog_recover_intent_item(log, &cuip->cui_item, lsn,
			&xfs_rtrefcount_update_defer_type);
	return 0;
}
#else
STATIC int
xlog_recover_rtcui_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
			item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
	return -EFSCORRUPTED;
}
#endif

const struct xlog_recover_item_ops xlog_rtcui_item_ops = {
	.item_type		= XFS_LI_CUI_RT,
	.commit_pass2		= xlog_recover_rtcui_commit_pass2,
};

/*
 * This routine is called when a CUD format structure is found in a committed
 * transaction in the log. Its purpose is to cancel the corresponding CUI if it
 * was still in the log. To do this it searches the AIL for the CUI with an id
 * equal to that in the CUD format structure. If we find it we drop the CUD
 * reference, which removes the CUI from the AIL and frees it.
 */
STATIC int
xlog_recover_cud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI, cud_formatp->cud_cui_id);
	return 0;
}

const struct xlog_recover_item_ops xlog_cud_item_ops = {
	.item_type		= XFS_LI_CUD,
	.commit_pass2		= xlog_recover_cud_commit_pass2,
};

#ifdef CONFIG_XFS_RT
STATIC int
xlog_recover_rtcud_commit_pass2(
	struct xlog			*log,
	struct list_head		*buffer_list,
	struct xlog_recover_item	*item,
	xfs_lsn_t			lsn)
{
	struct xfs_cud_log_format	*cud_formatp;

	cud_formatp = item->ri_buf[0].i_addr;
	if (item->ri_buf[0].i_len != sizeof(struct xfs_cud_log_format)) {
		XFS_CORRUPTION_ERROR(__func__, XFS_ERRLEVEL_LOW, log->l_mp,
				item->ri_buf[0].i_addr, item->ri_buf[0].i_len);
		return -EFSCORRUPTED;
	}

	xlog_recover_release_intent(log, XFS_LI_CUI_RT,
			cud_formatp->cud_cui_id);
	return 0;
}
#else
# define xlog_recover_rtcud_commit_pass2	xlog_recover_rtcui_commit_pass2
#endif

const struct xlog_recover_item_ops xlog_rtcud_item_ops = {
	.item_type		= XFS_LI_CUD_RT,
	.commit_pass2		= xlog_recover_rtcud_commit_pass2,
};