// SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS recovery logic
 *
 * Copyright (C) 2005-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Ryusuke Konishi.
 */

#include <linux/buffer_head.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include <linux/crc32.h>
#include "nilfs.h"
#include "segment.h"
#include "sufile.h"
#include "page.h"
#include "segbuf.h"

/*
 * Segment check result
 */
enum {
	NILFS_SEG_VALID,
	NILFS_SEG_NO_SUPER_ROOT,
	NILFS_SEG_FAIL_IO,
	NILFS_SEG_FAIL_MAGIC,
	NILFS_SEG_FAIL_SEQ,
	NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT,
	NILFS_SEG_FAIL_CHECKSUM_FULL,
	NILFS_SEG_FAIL_CONSISTENCY,
};

/* work structure for recovery */
struct nilfs_recovery_block {
	ino_t ino;		/*
				 * Inode number of the file that this block
				 * belongs to
				 */
	sector_t blocknr;	/* block number */
	__u64 vblocknr;		/* virtual block number */
	unsigned long blkoff;	/* File offset of the data block (per block) */
	struct list_head list;
};


static int nilfs_warn_segment_error(struct super_block *sb, int err)
{
	const char *msg = NULL;

	switch (err) {
	case NILFS_SEG_FAIL_IO:
		nilfs_err(sb, "I/O error reading segment");
		return -EIO;
	case NILFS_SEG_FAIL_MAGIC:
		msg = "Magic number mismatch";
		break;
	case NILFS_SEG_FAIL_SEQ:
		msg = "Sequence number mismatch";
		break;
	case NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT:
		msg = "Checksum error in super root";
		break;
	case NILFS_SEG_FAIL_CHECKSUM_FULL:
		msg = "Checksum error in segment payload";
		break;
	case NILFS_SEG_FAIL_CONSISTENCY:
		msg = "Inconsistency found";
		break;
	case NILFS_SEG_NO_SUPER_ROOT:
		msg = "No super root in the last segment";
		break;
	default:
		nilfs_err(sb, "unrecognized segment error %d", err);
		return -EINVAL;
	}
	nilfs_warn(sb, "invalid segment: %s", msg);
	return -EINVAL;
}

/**
 * nilfs_compute_checksum - compute checksum of blocks continuously
 * @nilfs: nilfs object
 * @bhs: buffer head of start block
 * @sum: place to store result
 * @offset: offset bytes in the first block
 * @check_bytes: number of bytes to be checked
 * @start: DBN of start block
 * @nblock: number of blocks to be checked
 *
 * Return: 0 on success, or %-EIO if an I/O error occurs.
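 *
 * Note: the checksum of the first block is computed from the data already
 * held in @bhs; the remaining @nblock - 1 blocks are read directly from the
 * device, starting at block @start + 1.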
 */
static int nilfs_compute_checksum(struct the_nilfs *nilfs,
				  struct buffer_head *bhs, u32 *sum,
				  unsigned long offset, u64 check_bytes,
				  sector_t start, unsigned long nblock)
{
	unsigned int blocksize = nilfs->ns_blocksize;
	unsigned long size;
	u32 crc;

	BUG_ON(offset >= blocksize);
	check_bytes -= offset;
	size = min_t(u64, check_bytes, blocksize - offset);
	crc = crc32_le(nilfs->ns_crc_seed,
		       (unsigned char *)bhs->b_data + offset, size);
	if (--nblock > 0) {
		do {
			struct buffer_head *bh;

			bh = __bread(nilfs->ns_bdev, ++start, blocksize);
			if (!bh)
				return -EIO;
			check_bytes -= size;
			size = min_t(u64, check_bytes, blocksize);
			crc = crc32_le(crc, bh->b_data, size);
			brelse(bh);
		} while (--nblock > 0);
	}
	*sum = crc;
	return 0;
}

/**
 * nilfs_read_super_root_block - read super root block
 * @nilfs: nilfs object
 * @sr_block: disk block number of the super root block
 * @pbh: address of a buffer_head pointer to return super root buffer
 * @check: CRC check flag
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - Super root block corrupted.
 * * %-EIO - I/O error.
 */
int nilfs_read_super_root_block(struct the_nilfs *nilfs, sector_t sr_block,
				struct buffer_head **pbh, int check)
{
	struct buffer_head *bh_sr;
	struct nilfs_super_root *sr;
	u32 crc;
	int ret;

	*pbh = NULL;
	bh_sr = __bread(nilfs->ns_bdev, sr_block, nilfs->ns_blocksize);
	if (unlikely(!bh_sr)) {
		ret = NILFS_SEG_FAIL_IO;
		goto failed;
	}

	sr = (struct nilfs_super_root *)bh_sr->b_data;
	if (check) {
		unsigned int bytes = le16_to_cpu(sr->sr_bytes);

		if (bytes == 0 || bytes > nilfs->ns_blocksize) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
		if (nilfs_compute_checksum(
			    nilfs, bh_sr, &crc, sizeof(sr->sr_sum), bytes,
			    sr_block, 1)) {
			ret = NILFS_SEG_FAIL_IO;
			goto failed_bh;
		}
		if (crc != le32_to_cpu(sr->sr_sum)) {
			ret = NILFS_SEG_FAIL_CHECKSUM_SUPER_ROOT;
			goto failed_bh;
		}
	}
	*pbh = bh_sr;
	return 0;

 failed_bh:
	brelse(bh_sr);

 failed:
	return nilfs_warn_segment_error(nilfs->ns_sb, ret);
}

/**
 * nilfs_read_log_header - read summary header of the specified log
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: pointer to return segment summary structure
 *
 * Return: Buffer head pointer, or NULL if an I/O error occurs.
 */
static struct buffer_head *
nilfs_read_log_header(struct the_nilfs *nilfs, sector_t start_blocknr,
		      struct nilfs_segment_summary **sum)
{
	struct buffer_head *bh_sum;

	bh_sum = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (bh_sum)
		*sum = (struct nilfs_segment_summary *)bh_sum->b_data;
	return bh_sum;
}

/**
 * nilfs_validate_log - verify consistency of log
 * @nilfs: nilfs object
 * @seg_seq: sequence number of segment
 * @bh_sum: buffer head of summary block
 * @sum: segment summary struct
 *
 * Return: 0 on success, or one of the following internal codes on failure:
 * * %NILFS_SEG_FAIL_MAGIC - Magic number mismatch.
 * * %NILFS_SEG_FAIL_SEQ - Sequence number mismatch.
 * * %NILFS_SEG_FAIL_CONSISTENCY - Block count out of range.
 * * %NILFS_SEG_FAIL_IO - I/O error.
 * * %NILFS_SEG_FAIL_CHECKSUM_FULL - Full log checksum verification failed.
 */
static int nilfs_validate_log(struct the_nilfs *nilfs, u64 seg_seq,
			      struct buffer_head *bh_sum,
			      struct nilfs_segment_summary *sum)
{
	unsigned long nblock;
	u32 crc;
	int ret;

	ret = NILFS_SEG_FAIL_MAGIC;
	if (le32_to_cpu(sum->ss_magic) != NILFS_SEGSUM_MAGIC)
		goto out;

	ret = NILFS_SEG_FAIL_SEQ;
	if (le64_to_cpu(sum->ss_seq) != seg_seq)
		goto out;

	nblock = le32_to_cpu(sum->ss_nblocks);
	ret = NILFS_SEG_FAIL_CONSISTENCY;
	if (unlikely(nblock == 0 || nblock > nilfs->ns_blocks_per_segment))
		/* This limits the number of blocks read in the CRC check */
		goto out;

	ret = NILFS_SEG_FAIL_IO;
	if (nilfs_compute_checksum(nilfs, bh_sum, &crc, sizeof(sum->ss_datasum),
				   ((u64)nblock << nilfs->ns_blocksize_bits),
				   bh_sum->b_blocknr, nblock))
		goto out;

	ret = NILFS_SEG_FAIL_CHECKSUM_FULL;
	if (crc != le32_to_cpu(sum->ss_datasum))
		goto out;
	ret = 0;
 out:
	return ret;
}

/**
 * nilfs_read_summary_info - read an item on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be read
 *
 * Return: Kernel space address of current segment summary entry, or
 * NULL if an I/O error occurs.
 */
static void *nilfs_read_summary_info(struct the_nilfs *nilfs,
				     struct buffer_head **pbh,
				     unsigned int *offset, unsigned int bytes)
{
	void *ptr;
	sector_t blocknr;

	BUG_ON((*pbh)->b_size < *offset);
	if (bytes > (*pbh)->b_size - *offset) {
		blocknr = (*pbh)->b_blocknr;
		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + 1,
			       nilfs->ns_blocksize);
		if (unlikely(!*pbh))
			return NULL;
		*offset = 0;
	}
	ptr = (*pbh)->b_data + *offset;
	*offset += bytes;
	return ptr;
}

/**
 * nilfs_skip_summary_info - skip items on summary blocks of a log
 * @nilfs: nilfs object
 * @pbh: the current buffer head on summary blocks [in, out]
 * @offset: the current byte offset on summary blocks [in, out]
 * @bytes: byte size of the item to be skipped
 * @count: number of items to be skipped
 */
static void nilfs_skip_summary_info(struct the_nilfs *nilfs,
				    struct buffer_head **pbh,
				    unsigned int *offset, unsigned int bytes,
				    unsigned long count)
{
	unsigned int rest_item_in_current_block
		= ((*pbh)->b_size - *offset) / bytes;

	if (count <= rest_item_in_current_block) {
		*offset += bytes * count;
	} else {
		sector_t blocknr = (*pbh)->b_blocknr;
		unsigned int nitem_per_block = (*pbh)->b_size / bytes;
		unsigned int bcnt;

		count -= rest_item_in_current_block;
		bcnt = DIV_ROUND_UP(count, nitem_per_block);
		*offset = bytes * (count - (bcnt - 1) * nitem_per_block);

		brelse(*pbh);
		*pbh = __bread(nilfs->ns_bdev, blocknr + bcnt,
			       nilfs->ns_blocksize);
	}
}

/**
 * nilfs_scan_dsync_log - get block information of a log written for data sync
 * @nilfs: nilfs object
 * @start_blocknr: start block number of the log
 * @sum: log summary information
 * @head: list head to add nilfs_recovery_block struct
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EIO - I/O error.
 * * %-ENOMEM - Insufficient memory available.
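 *
 * For each data block described in the log, a struct nilfs_recovery_block
 * is allocated and appended to @head.  The caller is responsible for
 * releasing the list (see dispose_recovery_list()).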
 */
static int nilfs_scan_dsync_log(struct the_nilfs *nilfs, sector_t start_blocknr,
				struct nilfs_segment_summary *sum,
				struct list_head *head)
{
	struct buffer_head *bh;
	unsigned int offset;
	u32 nfinfo, sumbytes;
	sector_t blocknr;
	ino_t ino;
	int err = -EIO;

	nfinfo = le32_to_cpu(sum->ss_nfinfo);
	if (!nfinfo)
		return 0;

	sumbytes = le32_to_cpu(sum->ss_sumbytes);
	blocknr = start_blocknr + DIV_ROUND_UP(sumbytes, nilfs->ns_blocksize);
	bh = __bread(nilfs->ns_bdev, start_blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh))
		goto out;

	offset = le16_to_cpu(sum->ss_bytes);
	for (;;) {
		unsigned long nblocks, ndatablk, nnodeblk;
		struct nilfs_finfo *finfo;

		finfo = nilfs_read_summary_info(nilfs, &bh, &offset,
						sizeof(*finfo));
		if (unlikely(!finfo))
			goto out;

		ino = le64_to_cpu(finfo->fi_ino);
		nblocks = le32_to_cpu(finfo->fi_nblocks);
		ndatablk = le32_to_cpu(finfo->fi_ndatablk);
		nnodeblk = nblocks - ndatablk;

		while (ndatablk-- > 0) {
			struct nilfs_recovery_block *rb;
			struct nilfs_binfo_v *binfo;

			binfo = nilfs_read_summary_info(nilfs, &bh, &offset,
							sizeof(*binfo));
			if (unlikely(!binfo))
				goto out;

			rb = kmalloc(sizeof(*rb), GFP_NOFS);
			if (unlikely(!rb)) {
				err = -ENOMEM;
				goto out;
			}
			rb->ino = ino;
			rb->blocknr = blocknr++;
			rb->vblocknr = le64_to_cpu(binfo->bi_vblocknr);
			rb->blkoff = le64_to_cpu(binfo->bi_blkoff);
			/* INIT_LIST_HEAD(&rb->list); */
			list_add_tail(&rb->list, head);
		}
		if (--nfinfo == 0)
			break;
		blocknr += nnodeblk; /* always 0 for data sync logs */
		nilfs_skip_summary_info(nilfs, &bh, &offset, sizeof(__le64),
					nnodeblk);
		if (unlikely(!bh))
			goto out;
	}
	err = 0;
 out:
	brelse(bh); /* brelse(NULL) is just ignored */
	return err;
}

static void dispose_recovery_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_recovery_block *rb;

		rb = list_first_entry(head, struct nilfs_recovery_block, list);
		list_del(&rb->list);
		kfree(rb);
	}
}

struct nilfs_segment_entry {
	struct list_head list;
	__u64 segnum;
};

static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)
{
	struct nilfs_segment_entry *ent = kmalloc(sizeof(*ent), GFP_NOFS);

	if (unlikely(!ent))
		return -ENOMEM;

	ent->segnum = segnum;
	INIT_LIST_HEAD(&ent->list);
	list_add_tail(&ent->list, head);
	return 0;
}

void nilfs_dispose_segment_list(struct list_head *head)
{
	while (!list_empty(head)) {
		struct nilfs_segment_entry *ent;

		ent = list_first_entry(head, struct nilfs_segment_entry, list);
		list_del(&ent->list);
		kfree(ent);
	}
}

static int nilfs_prepare_segment_for_recovery(struct the_nilfs *nilfs,
					      struct super_block *sb,
					      struct nilfs_recovery_info *ri)
{
	struct list_head *head = &ri->ri_used_segments;
	struct nilfs_segment_entry *ent, *n;
	struct inode *sufile = nilfs->ns_sufile;
	__u64 segnum[4];
	int err;
	int i;

	segnum[0] = nilfs->ns_segnum;
	segnum[1] = nilfs->ns_nextnum;
	segnum[2] = ri->ri_segnum;
	segnum[3] = ri->ri_nextnum;

	/*
	 * Releasing the next segment of the latest super root.
	 * The next segment is invalidated by this recovery.
	 */
	err = nilfs_sufile_free(sufile, segnum[1]);
	if (unlikely(err)) {
		if (err == -ENOENT) {
			nilfs_err(sb,
				  "checkpoint log inconsistency at block %llu (segment %llu): next segment %llu is unallocated",
				  (unsigned long long)nilfs->ns_last_pseg,
				  (unsigned long long)nilfs->ns_segnum,
				  (unsigned long long)segnum[1]);
			err = -EINVAL;
		}
		goto failed;
	}

	for (i = 1; i < 4; i++) {
		err = nilfs_segment_list_add(head, segnum[i]);
		if (unlikely(err))
			goto failed;
	}

	/*
	 * Collecting segments written after the latest super root.
	 * These are marked dirty to avoid being reallocated in the next write.
	 */
	list_for_each_entry_safe(ent, n, head, list) {
		if (ent->segnum != segnum[0]) {
			err = nilfs_sufile_scrap(sufile, ent->segnum);
			if (unlikely(err))
				goto failed;
		}
		list_del(&ent->list);
		kfree(ent);
	}

	/* Allocate new segments for recovery */
	err = nilfs_sufile_alloc(sufile, &segnum[0]);
	if (unlikely(err))
		goto failed;

	nilfs->ns_pseg_offset = 0;
	nilfs->ns_seg_seq = ri->ri_seq + 2;
	nilfs->ns_nextnum = nilfs->ns_segnum = segnum[0];

 failed:
	/* No need to recover sufile because it will be destroyed on error */
	return err;
}

static int nilfs_recovery_copy_block(struct the_nilfs *nilfs,
				     struct nilfs_recovery_block *rb,
				     loff_t pos, struct folio *folio)
{
	struct buffer_head *bh_org;
	size_t from = offset_in_folio(folio, pos);

	bh_org = __bread(nilfs->ns_bdev, rb->blocknr, nilfs->ns_blocksize);
	if (unlikely(!bh_org))
		return -EIO;

	memcpy_to_folio(folio, from, bh_org->b_data, bh_org->b_size);
	brelse(bh_org);
	return 0;
}

static int nilfs_recover_dsync_blocks(struct the_nilfs *nilfs,
				      struct super_block *sb,
				      struct nilfs_root *root,
				      struct list_head *head,
				      unsigned long *nr_salvaged_blocks)
{
	struct inode *inode;
	struct nilfs_recovery_block *rb, *n;
	unsigned int blocksize = nilfs->ns_blocksize;
	struct folio *folio;
	loff_t pos;
	int err = 0, err2 = 0;

	list_for_each_entry_safe(rb, n, head, list) {
		inode = nilfs_iget(sb, root, rb->ino);
		if (IS_ERR(inode)) {
			err = PTR_ERR(inode);
			inode = NULL;
			goto failed_inode;
		}

		pos = rb->blkoff << inode->i_blkbits;
		err = block_write_begin(inode->i_mapping, pos, blocksize,
					&folio, nilfs_get_block);
		if (unlikely(err)) {
			loff_t isize = inode->i_size;

			if (pos + blocksize > isize)
				nilfs_write_failed(inode->i_mapping,
						   pos + blocksize);
			goto failed_inode;
		}

		err = nilfs_recovery_copy_block(nilfs, rb, pos, folio);
		if (unlikely(err))
			goto failed_folio;

		err = nilfs_set_file_dirty(inode, 1);
		if (unlikely(err))
			goto failed_folio;

		block_write_end(NULL, inode->i_mapping, pos, blocksize,
				blocksize, folio, NULL);

		folio_unlock(folio);
		folio_put(folio);

		(*nr_salvaged_blocks)++;
		goto next;

 failed_folio:
		folio_unlock(folio);
		folio_put(folio);

 failed_inode:
		nilfs_warn(sb,
			   "error %d recovering data block (ino=%lu, block-offset=%llu)",
			   err, (unsigned long)rb->ino,
			   (unsigned long long)rb->blkoff);
		if (!err2)
			err2 = err;
 next:
		iput(inode); /* iput(NULL) is just ignored */
		list_del_init(&rb->list);
		kfree(rb);
	}
	return err2;
}

/**
 * nilfs_do_roll_forward - salvage logical segments newer than the latest
 * checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @root: NILFS root instance
 * @ri: pointer to a nilfs_recovery_info
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - Log format error.
 * * %-EIO - I/O error.
 * * %-ENOMEM - Insufficient memory available.
 */
static int nilfs_do_roll_forward(struct the_nilfs *nilfs,
				 struct super_block *sb,
				 struct nilfs_root *root,
				 struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start;
	sector_t seg_start, seg_end; /* Starting/ending DBN of full segment */
	unsigned long nsalvaged_blocks = 0;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	int empty_seg = 0;
	int err = 0, ret;
	LIST_HEAD(dsync_blocks); /* list of data blocks to be recovered */
	enum {
		RF_INIT_ST,
		RF_DSYNC_ST, /* scanning data-sync segments */
	};
	int state = RF_INIT_ST;

	pseg_start = ri->ri_lsegs_start;
	seg_seq = ri->ri_lsegs_start_seq;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	while (segnum != ri->ri_segnum || pseg_start <= ri->ri_pseg_start) {
		brelse(bh_sum);
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum) {
			err = -EIO;
			goto failed;
		}

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO) {
				err = -EIO;
				goto failed;
			}
			goto strayed;
		}

		flags = le16_to_cpu(sum->ss_flags);
		if (flags & NILFS_SS_SR)
			goto confused;

		/* Found a valid partial segment; do recovery actions */
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		empty_seg = 0;
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		if (!(flags & NILFS_SS_GC))
			nilfs->ns_nongc_ctime = nilfs->ns_ctime;

		switch (state) {
		case RF_INIT_ST:
			if (!(flags & NILFS_SS_LOGBGN) ||
			    !(flags & NILFS_SS_SYNDT))
				goto try_next_pseg;
			state = RF_DSYNC_ST;
			fallthrough;
		case RF_DSYNC_ST:
			if (!(flags & NILFS_SS_SYNDT))
				goto confused;

			err = nilfs_scan_dsync_log(nilfs, pseg_start, sum,
						   &dsync_blocks);
			if (unlikely(err))
				goto failed;
			if (flags & NILFS_SS_LOGEND) {
				err = nilfs_recover_dsync_blocks(
					nilfs, sb, root, &dsync_blocks,
					&nsalvaged_blocks);
				if (unlikely(err))
					goto failed;
				state = RF_INIT_ST;
			}
			break; /* Fall through to try_next_pseg */
		}

 try_next_pseg:
		if (pseg_start == ri->ri_lsegs_end)
			break;
		pseg_start += le32_to_cpu(sum->ss_nblocks);
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		if (pseg_start == ri->ri_lsegs_end)
			break;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			break;
		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

	if (nsalvaged_blocks) {
		nilfs_info(sb, "salvaged %lu blocks", nsalvaged_blocks);
		ri->ri_need_recovery = NILFS_RECOVERY_ROLLFORWARD_DONE;
	}
 out:
	brelse(bh_sum);
	dispose_recovery_list(&dsync_blocks);
	return err;

 confused:
	err = -EINVAL;
 failed:
	nilfs_err(sb,
		  "error %d roll-forwarding partial segment at blocknr = %llu",
		  err,
		  (unsigned long long)pseg_start);
	goto out;
}

static void nilfs_finish_roll_forward(struct the_nilfs *nilfs,
				      struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh;
	int err;

	if (nilfs_get_segnum_of_block(nilfs, ri->ri_lsegs_start) !=
	    nilfs_get_segnum_of_block(nilfs, ri->ri_super_root))
		return;

	bh = __getblk(nilfs->ns_bdev, ri->ri_lsegs_start, nilfs->ns_blocksize);
	if (WARN_ON(!bh))
		return; /* should never happen */

	lock_buffer(bh);
	memset(bh->b_data, 0, bh->b_size);
	set_buffer_uptodate(bh);
	set_buffer_dirty(bh);
	unlock_buffer(bh);

	err = sync_dirty_buffer(bh);
	if (unlikely(err))
		nilfs_warn(nilfs->ns_sb,
			   "buffer sync write failed during post-cleaning of recovery.");
	brelse(bh);
}

/**
 * nilfs_abort_roll_forward - cleaning up after a failed rollforward recovery
 * @nilfs: nilfs object
 */
static void nilfs_abort_roll_forward(struct the_nilfs *nilfs)
{
	struct nilfs_inode_info *ii, *n;
	LIST_HEAD(head);

	/* Abandon inodes that have read recovery data */
	spin_lock(&nilfs->ns_inode_lock);
	list_splice_init(&nilfs->ns_dirty_files, &head);
	spin_unlock(&nilfs->ns_inode_lock);
	if (list_empty(&head))
		return;

	set_nilfs_purging(nilfs);
	list_for_each_entry_safe(ii, n, &head, i_dirty) {
		spin_lock(&nilfs->ns_inode_lock);
		list_del_init(&ii->i_dirty);
		spin_unlock(&nilfs->ns_inode_lock);

		iput(&ii->vfs_inode);
	}
	clear_nilfs_purging(nilfs);
}

/**
 * nilfs_salvage_orphan_logs - salvage logs written after the latest checkpoint
 * @nilfs: nilfs object
 * @sb: super block instance
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - Inconsistent filesystem state.
 * * %-EIO - I/O error.
 * * %-ENOMEM - Insufficient memory available.
 * * %-ENOSPC - No space left on device (only in a panic state).
 * * %-ERESTARTSYS - Interrupted.
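 *
 * If @ri does not record any logical segment to be rolled forward (i.e.
 * ri_lsegs_start or ri_lsegs_end is zero), this function returns 0 without
 * doing anything.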
 */
int nilfs_salvage_orphan_logs(struct the_nilfs *nilfs,
			      struct super_block *sb,
			      struct nilfs_recovery_info *ri)
{
	struct nilfs_root *root;
	int err;

	if (ri->ri_lsegs_start == 0 || ri->ri_lsegs_end == 0)
		return 0;

	err = nilfs_attach_checkpoint(sb, ri->ri_cno, true, &root);
	if (unlikely(err)) {
		nilfs_err(sb, "error %d loading the latest checkpoint", err);
		return err;
	}

	err = nilfs_do_roll_forward(nilfs, sb, root, ri);
	if (unlikely(err))
		goto failed;

	if (ri->ri_need_recovery == NILFS_RECOVERY_ROLLFORWARD_DONE) {
		err = nilfs_prepare_segment_for_recovery(nilfs, sb, ri);
		if (unlikely(err)) {
			nilfs_err(sb, "error %d preparing segment for recovery",
				  err);
			goto failed;
		}

		err = nilfs_attach_log_writer(sb, root);
		if (unlikely(err))
			goto failed;

		set_nilfs_discontinued(nilfs);
		err = nilfs_construct_segment(sb);
		nilfs_detach_log_writer(sb);

		if (unlikely(err)) {
			nilfs_err(sb, "error %d writing segment for recovery",
				  err);
			goto put_root;
		}

		nilfs_finish_roll_forward(nilfs, ri);
	}

 put_root:
	nilfs_put_root(root);
	return err;

 failed:
	nilfs_abort_roll_forward(nilfs);
	goto put_root;
}

/**
 * nilfs_search_super_root - search the latest valid super root
 * @nilfs: the_nilfs
 * @ri: pointer to a nilfs_recovery_info struct to store search results.
 *
 * nilfs_search_super_root() looks for the latest super-root from a partial
 * segment pointed by the superblock. It sets up struct the_nilfs through
 * this search. It fills nilfs_recovery_info (ri) required for recovery.
 *
 * Return: 0 on success, or one of the following negative error codes on
 * failure:
 * * %-EINVAL - No valid segment found.
 * * %-EIO - I/O error.
 * * %-ENOMEM - Insufficient memory available.
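 *
 * The logical segment range recorded in @ri (ri_lsegs_start and
 * ri_lsegs_end) is what nilfs_salvage_orphan_logs() later uses to perform
 * roll-forward recovery.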
 */
int nilfs_search_super_root(struct the_nilfs *nilfs,
			    struct nilfs_recovery_info *ri)
{
	struct buffer_head *bh_sum = NULL;
	struct nilfs_segment_summary *sum = NULL;
	sector_t pseg_start, pseg_end, sr_pseg_start = 0;
	sector_t seg_start, seg_end; /* range of full segment (block number) */
	sector_t b, end;
	unsigned long nblocks;
	unsigned int flags;
	u64 seg_seq;
	__u64 segnum, nextnum = 0;
	__u64 cno;
	LIST_HEAD(segments);
	int empty_seg = 0, scan_newer = 0;
	int ret;

	pseg_start = nilfs->ns_last_pseg;
	seg_seq = nilfs->ns_last_seq;
	cno = nilfs->ns_last_cno;
	segnum = nilfs_get_segnum_of_block(nilfs, pseg_start);

	/* Calculate range of segment */
	nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);

	/* Read ahead segment */
	b = seg_start;
	while (b <= seg_end)
		__breadahead(nilfs->ns_bdev, b++, nilfs->ns_blocksize);

	for (;;) {
		brelse(bh_sum);
		ret = NILFS_SEG_FAIL_IO;
		bh_sum = nilfs_read_log_header(nilfs, pseg_start, &sum);
		if (!bh_sum)
			goto failed;

		ret = nilfs_validate_log(nilfs, seg_seq, bh_sum, sum);
		if (ret) {
			if (ret == NILFS_SEG_FAIL_IO)
				goto failed;
			goto strayed;
		}

		nblocks = le32_to_cpu(sum->ss_nblocks);
		pseg_end = pseg_start + nblocks - 1;
		if (unlikely(pseg_end > seg_end)) {
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto strayed;
		}

		/* A valid partial segment */
		ri->ri_pseg_start = pseg_start;
		ri->ri_seq = seg_seq;
		ri->ri_segnum = segnum;
		nextnum = nilfs_get_segnum_of_block(nilfs,
						    le64_to_cpu(sum->ss_next));
		ri->ri_nextnum = nextnum;
		empty_seg = 0;

		flags = le16_to_cpu(sum->ss_flags);
		if (!(flags & NILFS_SS_SR) && !scan_newer) {
			/*
			 * This will never happen because a superblock
			 * (last_segment) always points to a pseg with
			 * a super root.
			 */
			ret = NILFS_SEG_FAIL_CONSISTENCY;
			goto failed;
		}

		if (pseg_start == seg_start) {
			nilfs_get_segment_range(nilfs, nextnum, &b, &end);
			while (b <= end)
				__breadahead(nilfs->ns_bdev, b++,
					     nilfs->ns_blocksize);
		}
		if (!(flags & NILFS_SS_SR)) {
			if (!ri->ri_lsegs_start && (flags & NILFS_SS_LOGBGN)) {
				ri->ri_lsegs_start = pseg_start;
				ri->ri_lsegs_start_seq = seg_seq;
			}
			if (flags & NILFS_SS_LOGEND)
				ri->ri_lsegs_end = pseg_start;
			goto try_next_pseg;
		}

		/* A valid super root was found. */
		ri->ri_cno = cno++;
		ri->ri_super_root = pseg_end;
		ri->ri_lsegs_start = ri->ri_lsegs_end = 0;

		nilfs_dispose_segment_list(&segments);
		sr_pseg_start = pseg_start;
		nilfs->ns_pseg_offset = pseg_start + nblocks - seg_start;
		nilfs->ns_seg_seq = seg_seq;
		nilfs->ns_segnum = segnum;
		nilfs->ns_cno = cno; /* nilfs->ns_cno = ri->ri_cno + 1 */
		nilfs->ns_ctime = le64_to_cpu(sum->ss_create);
		nilfs->ns_nextnum = nextnum;

		if (scan_newer)
			ri->ri_need_recovery = NILFS_RECOVERY_SR_UPDATED;
		else {
			if (nilfs->ns_mount_state & NILFS_VALID_FS)
				goto super_root_found;
			scan_newer = 1;
		}

 try_next_pseg:
		/* Standing on a course, or met an inconsistent state */
		pseg_start += nblocks;
		if (pseg_start < seg_end)
			continue;
		goto feed_segment;

 strayed:
		/* Off the trail */
		if (!scan_newer)
			/*
			 * This can happen if a checkpoint was written without
			 * barriers, or as a result of an I/O failure.
			 */
			goto failed;

 feed_segment:
		/* Looking to the next full segment */
		if (empty_seg++)
			goto super_root_found; /* found a valid super root */

		ret = nilfs_segment_list_add(&segments, segnum);
		if (unlikely(ret))
			goto failed;

		seg_seq++;
		segnum = nextnum;
		nilfs_get_segment_range(nilfs, segnum, &seg_start, &seg_end);
		pseg_start = seg_start;
	}

 super_root_found:
	/* Updating pointers relating to the latest checkpoint */
	brelse(bh_sum);
	list_splice_tail(&segments, &ri->ri_used_segments);
	nilfs->ns_last_pseg = sr_pseg_start;
	nilfs->ns_last_seq = nilfs->ns_seg_seq;
	nilfs->ns_last_cno = ri->ri_cno;
	return 0;

 failed:
	brelse(bh_sum);
	nilfs_dispose_segment_list(&segments);
	return ret < 0 ? ret : nilfs_warn_segment_error(nilfs->ns_sb, ret);
}