Lines Matching refs:log
36 struct xlog *log);
42 struct xlog *log);
45 struct xlog *log,
52 struct xlog *log,
58 struct xlog *log,
63 struct xlog *log,
72 struct xlog *log);
82 * However, this padding does not get written into the log, and hence we have to
83 * track the space used by the log vectors separately to prevent log space hangs
84 * due to inaccurate accounting (i.e. a leak) of the used log space through the
88 * log. This prepends the data region we return to the caller to copy their data
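The padding/accounting split described in the fragment above can be illustrated with a small userspace sketch: the copy buffer is padded for alignment, but only the unpadded size is charged against log space, so padding cannot leak reservation. The structure and helper below are illustrative stand-ins, not the real struct xfs_log_vec layout.

#include <stdint.h>
#include <stdlib.h>

struct sketch_log_vec {
        uint32_t        lv_bytes;       /* bytes accounted against the log */
        uint32_t        lv_buf_len;     /* padded allocation length */
        char            *lv_buf;        /* data region handed to the caller */
};

static struct sketch_log_vec *sketch_vec_alloc(uint32_t need)
{
        struct sketch_log_vec *lv;

        lv = malloc(sizeof(*lv));
        if (!lv)
                return NULL;
        lv->lv_bytes = need;                    /* what grant accounting sees */
        lv->lv_buf_len = (need + 7) & ~7u;      /* pad the copy buffer to 8 bytes */
        lv->lv_buf = calloc(1, lv->lv_buf_len);
        if (!lv->lv_buf) {
                free(lv);
                return NULL;
        }
        return lv;
}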
160 struct xlog *log,
164 int64_t diff = xlog_lsn_sub(log, new_head, old_head);
166 xlog_grant_sub_space(&log->l_reserve_head, diff);
167 xlog_grant_sub_space(&log->l_write_head, diff);
171 * Return the space in the log between the tail and the head. In the case where
180 struct xlog *log,
186 free_bytes = log->l_logsize - READ_ONCE(log->l_tail_space) -
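As a hedged restatement of the arithmetic at line 186: the space available to new reservations is the log size, minus the space consumed up to the tail, minus what this grant head has already handed out. The in-kernel version also copes with transiently negative results from racy reads; this sketch simply clamps to zero.

#include <stdint.h>

static int64_t sketch_grant_space_left(int64_t logsize, int64_t tail_space,
                                       int64_t bytes_granted)
{
        int64_t free_bytes = logsize - tail_space - bytes_granted;

        /* never report negative space; callers treat 0 as "must wait" */
        return free_bytes > 0 ? free_bytes : 0;
}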
207 struct xlog *log,
211 if (head == &log->l_write_head) {
224 struct xlog *log,
232 need_bytes = xlog_ticket_reservation(log, head, tic);
237 trace_xfs_log_grant_wake_up(log, tic);
246 struct xlog *log,
255 if (xlog_is_shutdown(log))
261 XFS_STATS_INC(log->l_mp, xs_sleep_logspace);
263 /* Push on the AIL to free up all the log space. */
264 xfs_ail_push_all(log->l_ailp);
266 trace_xfs_log_grant_sleep(log, tic);
268 trace_xfs_log_grant_wake(log, tic);
271 if (xlog_is_shutdown(log))
273 } while (xlog_grant_space_left(log, head) < need_bytes);
283 * Atomically get the log space required for a log ticket.
301 struct xlog *log,
309 ASSERT(!xlog_in_recovery(log));
317 *need_bytes = xlog_ticket_reservation(log, head, tic);
318 free_bytes = xlog_grant_space_left(log, head);
321 if (!xlog_grant_head_wake(log, head, &free_bytes) ||
323 error = xlog_grant_head_wait(log, head, tic,
329 error = xlog_grant_head_wait(log, head, tic, *need_bytes);
341 * Do not write to the log on norecovery mounts, if the data or log
343 * mounts allow internal writes for log recovery and unmount purposes,
365 struct xlog *log = mp->m_log;
369 if (xlog_is_shutdown(log))
377 * the log. Just add one to the existing tid so that we can see chains
378 * of rolling transactions in the log easily.
385 trace_xfs_log_regrant(log, tic);
387 error = xlog_grant_head_check(log, &log->l_write_head, tic,
392 xlog_grant_add_space(&log->l_write_head, need_bytes);
393 trace_xfs_log_regrant_exit(log, tic);
408 * Reserve log space and return a ticket corresponding to the reservation.
410 * Each reservation is going to reserve extra space for a log record header.
411 * When writes happen to the on-disk log, we don't subtract the length of the
412 * log record header from any reservation. By wasting space in each
423 struct xlog *log = mp->m_log;
428 if (xlog_is_shutdown(log))
434 tic = xlog_ticket_alloc(log, unit_bytes, cnt, permanent);
436 trace_xfs_log_reserve(log, tic);
437 error = xlog_grant_head_check(log, &log->l_reserve_head, tic,
442 xlog_grant_add_space(&log->l_reserve_head, need_bytes);
443 xlog_grant_add_space(&log->l_write_head, need_bytes);
444 trace_xfs_log_reserve_exit(log, tic);
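The pairing at the reserve exit above, where both grant heads are charged, can be modeled with C11 atomics. The types and helpers here are a userspace toy, not the kernel's grant head implementation; note that the regrant path earlier only re-charges the write head.

#include <stdatomic.h>
#include <stdint.h>

struct sketch_grant_head {
        _Atomic int64_t grant;          /* bytes currently granted at this head */
};

static void sketch_grant_add_space(struct sketch_grant_head *head, int64_t bytes)
{
        atomic_fetch_add_explicit(&head->grant, bytes, memory_order_relaxed);
}

/* a new reservation charges both heads, mirroring the two calls above */
static void sketch_log_reserve(struct sketch_grant_head *reserve_head,
                               struct sketch_grant_head *write_head,
                               int64_t need_bytes)
{
        sketch_grant_add_space(reserve_head, need_bytes);
        sketch_grant_add_space(write_head, need_bytes);
}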
459 * Run all the pending iclog callbacks and wake log force waiters and iclog
461 * don't care what order we process callbacks here because the log is shut down
474 struct xlog *log)
479 iclog = log->l_iclog;
486 spin_unlock(&log->l_icloglock);
490 spin_lock(&log->l_icloglock);
493 } while ((iclog = iclog->ic_next) != log->l_iclog);
495 wake_up_all(&log->l_flush_wait);
503 * log tail is updated correctly. NEED_FUA indicates that the iclog will be
505 * within the iclog. We need to ensure that the log tail does not move beyond
520 struct xlog *log,
526 lockdep_assert_held(&log->l_icloglock);
530 * Grabbing the current log tail needs to be atomic w.r.t. the writing
531 * of the tail LSN into the iclog so we guarantee that the log tail does
539 cpu_to_be64(atomic64_read(&log->l_tail_lsn));
544 if (xlog_is_shutdown(log)) {
551 xlog_state_shutdown_callbacks(log);
564 xlog_verify_tail_lsn(log, iclog);
567 spin_unlock(&log->l_icloglock);
568 xlog_sync(log, iclog, ticket);
569 spin_lock(&log->l_icloglock);
574 * Mount a log filesystem
577 * log_target - buftarg of on-disk log device
579  * num_bblks - Number of BBSIZE blocks in on-disk log
590 struct xlog *log;
606 log = xlog_alloc_log(mp, log_target, blk_offset, num_bblks);
607 if (IS_ERR(log)) {
608 error = PTR_ERR(log);
611 mp->m_log = log;
614  * Now that we have set up the log and its internal geometry
615 * parameters, we can validate the given log space and drop a critical
616 * message via syslog if the log size is too small. A log that is too
617 * small can lead to unexpected situations in transaction log space
619 * the other log geometry constraints, so we don't have to check those
625 * way to grow the log (short of black magic surgery with xfs_db).
629 * filesystem with a log that is too small.
649 "Continuing onwards, but if log hangs are experienced then please report this message in the bug report.");
653 * Initialize the AIL now we have a log.
660 log->l_ailp = mp->m_ail;
663  * skip log recovery on a norecovery mount. Pretend it all
667 error = xlog_recover(log);
669 xfs_warn(mp, "log mount/recovery failed: error %d",
671 xlog_recover_cancel(log);
676 error = xfs_sysfs_init(&log->l_kobj, &xfs_log_ktype, &mp->m_kobj,
677 "log");
682 clear_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
685  * Now the log has been fully initialised and we know where our
689 xlog_cil_init_post_recovery(log);
696 xlog_dealloc_log(log);
707 * If we finish recovery successfully, start the background log work. If we are
715 struct xlog *log = mp->m_log;
724 * During the second phase of log recovery, we need iget and
727 * of inodes before we're done replaying log items on those
736 * in log recovery failure. We have to evict the unreferenced
745 if (xlog_recovery_needed(log))
746 error = xlog_recover_finish(log);
751 * Drain the buffer LRU after log recovery. This is required for v4
759 if (xlog_recovery_needed(log)) {
771 clear_bit(XLOG_RECOVERY_NEEDED, &log->l_opstate);
773 /* Make sure the log is dead if we're returning failure. */
774 ASSERT(!error || xlog_is_shutdown(log));
781 * the log.
807 * Cycle all the iclogbuf locks to make sure all log IO completion
811 xlog_wait_iclog_completion(struct xlog *log)
814 struct xlog_in_core *iclog = log->l_iclog;
816 for (i = 0; i < log->l_iclog_bufs; i++) {
825 * log force state machine. Waiting on ic_force_wait ensures iclog completions
834 struct xlog *log = iclog->ic_log;
837 if (!xlog_is_shutdown(log) &&
840 XFS_STATS_INC(log->l_mp, xs_log_force_sleep);
841 xlog_wait(&iclog->ic_force_wait, &log->l_icloglock);
843 spin_unlock(&log->l_icloglock);
846 if (xlog_is_shutdown(log))
858 struct xlog *log,
893 return xlog_write(log, NULL, &lv_chain, ticket, reg.i_len);
898 * log.
902 struct xlog *log)
904 struct xfs_mount *mp = log->l_mp;
913 error = xlog_write_unmount_record(log, tic);
916 * transitioning log state to shutdown. Just continue...
922 spin_lock(&log->l_icloglock);
923 iclog = log->l_iclog;
928 trace_xfs_log_umount_write(log, tic);
929 xfs_log_ticket_ungrant(log, tic);
935 struct xlog *log)
937 struct xlog_in_core *iclog = log->l_iclog;
942 } while ((iclog = iclog->ic_next) != log->l_iclog);
956 struct xlog *log = mp->m_log;
963 if (xlog_is_shutdown(log))
968 * record to force log recovery at next mount, after which the summary
979 xfs_log_unmount_verify_iclog(log);
980 xlog_unmount_write(log);
984 * Empty the log for unmount/freeze.
986 * To do this, we first need to shut down the background log work so it is not
987 * trying to cover the log as we clean up. We then need to unpin all objects in
988 * the log so we can then flush them out. Once they have completed their IO and
989 * run the callbacks removing themselves from the AIL, we can cover the log.
996 * Clear log incompat features since we're quiescing the log. Report
997 * failures, though it's not fatal to have a higher log feature
998 * protection level than the log contents actually require.
1006 "Failed to clear log incompat features on quiesce");
1039 * from the AIL so that the log is empty before we write the unmount record to
1040 * the log. Once this is done, we can tear down the AIL and the log.
1049 * If shutdown has come from iclog IO context, the log
1085 * Wake up processes waiting for log space after we have moved the log tail.
1091 struct xlog *log = mp->m_log;
1094 if (xlog_is_shutdown(log))
1097 if (!list_empty_careful(&log->l_write_head.waiters)) {
1098 ASSERT(!xlog_in_recovery(log));
1100 spin_lock(&log->l_write_head.lock);
1101 free_bytes = xlog_grant_space_left(log, &log->l_write_head);
1102 xlog_grant_head_wake(log, &log->l_write_head, &free_bytes);
1103 spin_unlock(&log->l_write_head.lock);
1106 if (!list_empty_careful(&log->l_reserve_head.waiters)) {
1107 ASSERT(!xlog_in_recovery(log));
1109 spin_lock(&log->l_reserve_head.lock);
1110 free_bytes = xlog_grant_space_left(log, &log->l_reserve_head);
1111 xlog_grant_head_wake(log, &log->l_reserve_head, &free_bytes);
1112 spin_unlock(&log->l_reserve_head.lock);
1118 * covered. To begin the transition to the idle state firstly the log needs to
1120 * we start attempting to cover the log.
1123 * informed that dummy transactions are required to move the log into the idle
1127 * cover the log as we may be in a situation where there isn't log space
1129 * tail of the log is pinned by an item that is modified in the CIL. Hence
1131 * can't start trying to idle the log until both the CIL and AIL are empty.
1137 struct xlog *log = mp->m_log;
1140 if (!xlog_cil_empty(log))
1143 spin_lock(&log->l_icloglock);
1144 switch (log->l_covered_state) {
1151 if (xfs_ail_min_lsn(log->l_ailp))
1153 if (!xlog_iclogs_empty(log))
1157 if (log->l_covered_state == XLOG_STATE_COVER_NEED)
1158 log->l_covered_state = XLOG_STATE_COVER_DONE;
1160 log->l_covered_state = XLOG_STATE_COVER_DONE2;
1166 spin_unlock(&log->l_icloglock);
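The switch above shows only part of the covering protocol. As a hedged sketch, idling the log takes two dummy records that step the state NEED -> DONE -> NEED2 -> DONE2; the enum mirrors the kernel's state names, while the transition helper is purely illustrative.

enum sketch_cover_state {
        SKETCH_COVER_IDLE,
        SKETCH_COVER_NEED,
        SKETCH_COVER_DONE,
        SKETCH_COVER_NEED2,
        SKETCH_COVER_DONE2,
};

/* advance only when both the AIL and the iclog ring are empty, matching
 * the two checks and two assignments visible in the switch above */
static enum sketch_cover_state
sketch_cover_advance(enum sketch_cover_state state, int ail_empty,
                     int iclogs_empty)
{
        if (!ail_empty || !iclogs_empty)
                return state;
        if (state == SKETCH_COVER_NEED)
                return SKETCH_COVER_DONE;
        if (state == SKETCH_COVER_NEED2)
                return SKETCH_COVER_DONE2;
        return state;
}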
1171 * Explicitly cover the log. This is similar to background log covering but
1173 * the log is idle and suitable for covering. The CIL, iclog buffers and AIL
1192 * state machine if the log requires covering. Therefore, we must call
1196 * Fall into the covering sequence if the log needs covering or the
1206 * To cover the log, commit the superblock twice (at most) in
1211 * covering the log. Push the AIL one more time to leave it empty, as
1230 struct xlog *log = iclog->ic_log;
1243 if (XFS_TEST_ERROR(error, log->l_mp, XFS_ERRTAG_IODONE_IOERR)) {
1244 xfs_alert(log->l_mp, "log I/O error %d", error);
1245 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
1261 * Return size of each in-core log record buffer.
1271 struct xlog *log)
1278 log->l_iclog_bufs = mp->m_logbufs;
1279 log->l_iclog_size = mp->m_logbsize;
1284 log->l_iclog_heads =
1286 log->l_iclog_hsize = log->l_iclog_heads << BBSHIFT;
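The header sizing above follows the v2 convention of one 512-byte header sector per 32k of iclog payload; a minimal sketch, with the constants restated locally for illustration (XLOG_HEADER_CYCLE_SIZE and BBSHIFT in the kernel):

#include <stdint.h>

#define SKETCH_BBSHIFT                  9               /* 512-byte basic blocks */
#define SKETCH_HEADER_CYCLE_SIZE        (32 * 1024)     /* payload covered per header sector */

static void sketch_iclog_header_size(uint32_t iclog_size, uint32_t *heads,
                                     uint32_t *hsize)
{
        /* one header basic block per 32k of iclog buffer, rounded up */
        *heads = (iclog_size + SKETCH_HEADER_CYCLE_SIZE - 1) /
                 SKETCH_HEADER_CYCLE_SIZE;
        *hsize = *heads << SKETCH_BBSHIFT;
}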
1298 * Clear the log incompat flags if we have the opportunity.
1300 * This only happens if we're about to log the second dummy transaction as part
1301 * of covering the log.
1305 struct xlog *log)
1307 struct xfs_mount *mp = log->l_mp;
1313 if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
1321 * disk. If there is nothing dirty, then we might need to cover the log to
1328 struct xlog *log = container_of(to_delayed_work(work),
1330 struct xfs_mount *mp = log->l_mp;
1335 * Dump a transaction into the log that contains no real change.
1336 * This is needed to stamp the current tail LSN into the log
1341 * will prevent log covering from making progress. Hence we
1342 * synchronously log the superblock instead to ensure the
1345 xlog_clear_incompat(log);
1358 * This routine initializes some of the log structure for a given mount point.
1369 struct xlog *log;
1377 log = kzalloc(sizeof(struct xlog), GFP_KERNEL | __GFP_RETRY_MAYFAIL);
1378 if (!log) {
1383 log->l_mp = mp;
1384 log->l_targ = log_target;
1385 log->l_logsize = BBTOB(num_bblks);
1386 log->l_logBBstart = blk_offset;
1387 log->l_logBBsize = num_bblks;
1388 log->l_covered_state = XLOG_STATE_COVER_IDLE;
1389 set_bit(XLOG_ACTIVE_RECOVERY, &log->l_opstate);
1390 INIT_DELAYED_WORK(&log->l_work, xfs_log_worker);
1391 INIT_LIST_HEAD(&log->r_dfops);
1393 log->l_prev_block = -1;
1394 /* log->l_tail_lsn = 0x100000000LL; cycle = 1; current block = 0 */
1395 xlog_assign_atomic_lsn(&log->l_tail_lsn, 1, 0);
1396 log->l_curr_cycle = 1; /* 0 is bad since this is initial value */
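The comment at line 1394 and the 0x100000000LL value follow from the LSN layout: the cycle number occupies the high 32 bits and the basic block number the low 32 bits. Minimal helpers in the spirit of xlog_assign_lsn/CYCLE_LSN/BLOCK_LSN:

#include <stdint.h>

static inline uint64_t sketch_assign_lsn(uint32_t cycle, uint32_t block)
{
        return ((uint64_t)cycle << 32) | block;         /* cycle:block */
}

static inline uint32_t sketch_cycle_lsn(uint64_t lsn)
{
        return (uint32_t)(lsn >> 32);
}

static inline uint32_t sketch_block_lsn(uint64_t lsn)
{
        return (uint32_t)lsn;
}

With cycle 1 and block 0 this is exactly the 0x100000000LL noted in the comment.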
1399 log->l_iclog_roundoff = mp->m_sb.sb_logsunit;
1401 log->l_iclog_roundoff = BBSIZE;
1403 xlog_grant_head_init(&log->l_reserve_head);
1404 xlog_grant_head_init(&log->l_write_head);
1422 /* for larger sector sizes, must have v2 or external log */
1423 if (log2_size && log->l_logBBstart > 0 &&
1426 "log sector size (0x%x) invalid for configuration.",
1431 log->l_sectBBsize = 1 << log2_size;
1433 xlog_get_iclog_buffer_size(mp, log);
1435 spin_lock_init(&log->l_icloglock);
1436 init_waitqueue_head(&log->l_flush_wait);
1438 iclogp = &log->l_iclog;
1446 ASSERT(log->l_iclog_size >= 4096);
1447 for (i = 0; i < log->l_iclog_bufs; i++) {
1448 size_t bvec_size = howmany(log->l_iclog_size, PAGE_SIZE) *
1460 iclog->ic_data = kvzalloc(log->l_iclog_size,
1468 xfs_has_logv2(log->l_mp) ? 2 : 1);
1469 head->h_size = cpu_to_be32(log->l_iclog_size);
1474 iclog->ic_size = log->l_iclog_size - log->l_iclog_hsize;
1476 iclog->ic_log = log;
1479 iclog->ic_datap = (void *)iclog->ic_data + log->l_iclog_hsize;
1488 *iclogp = log->l_iclog; /* complete ring */
1489 log->l_iclog->ic_prev = prev_iclog; /* re-write 1st prev ptr */
1491 log->l_ioend_workqueue = alloc_workqueue("xfs-log/%s",
1495 if (!log->l_ioend_workqueue)
1498 error = xlog_cil_init(log);
1501 return log;
1504 destroy_workqueue(log->l_ioend_workqueue);
1506 for (iclog = log->l_iclog; iclog; iclog = prev_iclog) {
1510 if (prev_iclog == log->l_iclog)
1514 kfree(log);
1524 struct xlog *log,
1544 if (xfs_has_logv2(log->l_mp)) {
1555 for (i = 1; i < log->l_iclog_heads; i++)
1561 * Calculate the checksum for a log buffer.
1568 struct xlog *log,
1581 if (xfs_has_logv2(log->l_mp)) {
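The checksum referenced here is CRC32c over the record header and data; the kernel uses its optimized crc32c() helper and computes the header portion with the embedded h_crc field treated as zero. A plain bitwise sketch of the same polynomial, for illustration only (the ~0 seed and final inversion, which the kernel splits across helpers, are folded together):

#include <stdint.h>
#include <stddef.h>

static uint32_t sketch_crc32c(const void *buf, size_t len)
{
        const uint8_t *p = buf;
        uint32_t crc = 0xFFFFFFFFu;

        while (len--) {
                crc ^= *p++;
                for (int i = 0; i < 8; i++)             /* reflected Castagnoli poly */
                        crc = (crc >> 1) ^ ((crc & 1) ? 0x82F63B78u : 0);
        }
        return ~crc;
}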
1612 struct xlog *log,
1617 ASSERT(bno < log->l_logBBsize);
1626  * across the log IO to achieve that.
1629 if (xlog_is_shutdown(log)) {
1632 * the log state machine to propagate I/O errors instead of
1643 * writeback throttle from throttling log writes behind background
1646 bio_init(&iclog->ic_bio, log->l_targ->bt_bdev, iclog->ic_bvec,
1649 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart + bno;
1656 * For external log devices, we also need to flush the data
1659 * but it *must* complete before we issue the external log IO.
1662 * writeback from the log succeeded. Repeating the flush is
1663 * not possible, hence we must shut down with log IO error to
1666 if (log->l_targ != log->l_mp->m_ddev_targp &&
1667 blkdev_issue_flush(log->l_mp->m_ddev_targp->bt_bdev))
1683 * If this log buffer would straddle the end of the log we will have
1686 if (bno + BTOBB(count) > log->l_logBBsize) {
1689 split = bio_split(&iclog->ic_bio, log->l_logBBsize - bno,
1695 iclog->ic_bio.bi_iter.bi_sector = log->l_logBBstart;
1701 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
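A small sketch of the wraparound handling in this write path: if the I/O would run past the physical end of the log it must be split, and the remainder restarts at the first block of the log device. BTOBB (bytes to 512-byte basic blocks) is restated locally.

#include <stdint.h>

#define SKETCH_BBSHIFT          9
#define SKETCH_BTOBB(bytes)     (((bytes) + (1u << SKETCH_BBSHIFT) - 1) >> SKETCH_BBSHIFT)

/* returns how many basic blocks spill past the end of the log, i.e. the
 * portion of the write that must be redirected back to block zero */
static uint64_t sketch_iclog_split_blocks(uint64_t bno, uint32_t count_bytes,
                                          uint64_t logBBsize)
{
        uint64_t end = bno + SKETCH_BTOBB(count_bytes);

        return end > logBBsize ? end - logBBsize : 0;
}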
1709 * written to the start of the log. Watch out for the header magic
1714 struct xlog *log,
1719 unsigned int split_offset = BBTOB(log->l_logBBsize - bno);
1733 struct xlog *log,
1740 count_init = log->l_iclog_hsize + iclog->ic_offset;
1741 count = roundup(count_init, log->l_iclog_roundoff);
1746 ASSERT(*roundoff < log->l_iclog_roundoff);
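Condensed sketch of the calculation above: the write size is the header plus the current data offset, rounded up to the iclog roundoff unit (the log stripe unit, or one basic block), and the difference is the roundoff that the caller later adds back to both grant heads.

#include <stdint.h>

static uint32_t sketch_calc_iclog_size(uint32_t hsize, uint32_t offset,
                                       uint32_t roundoff_unit, uint32_t *roundoff)
{
        uint32_t count_init = hsize + offset;
        uint32_t count = ((count_init + roundoff_unit - 1) / roundoff_unit) *
                         roundoff_unit;

        *roundoff = count - count_init;                 /* always < roundoff_unit */
        return count;
}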
1751 * Flush out the in-core log (iclog) to the on-disk log in an asynchronous
1753 * ptr in the log to point to the next available iclog. This allows further
1755 * Before an in-core log can be written out, the data section must be scanned
1767 * log will require grabbing the lock though.
1769 * The entire log manager uses a logical block numbering scheme. Only
1770 * xlog_write_iclog knows about the fact that the log may not start with
1775 struct xlog *log,
1787 count = xlog_calc_iclog_size(log, iclog, &roundoff);
1797 xlog_grant_add_space(&log->l_reserve_head, roundoff);
1798 xlog_grant_add_space(&log->l_write_head, roundoff);
1802 xlog_pack_data(log, iclog, roundoff);
1806 if (xfs_has_logv2(log->l_mp))
1810 XFS_STATS_INC(log->l_mp, xs_log_writes);
1811 XFS_STATS_ADD(log->l_mp, xs_log_blocks, BTOBB(count));
1816 if (bno + BTOBB(count) > log->l_logBBsize)
1817 xlog_split_iclog(log, &iclog->ic_header, bno, count);
1820 iclog->ic_header.h_crc = xlog_cksum(log, &iclog->ic_header,
1823 * Intentionally corrupt the log record CRC based on the error injection
1824 * frequency, if defined. This facilitates testing log recovery in the
1825 * event of torn writes. Hence, set the IOABORT state to abort the log
1830 if (XFS_TEST_ERROR(false, log->l_mp, XFS_ERRTAG_LOG_BAD_CRC)) {
1833 xfs_warn(log->l_mp,
1834 "Intentionally corrupted log record at LSN 0x%llx. Shutdown imminent.",
1838 xlog_verify_iclog(log, iclog, count);
1839 xlog_write_iclog(log, iclog, bno, count);
1843 * Deallocate a log structure
1847 struct xlog *log)
1854 * iclog EIO error will try to shut down the log, which accesses the
1857 xlog_cil_destroy(log);
1859 iclog = log->l_iclog;
1860 for (i = 0; i < log->l_iclog_bufs; i++) {
1867 log->l_mp->m_log = NULL;
1868 destroy_workqueue(log->l_ioend_workqueue);
1869 kfree(log);
1877 struct xlog *log,
1882 lockdep_assert_held(&log->l_icloglock);
1916 xfs_warn(mp, " log res = %d", tp->t_log_res);
1917 xfs_warn(mp, " log count = %d", tp->t_log_count);
1922 /* dump each log item */
1928 xfs_warn(mp, "log item: ");
1938 /* dump each iovec for the log item */
1976 * Write log vectors into a single iclog which is guaranteed by the caller
1977 * to have enough space to write the entire log vector into.
1995 * Ordered log vectors have no regions to write so this
2018 struct xlog *log = iclog->ic_log;
2021 spin_lock(&log->l_icloglock);
2023 xlog_state_finish_copy(log, iclog, *record_cnt, *data_cnt);
2024 error = xlog_state_release_iclog(log, iclog, ticket);
2025 spin_unlock(&log->l_icloglock);
2029 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2040 * Write log vectors into a single iclog which is smaller than the current chain
2042 * and then stop. We return the log vector that is to be written that cannot
2068 * length otherwise log recovery will just skip over it and
2115 * space for log transaction opheaders left in the current
2169 * No more iovecs remain in this logvec so return the next log vec to
2177 * Write some region out to in-core log
2191 * 2. Write log operation header (header per region)
2198 * 5. Release iclog for potential flush to on-disk log.
2208 * on all log operation writes which don't contain the end of the
2209 * region. The XLOG_END_TRANS bit is used for the in-core log
2218 struct xlog *log,
2233 xfs_alert_tag(log->l_mp, XFS_PTAG_LOGRES,
2235 xlog_print_tic_res(log->l_mp, ticket);
2236 xlog_force_shutdown(log, SHUTDOWN_LOG_IO_ERROR);
2239 error = xlog_state_get_iclog_space(log, len, &iclog, ticket,
2256 * If the entire log vec does not fit in the iclog, punt it to
2284 spin_lock(&log->l_icloglock);
2285 xlog_state_finish_copy(log, iclog, record_cnt, 0);
2286 error = xlog_state_release_iclog(log, iclog, ticket);
2287 spin_unlock(&log->l_icloglock);
2332 struct xlog *log,
2335 struct xlog_in_core *iclog = log->l_iclog;
2346 } while ((iclog = iclog->ic_next) != log->l_iclog);
2385 struct xlog *log,
2394 xlog_state_activate_iclogs(log, &iclogs_changed);
2398 log->l_covered_state = xlog_covered_state(log->l_covered_state,
2405 struct xlog *log)
2407 struct xlog_in_core *iclog = log->l_iclog;
2418 } while ((iclog = iclog->ic_next) != log->l_iclog);
2430 struct xlog *log,
2451 lowest_lsn = xlog_get_lowest_lsn(log);
2460 xlog_state_clean_iclog(log, iclog);
2485 struct xlog *log)
2486 __releases(&log->l_icloglock)
2487 __acquires(&log->l_icloglock)
2489 struct xlog_in_core *first_iclog = log->l_iclog;
2496 if (xlog_state_iodone_process_iclog(log, iclog))
2503 spin_unlock(&log->l_icloglock);
2510 spin_lock(&log->l_icloglock);
2511 xlog_state_clean_iclog(log, iclog);
2525 struct xlog *log)
2530 spin_lock(&log->l_icloglock);
2531 while (xlog_state_do_iclog_callbacks(log)) {
2532 if (xlog_is_shutdown(log))
2538 xfs_warn(log->l_mp,
2544 if (log->l_iclog->ic_state == XLOG_STATE_ACTIVE)
2545 wake_up_all(&log->l_flush_wait);
2547 spin_unlock(&log->l_icloglock);
2555 * global state machine log lock.
2561 struct xlog *log = iclog->ic_log;
2563 spin_lock(&log->l_icloglock);
2569 * split log writes, on the second, we shut down the file system and
2572 if (!xlog_is_shutdown(log)) {
2583 spin_unlock(&log->l_icloglock);
2584 xlog_state_do_callback(log);
2588 * If the head of the in-core log ring is not (ACTIVE or DIRTY), then we must
2598 * log's data space.
2599 * * in-core log pointer to which xlog_write() should write.
2600 * * boolean indicating this is a continued write to an in-core log.
2601 * If this is the last write, then the in-core log's offset field
2607 struct xlog *log,
2618 spin_lock(&log->l_icloglock);
2619 if (xlog_is_shutdown(log)) {
2620 spin_unlock(&log->l_icloglock);
2624 iclog = log->l_iclog;
2626 XFS_STATS_INC(log->l_mp, xs_log_noiclogs);
2628 /* Wait for log writes to have flushed */
2629 xlog_wait(&log->l_flush_wait, &log->l_icloglock);
2646 ticket->t_curr_res -= log->l_iclog_hsize;
2647 head->h_cycle = cpu_to_be32(log->l_curr_cycle);
2649 xlog_assign_lsn(log->l_curr_cycle, log->l_curr_block));
2650 ASSERT(log->l_curr_block >= 0);
2665 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2675 error = xlog_state_release_iclog(log, iclog, ticket);
2676 spin_unlock(&log->l_icloglock);
2691 xlog_state_switch_iclogs(log, iclog, iclog->ic_size);
2695 spin_unlock(&log->l_icloglock);
2710 struct xlog *log,
2713 trace_xfs_log_ticket_regrant(log, ticket);
2718 xlog_grant_sub_space(&log->l_reserve_head, ticket->t_curr_res);
2719 xlog_grant_sub_space(&log->l_write_head, ticket->t_curr_res);
2722 trace_xfs_log_ticket_regrant_sub(log, ticket);
2726 xlog_grant_add_space(&log->l_reserve_head, ticket->t_unit_res);
2727 trace_xfs_log_ticket_regrant_exit(log, ticket);
2749 struct xlog *log,
2754 trace_xfs_log_ticket_ungrant(log, ticket);
2759 trace_xfs_log_ticket_ungrant_sub(log, ticket);
2771 xlog_grant_sub_space(&log->l_reserve_head, bytes);
2772 xlog_grant_sub_space(&log->l_write_head, bytes);
2774 trace_xfs_log_ticket_ungrant_exit(log, ticket);
2776 xfs_log_space_wake(log->l_mp);
2786 struct xlog *log,
2791 assert_spin_locked(&log->l_icloglock);
2797 iclog->ic_header.h_prev_block = cpu_to_be32(log->l_prev_block);
2798 log->l_prev_block = log->l_curr_block;
2799 log->l_prev_cycle = log->l_curr_cycle;
2801 /* roll log?: ic_offset changed later */
2802 log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
2804 /* Round up to next log-sunit */
2805 if (log->l_iclog_roundoff > BBSIZE) {
2806 uint32_t sunit_bb = BTOBB(log->l_iclog_roundoff);
2807 log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
2810 if (log->l_curr_block >= log->l_logBBsize) {
2814 * when the log wraps to the next cycle. This is to support the
2818 log->l_curr_block -= log->l_logBBsize;
2819 ASSERT(log->l_curr_block >= 0);
2821 log->l_curr_cycle++;
2822 if (log->l_curr_cycle == XLOG_HEADER_MAGIC_NUM)
2823 log->l_curr_cycle++;
2825 ASSERT(iclog == log->l_iclog);
2826 log->l_iclog = iclog->ic_next;
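Condensed sketch of the head advancement here: push the current block past the data and header (in basic blocks), round up to the stripe unit if the log has one, and on wraparound bump the cycle while skipping the value reserved for the record header magic. Names and constants are restated for illustration.

#include <stdint.h>

#define SKETCH_HEADER_MAGIC_NUM 0xFEEDbabeU     /* cycle value the log must never use */

static void sketch_switch_iclogs(uint32_t *curr_block, uint32_t *curr_cycle,
                                 uint32_t data_bb, uint32_t hsize_bb,
                                 uint32_t sunit_bb, uint32_t logBBsize)
{
        *curr_block += data_bb + hsize_bb;

        if (sunit_bb > 1)       /* round up to the next log stripe unit */
                *curr_block = ((*curr_block + sunit_bb - 1) / sunit_bb) * sunit_bb;

        if (*curr_block >= logBBsize) {
                *curr_block -= logBBsize;
                (*curr_cycle)++;
                if (*curr_cycle == SKETCH_HEADER_MAGIC_NUM)
                        (*curr_cycle)++;
        }
}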
2859 * Write out all data in the in-core log as of this exact moment in time.
2861 * Data may be written to the in-core log during this call. However,
2890 struct xlog *log = mp->m_log;
2896 xlog_cil_force(log);
2898 spin_lock(&log->l_icloglock);
2899 if (xlog_is_shutdown(log))
2902 iclog = log->l_iclog;
2933 xlog_state_switch_iclogs(log, iclog, 0);
2949 spin_unlock(&log->l_icloglock);
2952 spin_unlock(&log->l_icloglock);
2957 * Force the log to a specific LSN.
2961 * If it is in the ACTIVE state, move the in-core log into the WANT_SYNC
2967 * specific in-core log. When given in-core log finally completes its write
2972 struct xlog *log,
2981 spin_lock(&log->l_icloglock);
2982 if (xlog_is_shutdown(log))
2985 iclog = log->l_iclog;
2989 if (iclog == log->l_iclog)
3004 * refcnt so we can release the log (which drops the ref count).
3014 &log->l_icloglock);
3047 spin_unlock(&log->l_icloglock);
3050 spin_unlock(&log->l_icloglock);
3055 * Force the log to a specific checkpoint sequence.
3060 * a synchronous log force, we will wait on the iclog with the LSN returned by
3070 struct xlog *log = mp->m_log;
3078 lsn = xlog_cil_force_seq(log, seq);
3082 ret = xlog_force_lsn(log, lsn, flags, log_flushed, false);
3085 ret = xlog_force_lsn(log, lsn, flags, log_flushed, true);
3112 * Figure out the total log space unit (in bytes) that would be
3113 * required for a log ticket.
3117 struct xlog *log,
3125 * Permanent reservations have up to 'cnt'-1 active log operations
3126 * in the log. A unit in this case is the amount of space for one
3127 * of these log operations. Normal reservations have a cnt of 1
3131 * which occupy space in the on-disk log.
3168 * increase the space required enough to require more log and op
3176 * Fundamentally, this means we must pass the entire log vector to
3179 iclog_space = log->l_iclog_size - log->l_iclog_hsize;
3191 unit_bytes += log->l_iclog_hsize * num_headers;
3194 unit_bytes += log->l_iclog_hsize;
3197 unit_bytes += 2 * log->l_iclog_roundoff;
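A deliberately simplified sketch of the overhead added above: roughly one record header per iclog the reservation may span, one more for the commit record, and two roundoffs (transaction data plus commit record). The real calculation additionally accounts for per-region op headers and re-checks the header estimate; this only shows the shape of the arithmetic.

#include <stdint.h>

static uint32_t sketch_unit_res_overhead(uint32_t unit_bytes, uint32_t iclog_size,
                                         uint32_t iclog_hsize, uint32_t iclog_roundoff)
{
        uint32_t iclog_space = iclog_size - iclog_hsize;
        uint32_t num_headers = (unit_bytes + iclog_space - 1) / iclog_space;

        unit_bytes += iclog_hsize * num_headers;        /* record header per iclog spanned */
        unit_bytes += iclog_hsize;                      /* commit record header */
        unit_bytes += 2 * iclog_roundoff;               /* data + commit record roundoff */
        return unit_bytes;
}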
3213 * Allocate and initialise a new log ticket.
3217 struct xlog *log,
3228 unit_res = xlog_calc_unit_res(log, unit_bytes, &tic->t_iclog_hdrs);
3247 struct xlog *log,
3250 xfs_alert(log->l_mp,
3251 "ran out of log space tail 0x%llx/0x%llx, head lsn 0x%llx, head 0x%x/0x%x, prev head 0x%x/0x%x",
3253 atomic64_read(&log->l_tail_lsn),
3254 log->l_ailp->ail_head_lsn,
3255 log->l_curr_cycle, log->l_curr_block,
3256 log->l_prev_cycle, log->l_prev_block);
3257 xfs_alert(log->l_mp,
3259 atomic64_read(&log->l_write_head.grant),
3260 atomic64_read(&log->l_reserve_head.grant),
3261 log->l_tail_space, log->l_logsize,
3265 /* Check if the new iclog will fit in the log. */
3268 struct xlog *log,
3274 if (CYCLE_LSN(tail_lsn) == log->l_prev_cycle) {
3275 blocks = log->l_logBBsize -
3276 (log->l_prev_block - BLOCK_LSN(tail_lsn));
3278 BTOBB(log->l_iclog_hsize)) {
3279 xfs_emerg(log->l_mp,
3280 "%s: ran out of log space", __func__);
3281 xlog_verify_dump_tail(log, iclog);
3286 if (CYCLE_LSN(tail_lsn) + 1 != log->l_prev_cycle) {
3287 xfs_emerg(log->l_mp, "%s: head has wrapped tail.", __func__);
3288 xlog_verify_dump_tail(log, iclog);
3291 if (BLOCK_LSN(tail_lsn) == log->l_prev_block) {
3292 xfs_emerg(log->l_mp, "%s: tail wrapped", __func__);
3293 xlog_verify_dump_tail(log, iclog);
3297 blocks = BLOCK_LSN(tail_lsn) - log->l_prev_block;
3299 xfs_emerg(log->l_mp, "%s: ran out of iclog space", __func__);
3300 xlog_verify_dump_tail(log, iclog);
3310 * 4. Check fields of each log operation header for:
3313 * C. Length in log record header is correct according to the
3316 * log, check the preceding blocks of the physical log to make sure all
3321 struct xlog *log,
3335 spin_lock(&log->l_icloglock);
3336 icptr = log->l_iclog;
3337 for (i = 0; i < log->l_iclog_bufs; i++, icptr = icptr->ic_next)
3340 if (icptr != log->l_iclog)
3341 xfs_emerg(log->l_mp, "%s: corrupt iclog ring", __func__);
3342 spin_unlock(&log->l_icloglock);
3344 /* check log magic numbers */
3346 xfs_emerg(log->l_mp, "%s: invalid magic num", __func__);
3352 xfs_emerg(log->l_mp, "%s: unexpected magic num",
3382 xfs_warn(log->l_mp,
3409 * Perform a forced shutdown on the log.
3411 * This can be called from low level log code to trigger a shutdown, or from the
3415 * a. if the shutdown was not due to a log IO error, flush the logs to
3417 * b. the log gets atomically marked 'XLOG_IO_ERROR' for all interested
3419 * c. Tasks sleeping on log reservations, pinned objects and
3421 * d. The mount is also marked as shut down so that log triggered shutdowns
3424 * Return true if the shutdown cause was a log IO error and we actually shut the
3425 * log down.
3429 struct xlog *log,
3434 if (!log)
3438 * Ensure that there is only ever one log shutdown being processed.
3439 * If we allow the log force below on a second pass after shutting
3440 * down the log, we risk deadlocking the CIL push as it may require
3442 * buffer locks to abort buffers on last unpin of buf log items).
3444 if (test_and_set_bit(XLOG_SHUTDOWN_STARTED, &log->l_opstate))
3448 * Flush all the completed transactions to disk before marking the log
3449 * being shut down. We need to do this first as shutting down the log
3450 * before the force will prevent the log force from flushing the iclogs
3454 * we don't want to touch the log because we don't want to perturb the
3456 * avoid a log force in this case.
3458 * If we are shutting down due to a log IO error, then we must avoid
3459 * trying to write the log as that may just result in more IO errors and
3462 if (!log_error && !xlog_in_recovery(log))
3463 xfs_log_force(log->l_mp, XFS_LOG_SYNC);
3471 * Much of the log state machine transitions assume that shutdown state
3472 * cannot change once they hold the log->l_icloglock. Hence we need to
3476 spin_lock(&log->l_icloglock);
3477 if (test_and_set_bit(XLOG_IO_ERROR, &log->l_opstate)) {
3478 spin_unlock(&log->l_icloglock);
3482 spin_unlock(&log->l_icloglock);
3485 * If this log shutdown also sets the mount shutdown state, issue a
3488 if (!xfs_set_shutdown(log->l_mp)) {
3489 xfs_alert_tag(log->l_mp, XFS_PTAG_SHUTDOWN_LOGERROR,
3490 "Filesystem has been shut down due to log error (0x%x).",
3492 xfs_alert(log->l_mp,
3499 * We don't want anybody waiting for log reservations after this. That
3505 xlog_grant_head_wake_all(&log->l_reserve_head);
3506 xlog_grant_head_wake_all(&log->l_write_head);
3510 * as if the log writes were completed. The abort handling in the log
3514 spin_lock(&log->l_cilp->xc_push_lock);
3515 wake_up_all(&log->l_cilp->xc_start_wait);
3516 wake_up_all(&log->l_cilp->xc_commit_wait);
3517 spin_unlock(&log->l_cilp->xc_push_lock);
3519 spin_lock(&log->l_icloglock);
3520 xlog_state_shutdown_callbacks(log);
3521 spin_unlock(&log->l_icloglock);
3523 wake_up_var(&log->l_opstate);
3524 if (IS_ENABLED(CONFIG_XFS_RT) && xfs_has_zoned(log->l_mp))
3525 xfs_zoned_wake_all(log->l_mp);
3532 struct xlog *log)
3536 iclog = log->l_iclog;
3544 } while (iclog != log->l_iclog);
3557 struct xlog *log = mp->m_log;
3561 * norecovery mode skips mount-time log processing and unconditionally
3579 spin_lock(&log->l_icloglock);
3584 log->l_curr_cycle, log->l_curr_block);
3585 spin_unlock(&log->l_icloglock);