Lines matching identifier j (full-word match) in fs/bcachefs/journal_io.c

Each match below is shown as: source line number, the matching line, and the enclosing function. (argument) or (local) marks the line where j is declared.

30 static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum) in jset_csum_good() (argument)
32 if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) { in jset_csum_good()
37 *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j); in jset_csum_good()
38 return !bch2_crc_cmp(j->csum, *csum); in jset_csum_good()
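The four matches above cover nearly all of jset_csum_good(). A minimal reconstruction for orientation is below; it assumes the elided lines 33-36 zero *csum and fail the entry when the checksum type is unknown (a hedged sketch, not the authoritative source):

    static bool jset_csum_good(struct bch_fs *c, struct jset *j, struct bch_csum *csum)
    {
            if (!bch2_checksum_type_valid(c, JSET_CSUM_TYPE(j))) {
                    *csum = (struct bch_csum) {};   /* assumed: nothing to compare against */
                    return false;
            }

            /* recompute over the whole variable-length jset, then compare */
            *csum = csum_vstruct(c, JSET_CSUM_TYPE(j), journal_nonce(j), j);
            return !bch2_crc_cmp(j->csum, *csum);
    }
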
51 journal_entry_radix_idx(c, le64_to_cpu(i->j.seq))); in __journal_replay_free()
55 kvpfree(i, offsetof(struct journal_replay, j) + in __journal_replay_free()
56 vstruct_bytes(&i->j)); in __journal_replay_free()
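Lines 51 and 55-56 belong to __journal_replay_free(): the entry's slot in the journal-entries radix tree is looked up by sequence number and cleared, then the allocation is freed with exactly the size it was created with. A sketch, with the radix lookup at the elided lines assumed:

    struct journal_replay **p =
            genradix_ptr(&c->journal_entries,
                         journal_entry_radix_idx(c, le64_to_cpu(i->j.seq)));

    *p = NULL;                                      /* clear the slot */
    kvpfree(i, offsetof(struct journal_replay, j) + /* header... */
            vstruct_bytes(&i->j));                  /* ...plus the embedded jset */
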
83 struct journal_list *jlist, struct jset *j) in journal_entry_add() (argument)
88 size_t bytes = vstruct_bytes(j); in journal_entry_add()
89 u64 last_seq = !JSET_NO_FLUSH(j) ? le64_to_cpu(j->last_seq) : 0; in journal_entry_add()
94 le64_to_cpu(j->seq) < jlist->last_seq) in journal_entry_add()
103 c->journal_entries_base_seq = max_t(s64, 1, le64_to_cpu(j->seq) - S32_MAX); in journal_entry_add()
114 if (le64_to_cpu(i->j.seq) >= last_seq) in journal_entry_add()
123 journal_entry_radix_idx(c, le64_to_cpu(j->seq)), in journal_entry_add()
134 if (bytes == vstruct_bytes(&dup->j) && in journal_entry_add()
135 !memcmp(j, &dup->j, bytes)) { in journal_entry_add()
150 le64_to_cpu(j->seq)); in journal_entry_add()
155 i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL); in journal_entry_add()
162 unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct"); in journal_entry_add()
168 le64_to_cpu(i->j.seq)); in journal_entry_add()
186 le64_to_cpu(i->j.seq)); in journal_entry_add()
193 le64_to_cpu(i->j.seq)); in journal_entry_add()
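journal_entry_add() accounts for most of the matches so far. Reading the fragments in order: the entry's size and last_seq are computed (88-89), entries older than the newest last_seq are pruned (114), the radix slot for this seq is found (123), and a colliding entry is accepted only if it is byte-for-byte identical (134-135). The allocation at 155/162 is the flexible-struct idiom; a sketch (the error constant is not in the match list and is assumed):

    i = kvpmalloc(offsetof(struct journal_replay, j) + bytes, GFP_KERNEL);
    if (!i)
            return -ENOMEM;         /* actual error code not shown above */

    /* struct jset is variable-length: one copy grabs header and payload */
    unsafe_memcpy(&i->j, j, bytes, "embedded variable length struct");
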
933 struct jset *j = NULL; in journal_read_bucket() (local)
974 j = buf->data; in journal_read_bucket()
977 ret = jset_validate_early(c, ca, j, offset, in journal_read_bucket()
981 sectors = vstruct_sectors(j, c->block_bits); in journal_read_bucket()
984 if (vstruct_bytes(j) > buf->size) { in journal_read_bucket()
986 vstruct_bytes(j)); in journal_read_bucket()
1011 if (le64_to_cpu(j->seq) < ja->bucket_seq[bucket]) in journal_read_bucket()
1014 ja->bucket_seq[bucket] = le64_to_cpu(j->seq); in journal_read_bucket()
1016 enum bch_csum_type csum_type = JSET_CSUM_TYPE(j); in journal_read_bucket()
1018 csum_good = jset_csum_good(c, j, &csum); in journal_read_bucket()
1024 bch2_csum_err_msg(&err, csum_type, j->csum, csum), in journal_read_bucket()
1028 ret = bch2_encrypt(c, JSET_CSUM_TYPE(j), journal_nonce(j), in journal_read_bucket()
1029 j->encrypted_start, in journal_read_bucket()
1030 vstruct_end(j) - (void *) j->encrypted_start); in journal_read_bucket()
1043 }, jlist, j); in journal_read_bucket()
1058 j = ((void *) j) + (sectors << 9); in journal_read_bucket()
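journal_read_bucket() processes each jset where it sits in the read buffer: early validation (977), a buffer-size check (984-986), bucket_seq bookkeeping (1011-1014), checksum verification (1016-1025), and in-place decryption of the tail past encrypted_start (1028-1030). Line 1058 then steps to the next entry; a bucket can hold several jsets back to back, so the walk is roughly (loop bounds assumed):

    while (offset < bucket_sectors) {
            sectors = vstruct_sectors(j, c->block_bits);
            /* ... validate, verify checksum, decrypt, journal_entry_add() ... */
            j = ((void *) j) + (sectors << 9);      /* 512-byte sectors */
            offset += sectors;
    }
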
1108 vstruct_sectors(&r->j, c->block_bits); in CLOSURE_CALLBACK()
1159 struct journal_replay *j) in bch2_journal_ptrs_to_text() (argument)
1163 for (i = 0; i < j->nr_ptrs; i++) { in bch2_journal_ptrs_to_text()
1164 struct bch_dev *ca = bch_dev_bkey_exists(c, j->ptrs[i].dev); in bch2_journal_ptrs_to_text()
1167 div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset); in bch2_journal_ptrs_to_text()
1172 j->ptrs[i].dev, in bch2_journal_ptrs_to_text()
1173 j->ptrs[i].bucket, in bch2_journal_ptrs_to_text()
1174 j->ptrs[i].bucket_offset, in bch2_journal_ptrs_to_text()
1175 j->ptrs[i].sector); in bch2_journal_ptrs_to_text()
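In bch2_journal_ptrs_to_text(), the div64_u64_rem() at line 1167 is just a 64-bit modulus: it yields the entry's offset inside its bucket.

    u64 offset;
    div64_u64_rem(j->ptrs[i].sector, ca->mi.bucket_size, &offset);
    /* offset == sector % bucket_size: position of the entry within the bucket */
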
1235 *blacklist_seq = *start_seq = le64_to_cpu(i->j.seq) + 1; in bch2_journal_read()
1237 if (JSET_NO_FLUSH(&i->j)) { in bch2_journal_read()
1248 if (journal_entry_err_on(le64_to_cpu(i->j.last_seq) > le64_to_cpu(i->j.seq), in bch2_journal_read()
1249 c, le32_to_cpu(i->j.version), &i->j, NULL, in bch2_journal_read()
1252 le64_to_cpu(i->j.last_seq), in bch2_journal_read()
1253 le64_to_cpu(i->j.seq))) in bch2_journal_read()
1254 i->j.last_seq = i->j.seq; in bch2_journal_read()
1256 *last_seq = le64_to_cpu(i->j.last_seq); in bch2_journal_read()
1257 *blacklist_seq = le64_to_cpu(i->j.seq) + 1; in bch2_journal_read()
1286 seq = le64_to_cpu(i->j.seq); in bch2_journal_read()
1293 fsck_err_on(!JSET_NO_FLUSH(&i->j), c, in bch2_journal_read()
1308 BUG_ON(seq > le64_to_cpu(i->j.seq)); in bch2_journal_read()
1310 while (seq < le64_to_cpu(i->j.seq)) { in bch2_journal_read()
1314 while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_read()
1318 if (seq == le64_to_cpu(i->j.seq)) in bch2_journal_read()
1323 while (seq < le64_to_cpu(i->j.seq) && in bch2_journal_read()
1329 prt_printf(&buf1, " size %zu", vstruct_sectors(&prev->j, c->block_bits)); in bch2_journal_read()
1368 le64_to_cpu(i->j.seq), in bch2_journal_read()
1374 &i->j, in bch2_journal_read()
1390 (le64_to_cpu(i->j.seq) == *last_seq || in bch2_journal_read()
1393 le64_to_cpu(i->j.seq), buf.buf))) { in bch2_journal_read()
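The stretch from 1235 onward is bch2_journal_read()'s consistency pass. After last_seq/blacklist_seq are fixed up (1248-1257), the loop at 1310-1323 scans for holes between consecutive entries, skipping runs of blacklisted sequence numbers so that only genuine gaps get reported (prev/buf1 at 1329 feed the report). Its shape, assuming the predicate at the elided columns is bch2_journal_seq_is_blacklisted():

    while (seq < le64_to_cpu(i->j.seq)) {
            /* skip sequence numbers that were deliberately blacklisted */
            while (seq < le64_to_cpu(i->j.seq) &&
                   bch2_journal_seq_is_blacklisted(c, seq, false))
                    seq++;

            if (seq == le64_to_cpu(i->j.seq))
                    break;          /* the whole gap was blacklisted: fine */

            /* otherwise this range is genuinely missing: walk and report it */
            while (seq < le64_to_cpu(i->j.seq) &&
                   !bch2_journal_seq_is_blacklisted(c, seq, false))
                    seq++;
    }
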
1407 static void __journal_write_alloc(struct journal *j, in __journal_write_alloc() (argument)
1414 struct bch_fs *c = container_of(j, struct bch_fs, journal); in __journal_write_alloc()
1440 bch2_dev_stripe_increment(ca, &j->wp.stripe); in __journal_write_alloc()
1464 * @j: journal object
1469 static int journal_write_alloc(struct journal *j, struct journal_buf *w) in journal_write_alloc() (argument)
1471 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_write_alloc()
1488 devs_sorted = bch2_dev_alloc_list(c, &j->wp.stripe, &devs); in journal_write_alloc()
1490 __journal_write_alloc(j, w, &devs_sorted, in journal_write_alloc()
1505 bch2_journal_dev_buckets_available(j, ja, in journal_write_alloc()
1518 __journal_write_alloc(j, w, &devs_sorted, in journal_write_alloc()
1534 static void journal_buf_realloc(struct journal *j, struct journal_buf *buf) in journal_buf_realloc() (argument)
1536 struct bch_fs *c = container_of(j, struct bch_fs, journal); in journal_buf_realloc()
1538 /* we aren't holding j->lock: */ in journal_buf_realloc()
1539 unsigned new_size = READ_ONCE(j->buf_size_want); in journal_buf_realloc()
1556 spin_lock(&j->lock); in journal_buf_realloc()
1559 spin_unlock(&j->lock); in journal_buf_realloc()
1564 static inline struct journal_buf *journal_last_unwritten_buf(struct journal *j) in journal_last_unwritten_buf() (argument)
1566 return j->buf + (journal_last_unwritten_seq(j) & JOURNAL_BUF_MASK); in journal_last_unwritten_buf()
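journal_last_unwritten_buf() exposes the journal's buffering scheme: a small power-of-two ring of journal_buf slots indexed by sequence number, so seq N always maps to slot N mod ring size. Illustrative helper (buf_for_seq is hypothetical; JOURNAL_BUF_MASK is the ring size minus one):

    static inline struct journal_buf *buf_for_seq(struct journal *j, u64 seq)
    {
            return j->buf + (seq & JOURNAL_BUF_MASK);   /* == seq % ring size */
    }
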
1571 closure_type(j, struct journal, io); in CLOSURE_CALLBACK()
1572 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1573 struct journal_buf *w = journal_last_unwritten_buf(j); in CLOSURE_CALLBACK()
1580 ? j->flush_write_time in CLOSURE_CALLBACK()
1581 : j->noflush_write_time, j->write_start_time); in CLOSURE_CALLBACK()
1596 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1599 if (seq >= j->pin.front) in CLOSURE_CALLBACK()
1600 journal_seq_pin(j, seq)->devs = w->devs_written; in CLOSURE_CALLBACK()
1604 j->flushed_seq_ondisk = seq; in CLOSURE_CALLBACK()
1605 j->last_seq_ondisk = w->last_seq; in CLOSURE_CALLBACK()
1612 } else if (!j->err_seq || seq < j->err_seq) in CLOSURE_CALLBACK()
1613 j->err_seq = seq; in CLOSURE_CALLBACK()
1615 j->seq_ondisk = seq; in CLOSURE_CALLBACK()
1624 if (j->watermark != BCH_WATERMARK_stripe) in CLOSURE_CALLBACK()
1630 v = atomic64_read(&j->reservations.counter); in CLOSURE_CALLBACK()
1636 } while ((v = atomic64_cmpxchg(&j->reservations.counter, in CLOSURE_CALLBACK()
1639 bch2_journal_reclaim_fast(j); in CLOSURE_CALLBACK()
1640 bch2_journal_space_available(j); in CLOSURE_CALLBACK()
1643 &j->max_in_flight_start, false); in CLOSURE_CALLBACK()
1646 journal_wake(j); in CLOSURE_CALLBACK()
1649 journal_last_unwritten_seq(j) <= journal_cur_seq(j)) { in CLOSURE_CALLBACK()
1650 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1651 closure_call(&j->io, bch2_journal_write, c->io_complete_wq, NULL); in CLOSURE_CALLBACK()
1652 } else if (journal_last_unwritten_seq(j) == journal_cur_seq(j) && in CLOSURE_CALLBACK()
1654 struct journal_buf *buf = journal_cur_buf(j); in CLOSURE_CALLBACK()
1663 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1664 mod_delayed_work(c->io_complete_wq, &j->write_work, max(0L, delta)); in CLOSURE_CALLBACK()
1666 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
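Inside the completion callback above, lines 1630-1636 are the classic lock-free read-modify-write on the packed reservation word: read it, derive the new value, and retry the cmpxchg until no other CPU changed it in between. The generic form of the idiom (the old/new state unpacking is elided in the match list and assumed here):

    u64 v = atomic64_read(&j->reservations.counter);
    do {
            old.v = new.v = v;
            /* ... update the unpacked fields of 'new' ... */
    } while ((v = atomic64_cmpxchg(&j->reservations.counter,
                                   old.v, new.v)) != old.v);
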
1673 struct journal *j = &ca->fs->journal; in journal_write_endio() (local)
1674 struct journal_buf *w = journal_last_unwritten_buf(j); in journal_write_endio()
1682 spin_lock_irqsave(&j->err_lock, flags); in journal_write_endio()
1684 spin_unlock_irqrestore(&j->err_lock, flags); in journal_write_endio()
1687 closure_put(&j->io); in journal_write_endio()
1693 closure_type(j, struct journal, io); in CLOSURE_CALLBACK()
1694 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1696 struct journal_buf *w = journal_last_unwritten_buf(j); in CLOSURE_CALLBACK()
1737 static int bch2_journal_write_prep(struct journal *j, struct journal_buf *w) in bch2_journal_write_prep() (argument)
1739 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_journal_write_prep()
1809 BUG_ON(u64s > j->entry_u64s_reserved); in bch2_journal_write_prep()
1819 u64s, w->u64s_reserved, j->entry_u64s_reserved); in bch2_journal_write_prep()
1830 j->last_empty_seq = seq; in bch2_journal_write_prep()
1860 static int bch2_journal_write_pick_flush(struct journal *j, struct journal_buf *w) in bch2_journal_write_pick_flush() (argument)
1862 struct bch_fs *c = container_of(j, struct bch_fs, journal); in bch2_journal_write_pick_flush()
1863 int error = bch2_journal_error(j); in bch2_journal_write_pick_flush()
1881 if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags)) in bch2_journal_write_pick_flush()
1887 (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) && in bch2_journal_write_pick_flush()
1888 test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags))) { in bch2_journal_write_pick_flush()
1894 j->nr_noflush_writes++; in bch2_journal_write_pick_flush()
1896 j->last_flush_write = jiffies; in bch2_journal_write_pick_flush()
1897 j->nr_flush_writes++; in bch2_journal_write_pick_flush()
1898 clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags); in bch2_journal_write_pick_flush()
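bch2_journal_write_pick_flush() chooses between a flush and a noflush write. Per the fragments: a journal error with JOURNAL_NEED_FLUSH_WRITE pending fails the write (1881); if the last flush was within journal_flush_delay and JOURNAL_MAY_SKIP_FLUSH is set, the write skips the flush (1887-1894); otherwise it flushes and resets the bookkeeping (1896-1898). In outline — the return value, the omitted conditions, and the w->noflush field are assumptions:

    if (error && test_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags))
            return -EIO;            /* assumed error path */

    if (/* flush not explicitly requested && */
        (jiffies - j->last_flush_write) < msecs_to_jiffies(c->opts.journal_flush_delay) &&
        test_bit(JOURNAL_MAY_SKIP_FLUSH, &j->flags)) {
            w->noflush = true;      /* field name assumed */
            j->nr_noflush_writes++;
    } else {
            j->last_flush_write = jiffies;
            j->nr_flush_writes++;
            clear_bit(JOURNAL_NEED_FLUSH_WRITE, &j->flags);
    }
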
1906 closure_type(j, struct journal, io); in CLOSURE_CALLBACK()
1907 struct bch_fs *c = container_of(j, struct bch_fs, journal); in CLOSURE_CALLBACK()
1908 struct journal_buf *w = journal_last_unwritten_buf(j); in CLOSURE_CALLBACK()
1917 j->write_start_time = local_clock(); in CLOSURE_CALLBACK()
1919 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1920 ret = bch2_journal_write_pick_flush(j, w); in CLOSURE_CALLBACK()
1921 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1925 mutex_lock(&j->buf_lock); in CLOSURE_CALLBACK()
1926 journal_buf_realloc(j, w); in CLOSURE_CALLBACK()
1928 ret = bch2_journal_write_prep(j, w); in CLOSURE_CALLBACK()
1929 mutex_unlock(&j->buf_lock); in CLOSURE_CALLBACK()
1933 j->entry_bytes_written += vstruct_bytes(w->data); in CLOSURE_CALLBACK()
1936 spin_lock(&j->lock); in CLOSURE_CALLBACK()
1937 ret = journal_write_alloc(j, w); in CLOSURE_CALLBACK()
1938 if (!ret || !j->can_discard) in CLOSURE_CALLBACK()
1941 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1942 bch2_journal_do_discards(j); in CLOSURE_CALLBACK()
1946 __bch2_journal_debug_to_text(&journal_debug_buf, j); in CLOSURE_CALLBACK()
1947 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
1964 bch2_journal_space_available(j); in CLOSURE_CALLBACK()
1965 spin_unlock(&j->lock); in CLOSURE_CALLBACK()
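Finally, the main write path (1906 onward) runs in order: pick the flush mode under j->lock (1919-1921), reallocate the buffer if a larger size was requested (1925-1926), prep the jset (1928), then allocate on-disk space; if allocation fails while discards are still possible, it discards and retries (1936-1942), and 1946-1947 dump journal state into an error report when it fails for good. The retry loop's shape, per those fragments:

    while (1) {
            spin_lock(&j->lock);
            ret = journal_write_alloc(j, w);
            if (!ret || !j->can_discard)
                    break;          /* success, or nothing left to reclaim */

            spin_unlock(&j->lock);
            bch2_journal_do_discards(j);    /* free buckets, then try again */
    }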