// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "glops.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * back to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

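/*
 * An rgrp bitmap block is being unpinned: the on-disk bitmap now matches
 * what the log holds, so resync the in-memory clone bitmap from the buffer,
 * issue discards for the freed extents if "-o discard" is enabled, and make
 * the freed space available to new allocations again.
 */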
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	rgrp_lock_local(rgd);
	if (bi->bi_clone == NULL)
		goto out;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	BUG_ON(rgd->rd_free_clone < rgd->rd_reserved);
	rgd->rd_extfail_pt = rgd->rd_free;

out:
	rgrp_unlock_local(rgd);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

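/*
 * Advance the log flush head by one block, wrapping around at the end of
 * the journal. The BUG_ON catches the flush head catching up with the log
 * tail while away from the log head, which would overwrite live journal
 * data.
 */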
void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

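/*
 * Map a journal-relative (logical) block number to its device (physical)
 * block number by walking the journal's extent list. Returns -1 (as a u64)
 * if the block falls outside all mapped extents.
 */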
u64 gfs2_log_bmap(struct gfs2_jdesc *jd, unsigned int lblock)
{
	struct gfs2_journal_extent *je;

	list_for_each_entry(je, &jd->extent_list, list) {
		if (lblock >= je->lblock && lblock < je->lblock + je->blocks)
			return je->dblock + lblock - je->lblock;
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @folio: The folio
 * @offset: The first byte within the folio that completed
 * @size: The number of bytes that completed
 * @error: The i/o status
 *
 * This finds the relevant buffers, unlocks them, and sets the error flag
 * according to the status of the i/o request. This is used when the log
 * is writing data which has an in-place version that is pinned in the
 * pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp, struct folio *folio,
		size_t offset, size_t size, blk_status_t error)
{
	struct buffer_head *bh, *next;

	bh = folio_buffers(folio);
	while (bh_offset(bh) < offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while (bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		int err = blk_status_to_errno(bio->bi_status);

		if (!cmpxchg(&sdp->sd_log_error, 0, err))
			fs_err(sdp, "Error %d writing to journal, jid=%u\n",
			       err, sdp->sd_jdesc->jd_jid);
		gfs2_withdraw_delayed(sdp);
		/* prevent more writes to the journal */
		clear_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		struct page *page = bvec->bv_page;
		struct folio *folio = page_folio(page);

		if (folio && folio_buffers(folio))
			gfs2_end_log_write_bh(sdp, folio, bvec->bv_offset,
					bvec->bv_len, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, blk_opf_t opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(sb->s_bdev, BIO_MAX_VECS, 0, GFP_NOIO);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: The bio to get or allocate
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio and the next block number is sequential with
 * the previous one, return that bio; otherwise, submit the cached bio to
 * the device. If there is no cached bio, or we just submitted it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, enum req_op op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @jd: The journal descriptor
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try to add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct gfs2_jdesc *jd,
		    struct page *page, unsigned size, unsigned offset,
		    u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &jd->jd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, folio_page(bh->b_folio, 0),
			bh->b_size, bh_offset(bh), dblock);
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

static void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	u64 dblock;

	dblock = gfs2_log_bmap(sdp->sd_jdesc, sdp->sd_log_flush_head);
	gfs2_log_incr_head(sdp);
	gfs2_log_write(sdp, sdp->sd_jdesc, page, sb->s_blocksize, 0, dblock);
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the folios in the bio. The main thread will wait on them
 * and process them in order as necessary.
 */
static void gfs2_end_log_read(struct bio *bio)
{
	int error = blk_status_to_errno(bio->bi_status);
	struct folio_iter fi;

	bio_for_each_folio_all(fi, bio) {
		/* We're abusing wb_err to get the error to gfs2_find_jhead */
		filemap_set_wb_err(fi.folio->mapping, error);
		folio_end_read(fi.folio, !error);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_folio_search - Look for the journal head in a given folio.
 * @jd: The journal descriptor
 * @head: The journal head to start from
 * @folio: The folio to look in
 *
 * Returns: true if found, false otherwise.
 */
static bool gfs2_jhead_folio_search(struct gfs2_jdesc *jd,
				    struct gfs2_log_header_host *head,
				    struct folio *folio)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr;
	unsigned int offset;
	bool ret = false;

	VM_BUG_ON_FOLIO(folio_test_large(folio), folio);
	kaddr = kmap_local_folio(folio, 0);
	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_local(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/clean up a folio
 * @jd: The journal descriptor
 * @index: Index of the folio to look into
 * @head: The journal head to start from
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the folio with 'index' in the journal's mapping. Search the folio
 * for the journal head if requested (*done == false). Release refs on the
 * folio so the page cache can reclaim it. We grabbed a reference on this
 * folio twice: first when we did a filemap_grab_folio() to obtain the
 * folio and add it to the bio, and second when we did a filemap_get_folio()
 * here to get the folio to wait on while I/O on it is being completed.
 * This function is also used to free up a folio we may have grabbed but
 * not used. Maybe we added it to a bio, but never submitted it for I/O.
 * Or we submitted the I/O, but we already found the jhead, so we only
 * need to drop our references to the folio.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct folio *folio;

	folio = filemap_get_folio(jd->jd_inode->i_mapping, index);

	folio_wait_locked(folio);
	if (!folio_test_uptodate(folio))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_folio_search(jd, head, folio);

	/* filemap_get_folio() and the earlier filemap_grab_folio() */
	folio_put_refs(folio, 2);
}

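/*
 * Allocate a new bio that continues at the sector where @prev ends, chain
 * it to @prev so that @prev's end_io does not run until the new bio has
 * completed as well, and submit @prev. This is used by gfs2_find_jhead()
 * when a folio straddles the boundary of a full bio.
 */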
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(prev->bi_bdev, nr_iovecs, prev->bi_opf, GFP_NOIO);
	bio_clone_blkg_association(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number (i.e., the log head).
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int ret = 0;
	struct bio *bio = NULL;
	struct folio *folio = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
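	/*
	 * Read the journal in large chunks, keeping up to max_blocks
	 * (2MB worth) of read-ahead in flight so that submitting bios and
	 * scanning completed folios for the newest log header overlap.
	 */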
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!folio) {
				folio = filemap_grab_folio(mapping,
						block >> shift);
				if (IS_ERR(folio)) {
					ret = PTR_ERR(folio);
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					if (bio_add_folio(bio, folio, bsize, off))
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			if (!bio_add_folio(bio, folio, bsize, off))
				BUG();
block_added:
			off += bsize;
			if (off == folio_size(folio))
				folio = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out;  /* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	truncate_inode_pages(mapping, 0);

	return ret;
}

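/*
 * Build a log descriptor block in a page allocated from the gfs2_page_pool
 * mempool. The caller fills in any payload after the descriptor and then
 * hands the page to gfs2_log_write_page(), which takes over ownership.
 */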
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

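/*
 * Journaled data blocks that happen to begin with the GFS2 magic number
 * must be "escaped", or journal recovery could mistake them for metadata.
 * Escaped blocks go to the log with their first four bytes zeroed and are
 * flagged in the log descriptor so that replay can restore the magic
 * number (see databuf_lo_scan_elements()).
 */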
static void gfs2_check_magic(struct buffer_head *bh)
{
	__be32 *ptr;

	clear_buffer_escaped(bh);
	ptr = kmap_local_folio(bh->b_folio, bh_offset(bh));
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_local(ptr);
}

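/*
 * list_sort() comparison callback: order the pinned buffers by their
 * on-disk block number.
 */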
static int blocknr_cmp(void *priv, const struct list_head *a,
		       const struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
				unsigned int total, struct list_head *blist,
				bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
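	/*
	 * Each iteration writes one log descriptor listing up to @limit
	 * block numbers (the first pass, over bd1), followed by the contents
	 * of those buffers (the second pass, over bd2). The log lock is
	 * dropped around the actual log writes.
	 */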
	while (total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *p;

				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				p = page_address(page);
				memcpy_from_folio(p, bd2->bd_bh->b_folio,
						  bh_offset(bd2->bd_bh),
						  bd2->bd_bh->b_size);
				*(__be32 *)p = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

#define obsolete_rgrp_replay \
"Replaying 0x%llx from jid=%d/0x%llx but we already have a bh!\n"
#define obsolete_rgrp_replay2 \
"busy:%d, pinned:%d rg_gen:0x%llx, j_gen:0x%llx\n"

static void obsolete_rgrp(struct gfs2_jdesc *jd, struct buffer_head *bh_log,
			  u64 blkno)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_rgrpd *rgd;
	struct gfs2_rgrp *jrgd = (struct gfs2_rgrp *)bh_log->b_data;

	rgd = gfs2_blk2rgrpd(sdp, blkno, false);
	if (rgd && rgd->rd_addr == blkno &&
	    rgd->rd_bits && rgd->rd_bits->bi_bh) {
		fs_info(sdp, obsolete_rgrp_replay, (unsigned long long)blkno,
			jd->jd_jid, bh_log->b_blocknr);
		fs_info(sdp, obsolete_rgrp_replay2,
			buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
			buffer_pinned(rgd->rd_bits->bi_bh),
			rgd->rd_igeneration,
			be64_to_cpu(jrgd->rg_igeneration));
		gfs2_dump_glock(NULL, rgd->rd_gl, true);
	}
}

static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG))
				obsolete_rgrp(jd, bh_log, blkno);

			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
	        jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

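/*
 * Write the accumulated revokes to the log. A revoke records that a block
 * in the journal no longer needs to be replayed, typically because it has
 * since been freed. The first block is a GFS2_LOG_DESC_REVOKE descriptor;
 * continuation blocks carry a plain gfs2_meta_header of type
 * GFS2_METATYPE_LB. In both cases the payload is a packed array of __be64
 * block numbers.
 */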
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_flush_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke);
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

void gfs2_drain_revokes(struct gfs2_sbd *sdp)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	gfs2_drain_revokes(sdp);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			} else if (error) {
				jd->jd_found_revokes++;
			}
			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
	        jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_inode_metasync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_inode_metasync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_first_entry(head, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

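/*
 * The NULL-terminated table of log operations. Each phase of the log
 * flush and of journal recovery iterates over this table and invokes the
 * corresponding hook on every element that provides one.
 */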
const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};