// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) International Business Machines Corp., 2000-2005
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 */

#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/seq_file.h>
#include <linux/writeback.h>
#include <linux/migrate.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
#include "jfs_txnmgr.h"
#include "jfs_debug.h"

#ifdef CONFIG_JFS_STATISTICS
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif

#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}

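/*
 * Slow path of lock_metapage(): wait for META_locked to be released.
 * The folio lock is dropped while sleeping (the current holder may need
 * it to make progress) and retaken before retrying.
 */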
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			folio_unlock(mp->folio);
			io_schedule();
			folio_lock(mp->folio);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}

/*
 * Must have mp->folio locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}

#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)

#if MPS_PER_PAGE > 1

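/*
 * When the metapage size (PSIZE) is smaller than PAGE_SIZE, one folio
 * holds several metapages.  A meta_anchor hangs off folio->private to
 * track those metapages and the folio's in-flight I/O count.
 */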
struct meta_anchor {
	int mp_count;
	atomic_t io_count;
	blk_status_t status;
	struct metapage *mp[MPS_PER_PAGE];
};

static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	struct meta_anchor *anchor = folio->private;

	if (!anchor)
		return NULL;
	return anchor->mp[offset >> L2PSIZE];
}

static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	a = folio->private;
	if (!a) {
		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
		if (!a)
			return -ENOMEM;
		folio_attach_private(folio, a);
		kmap(&folio->page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}

static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a = folio->private;
	int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		folio_detach_private(folio);
		kunmap(&folio->page);
	}
}

static inline void inc_io(struct folio *folio)
{
	struct meta_anchor *anchor = folio->private;

	atomic_inc(&anchor->io_count);
}

static inline void dec_io(struct folio *folio, blk_status_t status,
		void (*handler)(struct folio *, blk_status_t))
{
	struct meta_anchor *anchor = folio->private;

	if (anchor->status == BLK_STS_OK)
		anchor->status = status;

	if (atomic_dec_and_test(&anchor->io_count))
		handler(folio, anchor->status);
}

#ifdef CONFIG_MIGRATION
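/*
 * Migrate a folio carrying several metapages.  Migration is refused
 * (-EAGAIN) if any of them is locked.  After the folio contents move,
 * each metapage is re-anchored on the destination folio and its data
 * pointer and folio back-pointer are updated.
 */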
static int __metapage_migrate_folio(struct address_space *mapping,
				    struct folio *dst, struct folio *src,
				    enum migrate_mode mode)
{
	struct meta_anchor *src_anchor = src->private;
	struct metapage *mps[MPS_PER_PAGE] = {0};
	struct metapage *mp;
	int i, rc;

	for (i = 0; i < MPS_PER_PAGE; i++) {
		mp = src_anchor->mp[i];
		if (mp && metapage_locked(mp))
			return -EAGAIN;
	}

	rc = filemap_migrate_folio(mapping, dst, src, mode);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	for (i = 0; i < MPS_PER_PAGE; i++) {
		mp = src_anchor->mp[i];
		if (!mp)
			continue;
		if (unlikely(insert_metapage(dst, mp))) {
			/* On error, roll back the previously inserted metapages */
			for (int j = 0; j < i; j++) {
				if (mps[j])
					remove_metapage(dst, mps[j]);
			}
			return -EAGAIN;
		}
		mps[i] = mp;
	}

	/* Update each metapage and remove it from src */
	for (i = 0; i < MPS_PER_PAGE; i++) {
		mp = mps[i];
		if (mp) {
			int page_offset = mp->data - folio_address(src);

			mp->data = folio_address(dst) + page_offset;
			mp->folio = dst;
			remove_metapage(src, mp);
		}
	}

	return MIGRATEPAGE_SUCCESS;
}
#endif	/* CONFIG_MIGRATION */

#else

static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	return folio->private;
}

static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp) {
		folio_attach_private(folio, mp);
		kmap(&folio->page);
	}
	return 0;
}

static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	folio_detach_private(folio);
	kunmap(&folio->page);
}

#define inc_io(folio) do {} while (0)
#define dec_io(folio, status, handler) handler(folio, status)

#ifdef CONFIG_MIGRATION
static int __metapage_migrate_folio(struct address_space *mapping,
				    struct folio *dst, struct folio *src,
				    enum migrate_mode mode)
{
	struct metapage *mp;
	int page_offset;
	int rc;

	mp = folio_to_mp(src, 0);
	if (metapage_locked(mp))
		return -EAGAIN;

	rc = filemap_migrate_folio(mapping, dst, src, mode);
	if (rc != MIGRATEPAGE_SUCCESS)
		return rc;

	if (unlikely(insert_metapage(dst, mp)))
		return -EAGAIN;

	page_offset = mp->data - folio_address(src);
	mp->data = folio_address(dst) + page_offset;
	mp->folio = dst;
	remove_metapage(src, mp);

	return MIGRATEPAGE_SUCCESS;
}
#endif	/* CONFIG_MIGRATION */

#endif

static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
	}
	return mp;
}

static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}

int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}

void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}

static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(folio, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}

/*
 * Metapage address space operations
 */

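/*
 * Map a logical block of a metadata inode to its on-disk block number,
 * clamping *len to the end of file and to the extent returned by
 * xtLookup().  An inode with a zero i_ino (the direct block-device
 * mapping) needs no lookup and the logical block is returned unchanged.
 * A return value of 0 means the block is not mapped.
 */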
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}

static void last_read_complete(struct folio *folio, blk_status_t status)
{
	if (status)
		printk(KERN_ERR "Read error %d at %#llx\n", status,
				folio_pos(folio));

	folio_end_read(folio, status == 0);
}

static void metapage_read_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	dec_io(folio, bio->bi_status, last_read_complete);
	bio_put(bio);
}

static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;

	/*
	 * This can race.  Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}

static void last_write_complete(struct folio *folio, blk_status_t status)
{
	struct metapage *mp;
	unsigned int offset;

	if (status) {
		int err = blk_status_to_errno(status);
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		mapping_set_error(folio->mapping, err);
	}

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * We would like to call drop_metapage() here, but that is
		 * probably not safe unless the folio is locked.
		 */
	}
	folio_end_writeback(folio);
}

static void metapage_write_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	BUG_ON(!folio->private);

	dec_io(folio, bio->bi_status, last_write_complete);
	bio_put(bio);
}

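/*
 * Write back every dirty metapage in the folio.  Runs of metapages that
 * are contiguous in memory and on disk are batched into a single bio;
 * a discontiguity submits the pending bio and starts a new one.
 * Metapages pinned by the journal (nohomeok) are redirtied instead of
 * being written.
 */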
static int metapage_write_folio(struct folio *folio,
		struct writeback_control *wbc, void *unused)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = folio->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = folio_pos(folio) >> inode->i_blkbits;
	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this folio isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
			/*
			 * Increment counter before submitting I/O to keep
			 * count from hitting zero before we're through
			 */
			inc_io(folio);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(folio);
		xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the folio
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = folio;

		/* Don't call bio_add_folio yet; we may add more to this bio */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		folio_redirty_for_writepage(wbc, folio);

	folio_unlock(folio);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		folio_end_writeback(folio);

	return 0;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
	bio_put(bio);
	folio_unlock(folio);
	dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
	while (bad_blocks--)
		dec_io(folio, BLK_STS_OK, last_write_complete);
	return -EIO;
}

static int metapage_writepages(struct address_space *mapping,
		struct writeback_control *wbc)
{
	struct blk_plug plug;
	int err;

	blk_start_plug(&plug);
	err = write_cache_pages(mapping, wbc, metapage_write_folio, NULL);
	blk_finish_plug(&plug);

	return err;
}

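/*
 * Read a folio of metadata.  Each run of mapped blocks gets its own
 * bio; unmapped blocks are skipped.  Completion is counted per folio
 * via inc_io()/dec_io() and finished in last_read_complete().
 */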
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_folio(inode, folio);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!folio_test_locked(folio));
	page_start = folio_pos(folio) >> inode->i_blkbits;

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!folio->private)
				insert_metapage(folio, NULL);
			inc_io(folio);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = folio;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			bio_add_folio_nofail(bio, folio, len, offset);
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		folio_unlock(folio);

	return 0;
}

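/*
 * Called when the VM wants to reclaim the folio.  A metapage that is
 * still referenced, held by the journal, or dirty makes the folio
 * unreleasable; otherwise its metapages are dropped from the logsync
 * list and freed.
 */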
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(folio, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}

#ifdef CONFIG_MIGRATION
/*
 * metapage_migrate_folio - Migration function for JFS metapages
 */
static int metapage_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{
	int expected_count;

	if (!src->private)
		return filemap_migrate_folio(mapping, dst, src, mode);

	/* Make sure the folio has no extra references before we do more work */
	expected_count = folio_expected_ref_count(src) + 1;
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;
	return __metapage_migrate_folio(mapping, dst, src, mode);
}
#else
#define metapage_migrate_folio NULL
#endif	/* CONFIG_MIGRATION */

static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				    size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}

const struct address_space_operations jfs_metapage_aops = {
	.read_folio	= metapage_read_folio,
	.writepages	= metapage_writepages,
	.release_folio	= metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.migrate_folio	= metapage_migrate_folio,
};

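/*
 * Return a locked, referenced metapage covering 'size' bytes of metadata
 * at block 'lblock'.  'absolute' selects the block device (direct)
 * mapping instead of the inode's own mapping.  When 'new' is set the
 * metapage data is zero-filled rather than used as read from disk.
 */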
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct folio *folio;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size  = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an NFS client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		folio = filemap_grab_folio(mapping, page_index);
		if (IS_ERR(folio)) {
			jfs_err("filemap_grab_folio failed!");
			return NULL;
		}
		folio_mark_uptodate(folio);
	} else {
		folio = read_mapping_folio(mapping, page_index, NULL);
		if (IS_ERR(folio)) {
			jfs_err("read_mapping_folio failed!");
			return NULL;
		}
		folio_lock(folio);
	}

	mp = folio_to_mp(folio, page_offset);
	if (mp) {
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->folio = folio;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = folio_address(folio) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(folio, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	folio_unlock(folio);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	folio_unlock(folio);
	return NULL;
}

void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	folio_get(mp->folio);
	folio_lock(mp->folio);
	mp->count++;
	lock_metapage(mp);
	folio_unlock(mp->folio);
}

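/*
 * Write a single folio of metapages synchronously: clear the dirty bit,
 * run metapage_write_folio() under WB_SYNC_ALL and wait for the
 * writeback to finish.  The folio must be locked on entry and is
 * unlocked on return.
 */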
static int metapage_write_one(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = folio_nr_pages(folio),
	};
	int ret = 0;

	BUG_ON(!folio_test_locked(folio));

	folio_wait_writeback(folio);

	if (folio_clear_dirty_for_io(folio)) {
		folio_get(folio);
		ret = metapage_write_folio(folio, &wbc, NULL);
		if (ret == 0)
			folio_wait_writeback(folio);
		folio_put(folio);
	} else {
		folio_unlock(folio);
	}

	if (!ret)
		ret = filemap_check_errors(mapping);
	return ret;
}

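/*
 * Force a metapage to disk immediately, even while the journal holds it
 * (nohomeok): META_forcewrite overrides the nohomeok check in
 * metapage_write_folio() for the duration of the write.
 */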
void force_metapage(struct metapage *mp)
{
	struct folio *folio = mp->folio;

	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	folio_get(folio);
	folio_lock(folio);
	folio_mark_dirty(folio);
	if (metapage_write_one(folio))
		jfs_error(mp->sb, "metapage_write_one() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	folio_put(folio);
}

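/*
 * hold_metapage()/put_metapage() give a caller temporary access to a
 * metapage under the folio lock.  put_metapage() drops the folio lock
 * and, if nobody else holds the metapage, releases it for good via
 * release_metapage().
 */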
void hold_metapage(struct metapage *mp)
{
	folio_lock(mp->folio);
}

void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		folio_unlock(mp->folio);
		return;
	}
	folio_get(mp->folio);
	mp->count++;
	lock_metapage(mp);
	folio_unlock(mp->folio);
	release_metapage(mp);
}

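/*
 * Drop one reference on a metapage.  On the final release a dirty
 * metapage marks its folio dirty (and is written immediately if
 * META_sync is set), any logsync entry is removed, and drop_metapage()
 * frees the metapage once it is no longer in use.
 */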
void release_metapage(struct metapage * mp)
{
	struct folio *folio = mp->folio;

	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	folio_lock(folio);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		folio_unlock(folio);
		folio_put(folio);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		folio_mark_dirty(folio);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (metapage_write_one(folio))
				jfs_error(mp->sb, "metapage_write_one() failed\n");
			folio_lock(folio);
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(folio, mp);

	folio_unlock(folio);
	folio_put(folio);
}

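/*
 * Invalidate every metapage that maps a block in the on-disk range
 * [addr, addr + len).
 */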
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in the block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		struct folio *folio = filemap_lock_folio(mapping,
				lblock >> l2BlocksPerPage);
		if (IS_ERR(folio))
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = folio_to_mp(folio, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		folio_unlock(folio);
		folio_put(folio);
	}
}

#ifdef CONFIG_JFS_STATISTICS
int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
	seq_printf(m,
		       "JFS Metapage statistics\n"
		       "=======================\n"
		       "page allocations = %d\n"
		       "page frees = %d\n"
		       "lock waits = %d\n",
		       mpStat.pagealloc,
		       mpStat.pagefree,
		       mpStat.lockwait);
	return 0;
}
#endif