xref: /linux/fs/jfs/jfs_metapage.c (revision ab93e0dd72c37d378dd936f031ffb83ff2bd87ce)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  *   Copyright (C) International Business Machines Corp., 2000-2005
4  *   Portions Copyright (C) Christoph Hellwig, 2001-2002
5  */
6 
7 #include <linux/blkdev.h>
8 #include <linux/fs.h>
9 #include <linux/mm.h>
10 #include <linux/module.h>
11 #include <linux/bio.h>
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mempool.h>
16 #include <linux/seq_file.h>
17 #include <linux/writeback.h>
18 #include <linux/migrate.h>
19 #include "jfs_incore.h"
20 #include "jfs_superblock.h"
21 #include "jfs_filsys.h"
22 #include "jfs_metapage.h"
23 #include "jfs_txnmgr.h"
24 #include "jfs_debug.h"
25 
26 #ifdef CONFIG_JFS_STATISTICS
27 static struct {
28 	uint	pagealloc;	/* # of page allocations */
29 	uint	pagefree;	/* # of page frees */
30 	uint	lockwait;	/* # of sleeping lock_metapage() calls */
31 } mpStat;
32 #endif
33 
34 #define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
35 #define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)
36 
37 static inline void unlock_metapage(struct metapage *mp)
38 {
39 	clear_bit_unlock(META_locked, &mp->flag);
40 	wake_up(&mp->wait);
41 }
42 
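/*
 * Slow path of lock_metapage(): sleep until META_locked is released.
 * The folio lock is dropped while waiting, so the current holder of the
 * metapage can make progress, and is re-taken before returning.
 */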
43 static inline void __lock_metapage(struct metapage *mp)
44 {
45 	DECLARE_WAITQUEUE(wait, current);
46 	INCREMENT(mpStat.lockwait);
47 	add_wait_queue_exclusive(&mp->wait, &wait);
48 	do {
49 		set_current_state(TASK_UNINTERRUPTIBLE);
50 		if (metapage_locked(mp)) {
51 			folio_unlock(mp->folio);
52 			io_schedule();
53 			folio_lock(mp->folio);
54 		}
55 	} while (trylock_metapage(mp));
56 	__set_current_state(TASK_RUNNING);
57 	remove_wait_queue(&mp->wait, &wait);
58 }
59 
60 /*
61  * Must have mp->folio locked
62  */
63 static inline void lock_metapage(struct metapage *mp)
64 {
65 	if (trylock_metapage(mp))
66 		__lock_metapage(mp);
67 }
68 
69 #define METAPOOL_MIN_PAGES 32
70 static struct kmem_cache *metapage_cache;
71 static mempool_t *metapage_mempool;
72 
73 #define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
74 
75 #if MPS_PER_PAGE > 1
76 
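/*
 * When PAGE_SIZE is larger than the metapage size (PSIZE), several
 * metapages share one folio.  A meta_anchor hangs off folio->private and
 * tracks the metapage in each slot plus the number of in-flight I/Os and
 * their combined status.
 */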
77 struct meta_anchor {
78 	int mp_count;
79 	atomic_t io_count;
80 	blk_status_t status;
81 	struct metapage *mp[MPS_PER_PAGE];
82 };
83 
84 static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
85 {
86 	struct meta_anchor *anchor = folio->private;
87 
88 	if (!anchor)
89 		return NULL;
90 	return anchor->mp[offset >> L2PSIZE];
91 }
92 
93 static inline int insert_metapage(struct folio *folio, struct metapage *mp)
94 {
95 	struct meta_anchor *a;
96 	int index;
97 	int l2mp_blocks;	/* log2 blocks per metapage */
98 
99 	a = folio->private;
100 	if (!a) {
101 		a = kzalloc(sizeof(struct meta_anchor), GFP_NOFS);
102 		if (!a)
103 			return -ENOMEM;
104 		folio_attach_private(folio, a);
105 		kmap(&folio->page);
106 	}
107 
108 	if (mp) {
109 		l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
110 		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
111 		a->mp_count++;
112 		a->mp[index] = mp;
113 	}
114 
115 	return 0;
116 }
117 
118 static inline void remove_metapage(struct folio *folio, struct metapage *mp)
119 {
120 	struct meta_anchor *a = folio->private;
121 	int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
122 	int index;
123 
124 	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
125 
126 	BUG_ON(a->mp[index] != mp);
127 
128 	a->mp[index] = NULL;
129 	if (--a->mp_count == 0) {
130 		kfree(a);
131 		folio_detach_private(folio);
132 		kunmap(&folio->page);
133 	}
134 }
135 
136 static inline void inc_io(struct folio *folio)
137 {
138 	struct meta_anchor *anchor = folio->private;
139 
140 	atomic_inc(&anchor->io_count);
141 }
142 
143 static inline void dec_io(struct folio *folio, blk_status_t status,
144 		void (*handler)(struct folio *, blk_status_t))
145 {
146 	struct meta_anchor *anchor = folio->private;
147 
148 	if (anchor->status == BLK_STS_OK)
149 		anchor->status = status;
150 
151 	if (atomic_dec_and_test(&anchor->io_count))
152 		handler(folio, anchor->status);
153 }
154 
155 #ifdef CONFIG_MIGRATION
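/*
 * Migrate a folio that hosts one or more metapages: bail out with -EAGAIN
 * if any metapage is locked, migrate the page cache entry, then repoint
 * each metapage's data pointer and folio at the destination folio,
 * rolling back if the anchor cannot be set up on the destination.
 */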
156 static int __metapage_migrate_folio(struct address_space *mapping,
157 				    struct folio *dst, struct folio *src,
158 				    enum migrate_mode mode)
159 {
160 	struct meta_anchor *src_anchor = src->private;
161 	struct metapage *mps[MPS_PER_PAGE] = {0};
162 	struct metapage *mp;
163 	int i, rc;
164 
165 	for (i = 0; i < MPS_PER_PAGE; i++) {
166 		mp = src_anchor->mp[i];
167 		if (mp && metapage_locked(mp))
168 			return -EAGAIN;
169 	}
170 
171 	rc = filemap_migrate_folio(mapping, dst, src, mode);
172 	if (rc != MIGRATEPAGE_SUCCESS)
173 		return rc;
174 
175 	for (i = 0; i < MPS_PER_PAGE; i++) {
176 		mp = src_anchor->mp[i];
177 		if (!mp)
178 			continue;
179 		if (unlikely(insert_metapage(dst, mp))) {
180 			/* If error, roll back previously inserted pages */
181 			for (int j = 0 ; j < i; j++) {
182 				if (mps[j])
183 					remove_metapage(dst, mps[j]);
184 			}
185 			return -EAGAIN;
186 		}
187 		mps[i] = mp;
188 	}
189 
190 	/* Update the metapage and remove it from src */
191 	for (i = 0; i < MPS_PER_PAGE; i++) {
192 		mp = mps[i];
193 		if (mp) {
194 			int page_offset = mp->data - folio_address(src);
195 
196 			mp->data = folio_address(dst) + page_offset;
197 			mp->folio = dst;
198 			remove_metapage(src, mp);
199 		}
200 	}
201 
202 	return MIGRATEPAGE_SUCCESS;
203 }
204 #endif	/* CONFIG_MIGRATION */
205 
206 #else
207 
208 static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
209 {
210 	return folio->private;
211 }
212 
213 static inline int insert_metapage(struct folio *folio, struct metapage *mp)
214 {
215 	if (mp) {
216 		folio_attach_private(folio, mp);
217 		kmap(&folio->page);
218 	}
219 	return 0;
220 }
221 
222 static inline void remove_metapage(struct folio *folio, struct metapage *mp)
223 {
224 	folio_detach_private(folio);
225 	kunmap(&folio->page);
226 }
227 
228 #define inc_io(folio) do {} while(0)
229 #define dec_io(folio, status, handler) handler(folio, status)
230 
231 #ifdef CONFIG_MIGRATION
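/*
 * Single-metapage variant: with one metapage per folio, folio->private is
 * the metapage itself, so migration only has to repoint that one metapage
 * at the destination folio.
 */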
232 static int __metapage_migrate_folio(struct address_space *mapping,
233 				    struct folio *dst, struct folio *src,
234 				    enum migrate_mode mode)
235 {
236 	struct metapage *mp;
237 	int page_offset;
238 	int rc;
239 
240 	mp = folio_to_mp(src, 0);
241 	if (metapage_locked(mp))
242 		return -EAGAIN;
243 
244 	rc = filemap_migrate_folio(mapping, dst, src, mode);
245 	if (rc != MIGRATEPAGE_SUCCESS)
246 		return rc;
247 
248 	if (unlikely(insert_metapage(dst, mp)))
249 		return -EAGAIN;
250 
251 	page_offset = mp->data - folio_address(src);
252 	mp->data = folio_address(dst) + page_offset;
253 	mp->folio = dst;
254 	remove_metapage(src, mp);
255 
256 	return MIGRATEPAGE_SUCCESS;
257 }
258 #endif	/* CONFIG_MIGRATION */
259 
260 #endif
261 
262 static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
263 {
264 	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);
265 
266 	if (mp) {
267 		mp->lid = 0;
268 		mp->lsn = 0;
269 		mp->data = NULL;
270 		mp->clsn = 0;
271 		mp->log = NULL;
272 		init_waitqueue_head(&mp->wait);
273 	}
274 	return mp;
275 }
276 
277 static inline void free_metapage(struct metapage *mp)
278 {
279 	mempool_free(mp, metapage_mempool);
280 }
281 
282 int __init metapage_init(void)
283 {
284 	/*
285 	 * Allocate the metapage structures
286 	 */
287 	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
288 					   0, 0, NULL);
289 	if (metapage_cache == NULL)
290 		return -ENOMEM;
291 
292 	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
293 						    metapage_cache);
294 
295 	if (metapage_mempool == NULL) {
296 		kmem_cache_destroy(metapage_cache);
297 		return -ENOMEM;
298 	}
299 
300 	return 0;
301 }
302 
303 void metapage_exit(void)
304 {
305 	mempool_destroy(metapage_mempool);
306 	kmem_cache_destroy(metapage_cache);
307 }
308 
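/*
 * Free a metapage that is no longer referenced, pinned by the journal,
 * dirty, or under I/O.  The caller holds the folio lock.
 */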
309 static inline void drop_metapage(struct folio *folio, struct metapage *mp)
310 {
311 	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
312 	    test_bit(META_io, &mp->flag))
313 		return;
314 	remove_metapage(folio, mp);
315 	INCREMENT(mpStat.pagefree);
316 	free_metapage(mp);
317 }
318 
319 /*
320  * Metapage address space operations
321  */
322 
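/*
 * Translate a logical block of a metadata inode to its disk address.
 * The direct (block device) inode, i_ino == 0, maps 1:1; other inodes go
 * through xtLookup().  *len is clamped to the file size and to the mapped
 * extent; a return value of 0 means there is no mapping.
 */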
323 static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
324 				    int *len)
325 {
326 	int rc = 0;
327 	int xflag;
328 	s64 xaddr;
329 	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
330 			       inode->i_blkbits;
331 
332 	if (lblock >= file_blocks)
333 		return 0;
334 	if (lblock + *len > file_blocks)
335 		*len = file_blocks - lblock;
336 
337 	if (inode->i_ino) {
338 		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
339 		if ((rc == 0) && *len)
340 			lblock = (sector_t)xaddr;
341 		else
342 			lblock = 0;
343 	} /* else no mapping */
344 
345 	return lblock;
346 }
347 
348 static void last_read_complete(struct folio *folio, blk_status_t status)
349 {
350 	if (status)
351 		printk(KERN_ERR "Read error %d at %#llx\n", status,
352 				folio_pos(folio));
353 
354 	folio_end_read(folio, status == 0);
355 }
356 
357 static void metapage_read_end_io(struct bio *bio)
358 {
359 	struct folio *folio = bio->bi_private;
360 
361 	dec_io(folio, bio->bi_status, last_read_complete);
362 	bio_put(bio);
363 }
364 
365 static void remove_from_logsync(struct metapage *mp)
366 {
367 	struct jfs_log *log = mp->log;
368 	unsigned long flags;
369 /*
370  * This can race.  Recheck that log hasn't been set to null, and after
371  * acquiring logsync lock, recheck lsn
372  */
373 	if (!log)
374 		return;
375 
376 	LOGSYNC_LOCK(log, flags);
377 	if (mp->lsn) {
378 		mp->log = NULL;
379 		mp->lsn = 0;
380 		mp->clsn = 0;
381 		log->count--;
382 		list_del(&mp->synclist);
383 	}
384 	LOGSYNC_UNLOCK(log, flags);
385 }
386 
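/*
 * Called when the last outstanding write bio for a folio completes: clear
 * META_io on each metapage in the folio, drop finished metapages from the
 * log sync list, record any error against the mapping, and end writeback
 * on the folio.
 */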
387 static void last_write_complete(struct folio *folio, blk_status_t status)
388 {
389 	struct metapage *mp;
390 	unsigned int offset;
391 
392 	if (status) {
393 		int err = blk_status_to_errno(status);
394 		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
395 		mapping_set_error(folio->mapping, err);
396 	}
397 
398 	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
399 		mp = folio_to_mp(folio, offset);
400 		if (mp && test_bit(META_io, &mp->flag)) {
401 			if (mp->lsn)
402 				remove_from_logsync(mp);
403 			clear_bit(META_io, &mp->flag);
404 		}
405 		/*
406 		 * I'd like to call drop_metapage here, but I don't think it's
407 		 * safe unless I have the page locked
408 		 */
409 	}
410 	folio_end_writeback(folio);
411 }
412 
413 static void metapage_write_end_io(struct bio *bio)
414 {
415 	struct folio *folio = bio->bi_private;
416 
417 	BUG_ON(!folio->private);
418 
419 	dec_io(folio, bio->bi_status, last_write_complete);
420 	bio_put(bio);
421 }
422 
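/*
 * Write back the dirty metapages in a folio.  Dirty metapages that are
 * contiguous both in memory and on disk are coalesced into a single bio.
 * Metapages pinned by the journal (nohomeok) are skipped, unless
 * META_forcewrite is set, and the folio is redirtied instead.
 */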
423 static int metapage_write_folio(struct folio *folio,
424 		struct writeback_control *wbc)
425 {
426 	struct bio *bio = NULL;
427 	int block_offset;	/* block offset of mp within page */
428 	struct inode *inode = folio->mapping->host;
429 	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
430 	int len;
431 	int xlen;
432 	struct metapage *mp;
433 	int redirty = 0;
434 	sector_t lblock;
435 	int nr_underway = 0;
436 	sector_t pblock;
437 	sector_t next_block = 0;
438 	sector_t page_start;
439 	unsigned long bio_bytes = 0;
440 	unsigned long bio_offset = 0;
441 	int offset;
442 	int bad_blocks = 0;
443 
444 	page_start = folio_pos(folio) >> inode->i_blkbits;
445 	BUG_ON(!folio_test_locked(folio));
446 	BUG_ON(folio_test_writeback(folio));
447 	folio_start_writeback(folio);
448 
449 	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
450 		mp = folio_to_mp(folio, offset);
451 
452 		if (!mp || !test_bit(META_dirty, &mp->flag))
453 			continue;
454 
455 		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
456 			redirty = 1;
457 			/*
458 			 * Make sure this page isn't blocked indefinitely.
459 			 * If the journal isn't undergoing I/O, push it
460 			 */
461 			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
462 				jfs_flush_journal(mp->log, 0);
463 			continue;
464 		}
465 
466 		clear_bit(META_dirty, &mp->flag);
467 		set_bit(META_io, &mp->flag);
468 		block_offset = offset >> inode->i_blkbits;
469 		lblock = page_start + block_offset;
470 		if (bio) {
471 			if (xlen && lblock == next_block) {
472 				/* Contiguous, in memory & on disk */
473 				len = min(xlen, blocks_per_mp);
474 				xlen -= len;
475 				bio_bytes += len << inode->i_blkbits;
476 				continue;
477 			}
478 			/* Not contiguous */
479 			bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
480 			/*
481 			 * Increment counter before submitting i/o to keep
482 			 * count from hitting zero before we're through
483 			 */
484 			inc_io(folio);
485 			if (!bio->bi_iter.bi_size)
486 				goto dump_bio;
487 			submit_bio(bio);
488 			nr_underway++;
489 			bio = NULL;
490 		} else
491 			inc_io(folio);
492 		xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
493 		pblock = metapage_get_blocks(inode, lblock, &xlen);
494 		if (!pblock) {
495 			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
496 			/*
497 			 * We already called inc_io(), but can't cancel it
498 			 * with dec_io() until we're done with the page
499 			 */
500 			bad_blocks++;
501 			continue;
502 		}
503 		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
504 
505 		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
506 		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
507 		bio->bi_end_io = metapage_write_end_io;
508 		bio->bi_private = folio;
509 
510 		/* Don't call bio_add_page yet, we may add to this vec */
511 		bio_offset = offset;
512 		bio_bytes = len << inode->i_blkbits;
513 
514 		xlen -= len;
515 		next_block = lblock + len;
516 	}
517 	if (bio) {
518 		bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
519 		if (!bio->bi_iter.bi_size)
520 			goto dump_bio;
521 
522 		submit_bio(bio);
523 		nr_underway++;
524 	}
525 	if (redirty)
526 		folio_redirty_for_writepage(wbc, folio);
527 
528 	folio_unlock(folio);
529 
530 	if (bad_blocks)
531 		goto err_out;
532 
533 	if (nr_underway == 0)
534 		folio_end_writeback(folio);
535 
536 	return 0;
537 dump_bio:
538 	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
539 		       4, bio, sizeof(*bio), 0);
540 	bio_put(bio);
541 	folio_unlock(folio);
542 	dec_io(folio, BLK_STS_OK, last_write_complete);
543 err_out:
544 	while (bad_blocks--)
545 		dec_io(folio, BLK_STS_OK, last_write_complete);
546 	return -EIO;
547 }
548 
549 static int metapage_writepages(struct address_space *mapping,
550 		struct writeback_control *wbc)
551 {
552 	struct blk_plug plug;
553 	struct folio *folio = NULL;
554 	int err;
555 
556 	blk_start_plug(&plug);
557 	while ((folio = writeback_iter(mapping, wbc, folio, &err)))
558 		err = metapage_write_folio(folio, wbc);
559 	blk_finish_plug(&plug);
560 
561 	return err;
562 }
563 
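/*
 * Read a folio of metadata: walk the folio in filesystem blocks and issue
 * one bio per contiguous on-disk extent; unmapped blocks are skipped.
 */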
564 static int metapage_read_folio(struct file *fp, struct folio *folio)
565 {
566 	struct inode *inode = folio->mapping->host;
567 	struct bio *bio = NULL;
568 	int block_offset;
569 	int blocks_per_page = i_blocks_per_folio(inode, folio);
570 	sector_t page_start;	/* address of page in fs blocks */
571 	sector_t pblock;
572 	int xlen;
573 	unsigned int len;
574 	int offset;
575 
576 	BUG_ON(!folio_test_locked(folio));
577 	page_start = folio_pos(folio) >> inode->i_blkbits;
578 
579 	block_offset = 0;
580 	while (block_offset < blocks_per_page) {
581 		xlen = blocks_per_page - block_offset;
582 		pblock = metapage_get_blocks(inode, page_start + block_offset,
583 					     &xlen);
584 		if (pblock) {
585 			if (!folio->private)
586 				insert_metapage(folio, NULL);
587 			inc_io(folio);
588 			if (bio)
589 				submit_bio(bio);
590 
591 			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
592 					GFP_NOFS);
593 			bio->bi_iter.bi_sector =
594 				pblock << (inode->i_blkbits - 9);
595 			bio->bi_end_io = metapage_read_end_io;
596 			bio->bi_private = folio;
597 			len = xlen << inode->i_blkbits;
598 			offset = block_offset << inode->i_blkbits;
599 			bio_add_folio_nofail(bio, folio, len, offset);
600 			block_offset += xlen;
601 		} else
602 			block_offset++;
603 	}
604 	if (bio)
605 		submit_bio(bio);
606 	else
607 		folio_unlock(folio);
608 
609 	return 0;
610 }
611 
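/*
 * Try to free the metapages attached to a folio.  Returns false if any
 * metapage is still referenced, pinned by the journal, or dirty;
 * otherwise each metapage is removed from the log sync list and freed.
 */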
612 static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
613 {
614 	struct metapage *mp;
615 	bool ret = true;
616 	int offset;
617 
618 	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
619 		mp = folio_to_mp(folio, offset);
620 
621 		if (!mp)
622 			continue;
623 
624 		jfs_info("metapage_release_folio: mp = 0x%p", mp);
625 		if (mp->count || mp->nohomeok ||
626 		    test_bit(META_dirty, &mp->flag)) {
627 			jfs_info("count = %ld, nohomeok = %d", mp->count,
628 				 mp->nohomeok);
629 			ret = false;
630 			continue;
631 		}
632 		if (mp->lsn)
633 			remove_from_logsync(mp);
634 		remove_metapage(folio, mp);
635 		INCREMENT(mpStat.pagefree);
636 		free_metapage(mp);
637 	}
638 	return ret;
639 }
640 
641 #ifdef CONFIG_MIGRATION
642 /*
643  * metapage_migrate_folio - Migration function for JFS metapages
644  */
645 static int metapage_migrate_folio(struct address_space *mapping,
646 				  struct folio *dst, struct folio *src,
647 				  enum migrate_mode mode)
648 {
649 	int expected_count;
650 
651 	if (!src->private)
652 		return filemap_migrate_folio(mapping, dst, src, mode);
653 
654 	/* Check that the page has no extra references before we do more work */
655 	expected_count = folio_expected_ref_count(src) + 1;
656 	if (folio_ref_count(src) != expected_count)
657 		return -EAGAIN;
658 	return __metapage_migrate_folio(mapping, dst, src, mode);
659 }
660 #else
661 #define metapage_migrate_folio NULL
662 #endif	/* CONFIG_MIGRATION */
663 
664 static void metapage_invalidate_folio(struct folio *folio, size_t offset,
665 				    size_t length)
666 {
667 	BUG_ON(offset || length < folio_size(folio));
668 
669 	BUG_ON(folio_test_writeback(folio));
670 
671 	metapage_release_folio(folio, 0);
672 }
673 
674 const struct address_space_operations jfs_metapage_aops = {
675 	.read_folio	= metapage_read_folio,
676 	.writepages	= metapage_writepages,
677 	.release_folio	= metapage_release_folio,
678 	.invalidate_folio = metapage_invalidate_folio,
679 	.dirty_folio	= filemap_dirty_folio,
680 	.migrate_folio	= metapage_migrate_folio,
681 };
682 
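/*
 * Look up or create the metapage covering @lblock.  @absolute selects the
 * block device's (direct inode) mapping instead of the inode's own
 * mapping.  @new indicates the caller will overwrite the data, so the
 * metapage is zeroed and, when possible, the backing page is not read
 * from disk.  Returns the metapage with META_locked set, or NULL on
 * failure.
 */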
683 struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
684 				unsigned int size, int absolute,
685 				unsigned long new)
686 {
687 	int l2BlocksPerPage;
688 	int l2bsize;
689 	struct address_space *mapping;
690 	struct metapage *mp = NULL;
691 	struct folio *folio;
692 	unsigned long page_index;
693 	unsigned long page_offset;
694 
695 	jfs_info("__get_metapage: ino = %ld, lblock = 0x%lx, abs=%d",
696 		 inode->i_ino, lblock, absolute);
697 
698 	l2bsize = inode->i_blkbits;
699 	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
700 	page_index = lblock >> l2BlocksPerPage;
701 	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
702 	if ((page_offset + size) > PAGE_SIZE) {
703 		jfs_err("MetaData crosses page boundary!!");
704 		jfs_err("lblock = %lx, size  = %d", lblock, size);
705 		dump_stack();
706 		return NULL;
707 	}
708 	if (absolute)
709 		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
710 	else {
711 		/*
712 		 * If an nfs client tries to read an inode that is larger
713 		 * than any existing inodes, we may try to read past the
714 		 * end of the inode map
715 		 */
716 		if ((lblock << inode->i_blkbits) >= inode->i_size)
717 			return NULL;
718 		mapping = inode->i_mapping;
719 	}
720 
721 	if (new && (PSIZE == PAGE_SIZE)) {
722 		folio = filemap_grab_folio(mapping, page_index);
723 		if (IS_ERR(folio)) {
724 			jfs_err("filemap_grab_folio failed!");
725 			return NULL;
726 		}
727 		folio_mark_uptodate(folio);
728 	} else {
729 		folio = read_mapping_folio(mapping, page_index, NULL);
730 		if (IS_ERR(folio)) {
731 			jfs_err("read_mapping_page failed!");
732 			return NULL;
733 		}
734 		folio_lock(folio);
735 	}
736 
737 	mp = folio_to_mp(folio, page_offset);
738 	if (mp) {
739 		if (mp->logical_size != size) {
740 			jfs_error(inode->i_sb,
741 				  "get_mp->logical_size != size\n");
742 			jfs_err("logical_size = %d, size = %d",
743 				mp->logical_size, size);
744 			dump_stack();
745 			goto unlock;
746 		}
747 		mp->count++;
748 		lock_metapage(mp);
749 		if (test_bit(META_discard, &mp->flag)) {
750 			if (!new) {
751 				jfs_error(inode->i_sb,
752 					  "using a discarded metapage\n");
753 				discard_metapage(mp);
754 				goto unlock;
755 			}
756 			clear_bit(META_discard, &mp->flag);
757 		}
758 	} else {
759 		INCREMENT(mpStat.pagealloc);
760 		mp = alloc_metapage(GFP_NOFS);
761 		if (!mp)
762 			goto unlock;
763 		mp->folio = folio;
764 		mp->sb = inode->i_sb;
765 		mp->flag = 0;
766 		mp->xflag = COMMIT_PAGE;
767 		mp->count = 1;
768 		mp->nohomeok = 0;
769 		mp->logical_size = size;
770 		mp->data = folio_address(folio) + page_offset;
771 		mp->index = lblock;
772 		if (unlikely(insert_metapage(folio, mp))) {
773 			free_metapage(mp);
774 			goto unlock;
775 		}
776 		lock_metapage(mp);
777 	}
778 
779 	if (new) {
780 		jfs_info("zeroing mp = 0x%p", mp);
781 		memset(mp->data, 0, PSIZE);
782 	}
783 
784 	folio_unlock(folio);
785 	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
786 	return mp;
787 
788 unlock:
789 	folio_unlock(folio);
790 	return NULL;
791 }
792 
793 void grab_metapage(struct metapage * mp)
794 {
795 	jfs_info("grab_metapage: mp = 0x%p", mp);
796 	folio_get(mp->folio);
797 	folio_lock(mp->folio);
798 	mp->count++;
799 	lock_metapage(mp);
800 	folio_unlock(mp->folio);
801 }
802 
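/*
 * Synchronously write one folio of metapages and wait for the I/O to
 * finish.  The folio must be locked on entry; it is unlocked on return,
 * either by metapage_write_folio() or directly here.
 */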
803 static int metapage_write_one(struct folio *folio)
804 {
805 	struct address_space *mapping = folio->mapping;
806 	struct writeback_control wbc = {
807 		.sync_mode = WB_SYNC_ALL,
808 		.nr_to_write = folio_nr_pages(folio),
809 	};
810 	int ret = 0;
811 
812 	BUG_ON(!folio_test_locked(folio));
813 
814 	folio_wait_writeback(folio);
815 
816 	if (folio_clear_dirty_for_io(folio)) {
817 		folio_get(folio);
818 		ret = metapage_write_folio(folio, &wbc);
819 		if (ret == 0)
820 			folio_wait_writeback(folio);
821 		folio_put(folio);
822 	} else {
823 		folio_unlock(folio);
824 	}
825 
826 	if (!ret)
827 		ret = filemap_check_errors(mapping);
828 	return ret;
829 }
830 
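/*
 * Write a metapage to disk immediately, even if it is pinned by the
 * journal: META_forcewrite lets metapage_write_folio() bypass the
 * nohomeok check.
 */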
831 void force_metapage(struct metapage *mp)
832 {
833 	struct folio *folio = mp->folio;
834 	jfs_info("force_metapage: mp = 0x%p", mp);
835 	set_bit(META_forcewrite, &mp->flag);
836 	clear_bit(META_sync, &mp->flag);
837 	folio_get(folio);
838 	folio_lock(folio);
839 	folio_mark_dirty(folio);
840 	if (metapage_write_one(folio))
841 		jfs_error(mp->sb, "metapage_write_one() failed\n");
842 	clear_bit(META_forcewrite, &mp->flag);
843 	folio_put(folio);
844 }
845 
846 void hold_metapage(struct metapage *mp)
847 {
848 	folio_lock(mp->folio);
849 }
850 
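/*
 * Counterpart to hold_metapage(): drop the folio lock taken there.  If
 * nothing else references the metapage, take a temporary reference so
 * that release_metapage() can do the final teardown.
 */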
851 void put_metapage(struct metapage *mp)
852 {
853 	if (mp->count || mp->nohomeok) {
854 		/* Someone else will release this */
855 		folio_unlock(mp->folio);
856 		return;
857 	}
858 	folio_get(mp->folio);
859 	mp->count++;
860 	lock_metapage(mp);
861 	folio_unlock(mp->folio);
862 	release_metapage(mp);
863 }
864 
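/*
 * Drop a reference to a metapage.  On the final release a dirty metapage
 * is marked for writeback (and written synchronously if META_sync is
 * set), a clean one is removed from the log sync list, and
 * drop_metapage() frees it if nothing else pins it.
 */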
865 void release_metapage(struct metapage * mp)
866 {
867 	struct folio *folio = mp->folio;
868 	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);
869 
870 	folio_lock(folio);
871 	unlock_metapage(mp);
872 
873 	assert(mp->count);
874 	if (--mp->count || mp->nohomeok) {
875 		folio_unlock(folio);
876 		folio_put(folio);
877 		return;
878 	}
879 
880 	if (test_bit(META_dirty, &mp->flag)) {
881 		folio_mark_dirty(folio);
882 		if (test_bit(META_sync, &mp->flag)) {
883 			clear_bit(META_sync, &mp->flag);
884 			if (metapage_write_one(folio))
885 				jfs_error(mp->sb, "metapage_write_one() failed\n");
886 			folio_lock(folio);
887 		}
888 	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
889 		remove_from_logsync(mp);
890 
891 	/* Try to keep metapages from using up too much memory */
892 	drop_metapage(folio, mp);
893 
894 	folio_unlock(folio);
895 	folio_put(folio);
896 }
897 
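/*
 * Mark every metapage backing the block range [addr, addr + len) in the
 * block device mapping as discarded: clear META_dirty, set META_discard
 * and drop the metapage from the log sync list so it will not be written
 * back.
 */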
898 void __invalidate_metapages(struct inode *ip, s64 addr, int len)
899 {
900 	sector_t lblock;
901 	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
902 	int BlocksPerPage = 1 << l2BlocksPerPage;
903 	/* All callers are interested in block device's mapping */
904 	struct address_space *mapping =
905 		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
906 	struct metapage *mp;
907 	unsigned int offset;
908 
909 	/*
910 	 * Mark metapages to discard.  They will eventually be
911 	 * released, but should not be written.
912 	 */
913 	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
914 	     lblock += BlocksPerPage) {
915 		struct folio *folio = filemap_lock_folio(mapping,
916 				lblock >> l2BlocksPerPage);
917 		if (IS_ERR(folio))
918 			continue;
919 		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
920 			mp = folio_to_mp(folio, offset);
921 			if (!mp)
922 				continue;
923 			if (mp->index < addr)
924 				continue;
925 			if (mp->index >= addr + len)
926 				break;
927 
928 			clear_bit(META_dirty, &mp->flag);
929 			set_bit(META_discard, &mp->flag);
930 			if (mp->lsn)
931 				remove_from_logsync(mp);
932 		}
933 		folio_unlock(folio);
934 		folio_put(folio);
935 	}
936 }
937 
938 #ifdef CONFIG_JFS_STATISTICS
939 int jfs_mpstat_proc_show(struct seq_file *m, void *v)
940 {
941 	seq_printf(m,
942 		       "JFS Metapage statistics\n"
943 		       "=======================\n"
944 		       "page allocations = %d\n"
945 		       "page frees = %d\n"
946 		       "lock waits = %d\n",
947 		       mpStat.pagealloc,
948 		       mpStat.pagefree,
949 		       mpStat.lockwait);
950 	return 0;
951 }
952 #endif
953