1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright (C) International Business Machines Corp., 2000-2005
4 * Portions Copyright (C) Christoph Hellwig, 2001-2002
5 */
6
7 #include <linux/blkdev.h>
8 #include <linux/fs.h>
9 #include <linux/mm.h>
10 #include <linux/module.h>
11 #include <linux/bio.h>
12 #include <linux/slab.h>
13 #include <linux/init.h>
14 #include <linux/buffer_head.h>
15 #include <linux/mempool.h>
16 #include <linux/seq_file.h>
17 #include <linux/writeback.h>
18 #include <linux/migrate.h>
19 #include "jfs_incore.h"
20 #include "jfs_superblock.h"
21 #include "jfs_filsys.h"
22 #include "jfs_metapage.h"
23 #include "jfs_txnmgr.h"
24 #include "jfs_debug.h"
25
#ifdef CONFIG_JFS_STATISTICS
/* Counters exported via /proc (see jfs_mpstat_proc_show below). */
static struct {
	uint	pagealloc;	/* # of page allocations */
	uint	pagefree;	/* # of page frees */
	uint	lockwait;	/* # of sleeping lock_metapage() calls */
} mpStat;
#endif
33
/*
 * Metapage lock: the META_locked bit in mp->flag serializes access to the
 * metapage.  trylock uses test_and_set_bit_lock() for acquire semantics.
 */
#define metapage_locked(mp) test_bit(META_locked, &(mp)->flag)
#define trylock_metapage(mp) test_and_set_bit_lock(META_locked, &(mp)->flag)

/* Release META_locked and wake a waiter sleeping in __lock_metapage(). */
static inline void unlock_metapage(struct metapage *mp)
{
	clear_bit_unlock(META_locked, &mp->flag);
	wake_up(&mp->wait);
}
42
/*
 * Slow path of lock_metapage(): sleep until META_locked can be acquired.
 *
 * The caller holds mp->folio locked (see lock_metapage below); the folio
 * lock is dropped across the uninterruptible sleep — presumably so the
 * current holder of the metapage lock can make progress on the folio —
 * and retaken before retrying.
 */
static inline void __lock_metapage(struct metapage *mp)
{
	DECLARE_WAITQUEUE(wait, current);
	INCREMENT(mpStat.lockwait);
	add_wait_queue_exclusive(&mp->wait, &wait);
	do {
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (metapage_locked(mp)) {
			folio_unlock(mp->folio);
			io_schedule();
			folio_lock(mp->folio);
		}
	} while (trylock_metapage(mp));
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&mp->wait, &wait);
}
59
/*
 * Acquire the metapage lock, sleeping if it is already held.
 * Must have mp->folio locked
 */
static inline void lock_metapage(struct metapage *mp)
{
	/* trylock_metapage() returns nonzero if META_locked was already set */
	if (trylock_metapage(mp))
		__lock_metapage(mp);
}
68
/* Minimum number of metapage structures kept reserved in the mempool. */
#define METAPOOL_MIN_PAGES 32
static struct kmem_cache *metapage_cache;
static mempool_t *metapage_mempool;

/* Number of metapages (PSIZE each) that fit in one page. */
#define MPS_PER_PAGE (PAGE_SIZE >> L2PSIZE)
74
75 #if MPS_PER_PAGE > 1
76
/*
 * When several metapages share one page, folio->private points at a
 * meta_anchor that tracks them plus the number of in-flight bios.
 */
struct meta_anchor {
	int mp_count;			/* # of slots in mp[] currently used */
	atomic_t io_count;		/* in-flight bios against this folio */
	blk_status_t status;		/* first I/O error seen, else BLK_STS_OK */
	struct metapage *mp[MPS_PER_PAGE];
};
83
/* Return the metapage covering byte @offset of @folio, or NULL if none. */
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	struct meta_anchor *anchor = folio->private;

	if (!anchor)
		return NULL;
	return anchor->mp[offset >> L2PSIZE];
}
92
/*
 * Attach @mp to @folio's meta_anchor, allocating (and kmapping the page
 * for) the anchor on first use.  @mp may be NULL to set up the anchor
 * only.  Returns 0 or -ENOMEM.
 */
static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a;
	int index;
	int l2mp_blocks;	/* log2 blocks per metapage */

	a = folio->private;
	if (!a) {
		a = kzalloc_obj(struct meta_anchor, GFP_NOFS);
		if (!a)
			return -ENOMEM;
		folio_attach_private(folio, a);
		/* keep the page mapped while any metapage uses it */
		kmap(&folio->page);
	}

	if (mp) {
		l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
		index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);
		a->mp_count++;
		a->mp[index] = mp;
	}

	return 0;
}
117
/*
 * Detach @mp from @folio's anchor; free the anchor (and undo the kmap
 * taken in insert_metapage) when the last metapage is removed.
 */
static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	struct meta_anchor *a = folio->private;
	int l2mp_blocks = L2PSIZE - folio->mapping->host->i_blkbits;
	int index;

	index = (mp->index >> l2mp_blocks) & (MPS_PER_PAGE - 1);

	BUG_ON(a->mp[index] != mp);

	a->mp[index] = NULL;
	if (--a->mp_count == 0) {
		kfree(a);
		folio_detach_private(folio);
		kunmap(&folio->page);
	}
}
135
/* Account one more in-flight bio against @folio (paired with dec_io). */
static inline void inc_io(struct folio *folio)
{
	struct meta_anchor *anchor = folio->private;

	atomic_inc(&anchor->io_count);
}
142
/*
 * Complete one bio against @folio.  The first non-OK @status is latched
 * in the anchor; @handler runs once, when the last bio finishes, with the
 * latched status.
 */
static inline void dec_io(struct folio *folio, blk_status_t status,
			  void (*handler)(struct folio *, blk_status_t))
{
	struct meta_anchor *anchor = folio->private;

	if (anchor->status == BLK_STS_OK)
		anchor->status = status;

	if (atomic_dec_and_test(&anchor->io_count))
		handler(folio, anchor->status);
}
154
155 #ifdef CONFIG_MIGRATION
/*
 * Migrate a folio holding multiple metapages from @src to @dst.
 *
 * Bails with -EAGAIN if any metapage is locked.  After the page-cache
 * migration succeeds, every metapage is re-anchored on @dst (rolling back
 * on insertion failure), then its data pointer and folio back-pointer are
 * rewritten and it is removed from @src.
 */
static int __metapage_migrate_folio(struct address_space *mapping,
				    struct folio *dst, struct folio *src,
				    enum migrate_mode mode)
{
	struct meta_anchor *src_anchor = src->private;
	struct metapage *mps[MPS_PER_PAGE] = {0};
	struct metapage *mp;
	int i, rc;

	for (i = 0; i < MPS_PER_PAGE; i++) {
		mp = src_anchor->mp[i];
		if (mp && metapage_locked(mp))
			return -EAGAIN;
	}

	rc = filemap_migrate_folio(mapping, dst, src, mode);
	if (rc)
		return rc;

	for (i = 0; i < MPS_PER_PAGE; i++) {
		mp = src_anchor->mp[i];
		if (!mp)
			continue;
		if (unlikely(insert_metapage(dst, mp))) {
			/* If error, roll-back previously inserted pages */
			for (int j = 0 ; j < i; j++) {
				if (mps[j])
					remove_metapage(dst, mps[j]);
			}
			return -EAGAIN;
		}
		mps[i] = mp;
	}

	/* Update the metapage and remove it from src */
	for (i = 0; i < MPS_PER_PAGE; i++) {
		mp = mps[i];
		if (mp) {
			int page_offset = mp->data - folio_address(src);

			mp->data = folio_address(dst) + page_offset;
			mp->folio = dst;
			remove_metapage(src, mp);
		}
	}

	return 0;
}
204 #endif /* CONFIG_MIGRATION */
205
206 #else
207
/* One metapage per page: folio->private is the metapage itself. */
static inline struct metapage *folio_to_mp(struct folio *folio, int offset)
{
	return folio->private;
}
212
/* Attach @mp as @folio's private data and kmap the page.  Never fails. */
static inline int insert_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp) {
		folio_attach_private(folio, mp);
		kmap(&folio->page);
	}
	return 0;
}
221
/* Detach the metapage and undo the kmap taken in insert_metapage(). */
static inline void remove_metapage(struct folio *folio, struct metapage *mp)
{
	folio_detach_private(folio);
	kunmap(&folio->page);
}

/* Single-metapage case: no io_count to maintain; complete immediately. */
#define inc_io(folio) do {} while(0)
#define dec_io(folio, status, handler) handler(folio, status)
230
231 #ifdef CONFIG_MIGRATION
/*
 * Migrate a folio holding a single metapage from @src to @dst: migrate the
 * page-cache entry, re-anchor the metapage on @dst, and rewrite its data
 * pointer and folio back-pointer.  -EAGAIN if the metapage is locked.
 */
static int __metapage_migrate_folio(struct address_space *mapping,
				    struct folio *dst, struct folio *src,
				    enum migrate_mode mode)
{
	struct metapage *mp;
	int page_offset;
	int rc;

	mp = folio_to_mp(src, 0);
	if (metapage_locked(mp))
		return -EAGAIN;

	rc = filemap_migrate_folio(mapping, dst, src, mode);
	if (rc)
		return rc;

	if (unlikely(insert_metapage(dst, mp)))
		return -EAGAIN;

	page_offset = mp->data - folio_address(src);
	mp->data = folio_address(dst) + page_offset;
	mp->folio = dst;
	remove_metapage(src, mp);

	return 0;
}
258 #endif /* CONFIG_MIGRATION */
259
260 #endif
261
/*
 * Allocate a metapage from the mempool and reset the fields the slab may
 * carry over from a previous user.  Returns NULL on allocation failure.
 */
static inline struct metapage *alloc_metapage(gfp_t gfp_mask)
{
	struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask);

	if (mp) {
		mp->lid = 0;
		mp->lsn = 0;
		mp->data = NULL;
		mp->clsn = 0;
		mp->log = NULL;
		init_waitqueue_head(&mp->wait);
		INIT_LIST_HEAD(&mp->synclist);
	}
	return mp;
}
277
/* Return a metapage to the mempool. */
static inline void free_metapage(struct metapage *mp)
{
	mempool_free(mp, metapage_mempool);
}
282
/*
 * Module init: create the metapage slab cache and a mempool guaranteeing
 * METAPOOL_MIN_PAGES reserved objects.  Returns 0 or -ENOMEM.
 */
int __init metapage_init(void)
{
	/*
	 * Allocate the metapage structures
	 */
	metapage_cache = kmem_cache_create("jfs_mp", sizeof(struct metapage),
					   0, 0, NULL);
	if (metapage_cache == NULL)
		return -ENOMEM;

	metapage_mempool = mempool_create_slab_pool(METAPOOL_MIN_PAGES,
						    metapage_cache);

	if (metapage_mempool == NULL) {
		kmem_cache_destroy(metapage_cache);
		return -ENOMEM;
	}

	return 0;
}
303
/* Module exit: tear down the mempool before the slab cache backing it. */
void metapage_exit(void)
{
	mempool_destroy(metapage_mempool);
	kmem_cache_destroy(metapage_cache);
}
309
/*
 * Free @mp unless it is still referenced, pinned by nohomeok, dirty, or
 * under I/O.  Caller holds the folio lock.
 */
static inline void drop_metapage(struct folio *folio, struct metapage *mp)
{
	if (mp->count || mp->nohomeok || test_bit(META_dirty, &mp->flag) ||
	    test_bit(META_io, &mp->flag))
		return;
	remove_metapage(folio, mp);
	INCREMENT(mpStat.pagefree);
	free_metapage(mp);
}
319
320 /*
321 * Metapage address space operations
322 */
323
/*
 * Map logical block @lblock of @inode to a physical block number.
 *
 * *len is clamped to the file size on entry and updated by xtLookup() to
 * the length of the contiguous extent found.  Returns the physical block,
 * or 0 on lookup failure / past EOF.  Inode 0 (no mapping) maps 1:1.
 */
static sector_t metapage_get_blocks(struct inode *inode, sector_t lblock,
				    int *len)
{
	int rc = 0;
	int xflag;
	s64 xaddr;
	sector_t file_blocks = (inode->i_size + inode->i_sb->s_blocksize - 1) >>
			       inode->i_blkbits;

	if (lblock >= file_blocks)
		return 0;
	if (lblock + *len > file_blocks)
		*len = file_blocks - lblock;

	if (inode->i_ino) {
		rc = xtLookup(inode, (s64)lblock, *len, &xflag, &xaddr, len, 0);
		if ((rc == 0) && *len)
			lblock = (sector_t)xaddr;
		else
			lblock = 0;
	} /* else no mapping */

	return lblock;
}
348
/*
 * Called (via dec_io) when the last read bio for @folio completes:
 * mark the folio uptodate on success and unlock it.
 */
static void last_read_complete(struct folio *folio, blk_status_t status)
{
	if (status)
		printk(KERN_ERR "Read error %d at %#llx\n", status,
		       folio_pos(folio));

	folio_end_read(folio, status == 0);
}
357
/* bio completion for metapage reads; folio was stashed in bi_private. */
static void metapage_read_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	dec_io(folio, bio->bi_status, last_read_complete);
	bio_put(bio);
}
365
/*
 * Drop @mp from its journal's logsync list and clear its log state.
 */
static void remove_from_logsync(struct metapage *mp)
{
	struct jfs_log *log = mp->log;
	unsigned long flags;
	/*
	 * This can race. Recheck that log hasn't been set to null, and after
	 * acquiring logsync lock, recheck lsn
	 */
	if (!log)
		return;

	LOGSYNC_LOCK(log, flags);
	if (mp->lsn) {
		mp->log = NULL;
		mp->lsn = 0;
		mp->clsn = 0;
		log->count--;
		list_del_init(&mp->synclist);
	}
	LOGSYNC_UNLOCK(log, flags);
}
387
/*
 * Called (via dec_io) when the last write bio for @folio completes:
 * clear META_io on each written metapage, detach it from the logsync
 * list, and end writeback on the folio.
 */
static void last_write_complete(struct folio *folio, blk_status_t status)
{
	struct metapage *mp;
	unsigned int offset;

	if (status) {
		int err = blk_status_to_errno(status);
		printk(KERN_ERR "metapage_write_end_io: I/O error\n");
		mapping_set_error(folio->mapping, err);
	}

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);
		if (mp && test_bit(META_io, &mp->flag)) {
			if (mp->lsn)
				remove_from_logsync(mp);
			clear_bit(META_io, &mp->flag);
		}
		/*
		 * I'd like to call drop_metapage here, but I don't think it's
		 * safe unless I have the page locked
		 */
	}
	folio_end_writeback(folio);
}
413
/* bio completion for metapage writes; folio was stashed in bi_private. */
static void metapage_write_end_io(struct bio *bio)
{
	struct folio *folio = bio->bi_private;

	BUG_ON(!folio->private);

	dec_io(folio, bio->bi_status, last_write_complete);
	bio_put(bio);
}
423
/*
 * Write the dirty metapages of a locked @folio to disk.
 *
 * Walks the folio in PSIZE steps, clustering metapages that are contiguous
 * both in memory and on disk into a single bio; a discontiguity submits
 * the pending bio and starts a new one.  Metapages pinned by nohomeok are
 * left dirty and the folio is redirtied (kicking the journal if idle).
 * inc_io() is called once per cluster before submission so the io_count
 * cannot hit zero mid-loop.  Unlocks the folio; writeback ends from the
 * bio completion path (or here if nothing was submitted).
 *
 * Returns 0, or -EIO if block lookup failed for any metapage.
 */
static int metapage_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct bio *bio = NULL;
	int block_offset;	/* block offset of mp within page */
	struct inode *inode = folio->mapping->host;
	int blocks_per_mp = JFS_SBI(inode->i_sb)->nbperpage;
	int len;
	int xlen;
	struct metapage *mp;
	int redirty = 0;
	sector_t lblock;
	int nr_underway = 0;
	sector_t pblock;
	sector_t next_block = 0;
	sector_t page_start;
	unsigned long bio_bytes = 0;
	unsigned long bio_offset = 0;
	int offset;
	int bad_blocks = 0;

	page_start = folio_pos(folio) >> inode->i_blkbits;
	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio_test_writeback(folio));
	folio_start_writeback(folio);

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp || !test_bit(META_dirty, &mp->flag))
			continue;

		if (mp->nohomeok && !test_bit(META_forcewrite, &mp->flag)) {
			redirty = 1;
			/*
			 * Make sure this page isn't blocked indefinitely.
			 * If the journal isn't undergoing I/O, push it
			 */
			if (mp->log && !(mp->log->cflag & logGC_PAGEOUT))
				jfs_flush_journal(mp->log, 0);
			continue;
		}

		clear_bit(META_dirty, &mp->flag);
		set_bit(META_io, &mp->flag);
		block_offset = offset >> inode->i_blkbits;
		lblock = page_start + block_offset;
		if (bio) {
			if (xlen && lblock == next_block) {
				/* Contiguous, in memory & on disk */
				len = min(xlen, blocks_per_mp);
				xlen -= len;
				bio_bytes += len << inode->i_blkbits;
				continue;
			}
			/* Not contiguous */
			bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
			/*
			 * Increment counter before submitting i/o to keep
			 * count from hitting zero before we're through
			 */
			inc_io(folio);
			if (!bio->bi_iter.bi_size)
				goto dump_bio;
			submit_bio(bio);
			nr_underway++;
			bio = NULL;
		} else
			inc_io(folio);
		xlen = (folio_size(folio) - offset) >> inode->i_blkbits;
		pblock = metapage_get_blocks(inode, lblock, &xlen);
		if (!pblock) {
			printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
			/*
			 * We already called inc_io(), but can't cancel it
			 * with dec_io() until we're done with the page
			 */
			bad_blocks++;
			continue;
		}
		len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);

		bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_WRITE, GFP_NOFS);
		bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
		bio->bi_end_io = metapage_write_end_io;
		bio->bi_private = folio;

		/* Don't call bio_add_page yet, we may add to this vec */
		bio_offset = offset;
		bio_bytes = len << inode->i_blkbits;

		xlen -= len;
		next_block = lblock + len;
	}
	if (bio) {
		bio_add_folio_nofail(bio, folio, bio_bytes, bio_offset);
		if (!bio->bi_iter.bi_size)
			goto dump_bio;

		submit_bio(bio);
		nr_underway++;
	}
	if (redirty)
		folio_redirty_for_writepage(wbc, folio);

	folio_unlock(folio);

	if (bad_blocks)
		goto err_out;

	if (nr_underway == 0)
		folio_end_writeback(folio);

	return 0;
dump_bio:
	print_hex_dump(KERN_ERR, "JFS: dump of bio: ", DUMP_PREFIX_ADDRESS, 16,
		       4, bio, sizeof(*bio), 0);
	bio_put(bio);
	folio_unlock(folio);
	dec_io(folio, BLK_STS_OK, last_write_complete);
err_out:
	/* balance the inc_io() taken for each failed block lookup */
	while (bad_blocks--)
		dec_io(folio, BLK_STS_OK, last_write_complete);
	return -EIO;
}
549
metapage_writepages(struct address_space * mapping,struct writeback_control * wbc)550 static int metapage_writepages(struct address_space *mapping,
551 struct writeback_control *wbc)
552 {
553 struct blk_plug plug;
554 struct folio *folio = NULL;
555 int err;
556
557 blk_start_plug(&plug);
558 while ((folio = writeback_iter(mapping, wbc, folio, &err)))
559 err = metapage_write_folio(folio, wbc);
560 blk_finish_plug(&plug);
561
562 return err;
563 }
564
/*
 * ->read_folio: read the blocks backing @folio.
 *
 * Each mapped extent gets its own bio (a new extent submits the previous
 * bio first); unmapped blocks are simply skipped.  An anchor is attached
 * on first use so inc_io/dec_io can track completion.  If nothing was
 * mapped at all, the folio is unlocked here; otherwise the read
 * completion path finishes it.  Always returns 0.
 */
static int metapage_read_folio(struct file *fp, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct bio *bio = NULL;
	int block_offset;
	int blocks_per_page = i_blocks_per_folio(inode, folio);
	sector_t page_start;	/* address of page in fs blocks */
	sector_t pblock;
	int xlen;
	unsigned int len;
	int offset;

	BUG_ON(!folio_test_locked(folio));
	page_start = folio_pos(folio) >> inode->i_blkbits;

	block_offset = 0;
	while (block_offset < blocks_per_page) {
		xlen = blocks_per_page - block_offset;
		pblock = metapage_get_blocks(inode, page_start + block_offset,
					     &xlen);
		if (pblock) {
			if (!folio->private)
				insert_metapage(folio, NULL);
			inc_io(folio);
			if (bio)
				submit_bio(bio);

			bio = bio_alloc(inode->i_sb->s_bdev, 1, REQ_OP_READ,
					GFP_NOFS);
			bio->bi_iter.bi_sector =
				pblock << (inode->i_blkbits - 9);
			bio->bi_end_io = metapage_read_end_io;
			bio->bi_private = folio;
			len = xlen << inode->i_blkbits;
			offset = block_offset << inode->i_blkbits;
			bio_add_folio_nofail(bio, folio, len, offset);
			block_offset += xlen;
		} else
			block_offset++;
	}
	if (bio)
		submit_bio(bio);
	else
		folio_unlock(folio);

	return 0;
}
612
/*
 * ->release_folio: free every metapage on @folio that is unreferenced,
 * not pinned, and clean.  Returns false (folio not releasable) if any
 * metapage had to be kept.
 */
static bool metapage_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct metapage *mp;
	bool ret = true;
	int offset;

	for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
		mp = folio_to_mp(folio, offset);

		if (!mp)
			continue;

		jfs_info("metapage_release_folio: mp = 0x%p", mp);
		if (mp->count || mp->nohomeok ||
		    test_bit(META_dirty, &mp->flag)) {
			jfs_info("count = %ld, nohomeok = %d", mp->count,
				 mp->nohomeok);
			ret = false;
			continue;
		}
		if (mp->lsn)
			remove_from_logsync(mp);
		remove_metapage(folio, mp);
		INCREMENT(mpStat.pagefree);
		free_metapage(mp);
	}
	return ret;
}
641
642 #ifdef CONFIG_MIGRATION
643 /*
644 * metapage_migrate_folio - Migration function for JFS metapages
645 */
/*
 * metapage_migrate_folio - Migration function for JFS metapages
 *
 * Folios without metapages take the plain page-cache path; otherwise we
 * verify no one else holds a reference before migrating the metapages.
 */
static int metapage_migrate_folio(struct address_space *mapping,
				  struct folio *dst, struct folio *src,
				  enum migrate_mode mode)
{
	int expected_count;

	if (!src->private)
		return filemap_migrate_folio(mapping, dst, src, mode);

	/* Check whether page does not have extra refs before we do more work */
	expected_count = folio_expected_ref_count(src) + 1;
	if (folio_ref_count(src) != expected_count)
		return -EAGAIN;
	return __metapage_migrate_folio(mapping, dst, src, mode);
}
661 #else
662 #define metapage_migrate_folio NULL
663 #endif /* CONFIG_MIGRATION */
664
/*
 * ->invalidate_folio: only whole-folio invalidation is supported; free the
 * metapages via metapage_release_folio() (return value ignored here).
 */
static void metapage_invalidate_folio(struct folio *folio, size_t offset,
				      size_t length)
{
	BUG_ON(offset || length < folio_size(folio));

	BUG_ON(folio_test_writeback(folio));

	metapage_release_folio(folio, 0);
}
674
/* Address-space operations for JFS metadata inodes. */
const struct address_space_operations jfs_metapage_aops = {
	.read_folio	= metapage_read_folio,
	.writepages	= metapage_writepages,
	.release_folio	= metapage_release_folio,
	.invalidate_folio = metapage_invalidate_folio,
	.dirty_folio	= filemap_dirty_folio,
	.migrate_folio	= metapage_migrate_folio,
};
683
/*
 * Look up (or create) the metapage for @lblock of @inode.
 *
 * @size:	 logical size of the metapage; must not cross a page boundary
 * @absolute:	 nonzero to address the block device's direct_inode mapping
 * @new:	 nonzero if the caller will overwrite the data (skips the
 *		 read when PSIZE == PAGE_SIZE, and zeroes mp->data)
 *
 * On success the metapage is returned locked (META_locked) with its
 * reference count bumped; NULL on any failure.  The folio lock is held
 * only within this function.
 */
struct metapage *__get_metapage(struct inode *inode, unsigned long lblock,
				unsigned int size, int absolute,
				unsigned long new)
{
	int l2BlocksPerPage;
	int l2bsize;
	struct address_space *mapping;
	struct metapage *mp = NULL;
	struct folio *folio;
	unsigned long page_index;
	unsigned long page_offset;

	jfs_info("__get_metapage: ino = %llu, lblock = 0x%lx, abs=%d",
		 inode->i_ino, lblock, absolute);

	l2bsize = inode->i_blkbits;
	l2BlocksPerPage = PAGE_SHIFT - l2bsize;
	page_index = lblock >> l2BlocksPerPage;
	page_offset = (lblock - (page_index << l2BlocksPerPage)) << l2bsize;
	if ((page_offset + size) > PAGE_SIZE) {
		jfs_err("MetaData crosses page boundary!!");
		jfs_err("lblock = %lx, size = %d", lblock, size);
		dump_stack();
		return NULL;
	}
	if (absolute)
		mapping = JFS_SBI(inode->i_sb)->direct_inode->i_mapping;
	else {
		/*
		 * If an nfs client tries to read an inode that is larger
		 * than any existing inodes, we may try to read past the
		 * end of the inode map
		 */
		if ((lblock << inode->i_blkbits) >= inode->i_size)
			return NULL;
		mapping = inode->i_mapping;
	}

	if (new && (PSIZE == PAGE_SIZE)) {
		/* whole page will be rewritten — no need to read it */
		folio = filemap_grab_folio(mapping, page_index);
		if (IS_ERR(folio)) {
			jfs_err("filemap_grab_folio failed!");
			return NULL;
		}
		folio_mark_uptodate(folio);
	} else {
		folio = read_mapping_folio(mapping, page_index, NULL);
		if (IS_ERR(folio)) {
			jfs_err("read_mapping_page failed!");
			return NULL;
		}
		folio_lock(folio);
	}

	mp = folio_to_mp(folio, page_offset);
	if (mp) {
		/* existing metapage: sanity-check size, then reuse it */
		if (mp->logical_size != size) {
			jfs_error(inode->i_sb,
				  "get_mp->logical_size != size\n");
			jfs_err("logical_size = %d, size = %d",
				mp->logical_size, size);
			dump_stack();
			goto unlock;
		}
		mp->count++;
		lock_metapage(mp);
		if (test_bit(META_discard, &mp->flag)) {
			if (!new) {
				jfs_error(inode->i_sb,
					  "using a discarded metapage\n");
				discard_metapage(mp);
				goto unlock;
			}
			clear_bit(META_discard, &mp->flag);
		}
	} else {
		/* no metapage yet: allocate and anchor a fresh one */
		INCREMENT(mpStat.pagealloc);
		mp = alloc_metapage(GFP_NOFS);
		if (!mp)
			goto unlock;
		mp->folio = folio;
		mp->sb = inode->i_sb;
		mp->flag = 0;
		mp->xflag = COMMIT_PAGE;
		mp->count = 1;
		mp->nohomeok = 0;
		mp->logical_size = size;
		mp->data = folio_address(folio) + page_offset;
		mp->index = lblock;
		if (unlikely(insert_metapage(folio, mp))) {
			free_metapage(mp);
			goto unlock;
		}
		lock_metapage(mp);
	}

	if (new) {
		jfs_info("zeroing mp = 0x%p", mp);
		memset(mp->data, 0, PSIZE);
	}

	folio_unlock(folio);
	jfs_info("__get_metapage: returning = 0x%p data = 0x%p", mp, mp->data);
	return mp;

unlock:
	folio_unlock(folio);
	return NULL;
}
793
/*
 * Take an additional reference on @mp (and its folio) and acquire the
 * metapage lock.  Pairs with release_metapage().
 */
void grab_metapage(struct metapage * mp)
{
	jfs_info("grab_metapage: mp = 0x%p", mp);
	folio_get(mp->folio);
	folio_lock(mp->folio);
	mp->count++;
	lock_metapage(mp);
	folio_unlock(mp->folio);
}
803
/*
 * Synchronously write one locked, dirty folio and wait for completion.
 * The folio is unlocked on return (metapage_write_folio() unlocks it; the
 * clean path unlocks explicitly).  Returns 0 or a writeback error.
 */
static int metapage_write_one(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;
	struct writeback_control wbc = {
		.sync_mode = WB_SYNC_ALL,
		.nr_to_write = folio_nr_pages(folio),
	};
	int ret = 0;

	BUG_ON(!folio_test_locked(folio));

	folio_wait_writeback(folio);

	if (folio_clear_dirty_for_io(folio)) {
		folio_get(folio);
		ret = metapage_write_folio(folio, &wbc);
		if (ret == 0)
			folio_wait_writeback(folio);
		folio_put(folio);
	} else {
		folio_unlock(folio);
	}

	if (!ret)
		ret = filemap_check_errors(mapping);
	return ret;
}
831
/*
 * Force @mp to disk immediately, bypassing the nohomeok hold (via
 * META_forcewrite) and any pending sync request.
 */
void force_metapage(struct metapage *mp)
{
	struct folio *folio = mp->folio;
	jfs_info("force_metapage: mp = 0x%p", mp);
	set_bit(META_forcewrite, &mp->flag);
	clear_bit(META_sync, &mp->flag);
	folio_get(folio);
	folio_lock(folio);
	folio_mark_dirty(folio);
	if (metapage_write_one(folio))
		jfs_error(mp->sb, "metapage_write_one() failed\n");
	clear_bit(META_forcewrite, &mp->flag);
	folio_put(folio);
}
846
/* Pin @mp against release by taking its folio lock (see put_metapage). */
void hold_metapage(struct metapage *mp)
{
	folio_lock(mp->folio);
}
851
/*
 * Counterpart of hold_metapage(): drop the folio lock and, if no one else
 * holds the metapage, take a temporary reference and release it through
 * release_metapage() so it can be freed.
 */
void put_metapage(struct metapage *mp)
{
	if (mp->count || mp->nohomeok) {
		/* Someone else will release this */
		folio_unlock(mp->folio);
		return;
	}
	folio_get(mp->folio);
	mp->count++;
	lock_metapage(mp);
	folio_unlock(mp->folio);
	release_metapage(mp);
}
865
/*
 * Drop one reference on @mp.  On the last release: write it out now if it
 * is dirty and flagged META_sync, detach it from logsync if clean, and
 * free it (via drop_metapage) unless still pinned, dirty, or under I/O.
 */
void release_metapage(struct metapage * mp)
{
	struct folio *folio = mp->folio;
	jfs_info("release_metapage: mp = 0x%p, flag = 0x%lx", mp, mp->flag);

	folio_lock(folio);
	unlock_metapage(mp);

	assert(mp->count);
	if (--mp->count || mp->nohomeok) {
		folio_unlock(folio);
		folio_put(folio);
		return;
	}

	if (test_bit(META_dirty, &mp->flag)) {
		folio_mark_dirty(folio);
		if (test_bit(META_sync, &mp->flag)) {
			clear_bit(META_sync, &mp->flag);
			if (metapage_write_one(folio))
				jfs_error(mp->sb, "metapage_write_one() failed\n");
			/* metapage_write_one() unlocked the folio; retake it */
			folio_lock(folio);
		}
	} else if (mp->lsn)	/* discard_metapage doesn't remove it */
		remove_from_logsync(mp);

	/* Try to keep metapages from using up too much memory */
	drop_metapage(folio, mp);

	folio_unlock(folio);
	folio_put(folio);
}
898
/*
 * Invalidate the metapages covering disk blocks [@addr, @addr + @len) of
 * the block device mapping: mark them META_discard (so they are never
 * written back), clear dirty, and detach them from logsync.
 */
void __invalidate_metapages(struct inode *ip, s64 addr, int len)
{
	sector_t lblock;
	int l2BlocksPerPage = PAGE_SHIFT - ip->i_blkbits;
	int BlocksPerPage = 1 << l2BlocksPerPage;
	/* All callers are interested in block device's mapping */
	struct address_space *mapping =
		JFS_SBI(ip->i_sb)->direct_inode->i_mapping;
	struct metapage *mp;
	unsigned int offset;

	/*
	 * Mark metapages to discard.  They will eventually be
	 * released, but should not be written.
	 */
	for (lblock = addr & ~(BlocksPerPage - 1); lblock < addr + len;
	     lblock += BlocksPerPage) {
		struct folio *folio = filemap_lock_folio(mapping,
				lblock >> l2BlocksPerPage);
		if (IS_ERR(folio))
			continue;
		for (offset = 0; offset < PAGE_SIZE; offset += PSIZE) {
			mp = folio_to_mp(folio, offset);
			if (!mp)
				continue;
			if (mp->index < addr)
				continue;
			if (mp->index >= addr + len)
				break;

			clear_bit(META_dirty, &mp->flag);
			set_bit(META_discard, &mp->flag);
			if (mp->lsn)
				remove_from_logsync(mp);
		}
		folio_unlock(folio);
		folio_put(folio);
	}
}
938
939 #ifdef CONFIG_JFS_STATISTICS
jfs_mpstat_proc_show(struct seq_file * m,void * v)940 int jfs_mpstat_proc_show(struct seq_file *m, void *v)
941 {
942 seq_printf(m,
943 "JFS Metapage statistics\n"
944 "=======================\n"
945 "page allocations = %d\n"
946 "page frees = %d\n"
947 "lock waits = %d\n",
948 mpStat.pagealloc,
949 mpStat.pagefree,
950 mpStat.lockwait);
951 return 0;
952 }
953 #endif
954