1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Compatibility functions which bloat the callers too much to make inline.
4 * All of the callers of these functions should be converted to use folios
5 * eventually.
6 */
7
8 #include <linux/migrate.h>
9 #include <linux/pagemap.h>
10 #include <linux/rmap.h>
11 #include <linux/swap.h>
12 #include "internal.h"
13
/* Legacy wrapper: unlock the folio that contains @page. */
void unlock_page(struct page *page)
{
	folio_unlock(page_folio(page));
}
EXPORT_SYMBOL(unlock_page);
19
/* Legacy wrapper: end writeback on the folio that contains @page. */
void end_page_writeback(struct page *page)
{
	folio_end_writeback(page_folio(page));
}
EXPORT_SYMBOL(end_page_writeback);
25
/* Legacy wrapper: wait for writeback on the folio that contains @page. */
void wait_on_page_writeback(struct page *page)
{
	folio_wait_writeback(page_folio(page));
}
EXPORT_SYMBOL_GPL(wait_on_page_writeback);
31
/* Legacy wrapper: mark the folio containing @page as accessed. */
void mark_page_accessed(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_mark_accessed(folio);
}
EXPORT_SYMBOL(mark_page_accessed);
37
/* Legacy wrapper: start writeback on the folio containing @page. */
void set_page_writeback(struct page *page)
{
	struct folio *folio = page_folio(page);

	folio_start_writeback(folio);
}
EXPORT_SYMBOL(set_page_writeback);
43
set_page_dirty(struct page * page)44 bool set_page_dirty(struct page *page)
45 {
46 return folio_mark_dirty(page_folio(page));
47 }
48 EXPORT_SYMBOL(set_page_dirty);
49
/*
 * Legacy wrapper: lock-and-dirty the folio containing @page,
 * forwarding folio_mark_dirty_lock()'s return value.
 */
int set_page_dirty_lock(struct page *page)
{
	struct folio *folio = page_folio(page);

	return folio_mark_dirty_lock(folio);
}
EXPORT_SYMBOL(set_page_dirty_lock);
55
clear_page_dirty_for_io(struct page * page)56 bool clear_page_dirty_for_io(struct page *page)
57 {
58 return folio_clear_dirty_for_io(page_folio(page));
59 }
60 EXPORT_SYMBOL(clear_page_dirty_for_io);
61
redirty_page_for_writepage(struct writeback_control * wbc,struct page * page)62 bool redirty_page_for_writepage(struct writeback_control *wbc,
63 struct page *page)
64 {
65 return folio_redirty_for_writepage(wbc, page_folio(page));
66 }
67 EXPORT_SYMBOL(redirty_page_for_writepage);
68
/*
 * Legacy wrapper: insert the folio containing @page into @mapping at
 * @index (and onto the LRU), forwarding filemap_add_folio()'s result.
 */
int add_to_page_cache_lru(struct page *page, struct address_space *mapping,
		pgoff_t index, gfp_t gfp)
{
	struct folio *folio = page_folio(page);

	return filemap_add_folio(mapping, folio, index, gfp);
}
EXPORT_SYMBOL(add_to_page_cache_lru);
75
76 noinline
pagecache_get_page(struct address_space * mapping,pgoff_t index,fgf_t fgp_flags,gfp_t gfp)77 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index,
78 fgf_t fgp_flags, gfp_t gfp)
79 {
80 struct folio *folio;
81
82 folio = __filemap_get_folio(mapping, index, fgp_flags, gfp);
83 if (IS_ERR(folio))
84 return NULL;
85 return folio_file_page(folio, index);
86 }
87 EXPORT_SYMBOL(pagecache_get_page);
88