Lines Matching full:page (mm/truncate.c)

30  * Regular page slots are stabilized by the page lock even without the tree
83 struct page *page = pvec->pages[i]; in truncate_exceptional_pvec_entries() local
86 if (!xa_is_value(page)) { in truncate_exceptional_pvec_entries()
87 pvec->pages[j++] = page; in truncate_exceptional_pvec_entries()
99 __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
138 * do_invalidatepage - invalidate part or all of a page
139 * @page: the page which is affected
143 * do_invalidatepage() is called when all or part of the page has become
152 void do_invalidatepage(struct page *page, unsigned int offset, in do_invalidatepage() argument
155 void (*invalidatepage)(struct page *, unsigned int, unsigned int); in do_invalidatepage()
157 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
163 (*invalidatepage)(page, offset, length); in do_invalidatepage()
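
do_invalidatepage() just looks up the mapping's a_ops->invalidatepage handler and calls it for the byte range [offset, offset + length); with CONFIG_BLOCK it falls back to block_invalidatepage() when no handler is set. A minimal sketch of a handler it might dispatch to, with illustrative "myfs" names not taken from this file:

#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical handler: per-page private state lives in buffer heads,
 * so invalidation simply forwards to block_invalidatepage(). */
static void myfs_invalidatepage(struct page *page, unsigned int offset,
                                unsigned int length)
{
        block_invalidatepage(page, offset, length);
}

static const struct address_space_operations myfs_aops = {
        .invalidatepage = myfs_invalidatepage,
        /* .readpage, .writepage, ... elided */
};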
167 * If truncate cannot remove the fs-private metadata from the page, the page
171 * We need to bail out if page->mapping is no longer equal to the original
172 * mapping. This happens a) when the VM reclaimed the page while we waited on
174 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
177 truncate_cleanup_page(struct address_space *mapping, struct page *page) in truncate_cleanup_page() argument
179 if (page_mapped(page)) { in truncate_cleanup_page()
180 unsigned int nr = thp_nr_pages(page); in truncate_cleanup_page()
181 unmap_mapping_pages(mapping, page->index, nr, false); in truncate_cleanup_page()
184 if (page_has_private(page)) in truncate_cleanup_page()
185 do_invalidatepage(page, 0, thp_size(page)); in truncate_cleanup_page()
188 * Some filesystems seem to re-dirty the page even after in truncate_cleanup_page()
192 cancel_dirty_page(page); in truncate_cleanup_page()
193 ClearPageMappedToDisk(page); in truncate_cleanup_page()
202 * Returns non-zero if the page was successfully invalidated.
205 invalidate_complete_page(struct address_space *mapping, struct page *page) in invalidate_complete_page() argument
209 if (page->mapping != mapping) in invalidate_complete_page()
212 if (page_has_private(page) && !try_to_release_page(page, 0)) in invalidate_complete_page()
215 ret = remove_mapping(mapping, page); in invalidate_complete_page()
220 int truncate_inode_page(struct address_space *mapping, struct page *page) in truncate_inode_page() argument
222 VM_BUG_ON_PAGE(PageTail(page), page); in truncate_inode_page()
224 if (page->mapping != mapping) in truncate_inode_page()
227 truncate_cleanup_page(mapping, page); in truncate_inode_page()
228 delete_from_page_cache(page); in truncate_inode_page()
235 int generic_error_remove_page(struct address_space *mapping, struct page *page) in generic_error_remove_page() argument
245 return truncate_inode_page(mapping, page); in generic_error_remove_page()
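
generic_error_remove_page() is the stock ->error_remove_page implementation: the memory-failure (hwpoison) code calls it to truncate a poisoned page out of the page cache. Filesystems that need nothing extra typically wire it up directly; a sketch with an illustrative "myfs" operations table:

#include <linux/fs.h>
#include <linux/mm.h>

static const struct address_space_operations myfs_error_aops = {
        .error_remove_page = generic_error_remove_page,
        /* remaining methods elided */
};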
250 * Safely invalidate one page from its pagecache mapping.
251 * It only drops clean, unused pages. The page must be locked.
253 * Returns 1 if the page is successfully invalidated, otherwise 0.
255 int invalidate_inode_page(struct page *page) in invalidate_inode_page() argument
257 struct address_space *mapping = page_mapping(page); in invalidate_inode_page()
260 if (PageDirty(page) || PageWriteback(page)) in invalidate_inode_page()
262 if (page_mapped(page)) in invalidate_inode_page()
264 return invalidate_complete_page(mapping, page); in invalidate_inode_page()
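
invalidate_inode_page() is the per-page building block used by __invalidate_mapping_pages() below. A minimal sketch of a caller, assuming the caller already holds a reference on the page (e.g. from find_get_page()); the helper name is illustrative:

#include <linux/mm.h>
#include <linux/pagemap.h>

static bool try_drop_clean_page(struct page *page)
{
        int ret = 0;

        if (trylock_page(page)) {
                /* Returns 0 for dirty, under-writeback or mapped pages. */
                ret = invalidate_inode_page(page);
                unlock_page(page);
        }
        return ret != 0;
}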
273 * Truncate the page cache, removing the pages that are between
275 * if lstart or lend + 1 is not page aligned).
278 * block on page locks and it will not block on writeback. The second pass
283 * We pass down the cache-hot hint to the page freeing code. Even if the
289 * page aligned properly.
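
truncate_inode_pages_range() takes byte offsets, with lend inclusive, and zeroes the partial first/last pages itself. A minimal sketch of truncating from a byte offset to end of file (the kernel's truncate_inode_pages() is essentially this wrapper); the helper name is illustrative:

#include <linux/fs.h>
#include <linux/mm.h>

static void drop_cache_from(struct address_space *mapping, loff_t lstart)
{
        /* lend == -1 means "through end of file". */
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}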
341 struct page *page = pvec.pages[i]; in truncate_inode_pages_range() local
343 /* We rely upon deletion not changing page->index */ in truncate_inode_pages_range()
348 if (xa_is_value(page)) in truncate_inode_pages_range()
351 if (!trylock_page(page)) in truncate_inode_pages_range()
353 WARN_ON(page_to_index(page) != index); in truncate_inode_pages_range()
354 if (PageWriteback(page)) { in truncate_inode_pages_range()
355 unlock_page(page); in truncate_inode_pages_range()
358 if (page->mapping != mapping) { in truncate_inode_pages_range()
359 unlock_page(page); in truncate_inode_pages_range()
362 pagevec_add(&locked_pvec, page); in truncate_inode_pages_range()
375 struct page *page = find_lock_page(mapping, start - 1); in truncate_inode_pages_range() local
376 if (page) { in truncate_inode_pages_range()
379 /* Truncation within a single page */ in truncate_inode_pages_range()
383 wait_on_page_writeback(page); in truncate_inode_pages_range()
384 zero_user_segment(page, partial_start, top); in truncate_inode_pages_range()
385 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
386 if (page_has_private(page)) in truncate_inode_pages_range()
387 do_invalidatepage(page, partial_start, in truncate_inode_pages_range()
389 unlock_page(page); in truncate_inode_pages_range()
390 put_page(page); in truncate_inode_pages_range()
394 struct page *page = find_lock_page(mapping, end); in truncate_inode_pages_range() local
395 if (page) { in truncate_inode_pages_range()
396 wait_on_page_writeback(page); in truncate_inode_pages_range()
397 zero_user_segment(page, 0, partial_end); in truncate_inode_pages_range()
398 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
399 if (page_has_private(page)) in truncate_inode_pages_range()
400 do_invalidatepage(page, 0, in truncate_inode_pages_range()
402 unlock_page(page); in truncate_inode_pages_range()
403 put_page(page); in truncate_inode_pages_range()
407 * If the truncation happened within a single page no pages in truncate_inode_pages_range()
433 struct page *page = pvec.pages[i]; in truncate_inode_pages_range() local
435 /* We rely upon deletion not changing page->index */ in truncate_inode_pages_range()
443 if (xa_is_value(page)) in truncate_inode_pages_range()
446 lock_page(page); in truncate_inode_pages_range()
447 WARN_ON(page_to_index(page) != index); in truncate_inode_pages_range()
448 wait_on_page_writeback(page); in truncate_inode_pages_range()
449 truncate_inode_page(mapping, page); in truncate_inode_pages_range()
450 unlock_page(page); in truncate_inode_pages_range()
469 * Note: When this function returns, there can be a page in the process of
495 * Page reclaim can not participate in regular inode lifetime in truncate_inode_pages_final()
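
truncate_inode_pages_final() is the last emptying of a mapping, called from an inode's eviction path once no new pages can be added. A minimal sketch of the canonical pattern, with illustrative "myfs" names:

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_evict_inode(struct inode *inode)
{
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        /* on-disk cleanup (freeing the inode) would follow here */
}

static const struct super_operations myfs_sops = {
        .evict_inode = myfs_evict_inode,
};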
546 struct page *page = pvec.pages[i]; in __invalidate_mapping_pages() local
548 /* We rely upon deletion not changing page->index */ in __invalidate_mapping_pages()
553 if (xa_is_value(page)) { in __invalidate_mapping_pages()
555 page); in __invalidate_mapping_pages()
559 if (!trylock_page(page)) in __invalidate_mapping_pages()
562 WARN_ON(page_to_index(page) != index); in __invalidate_mapping_pages()
565 if (PageTransTail(page)) { in __invalidate_mapping_pages()
566 unlock_page(page); in __invalidate_mapping_pages()
568 } else if (PageTransHuge(page)) { in __invalidate_mapping_pages()
573 * invalidate the page as the part outside of in __invalidate_mapping_pages()
577 unlock_page(page); in __invalidate_mapping_pages()
582 get_page(page); in __invalidate_mapping_pages()
586 * the huge page. in __invalidate_mapping_pages()
592 ret = invalidate_inode_page(page); in __invalidate_mapping_pages()
593 unlock_page(page); in __invalidate_mapping_pages()
595 * Invalidation is a hint that the page is no longer in __invalidate_mapping_pages()
599 deactivate_file_page(page); in __invalidate_mapping_pages()
605 if (PageTransHuge(page)) in __invalidate_mapping_pages()
606 put_page(page); in __invalidate_mapping_pages()
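
__invalidate_mapping_pages() backs the exported invalidate_mapping_pages(), a best-effort call: dirty, locked, mapped or under-writeback pages are skipped, and pages that cannot be freed are merely deactivated. A minimal sketch of dropping the clean cache of a whole file, with an illustrative helper name:

#include <linux/fs.h>

static unsigned long drop_clean_cache(struct inode *inode)
{
        /* start/end are page indexes; -1 means through the last page.
         * Returns the number of pages actually invalidated. */
        return invalidate_mapping_pages(inode->i_mapping, 0, (pgoff_t)-1);
}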
651 * This is like invalidate_complete_page(), except it ignores the page's
658 invalidate_complete_page2(struct address_space *mapping, struct page *page) in invalidate_complete_page2() argument
662 if (page->mapping != mapping) in invalidate_complete_page2()
665 if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL)) in invalidate_complete_page2()
669 if (PageDirty(page)) in invalidate_complete_page2()
672 BUG_ON(page_has_private(page)); in invalidate_complete_page2()
673 __delete_from_page_cache(page, NULL); in invalidate_complete_page2()
677 mapping->a_ops->freepage(page); in invalidate_complete_page2()
679 put_page(page); /* pagecache ref */ in invalidate_complete_page2()
686 static int do_launder_page(struct address_space *mapping, struct page *page) in do_launder_page() argument
688 if (!PageDirty(page)) in do_launder_page()
690 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) in do_launder_page()
692 return mapping->a_ops->launder_page(page); in do_launder_page()
698 * @start: the page offset 'from' which to invalidate
699 * @end: the page offset 'to' which to invalidate (inclusive)
726 struct page *page = pvec.pages[i]; in invalidate_inode_pages2_range() local
728 /* We rely upon deletion not changing page->index */ in invalidate_inode_pages2_range()
733 if (xa_is_value(page)) { in invalidate_inode_pages2_range()
735 index, page)) in invalidate_inode_pages2_range()
740 lock_page(page); in invalidate_inode_pages2_range()
741 WARN_ON(page_to_index(page) != index); in invalidate_inode_pages2_range()
742 if (page->mapping != mapping) { in invalidate_inode_pages2_range()
743 unlock_page(page); in invalidate_inode_pages2_range()
746 wait_on_page_writeback(page); in invalidate_inode_pages2_range()
747 if (page_mapped(page)) { in invalidate_inode_pages2_range()
757 * Just zap this page in invalidate_inode_pages2_range()
763 BUG_ON(page_mapped(page)); in invalidate_inode_pages2_range()
764 ret2 = do_launder_page(mapping, page); in invalidate_inode_pages2_range()
766 if (!invalidate_complete_page2(mapping, page)) in invalidate_inode_pages2_range()
771 unlock_page(page); in invalidate_inode_pages2_range()
779 * For DAX we invalidate page tables after invalidating page cache. We in invalidate_inode_pages2_range()
780 * could invalidate page tables while invalidating each entry however in invalidate_inode_pages2_range()
782 * work as we have no cheap way to find whether page cache entry didn't in invalidate_inode_pages2_range()
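
invalidate_inode_pages2_range() takes page indexes (end inclusive) and, unlike invalidate_mapping_pages(), waits for writeback, launders dirty pages via ->launder_page and returns -EBUSY if a page cannot be dropped. A sketch of a typical caller around a direct write, with illustrative "myfs" names:

#include <linux/fs.h>
#include <linux/pagemap.h>

static int myfs_invalidate_after_dio(struct inode *inode, loff_t pos,
                                     size_t count)
{
        pgoff_t first = pos >> PAGE_SHIFT;
        pgoff_t last = (pos + count - 1) >> PAGE_SHIFT; /* count > 0 assumed */

        return invalidate_inode_pages2_range(inode->i_mapping, first, last);
}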
821 * situations such as writepage being called for a page that has already
832 * single-page unmaps. However after this first call, and in truncate_pagecache()
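
truncate_pagecache() unmaps and then truncates the cache beyond the new size; the new i_size must already be visible before it is called, and block freeing should happen afterwards so writepage never sees deallocated blocks. A minimal sketch of a shrinking truncate, with illustrative "myfs" names and the inode lock assumed held:

#include <linux/fs.h>
#include <linux/mm.h>

static int myfs_shrink(struct inode *inode, loff_t newsize)
{
        i_size_write(inode, newsize);           /* new size first */
        truncate_pagecache(inode, newsize);     /* then drop the cache */
        /* only now release on-disk blocks beyond newsize */
        return 0;
}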
875 * write starting after current i_size. We mark the page straddling current
877 * the page. This way filesystem can be sure that page_mkwrite() is called on
878 * the page before user writes to the page via mmap after the i_size has been
881 * The function must be called after i_size is updated so that page fault
882 * coming after we unlock the page will already see the new i_size.
891 struct page *page; in pagecache_isize_extended() local
898 /* Page straddling @from will not have any hole block created? */ in pagecache_isize_extended()
904 page = find_lock_page(inode->i_mapping, index); in pagecache_isize_extended()
905 /* Page not cached? Nothing to do */ in pagecache_isize_extended()
906 if (!page) in pagecache_isize_extended()
912 if (page_mkclean(page)) in pagecache_isize_extended()
913 set_page_dirty(page); in pagecache_isize_extended()
914 unlock_page(page); in pagecache_isize_extended()
915 put_page(page); in pagecache_isize_extended()
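
pagecache_isize_extended() is the counterpart for growing i_size: it write-protects the page straddling the old size so the next mmap store triggers page_mkwrite() and the filesystem can allocate and zero the blocks under it. A minimal sketch, with illustrative "myfs" names and the inode lock assumed held:

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_grow(struct inode *inode, loff_t newsize)
{
        loff_t oldsize = i_size_read(inode);

        i_size_write(inode, newsize);
        pagecache_isize_extended(inode, oldsize, newsize);
}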
929 * situations such as writepage being called for a page that has already
941 * doing their own page rounding first. Note that unmap_mapping_range in truncate_pagecache_range()
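
truncate_pagecache_range() unmaps and removes the cache over a punched hole, given byte offsets with lend inclusive; as the comment at line 941 notes, existing callers usually do their own page rounding first. A sketch of a hypothetical fallocate(PUNCH_HOLE) path dropping the cached range before freeing blocks; "myfs" names are illustrative:

#include <linux/fs.h>
#include <linux/mm.h>

static void myfs_punch_hole_cache(struct inode *inode, loff_t offset,
                                  loff_t len)
{
        /* Real filesystems typically round offset/len to block or page
         * boundaries before getting here. */
        truncate_pagecache_range(inode, offset, offset + len - 1);
}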