Lines matching full:mapping (mm/truncate.c, Linux kernel page cache truncation/invalidation)
34 static inline void __clear_shadow_entry(struct address_space *mapping, in __clear_shadow_entry() argument
37 XA_STATE(xas, &mapping->i_pages, index); in __clear_shadow_entry()
43 mapping->nrexceptional--; in __clear_shadow_entry()
46 static void clear_shadow_entry(struct address_space *mapping, pgoff_t index, in clear_shadow_entry() argument
49 xa_lock_irq(&mapping->i_pages); in clear_shadow_entry()
50 __clear_shadow_entry(mapping, index, entry); in clear_shadow_entry()
51 xa_unlock_irq(&mapping->i_pages); in clear_shadow_entry()
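Only the lines containing the identifier `mapping` are listed, so these two helpers appear fragmentary. A reconstruction of how they fit together in a ~v5.10 tree (a sketch from memory; the xas_set_update() hook and the stale-entry check may differ slightly between releases):

    static inline void __clear_shadow_entry(struct address_space *mapping,
                                            pgoff_t index, void *entry)
    {
            XA_STATE(xas, &mapping->i_pages, index);

            xas_set_update(&xas, workingset_update_node);
            if (xas_load(&xas) != entry)    /* raced: slot no longer holds entry */
                    return;
            xas_store(&xas, NULL);
            mapping->nrexceptional--;
    }

    /* Same operation, but taking the i_pages lock itself. */
    static void clear_shadow_entry(struct address_space *mapping, pgoff_t index,
                                   void *entry)
    {
            xa_lock_irq(&mapping->i_pages);
            __clear_shadow_entry(mapping, index, entry);
            xa_unlock_irq(&mapping->i_pages);
    }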
59 static void truncate_exceptional_pvec_entries(struct address_space *mapping, in truncate_exceptional_pvec_entries() argument
67 if (shmem_mapping(mapping)) in truncate_exceptional_pvec_entries()
77 dax = dax_mapping(mapping); in truncate_exceptional_pvec_entries()
80 xa_lock_irq(&mapping->i_pages); in truncate_exceptional_pvec_entries()
95 dax_delete_mapping_entry(mapping, index); in truncate_exceptional_pvec_entries()
99 __clear_shadow_entry(mapping, index, page); in truncate_exceptional_pvec_entries()
103 xa_unlock_irq(&mapping->i_pages); in truncate_exceptional_pvec_entries()
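The matched lines show the three entry kinds this function has to separate. A simplified sketch of the core loop (the index bookkeeping and the conditional i_pages locking are elided; `dax` is the cached dax_mapping() result from line 77):

    for (i = j; i < pagevec_count(pvec); i++) {
            struct page *page = pvec->pages[i];
            pgoff_t index = indices[i];

            if (!xa_is_value(page)) {       /* real page: keep it in the pagevec */
                    pvec->pages[j++] = page;
                    continue;
            }
            if (index >= end)               /* exceptional entry past the range */
                    continue;
            if (unlikely(dax)) {            /* DAX entries need their own teardown */
                    dax_delete_mapping_entry(mapping, index);
                    continue;
            }
            __clear_shadow_entry(mapping, index, page);     /* workingset shadow */
    }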
111 static int invalidate_exceptional_entry(struct address_space *mapping, in invalidate_exceptional_entry() argument
115 if (shmem_mapping(mapping) || dax_mapping(mapping)) in invalidate_exceptional_entry()
117 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry()
125 static int invalidate_exceptional_entry2(struct address_space *mapping, in invalidate_exceptional_entry2() argument
129 if (shmem_mapping(mapping)) in invalidate_exceptional_entry2()
131 if (dax_mapping(mapping)) in invalidate_exceptional_entry2()
132 return dax_invalidate_mapping_entry_sync(mapping, index); in invalidate_exceptional_entry2()
133 clear_shadow_entry(mapping, index, entry); in invalidate_exceptional_entry2()
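Reconstructed bodies of both variants (from memory, ~v5.10): the plain one leaves shmem and DAX entries alone, since this invalidation is best-effort, while the `2` variant used by invalidate_inode_pages2_range() must invalidate DAX entries synchronously and report failure:

    static int invalidate_exceptional_entry(struct address_space *mapping,
                                            pgoff_t index, void *entry)
    {
            /* Handled by shmem itself; for DAX we do nothing. */
            if (shmem_mapping(mapping) || dax_mapping(mapping))
                    return 1;
            clear_shadow_entry(mapping, index, entry);
            return 1;
    }

    static int invalidate_exceptional_entry2(struct address_space *mapping,
                                             pgoff_t index, void *entry)
    {
            /* Handled by shmem itself */
            if (shmem_mapping(mapping))
                    return 1;
            if (dax_mapping(mapping))
                    return dax_invalidate_mapping_entry_sync(mapping, index);
            clear_shadow_entry(mapping, index, entry);
            return 1;
    }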
157 invalidatepage = page->mapping->a_ops->invalidatepage; in do_invalidatepage()
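The single matched line is the a_ops lookup. The full helper (reconstructed) falls back to block_invalidatepage() when the filesystem provides no ->invalidatepage method:

    void do_invalidatepage(struct page *page, unsigned int offset,
                           unsigned int length)
    {
            void (*invalidatepage)(struct page *, unsigned int, unsigned int);

            invalidatepage = page->mapping->a_ops->invalidatepage;
    #ifdef CONFIG_BLOCK
            if (!invalidatepage)
                    invalidatepage = block_invalidatepage;
    #endif
            if (invalidatepage)
                    (*invalidatepage)(page, offset, length);
    }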
171 * We need to bail out if page->mapping is no longer equal to the original
172 * mapping. This happens a) when the VM reclaimed the page while we waited on
177 truncate_cleanup_page(struct address_space *mapping, struct page *page) in truncate_cleanup_page() argument
181 unmap_mapping_pages(mapping, page->index, nr, false); in truncate_cleanup_page()
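A reconstruction of the whole helper (~v5.10; thp_nr_pages()/thp_size() assume that era, older trees open-code the THP page count):

    static void truncate_cleanup_page(struct address_space *mapping,
                                      struct page *page)
    {
            if (page_mapped(page)) {
                    unsigned int nr = thp_nr_pages(page);

                    unmap_mapping_pages(mapping, page->index, nr, false);
            }

            if (page_has_private(page))
                    do_invalidatepage(page, 0, thp_size(page));

            /*
             * Some filesystems re-dirty the page even after the dirty bit
             * has been cancelled (e.g. ext3 journalling), so the dirty
             * accounting cleanup sits after the invalidation.
             */
            cancel_dirty_page(page);
            ClearPageMappedToDisk(page);
    }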
205 invalidate_complete_page(struct address_space *mapping, struct page *page) in invalidate_complete_page() argument
209 if (page->mapping != mapping) in invalidate_complete_page()
215 ret = remove_mapping(mapping, page); in invalidate_complete_page()
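Reconstructed body: the function only succeeds for clean pages whose private (filesystem) data can be released, then hands the actual removal to remove_mapping():

    static int
    invalidate_complete_page(struct address_space *mapping, struct page *page)
    {
            if (page->mapping != mapping)   /* page was reclaimed or migrated */
                    return 0;

            if (page_has_private(page) && !try_to_release_page(page, 0))
                    return 0;               /* fs data still pinned: give up */

            return remove_mapping(mapping, page);
    }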
220 int truncate_inode_page(struct address_space *mapping, struct page *page) in truncate_inode_page() argument
224 if (page->mapping != mapping) in truncate_inode_page()
227 truncate_cleanup_page(mapping, page); in truncate_inode_page()
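Reconstructed body; the mapping check implements the bail-out described in the comment at lines 171-172 above:

    int truncate_inode_page(struct address_space *mapping, struct page *page)
    {
            VM_BUG_ON_PAGE(PageTail(page), page);

            if (page->mapping != mapping)
                    return -EIO;

            truncate_cleanup_page(mapping, page);
            delete_from_page_cache(page);
            return 0;
    }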
235 int generic_error_remove_page(struct address_space *mapping, struct page *page) in generic_error_remove_page() argument
237 if (!mapping) in generic_error_remove_page()
243 if (!S_ISREG(mapping->host->i_mode)) in generic_error_remove_page()
245 return truncate_inode_page(mapping, page); in generic_error_remove_page()
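Reconstructed body of this memory-failure helper (used as a ->error_remove_page implementation): it only punches regular-file pages:

    int generic_error_remove_page(struct address_space *mapping, struct page *page)
    {
            if (!mapping)
                    return -EINVAL;
            /*
             * Only regular-file pages are punched; other inode types would
             * need more auditing before being handled here.
             */
            if (!S_ISREG(mapping->host->i_mode))
                    return -EIO;
            return truncate_inode_page(mapping, page);
    }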
250 * Safely invalidate one page from its pagecache mapping.
257 struct address_space *mapping = page_mapping(page); in invalidate_inode_page() local
258 if (!mapping) in invalidate_inode_page()
264 return invalidate_complete_page(mapping, page); in invalidate_inode_page()
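Reconstructed body: invalidation, unlike truncation, must not lose data, so any dirty, under-writeback, or mapped page is simply left alone:

    int invalidate_inode_page(struct page *page)
    {
            struct address_space *mapping = page_mapping(page);

            if (!mapping)
                    return 0;
            if (PageDirty(page) || PageWriteback(page))
                    return 0;       /* would lose data */
            if (page_mapped(page))
                    return 0;       /* still mapped into some process */
            return invalidate_complete_page(mapping, page);
    }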
269 * @mapping: mapping to truncate
284 * mapping is large, it is probably the case that the final pages are the most
291 void truncate_inode_pages_range(struct address_space *mapping, in truncate_inode_pages_range() argument
303 if (mapping->nrpages == 0 && mapping->nrexceptional == 0) in truncate_inode_pages_range()
329 while (index < end && pagevec_lookup_entries(&pvec, mapping, index, in truncate_inode_pages_range()
358 if (page->mapping != mapping) { in truncate_inode_pages_range()
365 truncate_cleanup_page(mapping, locked_pvec.pages[i]); in truncate_inode_pages_range()
366 delete_from_page_cache_batch(mapping, &locked_pvec); in truncate_inode_pages_range()
369 truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); in truncate_inode_pages_range()
375 struct page *page = find_lock_page(mapping, start - 1); in truncate_inode_pages_range()
385 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
394 struct page *page = find_lock_page(mapping, end); in truncate_inode_pages_range()
398 cleancache_invalidate_page(mapping, page); in truncate_inode_pages_range()
416 if (!pagevec_lookup_entries(&pvec, mapping, index, in truncate_inode_pages_range()
449 truncate_inode_page(mapping, page); in truncate_inode_pages_range()
452 truncate_exceptional_pvec_entries(mapping, &pvec, indices, end); in truncate_inode_pages_range()
458 cleancache_invalidate_inode(mapping); in truncate_inode_pages_range()
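A hypothetical call, to make the byte-based interface concrete: lend is the inclusive last byte, and neither endpoint need be page-aligned; partial first/last pages go through the find_lock_page() paths matched above and are zeroed rather than removed:

    /* Drop cached data over a 1 MiB region starting at byte 4096.
     * Illustrative values; SZ_1M comes from <linux/sizes.h>. */
    loff_t lstart = 4096;
    loff_t lend = lstart + SZ_1M - 1;       /* inclusive */

    truncate_inode_pages_range(mapping, lstart, lend);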
464 * @mapping: mapping to truncate
471 * mapping->nrpages can be non-zero when this function returns even after
472 * truncation of the whole mapping.
474 void truncate_inode_pages(struct address_space *mapping, loff_t lstart) in truncate_inode_pages() argument
476 truncate_inode_pages_range(mapping, lstart, (loff_t)-1); in truncate_inode_pages()
482 * @mapping: mapping to truncate
489 void truncate_inode_pages_final(struct address_space *mapping) in truncate_inode_pages_final() argument
501 mapping_set_exiting(mapping); in truncate_inode_pages_final()
508 nrpages = mapping->nrpages; in truncate_inode_pages_final()
510 nrexceptional = mapping->nrexceptional; in truncate_inode_pages_final()
519 xa_lock_irq(&mapping->i_pages); in truncate_inode_pages_final()
520 xa_unlock_irq(&mapping->i_pages); in truncate_inode_pages_final()
527 truncate_inode_pages(mapping, 0); in truncate_inode_pages_final()
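truncate_inode_pages_final() belongs in the inode teardown path. A hypothetical filesystem's ->evict_inode (examplefs_evict_inode is an invented name) commonly reduces to:

    static void examplefs_evict_inode(struct inode *inode)
    {
            truncate_inode_pages_final(&inode->i_data);     /* empty the cache */
            clear_inode(inode);                             /* finish teardown */
    }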
531 static unsigned long __invalidate_mapping_pages(struct address_space *mapping, in __invalidate_mapping_pages() argument
542 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, in __invalidate_mapping_pages()
554 invalidate_exceptional_entry(mapping, index, in __invalidate_mapping_pages()
619 * @mapping: the address_space which holds the pages to invalidate
632 unsigned long invalidate_mapping_pages(struct address_space *mapping, in invalidate_mapping_pages() argument
635 return __invalidate_mapping_pages(mapping, start, end, NULL); in invalidate_mapping_pages()
644 void invalidate_mapping_pagevec(struct address_space *mapping, in invalidate_mapping_pagevec() argument
647 __invalidate_mapping_pages(mapping, start, end, nr_pagevec); in invalidate_mapping_pagevec()
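invalidate_mapping_pages() is the exported, best-effort variant (dirty, locked, or mapped pages simply survive), while invalidate_mapping_pagevec() additionally reports, via *nr_pagevec, pages skipped only because of pagevec references. Roughly what POSIX_FADV_DONTNEED does in mm/fadvise.c, simplified (the real code rounds partial pages conservatively):

    pgoff_t start = offset >> PAGE_SHIFT;
    pgoff_t end = (offset + len - 1) >> PAGE_SHIFT;     /* inclusive */

    invalidate_mapping_pages(mapping, start, end);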
658 invalidate_complete_page2(struct address_space *mapping, struct page *page) in invalidate_complete_page2() argument
662 if (page->mapping != mapping) in invalidate_complete_page2()
668 xa_lock_irqsave(&mapping->i_pages, flags); in invalidate_complete_page2()
674 xa_unlock_irqrestore(&mapping->i_pages, flags); in invalidate_complete_page2()
676 if (mapping->a_ops->freepage) in invalidate_complete_page2()
677 mapping->a_ops->freepage(page); in invalidate_complete_page2()
682 xa_unlock_irqrestore(&mapping->i_pages, flags); in invalidate_complete_page2()
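Reconstructed body (~v5.10): unlike invalidate_complete_page(), this one removes the page directly under the i_pages lock so a dirty page can never slip through, and it pairs the freepage() callback with dropping the page cache's reference:

    static int
    invalidate_complete_page2(struct address_space *mapping, struct page *page)
    {
            unsigned long flags;

            if (page->mapping != mapping)
                    return 0;

            if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
                    return 0;

            xa_lock_irqsave(&mapping->i_pages, flags);
            if (PageDirty(page))
                    goto failed;

            BUG_ON(page_has_private(page));
            __delete_from_page_cache(page, NULL);
            xa_unlock_irqrestore(&mapping->i_pages, flags);

            if (mapping->a_ops->freepage)
                    mapping->a_ops->freepage(page);

            put_page(page); /* the page cache's reference */
            return 1;
    failed:
            xa_unlock_irqrestore(&mapping->i_pages, flags);
            return 0;
    }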
686 static int do_launder_page(struct address_space *mapping, struct page *page) in do_launder_page() argument
690 if (page->mapping != mapping || mapping->a_ops->launder_page == NULL) in do_launder_page()
692 return mapping->a_ops->launder_page(page); in do_launder_page()
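Reconstructed body: laundering means asking the filesystem to write a dirty page back synchronously, via the optional ->launder_page method, before invalidation retries:

    static int do_launder_page(struct address_space *mapping, struct page *page)
    {
            if (!PageDirty(page))
                    return 0;
            if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
                    return 0;
            return mapping->a_ops->launder_page(page);
    }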
697 * @mapping: the address_space
706 int invalidate_inode_pages2_range(struct address_space *mapping, in invalidate_inode_pages2_range() argument
717 if (mapping->nrpages == 0 && mapping->nrexceptional == 0) in invalidate_inode_pages2_range()
722 while (index <= end && pagevec_lookup_entries(&pvec, mapping, index, in invalidate_inode_pages2_range()
734 if (!invalidate_exceptional_entry2(mapping, in invalidate_inode_pages2_range()
742 if (page->mapping != mapping) { in invalidate_inode_pages2_range()
752 unmap_mapping_pages(mapping, index, in invalidate_inode_pages2_range()
759 unmap_mapping_pages(mapping, index, in invalidate_inode_pages2_range()
764 ret2 = do_launder_page(mapping, page); in invalidate_inode_pages2_range()
766 if (!invalidate_complete_page2(mapping, page)) in invalidate_inode_pages2_range()
785 if (dax_mapping(mapping)) { in invalidate_inode_pages2_range()
786 unmap_mapping_pages(mapping, start, end - start + 1, false); in invalidate_inode_pages2_range()
789 cleancache_invalidate_inode(mapping); in invalidate_inode_pages2_range()
796 * @mapping: the address_space
803 int invalidate_inode_pages2(struct address_space *mapping) in invalidate_inode_pages2() argument
805 return invalidate_inode_pages2_range(mapping, 0, -1); in invalidate_inode_pages2()
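The classic caller is the direct-I/O write path: after bytes have gone straight to disk, any overlapping page-cache pages are stale and must go, or later buffered reads would return old data. Simplified from generic_file_direct_write() in mm/filemap.c (pos/written are the write's offset and byte count):

    if (written > 0)
            invalidate_inode_pages2_range(mapping,
                                          pos >> PAGE_SHIFT,
                                          (pos + written - 1) >> PAGE_SHIFT);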
826 struct address_space *mapping = inode->i_mapping; in truncate_pagecache() local
838 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
839 truncate_inode_pages(mapping, newsize); in truncate_pagecache()
840 unmap_mapping_range(mapping, holebegin, 0, 1); in truncate_pagecache()
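The matched lines show the unmap-truncate-unmap dance: the second unmap_mapping_range() catches pages that a racing fault could have mapped back in between the first unmap and the truncate. Most filesystems reach this through truncate_setsize(), whose body is roughly:

    void truncate_setsize(struct inode *inode, loff_t newsize)
    {
            loff_t oldsize = inode->i_size;

            i_size_write(inode, newsize);
            if (newsize > oldsize)
                    pagecache_isize_extended(inode, oldsize, newsize);
            truncate_pagecache(inode, newsize);
    }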
934 struct address_space *mapping = inode->i_mapping; in truncate_pagecache_range() local
951 unmap_mapping_range(mapping, unmap_start, in truncate_pagecache_range()
953 truncate_inode_pages_range(mapping, lstart, lend); in truncate_pagecache_range()
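A typical caller is a filesystem's fallocate() punch-hole path, which drops the cached pages over the hole before freeing the underlying blocks (a sketch; offset/len are the byte range being punched):

    truncate_pagecache_range(inode, offset, offset + len - 1);  /* lend inclusive */
    /* ... filesystem-specific block freeing follows ... */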