Lines matching full:mapping in mm/filemap.c

120 static void page_cache_delete(struct address_space *mapping,  in page_cache_delete()  argument
123 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
126 mapping_set_update(&xas, mapping); in page_cache_delete()
141 page->mapping = NULL; in page_cache_delete()
145 mapping->nrexceptional += nr; in page_cache_delete()
154 mapping->nrpages -= nr; in page_cache_delete()
157 static void unaccount_page_cache_page(struct address_space *mapping, in unaccount_page_cache_page() argument
170 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
184 if (mapping_exiting(mapping) && in unaccount_page_cache_page()
210 filemap_nr_thps_dec(mapping); in unaccount_page_cache_page()
224 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
234 struct address_space *mapping = page->mapping; in __delete_from_page_cache() local
238 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
239 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
242 static void page_cache_free_page(struct address_space *mapping, in page_cache_free_page() argument
247 freepage = mapping->a_ops->freepage; in page_cache_free_page()
269 struct address_space *mapping = page_mapping(page); in delete_from_page_cache() local
273 xa_lock_irqsave(&mapping->i_pages, flags); in delete_from_page_cache()
275 xa_unlock_irqrestore(&mapping->i_pages, flags); in delete_from_page_cache()
277 page_cache_free_page(mapping, page); in delete_from_page_cache()
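
Taken together, the matches above trace the page-removal path: __delete_from_page_cache() unaccounts the page and unlinks it from the xarray under the i_pages lock, and the ->freepage callback runs only after the lock is dropped. Reconstructed from the matched lines (kernel circa v5.10, a sketch rather than authoritative source), the public entry point reads roughly:

    void delete_from_page_cache(struct page *page)
    {
            struct address_space *mapping = page_mapping(page);
            unsigned long flags;

            BUG_ON(!PageLocked(page));
            xa_lock_irqsave(&mapping->i_pages, flags);
            __delete_from_page_cache(page, NULL);
            xa_unlock_irqrestore(&mapping->i_pages, flags);

            /* ->freepage must not run while the xarray lock is held. */
            page_cache_free_page(mapping, page);
    }
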
283 * @mapping: the mapping to which pages belong
286 * The function walks over mapping->i_pages and removes pages passed in @pvec
287 * from the mapping. The function expects @pvec to be sorted by page index
289 * It tolerates holes in @pvec (mapping entries at those indices are not
295 static void page_cache_delete_batch(struct address_space *mapping, in page_cache_delete_batch() argument
298 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
303 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
327 page->mapping = NULL; in page_cache_delete_batch()
340 mapping->nrpages -= total_pages; in page_cache_delete_batch()
343 void delete_from_page_cache_batch(struct address_space *mapping, in delete_from_page_cache_batch() argument
352 xa_lock_irqsave(&mapping->i_pages, flags); in delete_from_page_cache_batch()
356 unaccount_page_cache_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
358 page_cache_delete_batch(mapping, pvec); in delete_from_page_cache_batch()
359 xa_unlock_irqrestore(&mapping->i_pages, flags); in delete_from_page_cache_batch()
362 page_cache_free_page(mapping, pvec->pages[i]); in delete_from_page_cache_batch()
365 int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
369 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
370 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
372 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
373 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
379 static int filemap_check_and_keep_errors(struct address_space *mapping) in filemap_check_and_keep_errors() argument
382 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
384 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
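
The two error-flag helpers differ only in whether they clear the AS_* bits: filemap_check_errors() consumes them, filemap_check_and_keep_errors() leaves them set for a later caller. Reconstructed from the matches at lines 365-373 (again a circa-v5.10 sketch); note that -EIO, tested last, wins when both bits are set:

    int filemap_check_errors(struct address_space *mapping)
    {
            int ret = 0;

            /* Check for outstanding write errors; -EIO overrides -ENOSPC. */
            if (test_bit(AS_ENOSPC, &mapping->flags) &&
                test_and_clear_bit(AS_ENOSPC, &mapping->flags))
                    ret = -ENOSPC;
            if (test_bit(AS_EIO, &mapping->flags) &&
                test_and_clear_bit(AS_EIO, &mapping->flags))
                    ret = -EIO;
            return ret;
    }
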
390 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
391 * @mapping: address space structure to write
396 * Start writeback against all of a mapping's dirty pages that lie
406 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
417 if (!mapping_can_writeback(mapping) || in __filemap_fdatawrite_range()
418 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in __filemap_fdatawrite_range()
421 wbc_attach_fdatawrite_inode(&wbc, mapping->host); in __filemap_fdatawrite_range()
422 ret = do_writepages(mapping, &wbc); in __filemap_fdatawrite_range()
427 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
430 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
433 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
435 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
439 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
442 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
448 * @mapping: target address_space
455 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
457 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
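
These wrappers just pick a range and sync mode for __filemap_fdatawrite_range(): WB_SYNC_ALL for data integrity, WB_SYNC_NONE for opportunistic flushing. A hypothetical caller (example_start_range_writeback() is illustrative, not a kernel symbol) kicking off integrity writeback without waiting:

    /* Hypothetical helper: start WB_SYNC_ALL writeback on the byte range
     * [pos, pos + len - 1]; returns without waiting for I/O completion. */
    static int example_start_range_writeback(struct inode *inode,
                                             loff_t pos, size_t len)
    {
            return filemap_fdatawrite_range(inode->i_mapping, pos,
                                            pos + len - 1);
    }
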
463 * @mapping: address space within which to check
473 bool filemap_range_has_page(struct address_space *mapping, in filemap_range_has_page() argument
477 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
504 static void __filemap_fdatawait_range(struct address_space *mapping, in __filemap_fdatawait_range() argument
519 nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, in __filemap_fdatawait_range()
537 * @mapping: address space structure to wait for
551 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
554 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range()
555 return filemap_check_errors(mapping); in filemap_fdatawait_range()
561 * @mapping: address space structure to wait for
573 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, in filemap_fdatawait_range_keep_errors() argument
576 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range_keep_errors()
577 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_range_keep_errors()
599 struct address_space *mapping = file->f_mapping; in file_fdatawait_range() local
601 __filemap_fdatawait_range(mapping, start_byte, end_byte); in file_fdatawait_range()
608 * @mapping: address space structure to wait for
620 int filemap_fdatawait_keep_errors(struct address_space *mapping) in filemap_fdatawait_keep_errors() argument
622 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait_keep_errors()
623 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_keep_errors()
628 static bool mapping_needs_writeback(struct address_space *mapping) in mapping_needs_writeback() argument
630 if (dax_mapping(mapping)) in mapping_needs_writeback()
631 return mapping->nrexceptional; in mapping_needs_writeback()
633 return mapping->nrpages; in mapping_needs_writeback()
638 * @mapping: the address_space for the pages
649 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
654 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait_range()
655 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
664 int err2 = filemap_fdatawait_range(mapping, in filemap_write_and_wait_range()
670 filemap_check_errors(mapping); in filemap_write_and_wait_range()
673 err = filemap_check_errors(mapping); in filemap_write_and_wait_range()
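
filemap_write_and_wait_range() is the usual building block for ->fsync(): write the range, wait for completion, and report any error exactly once. A minimal hypothetical skeleton (example_fsync() is illustrative, not a kernel symbol):

    /* Hypothetical ->fsync(): flush and wait on the byte range before the
     * filesystem would sync its own metadata. */
    static int example_fsync(struct file *file, loff_t start, loff_t end,
                             int datasync)
    {
            int err;

            err = filemap_write_and_wait_range(file->f_mapping, start, end);
            if (err)
                    return err;
            /* ... filesystem-specific metadata writeback would go here ... */
            return 0;
    }
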
679 void __filemap_set_wb_err(struct address_space *mapping, int err) in __filemap_set_wb_err() argument
681 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
683 trace_filemap_set_wb_err(mapping, eseq); in __filemap_set_wb_err()
696 * Grab the wb_err from the mapping. If it matches what we have in the file,
699 * If it doesn't match, then take the mapping value, set the "seen" flag in
705 * While we handle mapping->wb_err with atomic operations, the f_wb_err
715 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err() local
718 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
722 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
733 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
734 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
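
The errseq_t machinery lets every open file observe a writeback error exactly once: completion paths record errors via mapping_set_error() (which feeds both wb_err and the legacy AS_* bits), and fsync-time callers advance their per-file cursor with file_check_and_advance_wb_err(). A hypothetical recording side (example_end_page_writeback() is illustrative):

    /* Hypothetical I/O-completion path: record a writeback error so later
     * fsync()/msync() callers can see it, then clear PG_writeback. */
    static void example_end_page_writeback(struct page *page, int err)
    {
            if (err)
                    mapping_set_error(page->mapping, err);
            end_page_writeback(page);
    }
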
758 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range() local
760 if (mapping_needs_writeback(mapping)) { in file_write_and_wait_range()
761 err = __filemap_fdatawrite_range(mapping, lstart, lend, in file_write_and_wait_range()
765 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
792 struct address_space *mapping = old->mapping; in replace_page_cache_page() local
793 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
795 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
800 VM_BUG_ON_PAGE(new->mapping, new); in replace_page_cache_page()
803 new->mapping = mapping; in replace_page_cache_page()
811 old->mapping = NULL; in replace_page_cache_page()
831 struct address_space *mapping, in __add_to_page_cache_locked() argument
835 XA_STATE(xas, &mapping->i_pages, offset); in __add_to_page_cache_locked()
841 mapping_set_update(&xas, mapping); in __add_to_page_cache_locked()
844 page->mapping = mapping; in __add_to_page_cache_locked()
887 mapping->nrexceptional--; in __add_to_page_cache_locked()
888 mapping->nrpages++; in __add_to_page_cache_locked()
905 page->mapping = NULL; in __add_to_page_cache_locked()
915 * @mapping: the page's address_space
924 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
927 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
932 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
939 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
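
add_to_page_cache_lru() is how the read paths below insert a freshly allocated page (compare the slow path at lines 2450-2456). A hypothetical allocate-and-insert helper (example_alloc_and_add() is illustrative), leaving the -EEXIST race to the caller:

    /* Hypothetical helper: allocate a page and insert it at @index
     * (returned locked on success), dropping our reference if another
     * thread beat us to the slot. */
    static struct page *example_alloc_and_add(struct address_space *mapping,
                                              pgoff_t index)
    {
            struct page *page = page_cache_alloc(mapping);
            int err;

            if (!page)
                    return ERR_PTR(-ENOMEM);
            err = add_to_page_cache_lru(page, mapping, index,
                            mapping_gfp_constraint(mapping, GFP_KERNEL));
            if (err) {
                    put_page(page);
                    return ERR_PTR(err);    /* -EEXIST on a lost race */
            }
            return page;
    }
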
1519 struct address_space *mapping; in page_endio() local
1522 mapping = page_mapping(page); in page_endio()
1523 if (mapping) in page_endio()
1524 mapping_set_error(mapping, err); in page_endio()
1603 * @mapping: Mapping.
1620 pgoff_t page_cache_next_miss(struct address_space *mapping, in page_cache_next_miss() argument
1623 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1639 * @mapping: Mapping.
1656 pgoff_t page_cache_prev_miss(struct address_space *mapping, in page_cache_prev_miss() argument
1659 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1675 * @mapping: the address_space to search
1678 * Looks up the page cache slot at @mapping & @offset. If there is a
1686 struct page *find_get_entry(struct address_space *mapping, pgoff_t index) in find_get_entry() argument
1688 XA_STATE(xas, &mapping->i_pages, index); in find_get_entry()
1724 * @mapping: The address_space to search.
1727 * Looks up the page at @mapping & @index. If there is a page in the
1736 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index) in find_lock_entry() argument
1741 page = find_get_entry(mapping, index); in find_lock_entry()
1745 if (unlikely(page->mapping != mapping)) { in find_lock_entry()
1757 * @mapping: The address_space to search.
1762 * Looks up the page cache entry at @mapping & @index.
1787 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page() argument
1793 page = find_get_entry(mapping, index); in pagecache_get_page()
1810 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1831 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) in pagecache_get_page()
1847 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); in pagecache_get_page()
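
pagecache_get_page() folds lookup, locking, and creation into one call driven by FGP_* flags; find_get_page() and friends are thin wrappers around it. A hypothetical find-or-create that returns the page locked (example_get_locked_page() is illustrative):

    /* Hypothetical lookup-or-create: return the locked page at @index,
     * allocating and inserting one on a miss; NULL on failure. */
    static struct page *example_get_locked_page(struct address_space *mapping,
                                                pgoff_t index)
    {
            return pagecache_get_page(mapping, index,
                                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                      mapping_gfp_mask(mapping));
    }
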
1869 * @mapping: The address_space to search
1876 * @nr_entries entries in the mapping. The entries are placed at
1880 * The search returns a group of mapping-contiguous page cache entries
1894 unsigned find_get_entries(struct address_space *mapping, in find_get_entries() argument
1898 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
1949 * @mapping: The address_space to search
1956 * pages in the mapping starting at index @start and up to index @end
1960 * The search returns a group of mapping-contiguous pages with ascending
1968 unsigned find_get_pages_range(struct address_space *mapping, pgoff_t *start, in find_get_pages_range() argument
1972 XA_STATE(xas, &mapping->i_pages, *start); in find_get_pages_range()
2024 * @mapping: The address_space to search
2034 unsigned find_get_pages_contig(struct address_space *mapping, pgoff_t index, in find_get_pages_contig() argument
2037 XA_STATE(xas, &mapping->i_pages, index); in find_get_pages_contig()
2078 * @mapping: the address_space to search
2090 unsigned find_get_pages_range_tag(struct address_space *mapping, pgoff_t *index, in find_get_pages_range_tag() argument
2094 XA_STATE(xas, &mapping->i_pages, *index); in find_get_pages_range_tag()
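
Tagged gang lookup is what the writeback waiters earlier in the listing are built on: __filemap_fdatawait_range() (line 519) loops over pagevec_lookup_range_tag(). A hypothetical waiter in the same style (example_wait_range() is illustrative):

    /* Hypothetical waiter: walk pages tagged as under writeback in
     * [start, end] and wait on each, batching lookups in a pagevec. */
    static void example_wait_range(struct address_space *mapping,
                                   pgoff_t start, pgoff_t end)
    {
            struct pagevec pvec;
            unsigned nr, i;

            pagevec_init(&pvec);
            while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &start,
                                    end, PAGECACHE_TAG_WRITEBACK))) {
                    for (i = 0; i < nr; i++)
                            wait_on_page_writeback(pvec.pages[i]);
                    pagevec_release(&pvec);
                    cond_resched();
            }
    }
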
2176 * mapping->a_ops->readpage() function for the actual low-level stuff.
2189 struct address_space *mapping = filp->f_mapping; in generic_file_buffered_read() local
2190 struct inode *inode = mapping->host; in generic_file_buffered_read()
2231 page = find_get_page(mapping, index); in generic_file_buffered_read()
2235 page_cache_sync_readahead(mapping, in generic_file_buffered_read()
2238 page = find_get_page(mapping, index); in generic_file_buffered_read()
2247 page_cache_async_readahead(mapping, in generic_file_buffered_read()
2277 !mapping->a_ops->is_partially_uptodate) in generic_file_buffered_read()
2285 if (!page->mapping) in generic_file_buffered_read()
2287 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2324 if (mapping_writably_mapped(mapping)) in generic_file_buffered_read()
2372 if (!page->mapping) { in generic_file_buffered_read()
2397 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2422 if (page->mapping == NULL) { in generic_file_buffered_read()
2450 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2455 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2456 mapping_gfp_constraint(mapping, GFP_KERNEL)); in generic_file_buffered_read()
2513 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
2514 struct inode *inode = mapping->host; in generic_file_read_iter()
2519 if (filemap_range_has_page(mapping, iocb->ki_pos, in generic_file_read_iter()
2523 retval = filemap_write_and_wait_range(mapping, in generic_file_read_iter()
2532 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
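
generic_file_read_iter() dispatches between the direct-I/O path (->direct_IO) and the buffered path above. A filesystem that wants the stock pagecache behaviour typically just wires the generic helpers into its file_operations; a hypothetical example (example_file_ops is illustrative):

    /* Hypothetical file_operations relying entirely on the generic
     * pagecache read/write/mmap paths shown in this listing. */
    static const struct file_operations example_file_ops = {
            .llseek         = generic_file_llseek,
            .read_iter      = generic_file_read_iter,
            .write_iter     = generic_file_write_iter,
            .mmap           = generic_file_mmap,
            .fsync          = generic_file_fsync,
    };
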
2616 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
2617 DEFINE_READAHEAD(ractl, file, mapping, vmf->pgoff); in do_sync_mmap_readahead()
2667 struct address_space *mapping = file->f_mapping; in do_async_mmap_readahead() local
2680 page_cache_async_readahead(mapping, ra, file, in do_async_mmap_readahead()
2714 struct address_space *mapping = file->f_mapping; in filemap_fault() local
2716 struct inode *inode = mapping->host; in filemap_fault()
2729 page = find_get_page(mapping, offset); in filemap_fault()
2743 page = pagecache_get_page(mapping, offset, in filemap_fault()
2757 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
2804 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2838 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
2841 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
2873 if (head->mapping != mapping || !PageUptodate(head)) in filemap_map_pages()
2876 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in filemap_map_pages()
2914 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
2941 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
2943 if (!mapping->a_ops->readpage) in generic_file_mmap()
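
generic_file_mmap() refuses mappings when ->readpage is missing, then installs the kernel's generic_file_vm_ops, whose handlers are exactly the three fault paths above. An equivalent hypothetical table (example_vm_ops is illustrative):

    /* Hypothetical vm_operations_struct mirroring generic_file_vm_ops. */
    static const struct vm_operations_struct example_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };
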
2990 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
2999 page = find_get_page(mapping, index); in do_read_cache_page()
3004 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3017 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3048 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
3052 * will be a race with remove_mapping that determines if the mapping in do_read_cache_page()
3071 if (!page->mapping) { in do_read_cache_page()
3099 * @mapping: the page's address_space
3111 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
3116 return do_read_cache_page(mapping, index, filler, data, in read_cache_page()
3117 mapping_gfp_mask(mapping)); in read_cache_page()
3123 * @mapping: the page's address_space
3127 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3134 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
3138 return do_read_cache_page(mapping, index, NULL, NULL, gfp); in read_cache_page_gfp()
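
read_cache_page() and read_cache_page_gfp() both funnel into do_read_cache_page(); most callers use the read_mapping_page() convenience wrapper mentioned at line 3127, which passes a NULL filler so the mapping's ->readpage is used. A hypothetical single-byte read through the cache (example_read_byte() is illustrative):

    /* Hypothetical helper: read one byte at byte offset @pos through the
     * pagecache; returns 0 or a negative errno. */
    static int example_read_byte(struct address_space *mapping,
                                 loff_t pos, u8 *out)
    {
            struct page *page = read_mapping_page(mapping,
                                                  pos >> PAGE_SHIFT, NULL);
            u8 *kaddr;

            if (IS_ERR(page))
                    return PTR_ERR(page);
            kaddr = kmap(page);
            *out = kaddr[offset_in_page(pos)];
            kunmap(page);
            put_page(page);
            return 0;
    }
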
3142 int pagecache_write_begin(struct file *file, struct address_space *mapping, in pagecache_write_begin() argument
3146 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_begin()
3148 return aops->write_begin(file, mapping, pos, len, flags, in pagecache_write_begin()
3153 int pagecache_write_end(struct file *file, struct address_space *mapping, in pagecache_write_end() argument
3157 const struct address_space_operations *aops = mapping->a_ops; in pagecache_write_end()
3159 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
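
pagecache_write_begin()/pagecache_write_end() expose the ->write_begin/->write_end pairing that generic_perform_write() (below) loops over. A hypothetical in-kernel write of a small buffer, assuming the range does not cross a page boundary (example_write_buf() is illustrative):

    /* Hypothetical buffered write: prepare the page, copy the bytes in,
     * then commit via ->write_end. Assumes [pos, pos + len) stays within
     * a single page. */
    static int example_write_buf(struct file *file, loff_t pos,
                                 const void *buf, unsigned len)
    {
            struct address_space *mapping = file->f_mapping;
            struct page *page;
            void *fsdata;
            char *kaddr;
            int err;

            err = pagecache_write_begin(file, mapping, pos, len, 0,
                                        &page, &fsdata);
            if (err)
                    return err;

            kaddr = kmap(page);
            memcpy(kaddr + offset_in_page(pos), buf, len);
            kunmap(page);

            err = pagecache_write_end(file, mapping, pos, len, len,
                                      page, fsdata);
            return err < 0 ? err : 0;
    }
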
3188 struct address_space *mapping = file->f_mapping; in generic_file_direct_write() local
3189 struct inode *inode = mapping->host; in generic_file_direct_write()
3204 written = filemap_write_and_wait_range(mapping, pos, in generic_file_direct_write()
3216 written = invalidate_inode_pages2_range(mapping, in generic_file_direct_write()
3228 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
3245 * Skip invalidation for async writes or if mapping has no pages. in generic_file_direct_write()
3247 if (written > 0 && mapping->nrpages && in generic_file_direct_write()
3248 invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, end)) in generic_file_direct_write()
3270 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin() argument
3279 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3280 mapping_gfp_mask(mapping)); in grab_cache_page_write_begin()
3291 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
3292 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
3329 status = a_ops->write_begin(file, mapping, pos, bytes, flags, in generic_perform_write()
3334 if (mapping_writably_mapped(mapping)) in generic_perform_write()
3340 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
3365 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
3396 struct address_space * mapping = file->f_mapping; in __generic_file_write_iter() local
3397 struct inode *inode = mapping->host; in __generic_file_write_iter()
3444 err = filemap_write_and_wait_range(mapping, pos, endbyte); in __generic_file_write_iter()
3448 invalidate_mapping_pages(mapping, in __generic_file_write_iter()
3518 struct address_space * const mapping = page->mapping; in try_to_release_page() local
3524 if (mapping && mapping->a_ops->releasepage) in try_to_release_page()
3525 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()