Lines matching full:mapping (free-text search hits in the Linux page cache implementation, mm/filemap.c). Each hit shows the kernel source line number, the matched line, and the enclosing function; hits on a declaration of `mapping` are annotated "argument" or "local".
127 static void page_cache_delete(struct address_space *mapping, in page_cache_delete() argument
130 XA_STATE(xas, &mapping->i_pages, folio->index); in page_cache_delete()
133 mapping_set_update(&xas, mapping); in page_cache_delete()
143 folio->mapping = NULL; in page_cache_delete()
145 mapping->nrpages -= nr; in page_cache_delete()
148 static void filemap_unaccount_folio(struct address_space *mapping, in filemap_unaccount_folio() argument
161 if (mapping_exiting(mapping) && !folio_test_large(folio)) { in filemap_unaccount_folio()
190 filemap_nr_thps_dec(mapping); in filemap_unaccount_folio()
208 mapping_can_writeback(mapping))) in filemap_unaccount_folio()
209 folio_account_cleaned(folio, inode_to_wb(mapping->host)); in filemap_unaccount_folio()
219 struct address_space *mapping = folio->mapping; in __filemap_remove_folio() local
222 filemap_unaccount_folio(mapping, folio); in __filemap_remove_folio()
223 page_cache_delete(mapping, folio, shadow); in __filemap_remove_folio()
226 void filemap_free_folio(struct address_space *mapping, struct folio *folio) in filemap_free_folio() argument
231 free_folio = mapping->a_ops->free_folio; in filemap_free_folio()
250 struct address_space *mapping = folio->mapping; in filemap_remove_folio() local
253 spin_lock(&mapping->host->i_lock); in filemap_remove_folio()
254 xa_lock_irq(&mapping->i_pages); in filemap_remove_folio()
256 xa_unlock_irq(&mapping->i_pages); in filemap_remove_folio()
257 if (mapping_shrinkable(mapping)) in filemap_remove_folio()
258 inode_add_lru(mapping->host); in filemap_remove_folio()
259 spin_unlock(&mapping->host->i_lock); in filemap_remove_folio()
261 filemap_free_folio(mapping, folio); in filemap_remove_folio()
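
Read together, the filemap_remove_folio() matches above give the removal locking order. A hedged reconstruction from those matches (intermediate assertions elided; this mirrors mainline filemap.c rather than being a drop-in):

void filemap_remove_folio(struct folio *folio)
{
	struct address_space *mapping = folio->mapping;

	/* Lock order: inode->i_lock first, then the i_pages xarray lock. */
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	__filemap_remove_folio(folio, NULL);	/* unaccount + delete, no shadow */
	xa_unlock_irq(&mapping->i_pages);

	/* An inode whose cache just emptied may become LRU-reclaimable. */
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);	/* ->free_folio() hook + ref drop */
}
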
266 * @mapping: the mapping to which folios belong
269 * The function walks over mapping->i_pages and removes folios passed in
270 * @fbatch from the mapping. The function expects @fbatch to be sorted
272 * It tolerates holes in @fbatch (mapping entries at those indices are not
277 static void page_cache_delete_batch(struct address_space *mapping, in page_cache_delete_batch() argument
280 XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index); in page_cache_delete_batch()
285 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
308 folio->mapping = NULL; in page_cache_delete_batch()
315 mapping->nrpages -= total_pages; in page_cache_delete_batch()
318 void delete_from_page_cache_batch(struct address_space *mapping, in delete_from_page_cache_batch() argument
326 spin_lock(&mapping->host->i_lock); in delete_from_page_cache_batch()
327 xa_lock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
332 filemap_unaccount_folio(mapping, folio); in delete_from_page_cache_batch()
334 page_cache_delete_batch(mapping, fbatch); in delete_from_page_cache_batch()
335 xa_unlock_irq(&mapping->i_pages); in delete_from_page_cache_batch()
336 if (mapping_shrinkable(mapping)) in delete_from_page_cache_batch()
337 inode_add_lru(mapping->host); in delete_from_page_cache_batch()
338 spin_unlock(&mapping->host->i_lock); in delete_from_page_cache_batch()
341 filemap_free_folio(mapping, fbatch->folios[i]); in delete_from_page_cache_batch()
344 int filemap_check_errors(struct address_space *mapping) in filemap_check_errors() argument
348 if (test_bit(AS_ENOSPC, &mapping->flags) && in filemap_check_errors()
349 test_and_clear_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_errors()
351 if (test_bit(AS_EIO, &mapping->flags) && in filemap_check_errors()
352 test_and_clear_bit(AS_EIO, &mapping->flags)) in filemap_check_errors()
358 static int filemap_check_and_keep_errors(struct address_space *mapping) in filemap_check_and_keep_errors() argument
361 if (test_bit(AS_EIO, &mapping->flags)) in filemap_check_and_keep_errors()
363 if (test_bit(AS_ENOSPC, &mapping->flags)) in filemap_check_and_keep_errors()
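
filemap_check_errors() consumes AS_EIO/AS_ENOSPC, which writeback completion paths latch via mapping_set_error(); note from the ordering of the matches above that -EIO wins when both bits are set. A minimal sketch of the producer side (record_write_error() is a hypothetical name):

#include <linux/pagemap.h>

/* Writeback-completion side: latch the failure on the mapping. */
static void record_write_error(struct address_space *mapping, int err)
{
	/* Sets AS_EIO (or AS_ENOSPC for -ENOSPC) and advances mapping->wb_err. */
	mapping_set_error(mapping, err);
}
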
369 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
370 * @mapping: address space structure to write
373 * Call writepages on the mapping using the provided wbc to control the
378 int filemap_fdatawrite_wbc(struct address_space *mapping, in filemap_fdatawrite_wbc() argument
383 if (!mapping_can_writeback(mapping) || in filemap_fdatawrite_wbc()
384 !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) in filemap_fdatawrite_wbc()
387 wbc_attach_fdatawrite_inode(wbc, mapping->host); in filemap_fdatawrite_wbc()
388 ret = do_writepages(mapping, wbc); in filemap_fdatawrite_wbc()
395 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
396 * @mapping: address space structure to write
401 * Start writeback against all of a mapping's dirty pages that lie
411 int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in __filemap_fdatawrite_range() argument
421 return filemap_fdatawrite_wbc(mapping, &wbc); in __filemap_fdatawrite_range()
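
The elided body of __filemap_fdatawrite_range() just packs its arguments into a struct writeback_control; reconstructed from mainline filemap.c it is essentially:

	struct writeback_control wbc = {
		.sync_mode	= sync_mode,	/* WB_SYNC_ALL (wait) or WB_SYNC_NONE */
		.nr_to_write	= LONG_MAX,	/* no page-count limit */
		.range_start	= start,
		.range_end	= end,
	};

	return filemap_fdatawrite_wbc(mapping, &wbc);
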
424 static inline int __filemap_fdatawrite(struct address_space *mapping, in __filemap_fdatawrite() argument
427 return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode); in __filemap_fdatawrite()
430 int filemap_fdatawrite(struct address_space *mapping) in filemap_fdatawrite() argument
432 return __filemap_fdatawrite(mapping, WB_SYNC_ALL); in filemap_fdatawrite()
436 int filemap_fdatawrite_range(struct address_space *mapping, loff_t start, in filemap_fdatawrite_range() argument
439 return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL); in filemap_fdatawrite_range()
445 * @mapping: target address_space
452 int filemap_flush(struct address_space *mapping) in filemap_flush() argument
454 return __filemap_fdatawrite(mapping, WB_SYNC_NONE); in filemap_flush()
460 * @mapping: address space within which to check
470 bool filemap_range_has_page(struct address_space *mapping, in filemap_range_has_page() argument
474 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
501 static void __filemap_fdatawait_range(struct address_space *mapping, in __filemap_fdatawait_range() argument
514 nr_folios = filemap_get_folios_tag(mapping, &index, end, in __filemap_fdatawait_range()
533 * @mapping: address space structure to wait for
547 int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte, in filemap_fdatawait_range() argument
550 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range()
551 return filemap_check_errors(mapping); in filemap_fdatawait_range()
557 * @mapping: address space structure to wait for
569 int filemap_fdatawait_range_keep_errors(struct address_space *mapping, in filemap_fdatawait_range_keep_errors() argument
572 __filemap_fdatawait_range(mapping, start_byte, end_byte); in filemap_fdatawait_range_keep_errors()
573 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_range_keep_errors()
595 struct address_space *mapping = file->f_mapping; in file_fdatawait_range() local
597 __filemap_fdatawait_range(mapping, start_byte, end_byte); in file_fdatawait_range()
604 * @mapping: address space structure to wait for
616 int filemap_fdatawait_keep_errors(struct address_space *mapping) in filemap_fdatawait_keep_errors() argument
618 __filemap_fdatawait_range(mapping, 0, LLONG_MAX); in filemap_fdatawait_keep_errors()
619 return filemap_check_and_keep_errors(mapping); in filemap_fdatawait_keep_errors()
624 static bool mapping_needs_writeback(struct address_space *mapping) in mapping_needs_writeback() argument
626 return mapping->nrpages; in mapping_needs_writeback()
629 bool filemap_range_has_writeback(struct address_space *mapping, in filemap_range_has_writeback() argument
632 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_writeback()
656 * @mapping: the address_space for the pages
667 int filemap_write_and_wait_range(struct address_space *mapping, in filemap_write_and_wait_range() argument
675 if (mapping_needs_writeback(mapping)) { in filemap_write_and_wait_range()
676 err = __filemap_fdatawrite_range(mapping, lstart, lend, in filemap_write_and_wait_range()
685 __filemap_fdatawait_range(mapping, lstart, lend); in filemap_write_and_wait_range()
687 err2 = filemap_check_errors(mapping); in filemap_write_and_wait_range()
694 void __filemap_set_wb_err(struct address_space *mapping, int err) in __filemap_set_wb_err() argument
696 errseq_t eseq = errseq_set(&mapping->wb_err, err); in __filemap_set_wb_err()
698 trace_filemap_set_wb_err(mapping, eseq); in __filemap_set_wb_err()
711 * Grab the wb_err from the mapping. If it matches what we have in the file,
714 * If it doesn't match, then take the mapping value, set the "seen" flag in
720 * While we handle mapping->wb_err with atomic operations, the f_wb_err
730 struct address_space *mapping = file->f_mapping; in file_check_and_advance_wb_err() local
733 if (errseq_check(&mapping->wb_err, old)) { in file_check_and_advance_wb_err()
737 err = errseq_check_and_advance(&mapping->wb_err, in file_check_and_advance_wb_err()
748 clear_bit(AS_EIO, &mapping->flags); in file_check_and_advance_wb_err()
749 clear_bit(AS_ENOSPC, &mapping->flags); in file_check_and_advance_wb_err()
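
The wb_err field above is an errseq_t cursor (lib/errseq.c): writers stamp errors with errseq_set(), and each consumer advances its own cursor so every error is reported exactly once per observer. A hedged sketch of a consumer (sample_and_report() is hypothetical):

#include <linux/fs.h>
#include <linux/errseq.h>

static int sample_and_report(struct address_space *mapping, errseq_t *since)
{
	/* Cheap lockless peek, as file_check_and_advance_wb_err() does first. */
	if (!errseq_check(&mapping->wb_err, *since))
		return 0;

	/* Report the new error once and move this observer's cursor past it. */
	return errseq_check_and_advance(&mapping->wb_err, since);
}
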
773 struct address_space *mapping = file->f_mapping; in file_write_and_wait_range() local
778 if (mapping_needs_writeback(mapping)) { in file_write_and_wait_range()
779 err = __filemap_fdatawrite_range(mapping, lstart, lend, in file_write_and_wait_range()
783 __filemap_fdatawait_range(mapping, lstart, lend); in file_write_and_wait_range()
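
file_write_and_wait_range() (write back, wait, then advance f_wb_err) is the usual backbone of a simple ->fsync implementation; a hedged sketch (my_fsync() and the metadata step are hypothetical):

#include <linux/fs.h>

static int my_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	int err;

	/* Flush and wait on [start, end]; reports errors via file->f_wb_err. */
	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* A real filesystem would commit metadata / flush the device here. */
	return 0;
}
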
807 struct address_space *mapping = old->mapping; in replace_page_cache_folio() local
808 void (*free_folio)(struct folio *) = mapping->a_ops->free_folio; in replace_page_cache_folio()
810 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_folio()
814 VM_BUG_ON_FOLIO(new->mapping, new); in replace_page_cache_folio()
817 new->mapping = mapping; in replace_page_cache_folio()
825 old->mapping = NULL; in replace_page_cache_folio()
842 noinline int __filemap_add_folio(struct address_space *mapping, in __filemap_add_folio() argument
845 XA_STATE(xas, &mapping->i_pages, index); in __filemap_add_folio()
852 mapping_set_update(&xas, mapping); in __filemap_add_folio()
867 folio->mapping = mapping; in __filemap_add_folio()
893 BUG_ON(shmem_mapping(mapping)); in __filemap_add_folio()
903 mapping->nrpages += nr; in __filemap_add_folio()
924 folio->mapping = NULL; in __filemap_add_folio()
931 int filemap_add_folio(struct address_space *mapping, struct folio *folio, in filemap_add_folio() argument
938 ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow); in filemap_add_folio()
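
filemap_add_folio() pairs with filemap_alloc_folio(), the same shape the filemap_create_folio() matches show further down; a hedged sketch (add_new_folio() is hypothetical, error handling trimmed to the essentials):

#include <linux/err.h>
#include <linux/pagemap.h>

static struct folio *add_new_folio(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;
	int err;

	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); /* order 0 */
	if (!folio)
		return ERR_PTR(-ENOMEM);

	/* Inserts into i_pages and charges memcg; folio comes back locked. */
	err = filemap_add_folio(mapping, folio, index,
				mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (err) {
		folio_put(folio);
		return ERR_PTR(err);
	}
	return folio;
}
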
983 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
985 * @mapping1: the first mapping to lock
986 * @mapping2: the second mapping to lock
1003 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
1005 * @mapping1: the first mapping to unlock
1006 * @mapping2: the second mapping to unlock
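
The lock/unlock-two helpers documented above bracket operations that span two files (remap, dedupe); a hedged sketch (remap_prep() and its body are hypothetical):

#include <linux/fs.h>

static void remap_prep(struct file *file1, struct file *file2)
{
	/* NULL mappings are skipped; the pair is locked in a stable order. */
	filemap_invalidate_lock_two(file1->f_mapping, file2->f_mapping);

	/* caller's invalidate/remap work on both mappings goes here */

	filemap_invalidate_unlock_two(file1->f_mapping, file2->f_mapping);
}
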
1697 * @mapping: Mapping.
1714 pgoff_t page_cache_next_miss(struct address_space *mapping, in page_cache_next_miss() argument
1717 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1733 * @mapping: Mapping.
1750 pgoff_t page_cache_prev_miss(struct address_space *mapping, in page_cache_prev_miss() argument
1753 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1789 * @mapping: the address_space to search
1792 * Looks up the page cache entry at @mapping & @index. If it is a folio,
1799 void *filemap_get_entry(struct address_space *mapping, pgoff_t index) in filemap_get_entry() argument
1801 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_entry()
1832 * @mapping: The address_space to search.
1837 * Looks up the page cache entry at @mapping & @index.
1846 struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index, in __filemap_get_folio() argument
1852 folio = filemap_get_entry(mapping, index); in __filemap_get_folio()
1869 if (unlikely(folio->mapping != mapping)) { in __filemap_get_folio()
1892 if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping)) in __filemap_get_folio()
1903 if (!mapping_large_folio_support(mapping)) in __filemap_get_folio()
1927 err = filemap_add_folio(mapping, folio, index, gfp); in __filemap_get_folio()
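
__filemap_get_folio() is driven by FGP flags; a hedged lookup-or-create example (touch_index() is hypothetical; note that in kernels of this vintage failure is an ERR_PTR, not NULL):

#include <linux/err.h>
#include <linux/pagemap.h>

static int touch_index(struct address_space *mapping, pgoff_t index)
{
	struct folio *folio;

	folio = __filemap_get_folio(mapping, index,
				    FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	folio_mark_dirty(folio);	/* stand-in for real work on the folio */
	folio_unlock(folio);
	folio_put(folio);
	return 0;
}
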
1989 * @mapping: The address_space to search
1996 * the mapping. The entries are placed in @fbatch. find_get_entries()
2007 unsigned find_get_entries(struct address_space *mapping, pgoff_t *start, in find_get_entries() argument
2010 XA_STATE(xas, &mapping->i_pages, *start); in find_get_entries()
2035 * @mapping: The address_space to search.
2041 * find_lock_entries() will return a batch of entries from @mapping.
2053 unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start, in find_lock_entries() argument
2056 XA_STATE(xas, &mapping->i_pages, *start); in find_lock_entries()
2068 if (folio->mapping != mapping || in find_lock_entries()
2099 * @mapping: The address_space to search
2104 * Search for and return a batch of folios in the mapping starting at
2111 unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start, in filemap_get_folios() argument
2114 return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch); in filemap_get_folios()
2120 * @mapping: The address_space to search
2133 unsigned filemap_get_folios_contig(struct address_space *mapping, in filemap_get_folios_contig() argument
2136 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_contig()
2187 * @mapping: The address_space to search
2204 unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start, in filemap_get_folios_tag() argument
2207 XA_STATE(xas, &mapping->i_pages, *start); in filemap_get_folios_tag()
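
Every batch lookup above fills a struct folio_batch that the caller must release; the canonical consumer loop, here over dirty folios as __filemap_fdatawait_range() also does (walk_dirty() is hypothetical):

#include <linux/pagemap.h>
#include <linux/pagevec.h>

static void walk_dirty(struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct folio_batch fbatch;
	unsigned int i;

	folio_batch_init(&fbatch);
	while (filemap_get_folios_tag(mapping, &start, end,
				      PAGECACHE_TAG_DIRTY, &fbatch)) {
		for (i = 0; i < folio_batch_count(&fbatch); i++) {
			struct folio *folio = fbatch.folios[i];
			/* each folio holds a reference; inspect it here */
		}
		folio_batch_release(&fbatch);	/* drop the references */
		cond_resched();
	}
}
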
2271 static void filemap_get_read_batch(struct address_space *mapping, in filemap_get_read_batch() argument
2274 XA_STATE(xas, &mapping->i_pages, index); in filemap_get_read_batch()
2340 static bool filemap_range_uptodate(struct address_space *mapping, in filemap_range_uptodate() argument
2349 if (!mapping->a_ops->is_partially_uptodate) in filemap_range_uptodate()
2351 if (mapping->host->i_blkbits >= folio_shift(folio)) in filemap_range_uptodate()
2361 return mapping->a_ops->is_partially_uptodate(folio, pos, count); in filemap_range_uptodate()
2365 struct address_space *mapping, size_t count, in filemap_update_page() argument
2371 if (!filemap_invalidate_trylock_shared(mapping)) in filemap_update_page()
2374 filemap_invalidate_lock_shared(mapping); in filemap_update_page()
2382 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2396 if (!folio->mapping) in filemap_update_page()
2400 if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio, in filemap_update_page()
2408 error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio, in filemap_update_page()
2414 filemap_invalidate_unlock_shared(mapping); in filemap_update_page()
2421 struct address_space *mapping, pgoff_t index, in filemap_create_folio() argument
2427 folio = filemap_alloc_folio(mapping_gfp_mask(mapping), 0); in filemap_create_folio()
2441 * while mapping blocks for IO so let's hold the lock here as in filemap_create_folio()
2444 filemap_invalidate_lock_shared(mapping); in filemap_create_folio()
2445 error = filemap_add_folio(mapping, folio, index, in filemap_create_folio()
2446 mapping_gfp_constraint(mapping, GFP_KERNEL)); in filemap_create_folio()
2452 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_create_folio()
2456 filemap_invalidate_unlock_shared(mapping); in filemap_create_folio()
2460 filemap_invalidate_unlock_shared(mapping); in filemap_create_folio()
2466 struct address_space *mapping, struct folio *folio, in filemap_readahead() argument
2469 DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index); in filemap_readahead()
2481 struct address_space *mapping = filp->f_mapping; in filemap_get_pages() local
2494 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2498 page_cache_sync_readahead(mapping, ra, filp, index, in filemap_get_pages()
2500 filemap_get_read_batch(mapping, index, last_index - 1, fbatch); in filemap_get_pages()
2505 err = filemap_create_folio(filp, mapping, in filemap_get_pages()
2514 err = filemap_readahead(iocb, filp, mapping, folio, last_index); in filemap_get_pages()
2522 err = filemap_update_page(iocb, mapping, count, folio, in filemap_get_pages()
2564 struct address_space *mapping = filp->f_mapping; in filemap_read() local
2565 struct inode *inode = mapping->host; in filemap_read()
2624 writably_mapped = mapping_writably_mapped(mapping); in filemap_read()
2679 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_write_and_wait() local
2684 if (filemap_range_needs_writeback(mapping, pos, end)) in kiocb_write_and_wait()
2689 return filemap_write_and_wait_range(mapping, pos, end); in kiocb_write_and_wait()
2695 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_pages() local
2702 if (filemap_range_has_page(mapping, pos, end)) in kiocb_invalidate_pages()
2705 ret = filemap_write_and_wait_range(mapping, pos, end); in kiocb_invalidate_pages()
2716 return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT, in kiocb_invalidate_pages()
2753 struct address_space *mapping = file->f_mapping; in generic_file_read_iter() local
2754 struct inode *inode = mapping->host; in generic_file_read_iter()
2761 retval = mapping->a_ops->direct_IO(iocb, iter); in generic_file_read_iter()
2935 struct address_space *mapping, struct folio *folio, in folio_seek_hole_data() argument
2938 const struct address_space_operations *ops = mapping->a_ops; in folio_seek_hole_data()
2939 size_t offset, bsz = i_blocksize(mapping->host); in folio_seek_hole_data()
2949 if (unlikely(folio->mapping != mapping)) in folio_seek_hole_data()
2976 * @mapping: Address space to search.
2992 loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start, in mapping_seek_hole_data() argument
2995 XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT); in mapping_seek_hole_data()
3016 start = folio_seek_hole_data(&xas, mapping, folio, start, pos, in mapping_seek_hole_data()
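
mapping_seek_hole_data() is what SEEK_HOLE/SEEK_DATA handling delegates to for in-cache data; a hedged ->llseek sketch (my_llseek() is hypothetical; real callers such as shmem add their own bounds handling):

#include <linux/fs.h>
#include <linux/pagemap.h>

static loff_t my_llseek(struct file *file, loff_t offset, int whence)
{
	struct address_space *mapping = file->f_mapping;

	if (whence == SEEK_HOLE || whence == SEEK_DATA)
		return mapping_seek_hole_data(mapping, offset,
					      i_size_read(mapping->host),
					      whence);
	return generic_file_llseek(file, offset, whence);
}
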
3097 struct address_space *mapping = file->f_mapping; in do_sync_mmap_readahead() local
3098 DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff); in do_sync_mmap_readahead()
3214 struct address_space *mapping = file->f_mapping; in filemap_fault() local
3215 struct inode *inode = mapping->host; in filemap_fault()
3228 folio = filemap_get_folio(mapping, index); in filemap_fault()
3237 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3252 filemap_invalidate_lock_shared(mapping); in filemap_fault()
3255 folio = __filemap_get_folio(mapping, index, in filemap_fault()
3261 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3270 if (unlikely(folio->mapping != mapping)) { in filemap_fault()
3313 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3337 error = filemap_read_folio(file, mapping->a_ops->read_folio, folio); in filemap_fault()
3344 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3357 filemap_invalidate_unlock_shared(mapping); in filemap_fault()
3393 struct address_space *mapping, pgoff_t end_pgoff) in next_uptodate_folio() argument
3416 if (folio->mapping != mapping) in next_uptodate_folio()
3420 max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE); in next_uptodate_folio()
3524 struct address_space *mapping = file->f_mapping; in filemap_map_pages() local
3527 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3533 folio = next_uptodate_folio(&xas, mapping, end_pgoff); in filemap_map_pages()
3568 } while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL); in filemap_map_pages()
3585 struct address_space *mapping = vmf->vma->vm_file->f_mapping; in filemap_page_mkwrite() local
3589 sb_start_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3592 if (folio->mapping != mapping) { in filemap_page_mkwrite()
3605 sb_end_pagefault(mapping->host->i_sb); in filemap_page_mkwrite()
3619 struct address_space *mapping = file->f_mapping; in generic_file_mmap() local
3621 if (!mapping->a_ops->read_folio) in generic_file_mmap()
3656 static struct folio *do_read_cache_folio(struct address_space *mapping, in do_read_cache_folio() argument
3663 filler = mapping->a_ops->read_folio; in do_read_cache_folio()
3665 folio = filemap_get_folio(mapping, index); in do_read_cache_folio()
3670 err = filemap_add_folio(mapping, folio, index, gfp); in do_read_cache_folio()
3689 /* Folio was truncated from mapping */ in do_read_cache_folio()
3690 if (!folio->mapping) { in do_read_cache_folio()
3718 * @mapping: The address_space to read from.
3729 * Context: May sleep. Expects mapping->invalidate_lock to be held.
3732 struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index, in read_cache_folio() argument
3735 return do_read_cache_folio(mapping, index, filler, file, in read_cache_folio()
3736 mapping_gfp_mask(mapping)); in read_cache_folio()
3742 * @mapping: The address_space for the folio.
3746 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
3753 * The function expects mapping->invalidate_lock to be already held.
3757 struct folio *mapping_read_folio_gfp(struct address_space *mapping, in mapping_read_folio_gfp() argument
3760 return do_read_cache_folio(mapping, index, NULL, NULL, gfp); in mapping_read_folio_gfp()
3764 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page() argument
3769 folio = do_read_cache_folio(mapping, index, filler, file, gfp); in do_read_cache_page()
3775 struct page *read_cache_page(struct address_space *mapping, in read_cache_page() argument
3778 return do_read_cache_page(mapping, index, filler, file, in read_cache_page()
3779 mapping_gfp_mask(mapping)); in read_cache_page()
3785 * @mapping: the page's address_space
3789 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
3794 * The function expects mapping->invalidate_lock to be already held.
3798 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp() argument
3802 return do_read_cache_page(mapping, index, NULL, NULL, gfp); in read_cache_page_gfp()
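
Most external callers reach do_read_cache_folio() through the read_mapping_folio() wrapper, which passes a NULL filler so ->read_folio is used; a hedged sketch (peek_folio() is hypothetical):

#include <linux/err.h>
#include <linux/pagemap.h>

static int peek_folio(struct address_space *mapping, pgoff_t index,
		      struct file *file)
{
	struct folio *folio = read_mapping_folio(mapping, index, file);

	if (IS_ERR(folio))
		return PTR_ERR(folio);
	/* Returned uptodate and unlocked, with a reference we must drop. */
	folio_put(folio);
	return 0;
}
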
3828 struct address_space *mapping = iocb->ki_filp->f_mapping; in kiocb_invalidate_post_direct_write() local
3830 if (mapping->nrpages && in kiocb_invalidate_post_direct_write()
3831 invalidate_inode_pages2_range(mapping, in kiocb_invalidate_post_direct_write()
3840 struct address_space *mapping = iocb->ki_filp->f_mapping; in generic_file_direct_write() local
3855 written = mapping->a_ops->direct_IO(iocb, from); in generic_file_direct_write()
3872 * Skip invalidation for async writes or if mapping has no pages. in generic_file_direct_write()
3875 struct inode *inode = mapping->host; in generic_file_direct_write()
3897 struct address_space *mapping = file->f_mapping; in generic_perform_write() local
3898 const struct address_space_operations *a_ops = mapping->a_ops; in generic_perform_write()
3930 status = a_ops->write_begin(file, mapping, pos, bytes, in generic_perform_write()
3935 if (mapping_writably_mapped(mapping)) in generic_perform_write()
3941 status = a_ops->write_end(file, mapping, pos, bytes, copied, in generic_perform_write()
3964 balance_dirty_pages_ratelimited(mapping); in generic_perform_write()
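
The generic_perform_write() matches sketch the buffered-write loop: write_begin, copy from the iterator, write_end, then dirty throttling. A hedged, simplified reconstruction (short-copy retry and fault-in handling elided; in kernels of this vintage write_begin/write_end still traffic in struct page, and exact helpers vary across versions):

static ssize_t perform_write_sketch(struct file *file, struct iov_iter *i,
				    loff_t pos)
{
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	ssize_t written = 0;
	int status = 0;

	do {
		unsigned long offset = offset_in_page(pos);
		size_t bytes = min_t(size_t, PAGE_SIZE - offset,
				     iov_iter_count(i));
		struct page *page;
		void *fsdata = NULL;
		size_t copied;

		status = a_ops->write_begin(file, mapping, pos, bytes,
					    &page, &fsdata);
		if (status)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);	/* racing mmap writers */

		copied = copy_page_from_iter_atomic(page, offset, bytes, i);
		flush_dcache_page(page);

		status = a_ops->write_end(file, mapping, pos, bytes, copied,
					  page, fsdata);
		if (status < 0)
			break;
		pos += status;
		written += status;

		balance_dirty_pages_ratelimited(mapping);	/* throttle dirtying */
	} while (iov_iter_count(i));

	return written ? written : status;
}
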
3998 struct address_space *mapping = file->f_mapping; in __generic_file_write_iter() local
3999 struct inode *inode = mapping->host; in __generic_file_write_iter()
4079 struct address_space * const mapping = folio->mapping; in filemap_release_folio() local
4087 if (mapping && mapping->a_ops->release_folio) in filemap_release_folio()
4088 return mapping->a_ops->release_folio(folio, gfp); in filemap_release_folio()
4095 * filemap_cachestat() - compute the page cache statistics of a mapping
4096 * @mapping: The mapping to compute the statistics for.
4101 * This will query the page cache statistics of a mapping in the
4106 static void filemap_cachestat(struct address_space *mapping, in filemap_cachestat() argument
4109 XA_STATE(xas, &mapping->i_pages, first_index); in filemap_cachestat()
4152 if (shmem_mapping(mapping)) { in filemap_cachestat()
4222 struct address_space *mapping; in SYSCALL_DEFINE4() local
4251 mapping = f.file->f_mapping; in SYSCALL_DEFINE4()
4252 filemap_cachestat(mapping, first_index, last_index, &cs); in SYSCALL_DEFINE4()
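
The SYSCALL_DEFINE4 match above is the cachestat(2) entry point. A hedged userspace sketch, assuming headers new enough to provide __NR_cachestat and the uapi structs in <linux/mman.h> (a zero-length range means "to end of file"):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/mman.h>		/* struct cachestat_range, struct cachestat */

int main(int argc, char **argv)
{
	struct cachestat_range range = { .off = 0, .len = 0 };	/* whole file */
	struct cachestat cs;
	int fd;

	if (argc < 2)
		return 1;
	fd = open(argv[1], O_RDONLY);
	if (fd < 0)
		return 1;
	if (syscall(__NR_cachestat, fd, &range, &cs, 0) != 0)
		return 1;
	printf("cached %llu dirty %llu writeback %llu evicted %llu\n",
	       (unsigned long long)cs.nr_cache,
	       (unsigned long long)cs.nr_dirty,
	       (unsigned long long)cs.nr_writeback,
	       (unsigned long long)cs.nr_evicted);
	return 0;
}
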