// SPDX-License-Identifier: GPL-2.0-only
 *	Copyright (C) 1994-1999  Linus Torvalds
#include <linux/error-injection.h>
#include <linux/backing-dev.h>
 * finished 'unifying' the page and buffer cache and SMP-threaded the
 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
 * SMP-threaded pagemap-LRU 1999, Andrea Arcangeli <andrea@suse.de>
 *  ->i_mmap_rwsem			(truncate_pagecache)
 *    ->private_lock			(__free_pte->block_dirty_folio)
 *      ->swap_lock			(exclusive_swap_page, others)
 *        ->i_pages lock
 *
 *  ->i_rwsem
 *    ->invalidate_lock			(acquired by fs in truncate path)
 *      ->i_mmap_rwsem			(truncate->unmap_mapping_range)
 *
 *  ->mmap_lock
 *    ->i_mmap_rwsem
 *      ->page_table_lock or pte_lock	(various, mainly in memory.c)
 *        ->i_pages lock		(arch-dependent flush_dcache_mmap_lock)
 *
 *  ->mmap_lock
 *    ->invalidate_lock			(filemap_fault)
 *      ->lock_page			(filemap_fault, access_process_vm)
 *
 *  ->i_rwsem				(generic_perform_write)
 *    ->mmap_lock			(fault_in_readable->do_page_fault)
 *
 *  bdi->wb.list_lock
 *    sb_lock				(fs/fs-writeback.c)
 *    ->i_pages lock			(__sync_single_inode)
 *
 *  ->i_mmap_rwsem
 *    ->anon_vma.lock			(vma_merge)
 *
 *  ->anon_vma.lock
 *    ->page_table_lock or pte_lock	(anon_vma_prepare and various)
 *
 *  ->page_table_lock or pte_lock
 *    ->swap_lock			(try_to_unmap_one)
 *    ->private_lock			(try_to_unmap_one)
 *    ->i_pages lock			(try_to_unmap_one)
 *    ->lruvec->lru_lock		(follow_page_mask->mark_page_accessed)
 *    ->lruvec->lru_lock		(check_pte_range->folio_isolate_lru)
 *    ->private_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->i_pages lock			(folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    ->inode->i_lock			(folio_remove_rmap_pte->set_page_dirty)
 *    bdi.wb->list_lock			(zap_pte_range->set_page_dirty)
 *    ->inode->i_lock			(zap_pte_range->set_page_dirty)
 *    ->private_lock			(zap_pte_range->block_dirty_folio)
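/*
 * Editor's illustrative sketch (not part of filemap.c): a filesystem
 * truncate path honouring the ordering above takes the outer locks
 * first, e.g.:
 *
 *	inode_lock(inode);				// ->i_rwsem
 *	filemap_invalidate_lock(inode->i_mapping);	// ->invalidate_lock
 *	truncate_pagecache(inode, newsize);		// takes ->i_mmap_rwsem
 *	filemap_invalidate_unlock(inode->i_mapping);
 *	inode_unlock(inode);
 */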
static void page_cache_delete(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, folio->index);

	mapping_set_update(&xas, mapping);
	xas_set_order(&xas, folio->index, folio_order(folio));
	folio->mapping = NULL;
	/* Leave page->index set: truncation lookup relies upon it */
	mapping->nrpages -= nr;
static void filemap_unaccount_folio(struct address_space *mapping,
			 current->comm, folio_pfn(folio));
		dump_page(&folio->page, "still mapped when deleted");
		if (mapping_exiting(mapping) && !folio_test_large(folio)) {
			atomic_set(&folio->_mapcount, -1);
	__lruvec_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
		__lruvec_stat_mod_folio(folio, NR_SHMEM, -nr);
			__lruvec_stat_mod_folio(folio, NR_SHMEM_THPS, -nr);
			__lruvec_stat_mod_folio(folio, NR_FILE_THPS, -nr);
			filemap_nr_thps_dec(mapping);
	 * unwritten data - on ordinary filesystems.
	 * But it's harmless on in-memory filesystems like tmpfs; and can
		     mapping_can_writeback(mapping)))
		folio_account_cleaned(folio, inode_to_wb(mapping->host));
 * sure the page is locked and that nobody else uses it - or that usage
	struct address_space *mapping = folio->mapping;

	filemap_unaccount_folio(mapping, folio);
	page_cache_delete(mapping, folio, shadow);

void filemap_free_folio(struct address_space *mapping, struct folio *folio)
	free_folio = mapping->a_ops->free_folio;
 * filemap_remove_folio - Remove folio from page cache.
	struct address_space *mapping = folio->mapping;

	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

	filemap_free_folio(mapping, folio);
 * page_cache_delete_batch - delete several folios from page cache
 * @mapping: the mapping to which folios belong
 * The function walks over mapping->i_pages and removes folios passed in
 * @fbatch from the mapping. The function expects @fbatch to be sorted
 * It tolerates holes in @fbatch (mapping entries at those indices are not
static void page_cache_delete_batch(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, fbatch->folios[0]->index);

	mapping_set_update(&xas, mapping);
		if (folio != fbatch->folios[i]) {
			VM_BUG_ON_FOLIO(folio->index >
					fbatch->folios[i]->index, folio);
		folio->mapping = NULL;
		/* Leave folio->index set: truncation lookup relies on it */
	mapping->nrpages -= total_pages;
void delete_from_page_cache_batch(struct address_space *mapping,
	spin_lock(&mapping->host->i_lock);
	xa_lock_irq(&mapping->i_pages);
		struct folio *folio = fbatch->folios[i];

		filemap_unaccount_folio(mapping, folio);
	page_cache_delete_batch(mapping, fbatch);
	xa_unlock_irq(&mapping->i_pages);
	if (mapping_shrinkable(mapping))
		inode_add_lru(mapping->host);
	spin_unlock(&mapping->host->i_lock);

		filemap_free_folio(mapping, fbatch->folios[i]);
int filemap_check_errors(struct address_space *mapping)
	if (test_bit(AS_ENOSPC, &mapping->flags) &&
	    test_and_clear_bit(AS_ENOSPC, &mapping->flags))
		ret = -ENOSPC;
	if (test_bit(AS_EIO, &mapping->flags) &&
	    test_and_clear_bit(AS_EIO, &mapping->flags))
		ret = -EIO;

static int filemap_check_and_keep_errors(struct address_space *mapping)
	if (test_bit(AS_EIO, &mapping->flags))
		return -EIO;
	if (test_bit(AS_ENOSPC, &mapping->flags))
		return -ENOSPC;
 * filemap_fdatawrite_wbc - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * Call writepages on the mapping using the provided wbc to control the
int filemap_fdatawrite_wbc(struct address_space *mapping,
	if (!mapping_can_writeback(mapping) ||
	    !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))

	wbc_attach_fdatawrite_inode(wbc, mapping->host);
	ret = do_writepages(mapping, wbc);
 * __filemap_fdatawrite_range - start writeback on mapping dirty pages in range
 * @mapping:	address space structure to write
 * Start writeback against all of a mapping's dirty pages that lie
 * If sync_mode is WB_SYNC_ALL then this is a "data integrity" operation, as
int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
	return filemap_fdatawrite_wbc(mapping, &wbc);

static inline int __filemap_fdatawrite(struct address_space *mapping,
	return __filemap_fdatawrite_range(mapping, 0, LLONG_MAX, sync_mode);

int filemap_fdatawrite(struct address_space *mapping)
	return __filemap_fdatawrite(mapping, WB_SYNC_ALL);

int filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_ALL);
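/*
 * Editor's sketch (assumed caller, not in filemap.c): integrity writeback
 * of a byte range is typically a write pass followed by a wait pass:
 *
 *	err = filemap_fdatawrite_range(mapping, start, end);
 *	if (!err)
 *		err = filemap_fdatawait_range(mapping, start, end);
 */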
 * filemap_fdatawrite_range_kick - start writeback on a range
 * @mapping:	target address_space
 * This is a non-integrity writeback helper, to start writing back folios
int filemap_fdatawrite_range_kick(struct address_space *mapping, loff_t start,
	return __filemap_fdatawrite_range(mapping, start, end, WB_SYNC_NONE);

 * filemap_flush - mostly a non-blocking flush
 * @mapping:	target address_space
 * This is a mostly non-blocking flush. Not suitable for data-integrity
 * purposes - I/O may not be started against all dirty pages.
int filemap_flush(struct address_space *mapping)
	return __filemap_fdatawrite(mapping, WB_SYNC_NONE);
 * filemap_range_has_page - check if a page exists in range.
 * @mapping:	address space within which to check
bool filemap_range_has_page(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);

static void __filemap_fdatawait_range(struct address_space *mapping,
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
 * filemap_fdatawait_range - wait for writeback to complete
 * @mapping:	address space structure to wait for
 * Walk the list of under-writeback pages of the given address space
int filemap_fdatawait_range(struct address_space *mapping, loff_t start_byte,
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_errors(mapping);

 * filemap_fdatawait_range_keep_errors - wait for writeback to complete
 * @mapping:	address space structure to wait for
 * Walk the list of under-writeback pages of the given address space in the
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
int filemap_fdatawait_range_keep_errors(struct address_space *mapping,
	__filemap_fdatawait_range(mapping, start_byte, end_byte);
	return filemap_check_and_keep_errors(mapping);

 * file_fdatawait_range - wait for writeback to complete
 * Walk the list of under-writeback pages of the address space that file
 * status of the address space vs. the file->f_wb_err cursor and return it.
 * Return: error status of the address space vs. the file->f_wb_err cursor.
	struct address_space *mapping = file->f_mapping;

	__filemap_fdatawait_range(mapping, start_byte, end_byte);

 * filemap_fdatawait_keep_errors - wait for writeback without clearing errors
 * @mapping: address space structure to wait for
 * Walk the list of under-writeback pages of the given address space
 * call sites are system-wide / filesystem-wide data flushers: e.g. sync(2),
int filemap_fdatawait_keep_errors(struct address_space *mapping)
	__filemap_fdatawait_range(mapping, 0, LLONG_MAX);
	return filemap_check_and_keep_errors(mapping);
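/*
 * Editor's sketch (assumed caller): system-wide flushers use the
 * _keep_errors variants so the AS_EIO/AS_ENOSPC flags stay set and each
 * fd can still observe the error later:
 *
 *	err = filemap_fdatawait_keep_errors(mapping);	// sync(2)-style
 */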
static bool mapping_needs_writeback(struct address_space *mapping)
	return mapping->nrpages;

bool filemap_range_has_writeback(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT);
 * filemap_write_and_wait_range - write out & wait on a file range
 * @mapping:	the address_space for the pages
 * Write out and wait upon file offsets lstart->lend, inclusive.
 * that this function can be used to write to the very end-of-file (end = -1).
int filemap_write_and_wait_range(struct address_space *mapping,
	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
		 * written partially (e.g. -ENOSPC), so we wait for it.
		 * But the -EIO is special case, it may indicate the worst
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
	err2 = filemap_check_errors(mapping);
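/*
 * Editor's sketch (assumed filesystem code, not in filemap.c): a minimal
 * ->fsync built on the helper above:
 *
 *	static int myfs_fsync(struct file *file, loff_t start, loff_t end,
 *			      int datasync)
 *	{
 *		return filemap_write_and_wait_range(file->f_mapping,
 *						    start, end);
 *	}
 */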
void __filemap_set_wb_err(struct address_space *mapping, int err)
	errseq_t eseq = errseq_set(&mapping->wb_err, err);

	trace_filemap_set_wb_err(mapping, eseq);

 * file_check_and_advance_wb_err - report wb error (if any) that was previously
 * Grab the wb_err from the mapping. If it matches what we have in the file,
 * If it doesn't match, then take the mapping value, set the "seen" flag in
 * While we handle mapping->wb_err with atomic operations, the f_wb_err
	errseq_t old = READ_ONCE(file->f_wb_err);
	struct address_space *mapping = file->f_mapping;

	if (errseq_check(&mapping->wb_err, old)) {
		spin_lock(&file->f_lock);
		old = file->f_wb_err;
		err = errseq_check_and_advance(&mapping->wb_err,
						&file->f_wb_err);
		spin_unlock(&file->f_lock);
	clear_bit(AS_EIO, &mapping->flags);
	clear_bit(AS_ENOSPC, &mapping->flags);
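/*
 * Editor's sketch: callers that report writeback errors per-fd (e.g. at
 * the end of ->fsync) use the errseq cursor API rather than the
 * flag-clearing filemap_check_errors():
 *
 *	err2 = file_check_and_advance_wb_err(file);
 *	if (!err)
 *		err = err2;
 */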
 * file_write_and_wait_range - write out & wait on a file range
 * Write out and wait upon file offsets lstart->lend, inclusive.
 * that this function can be used to write to the very end-of-file (end = -1).
 * After writing out and waiting on the data, we check and advance the
	struct address_space *mapping = file->f_mapping;

	if (mapping_needs_writeback(mapping)) {
		err = __filemap_fdatawrite_range(mapping, lstart, lend,
		if (err != -EIO)
			__filemap_fdatawait_range(mapping, lstart, lend);
 * replace_page_cache_folio - replace a pagecache folio with a new one
	struct address_space *mapping = old->mapping;
	void (*free_folio)(struct folio *) = mapping->a_ops->free_folio;
	pgoff_t offset = old->index;
	XA_STATE(xas, &mapping->i_pages, offset);

	VM_BUG_ON_FOLIO(new->mapping, new);

	new->mapping = mapping;
	new->index = offset;
	old->mapping = NULL;
noinline int __filemap_add_folio(struct address_space *mapping,
	XA_STATE_ORDER(xas, &mapping->i_pages, index, folio_order(folio));

	VM_BUG_ON_FOLIO(folio_order(folio) < mapping_min_folio_order(mapping),
	mapping_set_update(&xas, mapping);
	VM_BUG_ON_FOLIO(index & (folio_nr_pages(folio) - 1), folio);
	folio->mapping = mapping;
	folio->index = xas.xa_index;
		int order = -1;
			xas_set_err(&xas, -EEXIST);
		if (order == -1)
		BUG_ON(shmem_mapping(mapping));
	mapping->nrpages += nr;
	folio->mapping = NULL;
	/* Leave page->index set: truncation relies upon it */
int filemap_add_folio(struct address_space *mapping, struct folio *folio,
	ret = __filemap_add_folio(mapping, folio, index, gfp, &shadow);
		 * data from the working set, only to cache data that will
 * filemap_invalidate_lock_two - lock invalidate_lock for two mappings
 *
 * Lock exclusively invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to lock
 * @mapping2: the second mapping to lock
		down_write(&mapping1->invalidate_lock);
		down_write_nested(&mapping2->invalidate_lock, 1);

 * filemap_invalidate_unlock_two - unlock invalidate_lock for two mappings
 *
 * Unlock exclusive invalidate_lock of any passed mapping that is not NULL.
 *
 * @mapping1: the first mapping to unlock
 * @mapping2: the second mapping to unlock
		up_write(&mapping1->invalidate_lock);
		up_write(&mapping2->invalidate_lock);
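/*
 * Editor's sketch (assumed caller, e.g. a cross-file clone/dedupe path
 * operating on two inodes at once):
 *
 *	filemap_invalidate_lock_two(src->f_mapping, dst->f_mapping);
 *	// ... operate on both page caches ...
 *	filemap_invalidate_unlock_two(src->f_mapping, dst->f_mapping);
 */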
		.data		= &sysctl_page_lock_unfairness,

 * The page wait code treats the "wait->flags" somewhat unusually, because
	flags = wait->flags;
		if (test_bit(key->bit_nr, &key->folio->flags))
			return -1;
		if (test_and_set_bit(key->bit_nr, &key->folio->flags))
			return -1;
	 * We are holding the wait-queue lock, but the waiter that
	 * afterwards to avoid any races. This store-release pairs
	 * with the load-acquire in folio_wait_bit_common().
	smp_store_release(&wait->flags, flags | WQ_FLAG_WOKEN);
	wake_up_state(wait->private, mode);
	 * After this list_del_init(&wait->entry) the wait entry
	 * might be de-allocated and the process might even have
	list_del_init_careful(&wait->entry);
	spin_lock_irqsave(&q->lock, flags);
	spin_unlock_irqrestore(&q->lock, flags);

	if (wait->flags & WQ_FLAG_EXCLUSIVE) {
		if (test_and_set_bit(bit_nr, &folio->flags))
	} else if (test_bit(bit_nr, &folio->flags))
	wait->flags |= WQ_FLAG_WOKEN | WQ_FLAG_DONE;
	wait->func = wake_page_function;
		wait->flags = 0;
		wait->flags = WQ_FLAG_EXCLUSIVE;
		if (--unfairness < 0)
			wait->flags |= WQ_FLAG_CUSTOM;
		spin_lock_irq(&q->lock);
		spin_unlock_irq(&q->lock);
		 * be very careful with the 'wait->flags', because
		flags = smp_load_acquire(&wait->flags);
		/* If we were non-exclusive, we're done */
		wait->flags |= WQ_FLAG_DONE;
	 * waiter from the wait-queues, but the folio waiters bit will remain
	 * NOTE! The wait->flags weren't stable until we've done the
	 * Also note that WQ_FLAG_WOKEN is sufficient for a non-exclusive
		return wait->flags & WQ_FLAG_DONE ? 0 : -EINTR;
	return wait->flags & WQ_FLAG_WOKEN ? 0 : -EINTR;
 * migration_entry_wait_on_locked - Wait for a migration entry to be removed
	wait->func = wake_page_function;
	wait->flags = 0;
	spin_lock_irq(&q->lock);
	spin_unlock_irq(&q->lock);
		flags = smp_load_acquire(&wait->flags);
 * folio_put_wait_locked - Drop a reference and wait for it to be unlocked
 * Return: 0 if the folio was unlocked or -EINTR if interrupted by a signal.

 * folio_unlock - Unlock a locked folio.

 * folio_end_read - End read on a folio.

 * folio_end_private_2 - Clear PG_private_2 and wake any waiters.

 * folio_wait_private_2 - Wait for PG_private_2 to be cleared on a folio.

 * folio_wait_private_2_killable - Wait for PG_private_2 to be cleared on a folio.
 * - 0 if successful.
 * - -EINTR if a fatal signal was encountered.

 * completes. Do that now. If we fail, it's likely because of a big folio -
	 * would otherwise not need non-IRQ handling. Just skip the
	if (folio->mapping)
		folio_unmap_invalidate(folio->mapping, folio, 0);
 * folio_end_writeback - End writeback against a folio.

 * __folio_lock - Get a lock on the folio, assuming we need to sleep to get it.

	wait->folio = folio;
	wait->bit_nr = PG_locked;
	spin_lock_irq(&q->lock);
	__add_wait_queue_entry_tail(q, &wait->wait);
		__remove_wait_queue(q, &wait->wait);
		ret = -EIOCBQUEUED;
	spin_unlock_irq(&q->lock);
 * 0 - folio is locked.
 * non-zero - folio is not locked.
 *     mmap_lock or per-VMA lock has been released (mmap_read_unlock() or
 * with the folio locked and the mmap_lock/per-VMA lock is left unperturbed.
	unsigned int flags = vmf->flags;
		 * CAUTION! In this case, mmap_lock/per-VMA lock is not
 * page_cache_next_miss() - Find the next gap in the page cache.
 * @mapping: Mapping.
 * Search the range [index, min(index + max_scan - 1, ULONG_MAX)] for the
 * range specified (in which case 'return - index >= max_scan' will be true).
 * In the rare case of index wrap-around, 0 will be returned.
pgoff_t page_cache_next_miss(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {

 * page_cache_prev_miss() - Find the previous gap in the page cache.
 * @mapping: Mapping.
 * Search the range [max(index - max_scan + 1, 0), index] for the
 * range specified (in which case 'index - return >= max_scan' will be true).
 * In the rare case of wrap-around, ULONG_MAX will be returned.
pgoff_t page_cache_prev_miss(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);

	while (max_scan--) {
 * increased by a speculative page cache (or GUP-fast) lookup as it can

 * filemap_get_entry - Get a page cache entry.
 * @mapping: the address_space to search
 * Looks up the page cache entry at @mapping & @index.  If it is a folio,
void *filemap_get_entry(struct address_space *mapping, pgoff_t index)
	XA_STATE(xas, &mapping->i_pages, index);
 * __filemap_get_folio - Find and get a reference to a folio.
 * @mapping: The address_space to search.
 * Looks up the page cache entry at @mapping & @index.
struct folio *__filemap_get_folio(struct address_space *mapping, pgoff_t index,
	folio = filemap_get_entry(mapping, index);
			return ERR_PTR(-EAGAIN);
		if (unlikely(folio->mapping != mapping)) {
		unsigned int min_order = mapping_min_folio_order(mapping);

		index = mapping_align_index(mapping, index);
		if ((fgp_flags & FGP_WRITE) && mapping_can_writeback(mapping))
			if (order > mapping_max_folio_order(mapping))
				order = mapping_max_folio_order(mapping);
			if (index & ((1UL << order) - 1))
				err = -ENOMEM;
			err = filemap_add_folio(mapping, folio, index, gfp);
		} while (order-- > min_order);

		if (err == -EEXIST)
		 * Return -EAGAIN so that the caller retries in a
		 * blocking fashion instead of propagating -ENOMEM
		if ((fgp_flags & FGP_NOWAIT) && err == -ENOMEM)
			err = -EAGAIN;
		return ERR_PTR(-ENOENT);
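/*
 * Editor's sketch (assumed caller): looking up a folio for write,
 * creating and locking it on a miss:
 *
 *	folio = __filemap_get_folio(mapping, index,
 *				    FGP_LOCK | FGP_WRITE | FGP_CREAT,
 *				    mapping_gfp_mask(mapping));
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */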
 * find_get_entries - gang pagecache lookup
 * @mapping:	The address_space to search
 * the mapping.  The entries are placed in @fbatch.  find_get_entries()
 * due to not-present entries or large folios.
unsigned find_get_entries(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
		indices[fbatch->nr] = xas.xa_index;
		int idx = folio_batch_count(fbatch) - 1;

		folio = fbatch->folios[idx];
			nr = 1 << xa_get_order(&mapping->i_pages, indices[idx]);
 * find_lock_entries - Find a batch of pagecache entries.
 * @mapping:	The address_space to search.
 * find_lock_entries() will return a batch of entries from @mapping.
 * due to not-present entries, large folios, folios which could not be
unsigned find_lock_entries(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
			base = folio->index;
			if (base + nr - 1 > end)
			if (folio->mapping != mapping ||
			base = xas.xa_index & ~(nr - 1);
			if (base + nr - 1 > end)
		indices[fbatch->nr] = xas.xa_index;
 * filemap_get_folios - Get a batch of folios
 * @mapping:	The address_space to search
 * Search for and return a batch of folios in the mapping starting at
unsigned filemap_get_folios(struct address_space *mapping, pgoff_t *start,
	return filemap_get_folios_tag(mapping, start, end, XA_PRESENT, fbatch);
 * filemap_get_folios_contig - Get a batch of contiguous folios
 * @mapping:	The address_space to search
unsigned filemap_get_folios_contig(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, *start);
			*start = folio->index + nr;
		xas_advance(&xas, folio_next_index(folio) - 1);
	folio = fbatch->folios[nr - 1];
 * filemap_get_folios_tag - Get a batch of folios matching @tag
 * @mapping:	The address_space to search
unsigned filemap_get_folios_tag(struct address_space *mapping, pgoff_t *start,
	XA_STATE(xas, &mapping->i_pages, *start);
			*start = folio->index + nr;
	 * breaks the iteration when there is a page at index -1 but that is
	if (end == (pgoff_t)-1)
		*start = (pgoff_t)-1;
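/*
 * Editor's sketch (assumed caller): a writeback loop draining all dirty
 * folios in a range, batch by batch:
 *
 *	struct folio_batch fbatch;
 *	pgoff_t index = start;
 *	unsigned int i, nr;
 *
 *	folio_batch_init(&fbatch);
 *	while ((nr = filemap_get_folios_tag(mapping, &index, end,
 *					    PAGECACHE_TAG_DIRTY, &fbatch))) {
 *		for (i = 0; i < nr; i++)
 *			; // write back fbatch.folios[i]
 *		folio_batch_release(&fbatch);
 *	}
 */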
 * ---R__________________________________________B__________
	ra->ra_pages /= 4;

 * filemap_get_read_batch - Get a batch of folios for read
static void filemap_get_read_batch(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, index);
		xas_advance(&xas, folio_next_index(folio) - 1);

		shrink_readahead_size_eio(&file->f_ra);
		return -EIO;
static bool filemap_range_uptodate(struct address_space *mapping,
	if (!mapping->a_ops->is_partially_uptodate)
	if (mapping->host->i_blkbits >= folio_shift(folio))
		count -= folio_pos(folio) - pos;
		pos -= folio_pos(folio);

	return mapping->a_ops->is_partially_uptodate(folio, pos, count);
		struct address_space *mapping, size_t count,
	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (!filemap_invalidate_trylock_shared(mapping))
			return -EAGAIN;
		filemap_invalidate_lock_shared(mapping);
		error = -EAGAIN;
		if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_NOIO))
		if (!(iocb->ki_flags & IOCB_WAITQ)) {
			filemap_invalidate_unlock_shared(mapping);
		error = __folio_lock_async(folio, iocb->ki_waitq);
	if (!folio->mapping)
	if (filemap_range_uptodate(mapping, iocb->ki_pos, count, folio,
	error = -EAGAIN;
	if (iocb->ki_flags & (IOCB_NOIO | IOCB_NOWAIT | IOCB_WAITQ))
	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
	filemap_invalidate_unlock_shared(mapping);
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	unsigned int min_order = mapping_min_folio_order(mapping);

	if (iocb->ki_flags & (IOCB_NOWAIT | IOCB_WAITQ))
		return -EAGAIN;

	folio = filemap_alloc_folio(mapping_gfp_mask(mapping), min_order);
		return -ENOMEM;
	if (iocb->ki_flags & IOCB_DONTCACHE)
	 * pages or ->readahead() that need to hold invalidate_lock
	 * while mapping blocks for IO so let's hold the lock here as
	filemap_invalidate_lock_shared(mapping);
	index = (iocb->ki_pos >> (PAGE_SHIFT + min_order)) << min_order;
	error = filemap_add_folio(mapping, folio, index,
			mapping_gfp_constraint(mapping, GFP_KERNEL));
	if (error == -EEXIST)
	error = filemap_read_folio(iocb->ki_filp, mapping->a_ops->read_folio,
	filemap_invalidate_unlock_shared(mapping);
	filemap_invalidate_unlock_shared(mapping);
		struct address_space *mapping, struct folio *folio,
	DEFINE_READAHEAD(ractl, file, &file->f_ra, mapping, folio->index);

	if (iocb->ki_flags & IOCB_NOIO)
		return -EAGAIN;
	if (iocb->ki_flags & IOCB_DONTCACHE)
	page_cache_async_ra(&ractl, folio, last_index - folio->index);
	struct file *filp = iocb->ki_filp;
	struct address_space *mapping = filp->f_mapping;
	pgoff_t index = iocb->ki_pos >> PAGE_SHIFT;

	last_index = DIV_ROUND_UP(iocb->ki_pos + count, PAGE_SIZE);
		return -EINTR;

	filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
		DEFINE_READAHEAD(ractl, filp, &filp->f_ra, mapping, index);

		if (iocb->ki_flags & IOCB_NOIO)
			return -EAGAIN;
		if (iocb->ki_flags & IOCB_NOWAIT)
		if (iocb->ki_flags & IOCB_DONTCACHE)
		page_cache_sync_ra(&ractl, last_index - index);
		if (iocb->ki_flags & IOCB_NOWAIT)
		filemap_get_read_batch(mapping, index, last_index - 1, fbatch);
	folio = fbatch->folios[folio_batch_count(fbatch) - 1];
		err = filemap_readahead(iocb, filp, mapping, folio, last_index);
		if ((iocb->ki_flags & IOCB_WAITQ) &&
			iocb->ki_flags |= IOCB_NOWAIT;
		err = filemap_update_page(iocb, mapping, count, folio,

	trace_mm_filemap_get_pages(mapping, index, last_index - 1);
	if (likely(--fbatch->nr))
static void filemap_end_dropbehind_read(struct address_space *mapping,
		folio_unmap_invalidate(mapping, folio, 0);
 * filemap_read - Read data from the page cache.
 * @iter: Destination for the data.
 * Copies data from the page cache.  If the data is not currently present,
	struct file *filp = iocb->ki_filp;
	struct file_ra_state *ra = &filp->f_ra;
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = mapping->host;
	loff_t last_pos = ra->prev_pos;

	if (unlikely(iocb->ki_pos < 0))
		return -EINVAL;
	if (unlikely(iocb->ki_pos >= inode->i_sb->s_maxbytes))
	iov_iter_truncate(iter, inode->i_sb->s_maxbytes - iocb->ki_pos);
		 * If we've already successfully copied some data, then we
		 * can no longer safely return -EIOCBQUEUED. Hence mark
		if ((iocb->ki_flags & IOCB_WAITQ) && already_read)
			iocb->ki_flags |= IOCB_NOWAIT;

		if (unlikely(iocb->ki_pos >= i_size_read(inode)))
		error = filemap_get_pages(iocb, iter->count, &fbatch, false);
		 * the correct value for "nr", which means the zero-filled
		 * another truncate extends the file - this is desired though).
		if (unlikely(iocb->ki_pos >= isize))
		end_offset = min_t(loff_t, isize, iocb->ki_pos + iter->count);
		 * Once we start copying data, we don't want to be touching any
		writably_mapped = mapping_writably_mapped(mapping);
		if (!pos_same_folio(iocb->ki_pos, last_pos - 1,
			size_t offset = iocb->ki_pos & (fsize - 1);
			size_t bytes = min_t(loff_t, end_offset - iocb->ki_pos,
					     fsize - offset);
			iocb->ki_pos += copied;
			last_pos = iocb->ki_pos;
				error = -EFAULT;
			filemap_end_dropbehind_read(mapping, folio);
	} while (iov_iter_count(iter) && iocb->ki_pos < isize && !error);

	ra->prev_pos = last_pos;
	struct address_space *mapping = iocb->ki_filp->f_mapping;
	loff_t pos = iocb->ki_pos;
	loff_t end = pos + count - 1;

	if (iocb->ki_flags & IOCB_NOWAIT) {
		if (filemap_range_needs_writeback(mapping, pos, end))
			return -EAGAIN;
	return filemap_write_and_wait_range(mapping, pos, end);
int filemap_invalidate_pages(struct address_space *mapping,
		if (filemap_range_has_page(mapping, pos, end))
			return -EAGAIN;
		ret = filemap_write_and_wait_range(mapping, pos, end);
	 * the new data. We invalidate clean cached page from the region we're
	 * without clobbering -EIOCBQUEUED from ->direct_IO().
	return invalidate_inode_pages2_range(mapping, pos >> PAGE_SHIFT,

	struct address_space *mapping = iocb->ki_filp->f_mapping;

	return filemap_invalidate_pages(mapping, iocb->ki_pos,
					iocb->ki_pos + count - 1,
					iocb->ki_flags & IOCB_NOWAIT);
 * generic_file_read_iter - generic filesystem read routine
 * @iter:	destination for the data read
 * The IOCB_NOWAIT flag in iocb->ki_flags indicates that -EAGAIN shall
 * be returned when no data can be read without waiting for I/O requests
 * The IOCB_NOIO flag in iocb->ki_flags indicates that no new I/O
 * requests shall be made for the read or for readahead.  When no data
 * can be read, -EAGAIN shall be returned.  When readahead would be
	if (iocb->ki_flags & IOCB_DIRECT) {
		struct file *file = iocb->ki_filp;
		struct address_space *mapping = file->f_mapping;
		struct inode *inode = mapping->host;

		retval = mapping->a_ops->direct_IO(iocb, iter);
			iocb->ki_pos += retval;
			count -= retval;
		if (retval != -EIOCBQUEUED)
			iov_iter_revert(iter, count - iov_iter_count(iter));
		if (iocb->ki_pos >= i_size_read(inode))
	size = min(size, folio_size(folio) - offset);
		size_t part = min_t(size_t, PAGE_SIZE - offset, size - spliced);
		pipe->head++;

 * filemap_splice_read - Splice data from a file's pagecache into a pipe
 * will be updated if appropriate; 0 will be returned if there is no more data
 * to be read; -EAGAIN will be returned if the pipe had no space, and some
 * if the pipe has insufficient space, we reach the end of the data or we hit a
	if (unlikely(*ppos >= in->f_mapping->host->i_sb->s_maxbytes))

	/* Work out how much data we can actually add into the pipe */
	npages = max_t(ssize_t, pipe->max_usage - used, 0);
		if (*ppos >= i_size_read(in->f_mapping->host))
		 * the correct value for "nr", which means the zero-filled
		 * another truncate extends the file - this is desired though).
		isize = i_size_read(in->f_mapping->host);
		 * Once we start copying data, we don't want to be touching any
		writably_mapped = mapping_writably_mapped(in->f_mapping);
			n = min_t(loff_t, len, isize - *ppos);
			len -= n;
	in->f_ra.prev_pos = *ppos;
		struct address_space *mapping, struct folio *folio,
	const struct address_space_operations *ops = mapping->a_ops;
	size_t offset, bsz = i_blocksize(mapping->host);

	if (!ops->is_partially_uptodate)
	if (unlikely(folio->mapping != mapping))

	offset = offset_in_folio(folio, start) & ~(bsz - 1);
		if (ops->is_partially_uptodate(folio, offset, bsz) ==
		start = (start + bsz) & ~((u64)bsz - 1);
 * mapping_seek_hole_data - Seek for SEEK_DATA / SEEK_HOLE in the page cache.
 * @mapping: Address space to search.
 * contain data, your filesystem can use this function to implement
 * entirely memory-based such as tmpfs, and filesystems which support
 * Return: The requested offset on success, or -ENXIO if @whence specifies
 * SEEK_DATA and there is no data after @start.  There is an implicit hole
 * after @end - 1, so SEEK_HOLE returns @end if all the bytes between @start
 * and @end contain data.
loff_t mapping_seek_hole_data(struct address_space *mapping, loff_t start,
	XA_STATE(xas, &mapping->i_pages, start >> PAGE_SHIFT);
	pgoff_t max = (end - 1) >> PAGE_SHIFT;

		return -ENXIO;
		start = folio_seek_hole_data(&xas, mapping, folio, start, pos,
		start = -ENXIO;
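/*
 * Editor's sketch (assumed ->llseek implementation, tmpfs-style), inside
 * the whence switch of a filesystem's llseek:
 *
 *	case SEEK_DATA:
 *	case SEEK_HOLE:
 *		return mapping_seek_hole_data(file->f_mapping, offset,
 *					      i_size_read(inode), whence);
 */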
 * lock_folio_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
 * @vmf: the vm_fault for this fault.
 * @folio: the folio to lock.
 * @fpin: the pointer to the file we may pin (or is already pinned).
	if (vmf->flags & FAULT_FLAG_RETRY_NOWAIT)
	if (vmf->flags & FAULT_FLAG_KILLABLE) {
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	struct address_space *mapping = file->f_mapping;
	DEFINE_READAHEAD(ractl, file, ra, mapping, vmf->pgoff);
	unsigned long vm_flags = vmf->vma->vm_flags;

		ractl._index &= ~((unsigned long)HPAGE_PMD_NR - 1);
		ra->size = HPAGE_PMD_NR;
		ra->size *= 2;
		ra->async_size = HPAGE_PMD_NR;

	/* If we don't want any read-ahead, don't bother */
	if (!ra->ra_pages)
	page_cache_sync_ra(&ractl, ra->ra_pages);
	mmap_miss = READ_ONCE(ra->mmap_miss);
		WRITE_ONCE(ra->mmap_miss, ++mmap_miss);
	 * stop bothering with read-ahead. It will only hurt.
	 * mmap read-around
	ra->start = max_t(long, 0, vmf->pgoff - ra->ra_pages / 2);
	ra->size = ra->ra_pages;
	ra->async_size = ra->ra_pages / 4;
	ractl._index = ra->start;
	struct file *file = vmf->vma->vm_file;
	struct file_ra_state *ra = &file->f_ra;
	DEFINE_READAHEAD(ractl, file, ra, file->f_mapping, vmf->pgoff);

	/* If we don't want any read-ahead, don't bother */
	if (vmf->vma->vm_flags & VM_RAND_READ || !ra->ra_pages)
	mmap_miss = READ_ONCE(ra->mmap_miss);
		WRITE_ONCE(ra->mmap_miss, --mmap_miss);
	page_cache_async_ra(&ractl, folio, ra->ra_pages);
	struct vm_area_struct *vma = vmf->vma;
	 * scenario while holding the PT lock, to not degrade non-mlocked
	if (!(vma->vm_flags & VM_LOCKED))
	if (!(vmf->flags & FAULT_FLAG_ORIG_PTE_VALID))
	ptep = pte_offset_map_ro_nolock(vma->vm_mm, vmf->pmd, vmf->address,
					&vmf->ptl);
		spin_lock(vmf->ptl);
		spin_unlock(vmf->ptl);
 * filemap_fault - read in file data for page fault handling
 * mapped memory region to read in file data during a page fault.
 * vma->vm_mm->mmap_lock must be held on entry.
 * Return: bitwise-OR of %VM_FAULT_ codes.
	struct file *file = vmf->vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	pgoff_t max_idx, index = vmf->pgoff;

	trace_mm_filemap_fault(mapping, index);
	folio = filemap_get_folio(mapping, index);
		if (!(vmf->flags & FAULT_FLAG_TRIED))
			filemap_invalidate_lock_shared(mapping);
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
		filemap_invalidate_lock_shared(mapping);
		folio = __filemap_get_folio(mapping, index,
					    vmf->gfp_mask);
			filemap_invalidate_unlock_shared(mapping);
	if (unlikely(folio->mapping != mapping)) {
	 * that it's up-to-date. If not, it is going to be due to an error,
	 * time to return to the upper layer and have it re-find the vma and
		filemap_invalidate_unlock_shared(mapping);
	vmf->page = folio_file_page(folio, index);
	 * Umm, take care of errors if the page isn't up-to-date.
	 * Try to re-read it _once_. We do this synchronously,
	error = filemap_read_folio(file, mapping->a_ops->read_folio, folio);
		filemap_invalidate_unlock_shared(mapping);
	 * re-find the vma and come back and find our hopefully still populated
	filemap_invalidate_unlock_shared(mapping);
	struct mm_struct *mm = vmf->vma->vm_mm;

	if (pmd_trans_huge(*vmf->pmd)) {
	if (pmd_none(*vmf->pmd) && folio_test_pmd_mappable(folio)) {
	if (pmd_none(*vmf->pmd) && vmf->prealloc_pte)
		pmd_install(mm, vmf->pmd, &vmf->prealloc_pte);
		struct address_space *mapping, pgoff_t end_pgoff)
		if (folio->mapping != mapping)
		max_idx = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE);
		if (xas->xa_index >= max_idx)
	pte_t *old_ptep = vmf->pte;
	 * In such situation, read-ahead is only a waste of IO.
	 * we can stop read-ahead.
		 * fault-around logic.
		if (!pte_none(ptep_get(&vmf->pte[count])))
		if (in_range(vmf->address, addr, count * PAGE_SIZE))
		vmf->pte += count;
	} while (--nr_pages > 0);

	if (in_range(vmf->address, addr, count * PAGE_SIZE))
	vmf->pte = old_ptep;
	struct page *page = &folio->page;
	 * the fault-around logic.
	if (!pte_none(ptep_get(vmf->pte)))
	if (vmf->address == addr)
	struct vm_area_struct *vma = vmf->vma;
	struct file *file = vma->vm_file;
	struct address_space *mapping = file->f_mapping;
	XA_STATE(xas, &mapping->i_pages, start_pgoff);

	folio = next_uptodate_folio(&xas, mapping, end_pgoff);
	addr = vma->vm_start + ((start_pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	vmf->pte = pte_offset_map_lock(vma->vm_mm, vmf->pmd, addr, &vmf->ptl);
	if (!vmf->pte) {
	file_end = DIV_ROUND_UP(i_size_read(mapping->host), PAGE_SIZE) - 1;
		addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT;
		vmf->pte += xas.xa_index - last_pgoff;
		end = folio_next_index(folio) - 1;
		nr_pages = min(end, end_pgoff) - xas.xa_index + 1;
				xas.xa_index - folio->index, addr,
	} while ((folio = next_uptodate_folio(&xas, mapping, end_pgoff)) != NULL);
	add_mm_counter(vma->vm_mm, folio_type, rss);
	pte_unmap_unlock(vmf->pte, vmf->ptl);
	trace_mm_filemap_map_pages(mapping, start_pgoff, end_pgoff);

	mmap_miss_saved = READ_ONCE(file->f_ra.mmap_miss);
		WRITE_ONCE(file->f_ra.mmap_miss, 0);
		WRITE_ONCE(file->f_ra.mmap_miss, mmap_miss_saved - mmap_miss);
	struct address_space *mapping = vmf->vma->vm_file->f_mapping;
	struct folio *folio = page_folio(vmf->page);

	sb_start_pagefault(mapping->host->i_sb);
	file_update_time(vmf->vma->vm_file);
	if (folio->mapping != mapping) {
	sb_end_pagefault(mapping->host->i_sb);
	struct address_space *mapping = file->f_mapping;

	if (!mapping->a_ops->read_folio)
		return -ENOEXEC;
	vma->vm_ops = &generic_file_vm_ops;

 * This is for filesystems which do not implement ->writepage.
		return -EINVAL;

	return -ENOSYS;
	return -ENOSYS;
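/*
 * Editor's sketch (assumed filesystem glue, not in filemap.c): wiring the
 * generic helpers into a filesystem's file_operations:
 *
 *	const struct file_operations myfs_file_ops = {
 *		.read_iter	= generic_file_read_iter,
 *		.mmap		= generic_file_mmap,
 *	};
 */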
static struct folio *do_read_cache_folio(struct address_space *mapping,
		filler = mapping->a_ops->read_folio;

	folio = filemap_get_folio(mapping, index);
				mapping_min_folio_order(mapping));
			return ERR_PTR(-ENOMEM);
		index = mapping_align_index(mapping, index);
		err = filemap_add_folio(mapping, folio, index, gfp);
			if (err == -EEXIST)

	/* Folio was truncated from mapping */
	if (!folio->mapping) {
 * read_cache_folio - Read into page cache, fill it if needed.
 * @mapping: The address_space to read from.
 * @filler: Function to perform the read, or NULL to use aops->read_folio().
 * Context: May sleep.  Expects mapping->invalidate_lock to be held.
struct folio *read_cache_folio(struct address_space *mapping, pgoff_t index,
	return do_read_cache_folio(mapping, index, filler, file,
			mapping_gfp_mask(mapping));
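/*
 * Editor's sketch (assumed caller): reading folio @index through the
 * mapping's default ->read_folio:
 *
 *	folio = read_cache_folio(mapping, index, NULL, file);
 *	if (IS_ERR(folio))
 *		return PTR_ERR(folio);
 */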
 * mapping_read_folio_gfp - Read into page cache, using specified allocation flags.
 * @mapping: The address_space for the folio.
 * This is the same as "read_cache_folio(mapping, index, NULL, NULL)", but with
 * possible and so is EINTR.  If ->read_folio returns another error,
 * The function expects mapping->invalidate_lock to be already held.
struct folio *mapping_read_folio_gfp(struct address_space *mapping,
	return do_read_cache_folio(mapping, index, NULL, NULL, gfp);
static struct page *do_read_cache_page(struct address_space *mapping,
	folio = do_read_cache_folio(mapping, index, filler, file, gfp);
	return &folio->page;

struct page *read_cache_page(struct address_space *mapping,
	return do_read_cache_page(mapping, index, filler, file,
			mapping_gfp_mask(mapping));

 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
 * @mapping:	the page's address_space
 * This is the same as "read_mapping_page(mapping, index, NULL)", but with
 * If the page does not get brought uptodate, return -EIO.
 * The function expects mapping->invalidate_lock to be already held.
struct page *read_cache_page_gfp(struct address_space *mapping,
	return do_read_cache_page(mapping, index, NULL, NULL, gfp);
	errseq_set(&filp->f_mapping->wb_err, -EIO);
	pr_crit("Page cache invalidation failure on direct I/O.  Possible data corruption due to collision with buffered I/O!\n");
	pr_crit("File: %s PID: %d Comm: %.20s\n", path, current->pid,
		current->comm);

	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if (mapping->nrpages &&
	    invalidate_inode_pages2_range(mapping,
			iocb->ki_pos >> PAGE_SHIFT,
			(iocb->ki_pos + count - 1) >> PAGE_SHIFT))
		dio_warn_stale_pagecache(iocb->ki_filp);
	struct address_space *mapping = iocb->ki_filp->f_mapping;

	if (written == -EBUSY)
	written = mapping->a_ops->direct_IO(iocb, from);
	 * cached by non-direct readahead, or faulted in by get_user_pages()
	 * Skip invalidation for async writes or if mapping has no pages.
		struct inode *inode = mapping->host;
		loff_t pos = iocb->ki_pos;

		write_len -= written;
		if (pos > i_size_read(inode) && !S_ISBLK(inode->i_mode)) {
		iocb->ki_pos = pos;
	if (written != -EIOCBQUEUED)
		iov_iter_revert(from, write_len - iov_iter_count(from));
	struct file *file = iocb->ki_filp;
	loff_t pos = iocb->ki_pos;
	struct address_space *mapping = file->f_mapping;
	const struct address_space_operations *a_ops = mapping->a_ops;
	size_t chunk = mapping_max_folio_size(mapping);

		offset = pos & (chunk - 1);
		bytes = min(chunk - offset, bytes);
		balance_dirty_pages_ratelimited(mapping);
			status = -EINTR;
		status = a_ops->write_begin(file, mapping, pos, bytes,
		if (bytes > folio_size(folio) - offset)
			bytes = folio_size(folio) - offset;
		if (mapping_writably_mapped(mapping))
		status = a_ops->write_end(file, mapping, pos, bytes, copied,
		iov_iter_revert(i, copied - max(status, 0L));
			 * A short copy made ->write_end() reject the
			status = -EFAULT;
	iocb->ki_pos += written;
 * __generic_file_write_iter - write data to a file
 * @from:	iov_iter with data to write
 * This function does all the work needed for actually writing data to a
 * This function does *not* take care of syncing data in case of O_SYNC write.
 * * negative error code if no data has been written at all
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;

	if (iocb->ki_flags & IOCB_DIRECT) {
		 * page-cache pages correctly).
 * generic_file_write_iter - write data to a file
 * @from:	iov_iter with data to write
 * * negative error code if no data has been written at all of
	struct file *file = iocb->ki_filp;
	struct inode *inode = file->f_mapping->host;
 * filemap_release_folio() - Release fs-specific metadata on a folio.
 * The address_space is trying to release any data attached to a folio
 * (presumably at folio->private).
	struct address_space * const mapping = folio->mapping;

	if (mapping && mapping->a_ops->release_folio)
		return mapping->a_ops->release_folio(folio, gfp);
 * filemap_invalidate_inode - Invalidate/forcibly write back a range of an inode's pagecache
	struct address_space *mapping = inode->i_mapping;
	pgoff_t nr = end == LLONG_MAX ? ULONG_MAX : last - first + 1;

	if (!mapping || !mapping->nrpages || end < start)
	filemap_invalidate_lock(mapping);
	if (!mapping->nrpages)
	unmap_mapping_pages(mapping, first, nr, false);

	/* Write back the data if we're asked to. */
		filemap_fdatawrite_wbc(mapping, &wbc);
	invalidate_inode_pages2_range(mapping, start / PAGE_SIZE, end / PAGE_SIZE);
	filemap_invalidate_unlock(mapping);

	return filemap_check_errors(mapping);
 * filemap_cachestat() - compute the page cache statistics of a mapping
 * @mapping:	The mapping to compute the statistics for.
 * This will query the page cache statistics of a mapping in the
static void filemap_cachestat(struct address_space *mapping,
	XA_STATE(xas, &mapping->i_pages, first_index);
		 * the rcu-protected xarray.
		folio_last_index = folio_first_index + nr_pages - 1;
			nr_pages -= first_index - folio_first_index;
			nr_pages -= folio_last_index - last_index;
			cs->nr_evicted += nr_pages;
			if (shmem_mapping(mapping)) {
				/* shmem file - in swap cache */
				cs->nr_recently_evicted += nr_pages;
		cs->nr_cache += nr_pages;
			cs->nr_dirty += nr_pages;
			cs->nr_writeback += nr_pages;
	if (f->f_mode & FMODE_WRITE)

 * `off` and `len` must be non-negative integers. If `len` > 0,
 *  zero        - success
 *  -EFAULT     - cstat or cstat_range points to an illegal address
 *  -EINVAL     - invalid flags
 *  -EBADF      - invalid file descriptor
 *  -EOPNOTSUPP - file descriptor is of a hugetlbfs file
	struct address_space *mapping;

		return -EBADF;
		return -EFAULT;
		return -EOPNOTSUPP;
		return -EPERM;
		return -EINVAL;
		csr.len == 0 ? ULONG_MAX : (csr.off + csr.len - 1) >> PAGE_SHIFT;

	mapping = fd_file(f)->f_mapping;
	filemap_cachestat(mapping, first_index, last_index, &cs);

		return -EFAULT;
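/*
 * Editor's userspace sketch (assumed raw invocation via syscall(2), on a
 * kernel that provides cachestat(2); needs <unistd.h>, <sys/syscall.h>,
 * and <linux/mman.h> for the struct definitions):
 *
 *	struct cachestat_range csr = { .off = 0, .len = 0 };  // whole file
 *	struct cachestat cs;
 *
 *	if (syscall(__NR_cachestat, fd, &csr, &cs, 0) == 0)
 *		printf("cached: %llu dirty: %llu\n",
 *		       (unsigned long long)cs.nr_cache,
 *		       (unsigned long long)cs.nr_dirty);
 */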