Lines Matching full:page
63 * finished 'unifying' the page and buffer cache and SMP-threaded the
64 * page-cache, 21.05.1999, Ingo Molnar <mingo@redhat.com>
121 struct page *page, void *shadow) in page_cache_delete() argument
123 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
129 if (!PageHuge(page)) { in page_cache_delete()
130 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
131 nr = compound_nr(page); in page_cache_delete()
134 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
135 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
136 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
141 page->mapping = NULL; in page_cache_delete()
142 /* Leave page->index set: truncation lookup relies upon it */ in page_cache_delete()
158 struct page *page) in unaccount_page_cache_page() argument
165 * stale data around in the cleancache once our page is gone in unaccount_page_cache_page()
167 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
168 cleancache_put_page(page); in unaccount_page_cache_page()
170 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
172 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
173 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
174 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
177 pr_alert("BUG: Bad page cache in process %s pfn:%05lx\n", in unaccount_page_cache_page()
178 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
179 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
183 mapcount = page_mapcount(page); in unaccount_page_cache_page()
185 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
188 * a good bet that actually the page is unmapped, in unaccount_page_cache_page()
190 * some other bad page check should catch it later. in unaccount_page_cache_page()
192 page_mapcount_reset(page); in unaccount_page_cache_page()
193 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
197 /* hugetlb pages do not participate in page cache accounting. */ in unaccount_page_cache_page()
198 if (PageHuge(page)) in unaccount_page_cache_page()
201 nr = thp_nr_pages(page); in unaccount_page_cache_page()
203 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
204 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
205 __mod_lruvec_page_state(page, NR_SHMEM, -nr); in unaccount_page_cache_page()
206 if (PageTransHuge(page)) in unaccount_page_cache_page()
207 __dec_node_page_state(page, NR_SHMEM_THPS); in unaccount_page_cache_page()
208 } else if (PageTransHuge(page)) { in unaccount_page_cache_page()
209 __dec_node_page_state(page, NR_FILE_THPS); in unaccount_page_cache_page()
214 * At this point page must be either written or cleaned by in unaccount_page_cache_page()
215 * truncate. Dirty page here signals a bug and loss of in unaccount_page_cache_page()
218 * This fixes dirty accounting after removing the page entirely in unaccount_page_cache_page()
220 * page and anyway will be cleared before returning page into in unaccount_page_cache_page()
223 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
224 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
228 * Delete a page from the page cache and free it. Caller has to make
229 * sure the page is locked and that nobody else uses it - or that usage
232 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
234 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
236 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
238 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
239 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
243 struct page *page) in page_cache_free_page() argument
245 void (*freepage)(struct page *); in page_cache_free_page()
249 freepage(page); in page_cache_free_page()
251 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
252 page_ref_sub(page, thp_nr_pages(page)); in page_cache_free_page()
253 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
255 put_page(page); in page_cache_free_page()
260 * delete_from_page_cache - delete page from page cache
261 * @page: the page which the kernel is trying to remove from page cache
263 * This must be called only on pages that have been verified to be in the page
264 * cache and locked. It will never put the page into the free list, the caller
265 * has a reference on the page.
267 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
269 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
272 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
274 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
277 page_cache_free_page(mapping, page); in delete_from_page_cache()
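A usage note (not part of this file's text): the calling convention the comment above describes is that the caller already holds both the page lock and its own reference, and stays responsible for dropping them afterwards. A minimal sketch, assuming a regular (non-THP) page found via find_lock_page():

    page = find_lock_page(mapping, index);    /* locked page plus a reference, or NULL */
    if (page) {
            delete_from_page_cache(page);     /* drops the page cache's reference */
            unlock_page(page);
            put_page(page);                   /* drop the reference find_lock_page() took */
    }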
282 * page_cache_delete_batch - delete several pages from page cache
287 * from the mapping. The function expects @pvec to be sorted by page index
301 struct page *page; in page_cache_delete_batch() local
304 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
309 if (xa_is_value(page)) in page_cache_delete_batch()
312 * A page got inserted in our range? Skip it. We have our in page_cache_delete_batch()
314 * If we see a page whose index is higher than ours, it in page_cache_delete_batch()
315 * means our page has been removed, which shouldn't be in page_cache_delete_batch()
318 if (page != pvec->pages[i]) { in page_cache_delete_batch()
319 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
320 page); in page_cache_delete_batch()
324 WARN_ON_ONCE(!PageLocked(page)); in page_cache_delete_batch()
326 if (page->index == xas.xa_index) in page_cache_delete_batch()
327 page->mapping = NULL; in page_cache_delete_batch()
328 /* Leave page->index set: truncation lookup relies on it */ in page_cache_delete_batch()
331 * Move to the next page in the vector if this is a regular in page_cache_delete_batch()
332 * page or the index is of the last sub-page of this compound in page_cache_delete_batch()
333 * page. in page_cache_delete_batch()
335 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
401 * these two operations is that if a dirty page/buffer is encountered, it must
462 * filemap_range_has_page - check if a page exists in range.
467 * Find at least one page in the range supplied, usually used to check if
470 * Return: %true if at least one page exists in the specified range,
476 struct page *page; in filemap_range_has_page() local
485 page = xas_find(&xas, max); in filemap_range_has_page()
486 if (xas_retry(&xas, page)) in filemap_range_has_page()
489 if (xa_is_value(page)) in filemap_range_has_page()
492 * We don't need to try to pin this page; we're about to in filemap_range_has_page()
494 * there was a page here recently. in filemap_range_has_page()
500 return page != NULL; in filemap_range_has_page()
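A hedged example of the typical caller's logic: skip an expensive flush when the byte range has nothing cached at all. The filemap_write_and_wait_range() follow-up is an assumption about the caller, not a line from this listing:

    if (filemap_range_has_page(inode->i_mapping, pos, end))
            err = filemap_write_and_wait_range(inode->i_mapping, pos, end);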
525 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
527 wait_on_page_writeback(page); in __filemap_fdatawait_range()
528 ClearPageError(page); in __filemap_fdatawait_range()
775 * replace_page_cache_page - replace a pagecache page with a new one
776 * @old: page to be replaced
777 * @new: page to replace with
780 * This function replaces a page in the pagecache with a new one. On
781 * success it acquires the pagecache reference for the new page and
782 * drops it for the old page. Both the old and new pages must be
783 * locked. This function does not add the new page to the LRU, the
790 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page()
793 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
812 /* hugetlb pages do not participate in page cache accounting. */ in replace_page_cache_page()
830 noinline int __add_to_page_cache_locked(struct page *page, in __add_to_page_cache_locked() argument
836 int huge = PageHuge(page); in __add_to_page_cache_locked()
839 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_page_cache_locked()
840 VM_BUG_ON_PAGE(PageSwapBacked(page), page); in __add_to_page_cache_locked()
843 get_page(page); in __add_to_page_cache_locked()
844 page->mapping = mapping; in __add_to_page_cache_locked()
845 page->index = offset; in __add_to_page_cache_locked()
848 error = mem_cgroup_charge(page, current->mm, gfp); in __add_to_page_cache_locked()
859 if (order > thp_order(page)) in __add_to_page_cache_locked()
876 if (order > thp_order(page)) { in __add_to_page_cache_locked()
882 xas_store(&xas, page); in __add_to_page_cache_locked()
890 /* hugetlb pages do not participate in page cache accounting */ in __add_to_page_cache_locked()
892 __inc_lruvec_page_state(page, NR_FILE_PAGES); in __add_to_page_cache_locked()
902 trace_mm_filemap_add_to_page_cache(page); in __add_to_page_cache_locked()
905 page->mapping = NULL; in __add_to_page_cache_locked()
906 /* Leave page->index set: truncation relies upon it */ in __add_to_page_cache_locked()
907 put_page(page); in __add_to_page_cache_locked()
913 * add_to_page_cache_locked - add a locked page to the pagecache
914 * @page: page to add
915 * @mapping: the page's address_space
916 * @offset: page index
917 * @gfp_mask: page allocation mode
919 * This function is used to add a page to the pagecache. It must be locked.
920 * This function does not add the page to the LRU. The caller must do that.
924 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
927 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
932 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
938 __SetPageLocked(page); in add_to_page_cache_lru()
939 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
942 __ClearPageLocked(page); in add_to_page_cache_lru()
945 * The page might have been evicted from cache only in add_to_page_cache_lru()
947 * any other repeatedly accessed page. in add_to_page_cache_lru()
952 WARN_ON_ONCE(PageActive(page)); in add_to_page_cache_lru()
954 workingset_refault(page, shadow); in add_to_page_cache_lru()
955 lru_cache_add(page); in add_to_page_cache_lru()
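For context, a sketch of the allocate-insert-read pattern these helpers support; the gfp choices and the ->readpage() call are illustrative assumptions, not lines from this file:

    struct page *page = __page_cache_alloc(mapping_gfp_mask(mapping));

    if (!page)
            return -ENOMEM;
    err = add_to_page_cache_lru(page, mapping, index,
                                mapping_gfp_constraint(mapping, GFP_KERNEL));
    if (err) {
            put_page(page);
            return err == -EEXIST ? 0 : err;  /* -EEXIST: someone else cached it first */
    }
    /* Success: the page is locked and on the LRU; ->readpage() will unlock it. */
    return mapping->a_ops->readpage(file, page);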
962 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
965 struct page *page; in __page_cache_alloc() local
972 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
973 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); in __page_cache_alloc()
975 return page; in __page_cache_alloc()
988 * sure the appropriate page became available, this saves space
996 static wait_queue_head_t *page_waitqueue(struct page *page) in page_waitqueue() argument
998 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; in page_waitqueue()
1012 * The page wait code treats the "wait->flags" somewhat unusually, because
1061 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1064 if (test_and_set_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1096 static void wake_up_page_bit(struct page *page, int bit_nr) in wake_up_page_bit() argument
1098 wait_queue_head_t *q = page_waitqueue(page); in wake_up_page_bit()
1103 key.page = page; in wake_up_page_bit()
1130 * hash, so in that case check for a page match. That prevents a long- in wake_up_page_bit()
1133 * It is still possible to miss a case here, when we woke page waiters in wake_up_page_bit()
1135 * page waiters. in wake_up_page_bit()
1138 ClearPageWaiters(page); in wake_up_page_bit()
1141 * our page waiters, but the hashed waitqueue has waiters for in wake_up_page_bit()
1150 static void wake_up_page(struct page *page, int bit) in wake_up_page() argument
1152 if (!PageWaiters(page)) in wake_up_page()
1154 wake_up_page_bit(page, bit); in wake_up_page()
1161 EXCLUSIVE, /* Hold ref to page and take the bit when woken, like
1164 SHARED, /* Hold ref to page and check the bit when woken, like
1167 DROP, /* Drop ref to page before wait, no check when woken,
1173 * Attempt to check (or get) the page bit, and mark us done
1176 static inline bool trylock_page_bit_common(struct page *page, int bit_nr, in trylock_page_bit_common() argument
1180 if (test_and_set_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1182 } else if (test_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1193 struct page *page, int bit_nr, int state, enum behavior behavior) in wait_on_page_bit_common() argument
1203 !PageUptodate(page) && PageWorkingset(page)) { in wait_on_page_bit_common()
1204 if (!PageSwapBacked(page)) { in wait_on_page_bit_common()
1214 wait_page.page = page; in wait_on_page_bit_common()
1227 * page bit synchronously. in wait_on_page_bit_common()
1233 * page queue), and add ourselves to the wait in wait_on_page_bit_common()
1240 SetPageWaiters(page); in wait_on_page_bit_common()
1241 if (!trylock_page_bit_common(page, bit_nr, wait)) in wait_on_page_bit_common()
1248 * see whether the page bit testing has already in wait_on_page_bit_common()
1251 * We can drop our reference to the page. in wait_on_page_bit_common()
1254 put_page(page); in wait_on_page_bit_common()
1291 if (unlikely(test_and_set_bit(bit_nr, &page->flags))) in wait_on_page_bit_common()
1331 void wait_on_page_bit(struct page *page, int bit_nr) in wait_on_page_bit() argument
1333 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit()
1334 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in wait_on_page_bit()
1338 int wait_on_page_bit_killable(struct page *page, int bit_nr) in wait_on_page_bit_killable() argument
1340 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit_killable()
1341 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); in wait_on_page_bit_killable()
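The common wrappers in <linux/pagemap.h> reduce to these calls; a rough sketch of their shape (see the header for the authoritative definitions):

    static inline void wait_on_page_locked(struct page *page)
    {
            if (PageLocked(page))
                    wait_on_page_bit(compound_head(page), PG_locked);
    }

    static inline void wait_on_page_writeback(struct page *page)
    {
            if (PageWriteback(page))
                    wait_on_page_bit(page, PG_writeback);
    }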
1345 static int __wait_on_page_locked_async(struct page *page, in __wait_on_page_locked_async() argument
1348 struct wait_queue_head *q = page_waitqueue(page); in __wait_on_page_locked_async()
1351 wait->page = page; in __wait_on_page_locked_async()
1356 SetPageWaiters(page); in __wait_on_page_locked_async()
1358 ret = !trylock_page(page); in __wait_on_page_locked_async()
1360 ret = PageLocked(page); in __wait_on_page_locked_async()
1375 static int wait_on_page_locked_async(struct page *page, in wait_on_page_locked_async() argument
1378 if (!PageLocked(page)) in wait_on_page_locked_async()
1380 return __wait_on_page_locked_async(compound_head(page), wait, false); in wait_on_page_locked_async()
1385 * @page: The page to wait for.
1387 * The caller should hold a reference on @page. They expect the page to
1389 * (for example) by holding the reference while waiting for the page to
1391 * dereference @page.
1393 void put_and_wait_on_page_locked(struct page *page) in put_and_wait_on_page_locked() argument
1397 page = compound_head(page); in put_and_wait_on_page_locked()
1398 q = page_waitqueue(page); in put_and_wait_on_page_locked()
1399 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); in put_and_wait_on_page_locked()
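A hedged sketch of the intended calling pattern, modelled loosely on the migration-entry wait path: pin the page, drop any spinlocks, and let the helper release that pin before sleeping:

    get_page(page);                         /* pin across the unlock below */
    pte_unmap_unlock(ptep, ptl);            /* cannot sleep under the PTE lock */
    put_and_wait_on_page_locked(page);      /* drops our reference, then waits */
    /* do not touch 'page' here: we no longer hold a reference to it */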
1403 * add_page_wait_queue - Add an arbitrary waiter to a page's wait queue
1404 * @page: Page defining the wait queue of interest
1407 * Add an arbitrary @waiter to the wait queue for the nominated @page.
1409 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) in add_page_wait_queue() argument
1411 wait_queue_head_t *q = page_waitqueue(page); in add_page_wait_queue()
1416 SetPageWaiters(page); in add_page_wait_queue()
1445 * unlock_page - unlock a locked page
1446 * @page: the page
1448 * Unlocks the page and wakes up sleepers in wait_on_page_locked().
1459 void unlock_page(struct page *page) in unlock_page() argument
1462 page = compound_head(page); in unlock_page()
1463 VM_BUG_ON_PAGE(!PageLocked(page), page); in unlock_page()
1464 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1465 wake_up_page_bit(page, PG_locked); in unlock_page()
1470 * end_page_writeback - end writeback against a page
1471 * @page: the page
1473 void end_page_writeback(struct page *page) in end_page_writeback() argument
1478 * shuffle a page marked for immediate reclaim is too mild to in end_page_writeback()
 1480 * every page writeback. in end_page_writeback()
1482 if (PageReclaim(page)) { in end_page_writeback()
1483 ClearPageReclaim(page); in end_page_writeback()
1484 rotate_reclaimable_page(page); in end_page_writeback()
1488 * Writeback does not hold a page reference of its own, relying in end_page_writeback()
1490 * But here we must make sure that the page is not freed and in end_page_writeback()
1493 get_page(page); in end_page_writeback()
1494 if (!test_clear_page_writeback(page)) in end_page_writeback()
1498 wake_up_page(page, PG_writeback); in end_page_writeback()
1499 put_page(page); in end_page_writeback()
1504 * After completing I/O on a page, call this routine to update the page
1507 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1511 SetPageUptodate(page); in page_endio()
1513 ClearPageUptodate(page); in page_endio()
1514 SetPageError(page); in page_endio()
1516 unlock_page(page); in page_endio()
1521 SetPageError(page); in page_endio()
1522 mapping = page_mapping(page); in page_endio()
1526 end_page_writeback(page); in page_endio()
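To illustrate where page_endio() is usually called from, a sketch of an mpage-style bio completion handler (demo_end_io is a hypothetical name):

    static void demo_end_io(struct bio *bio)
    {
            struct bio_vec *bv;
            struct bvec_iter_all iter_all;

            bio_for_each_segment_all(bv, bio, iter_all)
                    page_endio(bv->bv_page, bio_op(bio) == REQ_OP_WRITE,
                               blk_status_to_errno(bio->bi_status));
            bio_put(bio);
    }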
1532 * __lock_page - get a lock on the page, assuming we need to sleep to get it
1533 * @__page: the page to lock
1535 void __lock_page(struct page *__page) in __lock_page()
1537 struct page *page = compound_head(__page); in __lock_page() local
1538 wait_queue_head_t *q = page_waitqueue(page); in __lock_page()
1539 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, in __lock_page()
1544 int __lock_page_killable(struct page *__page) in __lock_page_killable()
1546 struct page *page = compound_head(__page); in __lock_page_killable() local
1547 wait_queue_head_t *q = page_waitqueue(page); in __lock_page_killable()
1548 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, in __lock_page_killable()
1553 int __lock_page_async(struct page *page, struct wait_page_queue *wait) in __lock_page_async() argument
1555 return __wait_on_page_locked_async(page, wait, true); in __lock_page_async()
1560 * 1 - page is locked; mmap_lock is still held.
1561 * 0 - page is not locked.
1567 * with the page locked and the mmap_lock unperturbed.
1569 int __lock_page_or_retry(struct page *page, struct mm_struct *mm, in __lock_page_or_retry() argument
1582 wait_on_page_locked_killable(page); in __lock_page_or_retry()
1584 wait_on_page_locked(page); in __lock_page_or_retry()
1590 ret = __lock_page_killable(page); in __lock_page_or_retry()
1596 __lock_page(page); in __lock_page_or_retry()
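For reference, the usual fault-path pattern around the lock_page_or_retry() wrapper; a sketch that assumes the caller already holds a reference on the page:

    if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
            /* the helper may have dropped mmap_lock for us; retry the fault */
            put_page(page);
            return VM_FAULT_RETRY;
    }
    /* here the page is locked and mmap_lock is still held */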
1602 * page_cache_next_miss() - Find the next gap in the page cache.
1638 * page_cache_prev_miss() - Find the previous gap in the page cache.
1674 * find_get_entry - find and get a page cache entry
1676 * @index: The page cache index.
1678 * Looks up the page cache slot at @mapping & @offset. If there is a
1679 * page cache page, the head page is returned with an increased refcount.
1681 * If the slot holds a shadow entry of a previously evicted page, or a
1684 * Return: The head page or shadow entry, %NULL if nothing is found.
1686 struct page *find_get_entry(struct address_space *mapping, pgoff_t index) in find_get_entry()
1689 struct page *page; in find_get_entry() local
1694 page = xas_load(&xas); in find_get_entry()
1695 if (xas_retry(&xas, page)) in find_get_entry()
1698 * A shadow entry of a recently evicted page, or a swap entry from in find_get_entry()
1699 * shmem/tmpfs. Return it without attempting to raise page count. in find_get_entry()
1701 if (!page || xa_is_value(page)) in find_get_entry()
1704 if (!page_cache_get_speculative(page)) in find_get_entry()
1708 * Has the page moved or been split? in find_get_entry()
1712 if (unlikely(page != xas_reload(&xas))) { in find_get_entry()
1713 put_page(page); in find_get_entry()
1719 return page; in find_get_entry()
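A short sketch of how a caller tells the three possible results apart (nothing, a shadow/swap value entry, or a real head page); find_subpage() is the usual follow-up when the exact index matters:

    entry = find_get_entry(mapping, index);
    if (!entry)
            return NULL;                    /* nothing cached at this index */
    if (xa_is_value(entry))
            return NULL;                    /* shadow or swap entry: no reference was taken */
    page = find_subpage(entry, index);      /* head page: narrow to the requested subpage */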
1723 * find_lock_entry - Locate and lock a page cache entry.
1725 * @index: The page cache index.
1727 * Looks up the page at @mapping & @index. If there is a page in the
1728 * cache, the head page is returned locked and with an increased refcount.
1730 * If the slot holds a shadow entry of a previously evicted page, or a
1734 * Return: The head page or shadow entry, %NULL if nothing is found.
1736 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index) in find_lock_entry()
1738 struct page *page; in find_lock_entry() local
1741 page = find_get_entry(mapping, index); in find_lock_entry()
1742 if (page && !xa_is_value(page)) { in find_lock_entry()
1743 lock_page(page); in find_lock_entry()
1744 /* Has the page been truncated? */ in find_lock_entry()
1745 if (unlikely(page->mapping != mapping)) { in find_lock_entry()
1746 unlock_page(page); in find_lock_entry()
1747 put_page(page); in find_lock_entry()
1750 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in find_lock_entry()
1752 return page; in find_lock_entry()
1756 * pagecache_get_page - Find and get a reference to a page.
1758 * @index: The page index.
1759 * @fgp_flags: %FGP flags modify how the page is returned.
1762 * Looks up the page cache entry at @mapping & @index.
1766 * * %FGP_ACCESSED - The page will be marked accessed.
1767 * * %FGP_LOCK - The page is returned locked.
1768 * * %FGP_HEAD - If the page is present and a THP, return the head page
1769 * rather than the exact page specified by the index.
1770 * * %FGP_CREAT - If no page is present then a new page is allocated using
1771 * @gfp_mask and added to the page cache and the VM's LRU list.
1772 * The page is returned locked and with an increased refcount.
1774 * page is already in cache. If the page was allocated, unlock it before
1776 * * %FGP_WRITE - The page will be written
1778 * * %FGP_NOWAIT - Don't get blocked by page lock
1783 * If there is a page cache page, it is returned with an increased refcount.
1785 * Return: The found page or %NULL otherwise.
1787 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page()
1790 struct page *page; in pagecache_get_page() local
1793 page = find_get_entry(mapping, index); in pagecache_get_page()
1794 if (xa_is_value(page)) in pagecache_get_page()
1795 page = NULL; in pagecache_get_page()
1796 if (!page) in pagecache_get_page()
1801 if (!trylock_page(page)) { in pagecache_get_page()
1802 put_page(page); in pagecache_get_page()
1806 lock_page(page); in pagecache_get_page()
1809 /* Has the page been truncated? */ in pagecache_get_page()
1810 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1811 unlock_page(page); in pagecache_get_page()
1812 put_page(page); in pagecache_get_page()
1815 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in pagecache_get_page()
1819 mark_page_accessed(page); in pagecache_get_page()
1822 if (page_is_idle(page)) in pagecache_get_page()
1823 clear_page_idle(page); in pagecache_get_page()
1826 page = find_subpage(page, index); in pagecache_get_page()
1829 if (!page && (fgp_flags & FGP_CREAT)) { in pagecache_get_page()
1836 page = __page_cache_alloc(gfp_mask); in pagecache_get_page()
1837 if (!page) in pagecache_get_page()
1845 __SetPageReferenced(page); in pagecache_get_page()
1847 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); in pagecache_get_page()
1849 put_page(page); in pagecache_get_page()
1850 page = NULL; in pagecache_get_page()
1856 * add_to_page_cache_lru locks the page, and for mmap we expect in pagecache_get_page()
1857 * an unlocked page. in pagecache_get_page()
1859 if (page && (fgp_flags & FGP_FOR_MMAP)) in pagecache_get_page()
1860 unlock_page(page); in pagecache_get_page()
1863 return page; in pagecache_get_page()
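A hedged usage sketch combining the flags above, roughly what a buffered-write helper wants: a locked page at @index, created if necessary:

    page = pagecache_get_page(mapping, index,
                              FGP_LOCK | FGP_WRITE | FGP_CREAT,
                              mapping_gfp_mask(mapping));
    if (!page)
            return -ENOMEM;
    /* ... copy data in, set_page_dirty(page) ... */
    unlock_page(page);
    put_page(page);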
1870 * @start: The starting page cache index
1880 * The search returns a group of mapping-contiguous page cache entries
1887 * If it finds a Transparent Huge Page, head or tail, find_get_entries()
1888 * stops at that page: the caller is likely to have a better way to handle
1889 * the compound page as a whole, and then skip its extent, than repeatedly
1896 struct page **entries, pgoff_t *indices) in find_get_entries()
1899 struct page *page; in find_get_entries() local
1906 xas_for_each(&xas, page, ULONG_MAX) { in find_get_entries()
1907 if (xas_retry(&xas, page)) in find_get_entries()
1910 * A shadow entry of a recently evicted page, a swap in find_get_entries()
1912 * without attempting to raise page count. in find_get_entries()
1914 if (xa_is_value(page)) in find_get_entries()
1917 if (!page_cache_get_speculative(page)) in find_get_entries()
1920 /* Has the page moved or been split? */ in find_get_entries()
1921 if (unlikely(page != xas_reload(&xas))) in find_get_entries()
1928 if (PageTransHuge(page) && !PageHuge(page)) { in find_get_entries()
1929 page = find_subpage(page, xas.xa_index); in find_get_entries()
1934 entries[ret] = page; in find_get_entries()
1939 put_page(page); in find_get_entries()
1950 * @start: The starting page index
1951 * @end: The final page index (inclusive)
1962 * We also update @start to index the next page for the traversal.
1970 struct page **pages) in find_get_pages_range()
1973 struct page *page; in find_get_pages_range() local
1980 xas_for_each(&xas, page, end) { in find_get_pages_range()
1981 if (xas_retry(&xas, page)) in find_get_pages_range()
1984 if (xa_is_value(page)) in find_get_pages_range()
1987 if (!page_cache_get_speculative(page)) in find_get_pages_range()
1990 /* Has the page moved or been split? */ in find_get_pages_range()
1991 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range()
1994 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
2001 put_page(page); in find_get_pages_range()
2007 * We come here when there is no page beyond @end. We take care to not in find_get_pages_range()
2009 * breaks the iteration when there is a page at index -1 but that is in find_get_pages_range()
2025 * @index: The starting page index
2035 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
2038 struct page *page; in find_get_pages_contig() local
2045 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2046 if (xas_retry(&xas, page)) in find_get_pages_contig()
2052 if (xa_is_value(page)) in find_get_pages_contig()
2055 if (!page_cache_get_speculative(page)) in find_get_pages_contig()
2058 /* Has the page moved or been split? */ in find_get_pages_contig()
2059 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2062 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2067 put_page(page); in find_get_pages_contig()
2079 * @index: the starting page index
2080 * @end: The final page index (inclusive)
2086 * @tag. We update @index to index the next page for the traversal.
2092 struct page **pages) in find_get_pages_range_tag()
2095 struct page *page; in find_get_pages_range_tag() local
2102 xas_for_each_marked(&xas, page, end, tag) { in find_get_pages_range_tag()
2103 if (xas_retry(&xas, page)) in find_get_pages_range_tag()
2107 * is lockless so there is a window for page reclaim to evict in find_get_pages_range_tag()
2108 * a page we saw tagged. Skip over it. in find_get_pages_range_tag()
2110 if (xa_is_value(page)) in find_get_pages_range_tag()
2113 if (!page_cache_get_speculative(page)) in find_get_pages_range_tag()
2116 /* Has the page moved or been split? */ in find_get_pages_range_tag()
2117 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range_tag()
2120 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range_tag()
2127 put_page(page); in find_get_pages_range_tag()
2135 * iteration when there is a page at index -1 but that is already in find_get_pages_range_tag()
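In practice this is usually reached through the pagevec_lookup_range_tag() wrapper; a sketch of the write_cache_pages-style loop it enables (locking and the actual writeback are elided):

    pagevec_init(&pvec);
    while (index <= end) {
            unsigned i, nr = pagevec_lookup_range_tag(&pvec, mapping, &index,
                                                      end, PAGECACHE_TAG_DIRTY);
            if (!nr)
                    break;
            for (i = 0; i < nr; i++) {
                    /* lock pvec.pages[i], recheck its state, write it back */
            }
            pagevec_release(&pvec);
            cond_resched();
    }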
2196 unsigned long offset; /* offset into pagecache page */ in generic_file_buffered_read()
2219 struct page *page; in generic_file_buffered_read() local
2231 page = find_get_page(mapping, index); in generic_file_buffered_read()
2232 if (!page) { in generic_file_buffered_read()
2238 page = find_get_page(mapping, index); in generic_file_buffered_read()
2239 if (unlikely(page == NULL)) in generic_file_buffered_read()
2242 if (PageReadahead(page)) { in generic_file_buffered_read()
2244 put_page(page); in generic_file_buffered_read()
2248 ra, filp, page, in generic_file_buffered_read()
2251 if (!PageUptodate(page)) { in generic_file_buffered_read()
2259 put_page(page); in generic_file_buffered_read()
2262 error = wait_on_page_locked_async(page, in generic_file_buffered_read()
2266 put_page(page); in generic_file_buffered_read()
2269 error = wait_on_page_locked_killable(page); in generic_file_buffered_read()
2273 if (PageUptodate(page)) in generic_file_buffered_read()
2282 if (!trylock_page(page)) in generic_file_buffered_read()
2285 if (!page->mapping) in generic_file_buffered_read()
2287 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2290 unlock_page(page); in generic_file_buffered_read()
2294 * i_size must be checked after we know the page is Uptodate. in generic_file_buffered_read()
2298 * part of the page is not copied back to userspace (unless in generic_file_buffered_read()
2305 put_page(page); in generic_file_buffered_read()
2309 /* nr is the maximum number of bytes to copy from this page */ in generic_file_buffered_read()
2314 put_page(page); in generic_file_buffered_read()
2320 /* If users can be writing to this page using arbitrary in generic_file_buffered_read()
2322 * before reading the page on the kernel side. in generic_file_buffered_read()
2325 flush_dcache_page(page); in generic_file_buffered_read()
2328 * When a sequential read accesses a page several times, in generic_file_buffered_read()
2332 mark_page_accessed(page); in generic_file_buffered_read()
2336 * Ok, we have the page, and it's up-to-date, so in generic_file_buffered_read()
2340 ret = copy_page_to_iter(page, offset, nr, iter); in generic_file_buffered_read()
2346 put_page(page); in generic_file_buffered_read()
2357 /* Get exclusive access to the page ... */ in generic_file_buffered_read()
2360 put_page(page); in generic_file_buffered_read()
2363 error = lock_page_async(page, iocb->ki_waitq); in generic_file_buffered_read()
2365 error = lock_page_killable(page); in generic_file_buffered_read()
2372 if (!page->mapping) { in generic_file_buffered_read()
2373 unlock_page(page); in generic_file_buffered_read()
2374 put_page(page); in generic_file_buffered_read()
2379 if (PageUptodate(page)) { in generic_file_buffered_read()
2380 unlock_page(page); in generic_file_buffered_read()
2386 unlock_page(page); in generic_file_buffered_read()
2387 put_page(page); in generic_file_buffered_read()
2395 ClearPageError(page); in generic_file_buffered_read()
2396 /* Start the actual read. The read will unlock the page. */ in generic_file_buffered_read()
2397 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2401 put_page(page); in generic_file_buffered_read()
2408 if (!PageUptodate(page)) { in generic_file_buffered_read()
2411 put_page(page); in generic_file_buffered_read()
2414 error = lock_page_async(page, iocb->ki_waitq); in generic_file_buffered_read()
2416 error = lock_page_killable(page); in generic_file_buffered_read()
2421 if (!PageUptodate(page)) { in generic_file_buffered_read()
2422 if (page->mapping == NULL) { in generic_file_buffered_read()
2426 unlock_page(page); in generic_file_buffered_read()
2427 put_page(page); in generic_file_buffered_read()
2430 unlock_page(page); in generic_file_buffered_read()
2435 unlock_page(page); in generic_file_buffered_read()
2442 put_page(page); in generic_file_buffered_read()
2448 * page.. in generic_file_buffered_read()
2450 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2451 if (!page) { in generic_file_buffered_read()
2455 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2458 put_page(page); in generic_file_buffered_read()
2487 * that can use the page cache directly.
2562 * lock_page_maybe_drop_mmap - lock the page, possibly dropping the mmap_lock
 2564 * @page: the page to lock.
2568 * It differs in that it actually returns the page locked if it returns 1 and 0
2569 * if it couldn't lock the page. If we did have to drop the mmap_lock then fpin
2572 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2575 if (trylock_page(page)) in lock_page_maybe_drop_mmap()
2588 if (__lock_page_killable(page)) { in lock_page_maybe_drop_mmap()
2600 __lock_page(page); in lock_page_maybe_drop_mmap()
2606 * Synchronous readahead happens when we don't even find a page in the page
2658 * Asynchronous readahead happens when we find the page and PG_readahead,
2663 struct page *page) in do_async_mmap_readahead() argument
2678 if (PageReadahead(page)) { in do_async_mmap_readahead()
2681 page, offset, ra->ra_pages); in do_async_mmap_readahead()
2687 * filemap_fault - read in file data for page fault handling
2691 * mapped memory region to read in file data during a page fault.
2694 * it in the page cache, and handles the special cases reasonably without
2719 struct page *page; in filemap_fault() local
2727 * Do we have something in the page cache already? in filemap_fault()
2729 page = find_get_page(mapping, offset); in filemap_fault()
2730 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { in filemap_fault()
2732 * We found the page, so try async readahead before in filemap_fault()
2735 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
2736 } else if (!page) { in filemap_fault()
2737 /* No page in the page cache at all */ in filemap_fault()
2743 page = pagecache_get_page(mapping, offset, in filemap_fault()
2746 if (!page) { in filemap_fault()
2753 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
2757 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
2758 unlock_page(page); in filemap_fault()
2759 put_page(page); in filemap_fault()
2762 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
2765 * We have a locked page in the page cache, now we need to check in filemap_fault()
2768 if (unlikely(!PageUptodate(page))) in filemap_fault()
2777 unlock_page(page); in filemap_fault()
2782 * Found the page and have a reference on it. in filemap_fault()
2783 * We must recheck i_size under page lock. in filemap_fault()
2787 unlock_page(page); in filemap_fault()
2788 put_page(page); in filemap_fault()
2792 vmf->page = page; in filemap_fault()
2797 * Umm, take care of errors if the page isn't up-to-date. in filemap_fault()
2802 ClearPageError(page); in filemap_fault()
2804 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2806 wait_on_page_locked(page); in filemap_fault()
2807 if (!PageUptodate(page)) in filemap_fault()
2812 put_page(page); in filemap_fault()
2824 * page. in filemap_fault()
2826 if (page) in filemap_fault()
2827 put_page(page); in filemap_fault()
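For context, the way a filesystem normally wires these handlers into mmap, sketched in the style of generic_file_mmap(); the my_file_* names are hypothetical, and which page_mkwrite handler is used varies by filesystem:

    static const struct vm_operations_struct my_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };

    static int my_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            file_accessed(file);
            vma->vm_ops = &my_file_vm_ops;
            return 0;
    }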
2842 struct page *head, *page; in filemap_map_pages() local
2853 * Check for a locked page first, as a speculative in filemap_map_pages()
2854 * reference may adversely influence page migration. in filemap_map_pages()
2861 /* Has the page moved or been split? */ in filemap_map_pages()
2864 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
2867 PageReadahead(page) || in filemap_map_pages()
2868 PageHWPoison(page)) in filemap_map_pages()
2887 if (alloc_set_pte(vmf, page)) in filemap_map_pages()
2896 /* Huge page is mapped? No need to proceed. */ in filemap_map_pages()
2907 struct page *page = vmf->page; in filemap_page_mkwrite() local
2913 lock_page(page); in filemap_page_mkwrite()
2914 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
2915 unlock_page(page); in filemap_page_mkwrite()
2920 * We mark the page dirty already here so that when freeze is in in filemap_page_mkwrite()
2922 * see the dirty page and writeprotect it again. in filemap_page_mkwrite()
2924 set_page_dirty(page); in filemap_page_mkwrite()
2925 wait_for_stable_page(page); in filemap_page_mkwrite()
2978 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
2980 if (!IS_ERR(page)) { in wait_on_page_read()
2981 wait_on_page_locked(page); in wait_on_page_read()
2982 if (!PageUptodate(page)) { in wait_on_page_read()
2983 put_page(page); in wait_on_page_read()
2984 page = ERR_PTR(-EIO); in wait_on_page_read()
2987 return page; in wait_on_page_read()
2990 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
2992 int (*filler)(void *, struct page *), in do_read_cache_page() argument
2996 struct page *page; in do_read_cache_page() local
2999 page = find_get_page(mapping, index); in do_read_cache_page()
3000 if (!page) { in do_read_cache_page()
3001 page = __page_cache_alloc(gfp); in do_read_cache_page()
3002 if (!page) in do_read_cache_page()
3004 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3006 put_page(page); in do_read_cache_page()
3015 err = filler(data, page); in do_read_cache_page()
3017 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3020 put_page(page); in do_read_cache_page()
3024 page = wait_on_page_read(page); in do_read_cache_page()
3025 if (IS_ERR(page)) in do_read_cache_page()
3026 return page; in do_read_cache_page()
3029 if (PageUptodate(page)) in do_read_cache_page()
3033 * Page is not up to date and may be locked due to one of the following in do_read_cache_page()
3034 * case a: Page is being filled and the page lock is held in do_read_cache_page()
3035 * case b: Read/write error clearing the page uptodate status in do_read_cache_page()
3036 * case c: Truncation in progress (page locked) in do_read_cache_page()
3039 * Case a, the page will be up to date when the page is unlocked. in do_read_cache_page()
3040 * There is no need to serialise on the page lock here as the page in do_read_cache_page()
3042 * page is truncated, the data is still valid if PageUptodate as in do_read_cache_page()
3044 * Case b, the page will not be up to date in do_read_cache_page()
3045 * Case c, the page may be truncated but in itself, the data may still in do_read_cache_page()
3047 * operation must restart if the page is not uptodate on unlock but in do_read_cache_page()
3048 * otherwise serialising on page lock to stabilise the mapping gives in do_read_cache_page()
3049 * no additional guarantees to the caller as the page lock is in do_read_cache_page()
3051 * Case d, similar to truncation. If reclaim holds the page lock, it in do_read_cache_page()
3054 * no need to serialise with page lock. in do_read_cache_page()
3056 * As the page lock gives no additional guarantee, we optimistically in do_read_cache_page()
3057 * wait on the page to be unlocked and check if it's up to date and in do_read_cache_page()
3058 * use the page if it is. Otherwise, the page lock is required to in do_read_cache_page()
3061 * wait on the same page for IO to complete. in do_read_cache_page()
3063 wait_on_page_locked(page); in do_read_cache_page()
3064 if (PageUptodate(page)) in do_read_cache_page()
3068 lock_page(page); in do_read_cache_page()
3071 if (!page->mapping) { in do_read_cache_page()
3072 unlock_page(page); in do_read_cache_page()
3073 put_page(page); in do_read_cache_page()
3077 /* Someone else locked and filled the page in a very small window */ in do_read_cache_page()
3078 if (PageUptodate(page)) { in do_read_cache_page()
3079 unlock_page(page); in do_read_cache_page()
3086 * Clear page error before actual read, PG_error will be in do_read_cache_page()
3087 * set again if read page fails. in do_read_cache_page()
3089 ClearPageError(page); in do_read_cache_page()
3093 mark_page_accessed(page); in do_read_cache_page()
3094 return page; in do_read_cache_page()
3098 * read_cache_page - read into page cache, fill it if needed
3099 * @mapping: the page's address_space
3100 * @index: the page index
3102 * @data: first arg to filler(data, page) function, often left as NULL
3104 * Read into the page cache. If a page already exists, and PageUptodate() is
3105 * not set, try to fill the page and wait for it to become unlocked.
3107 * If the page does not get brought uptodate, return -EIO.
3109 * Return: up to date page on success, ERR_PTR() on failure.
3111 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
3113 int (*filler)(void *, struct page *), in read_cache_page() argument
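A hedged usage example: with a NULL filler, do_read_cache_page() falls back to the mapping's ->readpage(), so a typical call looks like the sketch below (the file argument is whatever that readpage expects, often NULL):

    page = read_cache_page(mapping, index, NULL, file);
    if (IS_ERR(page))
            return PTR_ERR(page);
    /* page is uptodate and referenced, but not locked */
    kaddr = kmap(page);
    /* ... use the data ... */
    kunmap(page);
    put_page(page);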
3122 * read_cache_page_gfp - read into page cache, using specified page allocation flags.
3123 * @mapping: the page's address_space
3124 * @index: the page index
3125 * @gfp: the page allocator flags to use if allocating
3128 * any new page allocations done using the specified allocation flags.
3130 * If the page does not get brought uptodate, return -EIO.
3132 * Return: up to date page on success, ERR_PTR() on failure.
3134 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
3144 struct page **pagep, void **fsdata) in pagecache_write_begin()
3155 struct page *page, void *fsdata) in pagecache_write_end() argument
3159 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3164 * Warn about a page cache invalidation failure during a direct I/O write.
3178 …pr_crit("Page cache invalidation failure on direct I/O. Possible data corruption due to collision… in dio_warn_stale_pagecache()
3212 * the new data. We invalidate clean cached page from the region we're in generic_file_direct_write()
3219 * If a page can not be invalidated, return 0 to fall back in generic_file_direct_write()
3267 * Find or create a page at the given pagecache position. Return the locked
3268 * page. This function is specifically for buffered writes.
3270 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin()
3273 struct page *page; in grab_cache_page_write_begin() local
3279 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3281 if (page) in grab_cache_page_write_begin()
3282 wait_for_stable_page(page); in grab_cache_page_write_begin()
3284 return page; in grab_cache_page_write_begin()
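A minimal sketch of a ->write_begin built on this helper (demo_write_begin is hypothetical; a real implementation must also zero or read in the parts of the page the write will not cover):

    static int demo_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            struct page *page;

            page = grab_cache_page_write_begin(mapping, pos >> PAGE_SHIFT, flags);
            if (!page)
                    return -ENOMEM;
            *pagep = page;
            return 0;
    }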
3298 struct page *page; in generic_perform_write() local
3299 unsigned long offset; /* Offset into pagecache page */ in generic_perform_write()
3300 unsigned long bytes; /* Bytes to write to page */ in generic_perform_write()
3310 * Bring in the user page that we will copy from _first_. in generic_perform_write()
3312 * same page as we're writing to, without it being marked in generic_perform_write()
3330 &page, &fsdata); in generic_perform_write()
3335 flush_dcache_page(page); in generic_perform_write()
3337 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); in generic_perform_write()
3338 flush_dcache_page(page); in generic_perform_write()
3341 page, fsdata); in generic_perform_write()
3402 /* We can write back this queue in page reclaim */ in __generic_file_write_iter()
3421 * page-cache pages correctly). in __generic_file_write_iter()
3439 * We need to ensure that the page cache pages are written to in __generic_file_write_iter()
3500 * try_to_release_page() - release old fs-specific metadata on a page
3502 * @page: the page which the kernel is trying to free
3505 * The address_space is to try to release any data against the page
3506 * (presumably at page->private).
3508 * This may also be called if PG_fscache is set on a page, indicating that the
3509 * page is known to the local caching routines.
3512 * this page (__GFP_IO), and whether the call may block (__GFP_RECLAIM & __GFP_FS).
3516 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3518 struct address_space * const mapping = page->mapping; in try_to_release_page()
3520 BUG_ON(!PageLocked(page)); in try_to_release_page()
3521 if (PageWriteback(page)) in try_to_release_page()
3525 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3526 return try_to_free_buffers(page); in try_to_release_page()
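And a sketch of the caller's side, as in the reclaim and invalidation paths: private data must be released, or the attempt refused, before a clean page can be dropped:

    if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
            return 0;       /* the filesystem refused; leave the page in place */
    /* safe to detach the page from the page cache now */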