Lines Matching full:page (mm/rmap.c)

10  * Provides methods for unmapping each kind of mapped page:
25 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
50 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51 * page->flags PG_locked (lock_page)
272 * searches where page is mapped.
455 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
459 * have been relevant to this page.
461 * The page might have been remapped to a different anon_vma or the anon_vma
466 * ensure that any anon_vma obtained from the page will still be valid for as
470 * chain and verify that the page in question is indeed mapped in it
474 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
478 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
484 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
487 if (!page_mapped(page)) in page_get_anon_vma()
497 * If this page is still mapped, then its anon_vma cannot have been in page_get_anon_vma()
503 if (!page_mapped(page)) { in page_get_anon_vma()
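
Taken together, the matches above trace the check/recheck pattern the comment describes: read page->mapping under rcu_read_lock(), verify the anon bits, take a reference on the anon_vma, then re-test page_mapped(). A condensed sketch of that pattern (identifiers as in the matched lines; the full error paths are elided):

    struct anon_vma *anon_vma = NULL;
    unsigned long anon_mapping;

    rcu_read_lock();
    anon_mapping = (unsigned long)READ_ONCE(page->mapping);
    if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
            goto out;
    if (!page_mapped(page))                 /* mapping may already be stale */
            goto out;

    anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
    if (!atomic_inc_not_zero(&anon_vma->refcount)) {
            anon_vma = NULL;                /* anon_vma is being freed */
            goto out;
    }
    /* If the page is still mapped, the anon_vma cannot have been freed. */
    if (!page_mapped(page)) {
            put_anon_vma(anon_vma);
            anon_vma = NULL;
    }
    out:
            rcu_read_unlock();
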
521 struct anon_vma *page_lock_anon_vma_read(struct page *page) in page_lock_anon_vma_read() argument
528 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
531 if (!page_mapped(page)) in page_lock_anon_vma_read()
538 * If the page is still mapped, then this anon_vma is still in page_lock_anon_vma_read()
542 if (!page_mapped(page)) { in page_lock_anon_vma_read()
555 if (!page_mapped(page)) { in page_lock_anon_vma_read()
592 * before any IO is initiated on the page to prevent lost writes. Similarly,
633 * before the page is queued for IO. in set_tlb_ubc_flush_pending()
662 * the page and flushing the page. If this race occurs, it potentially allows
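
The race described here sits between clearing a pte and flushing remote TLBs: until the flush, another CPU can keep writing through a stale TLB entry. The batched-unmap contract (per the comments at lines 592 and 633) is that the deferred flush must happen before IO starts. A hedged sketch of the pageout-side ordering, using the batching flag and flush helper of this kernel era:

    /* sketch: reclaim-side ordering for batched TLB flushes */
    try_to_unmap(page, flags | TTU_BATCH_FLUSH); /* ptes cleared, flush deferred */
    ...
    try_to_unmap_flush_dirty();     /* flush stale entries before writeback */
    pageout(page, mapping);         /* only now is it safe to start IO */
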
698 * At what user virtual address is the page expected in vma?
699 * The caller should check that the page is actually part of the vma.
701 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
704 if (PageAnon(page)) { in page_address_in_vma()
705 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
713 } else if (page->mapping) { in page_address_in_vma()
714 if (!vma->vm_file || vma->vm_file->f_mapping != page->mapping) in page_address_in_vma()
718 address = __vma_address(page, vma); in page_address_in_vma()
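
What __vma_address() works out to is plain linear arithmetic on the page's file (or anon) index; page_address_in_vma() then range-checks the result. A sketch of the computation (as defined in mm/internal.h of this era):

    pgoff_t pgoff = page_to_pgoff(page);
    unsigned long address = vma->vm_start +
                    ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
    /* page_address_in_vma() returns -EFAULT if address is outside the vma */
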
767 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
772 .page = page, in page_referenced_one()
793 * If the page has been used in another mapping, in page_referenced_one()
796 * PG_referenced or activated the page. in page_referenced_one()
806 /* unexpected pmd-mapped page? */ in page_referenced_one()
814 clear_page_idle(page); in page_referenced_one()
815 if (test_and_clear_page_young(page)) in page_referenced_one()
841 * page_referenced - test if the page was referenced
842 * @page: the page to test
843 * @is_locked: caller holds lock on the page
845 * @vm_flags: collect the vm_flags of VMAs that actually referenced the page
847 * Quick test_and_clear_referenced for all mappings to a page,
848 * returns the number of ptes which referenced the page.
850 int page_referenced(struct page *page, in page_referenced() argument
857 .mapcount = total_mapcount(page), in page_referenced()
870 if (!page_rmapping(page)) in page_referenced()
873 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
874 we_locked = trylock_page(page); in page_referenced()
888 rmap_walk(page, &rwc); in page_referenced()
892 unlock_page(page); in page_referenced()
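
A typical caller is the reclaim path, which combines the returned pte count with the collected vm_flags to decide a page's fate. A minimal sketch of such a caller (the memcg argument may be NULL; the VM_LOCKED handling mirrors page_check_references()):

    unsigned long vm_flags;
    int referenced;

    referenced = page_referenced(page, 1, NULL, &vm_flags); /* page is locked */
    if (vm_flags & VM_LOCKED)
            ;       /* mlocked somewhere: treat as unreclaimable */
    else if (referenced)
            ;       /* recently used: keep or activate rather than reclaim */
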
897 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
901 .page = page, in page_mkclean_one()
911 * the page cannot be freed from this function. in page_mkclean_one()
915 min(vma->vm_end, address + page_size(page))); in page_mkclean_one()
943 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
950 /* unexpected pmd-mapped page? */ in page_mkclean_one()
957 * downgrading page table protection, not changing it to point in page_mkclean_one()
958 * to a new page. in page_mkclean_one()
979 int page_mkclean(struct page *page) in page_mkclean() argument
989 BUG_ON(!PageLocked(page)); in page_mkclean()
991 if (!page_mapped(page)) in page_mkclean()
994 mapping = page_mapping(page); in page_mkclean()
998 rmap_walk(page, &rwc); in page_mkclean()
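
page_mkclean() is what lets writeback catch future stores: every pte mapping the page is write-protected and its dirty bit transferred, so the next write faults and re-dirties the page. A hedged sketch of the writeback-side usage, modeled on clear_page_dirty_for_io():

    /* page must be locked, per the BUG_ON at line 989 */
    if (page_mkclean(page))
            set_page_dirty(page);   /* a pte was dirty: move that to the page */
    if (TestClearPageDirty(page))
            ;       /* safe to start IO: new stores will fault and re-dirty */
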
1005 * page_move_anon_rmap - move a page to our anon_vma
1006 * @page: the page to move to our anon_vma
1007 * @vma: the vma the page belongs to
1009 * When a page belongs exclusively to one process after a COW event,
1010 * that page can be moved into the anon_vma that belongs to just that
1014 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1018 page = compound_head(page); in page_move_anon_rmap()
1020 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1029 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1034 * @page: Page or Hugepage to add to rmap
1035 * @vma: VM area to add page to.
1037 * @exclusive: the page is exclusively owned by the current process
1039 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1046 if (PageAnon(page)) in __page_set_anon_rmap()
1050 * If the page isn't exclusively mapped into this vma, in __page_set_anon_rmap()
1052 * page mapping! in __page_set_anon_rmap()
1058 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1059 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
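
The casts at lines 1058 and 1029 work because page->mapping for anonymous pages is an encoding, not a real address_space pointer: the anon_vma pointer is tagged with the PAGE_MAPPING_ANON low bit. Schematically (constants from include/linux/page-flags.h):

    /* encode: tag the anon_vma pointer before storing it in page->mapping */
    anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
    page->mapping = (struct address_space *) anon_vma;

    /* decode: PageAnon() is just a test of that low bit */
    if ((unsigned long)page->mapping & PAGE_MAPPING_ANON)
            /* page is anonymous */;
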
1064 * @page: the page to add the mapping to
1068 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1072 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1076 * always holds the page locked, except if called from page_dup_rmap, in __page_check_anon_rmap()
1077 * in which case the page is already known to be set up. in __page_check_anon_rmap()
1083 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1084 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1085 page); in __page_check_anon_rmap()
1089 * page_add_anon_rmap - add pte mapping to an anonymous page
1090 * @page: the page to add the mapping to
1093 * @compound: charge the page as compound or small page
1095 * The caller needs to hold the pte lock, and the page must be locked in
1100 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1103 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1111 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1117 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1118 lock_page_memcg(page); in do_page_add_anon_rmap()
1120 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1124 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1125 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1126 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1129 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1133 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap()
1141 __inc_lruvec_page_state(page, NR_ANON_THPS); in do_page_add_anon_rmap()
1142 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1145 if (unlikely(PageKsm(page))) { in do_page_add_anon_rmap()
1146 unlock_page_memcg(page); in do_page_add_anon_rmap()
1152 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1155 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
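
The two branches above select which counter records the mapping: compound (PMD) mappings bump the compound mapcount stored in the first tail page, while pte mappings bump page->_mapcount. Both counters start at -1, which is why atomic_inc_and_test() (result became zero) signals the first mapping:

    if (compound)
            first = atomic_inc_and_test(compound_mapcount_ptr(page));
    else
            first = atomic_inc_and_test(&page->_mapcount);
    /* first == true: this was the page's first mapping; update the counters */
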
1159 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
1160 * @page: the page to add the mapping to
1163 * @compound: charge the page as compound or small page
1167 * Page does not have to be locked.
1169 void page_add_new_anon_rmap(struct page *page, in page_add_new_anon_rmap() argument
1172 int nr = compound ? thp_nr_pages(page) : 1; in page_add_new_anon_rmap()
1175 __SetPageSwapBacked(page); in page_add_new_anon_rmap()
1177 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in page_add_new_anon_rmap()
1179 atomic_set(compound_mapcount_ptr(page), 0); in page_add_new_anon_rmap()
1180 if (hpage_pincount_available(page)) in page_add_new_anon_rmap()
1181 atomic_set(compound_pincount_ptr(page), 0); in page_add_new_anon_rmap()
1183 __inc_lruvec_page_state(page, NR_ANON_THPS); in page_add_new_anon_rmap()
1186 VM_BUG_ON_PAGE(PageTransCompound(page), page); in page_add_new_anon_rmap()
1188 atomic_set(&page->_mapcount, 0); in page_add_new_anon_rmap()
1190 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in page_add_new_anon_rmap()
1191 __page_set_anon_rmap(page, vma, address, 1); in page_add_new_anon_rmap()
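
Because a new anonymous page is not yet visible to any other process, no recheck or page lock is needed here. The typical fault-path sequence, sketched loosely on do_anonymous_page() of this era (error handling elided):

    page = alloc_zeroed_user_highpage_movable(vma, vmf->address);
    ...
    page_add_new_anon_rmap(page, vma, vmf->address, false);
    lru_cache_add_inactive_or_unevictable(page, vma);
    set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);
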
1195 * page_add_file_rmap - add pte mapping to a file page
1196 * @page: the page to add the mapping to
1197 * @compound: charge the page as compound or small page
1201 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1205 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1206 lock_page_memcg(page); in page_add_file_rmap()
1207 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1208 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_add_file_rmap()
1209 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1212 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1214 if (PageSwapBacked(page)) in page_add_file_rmap()
1215 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_add_file_rmap()
1217 __inc_node_page_state(page, NR_FILE_PMDMAPPED); in page_add_file_rmap()
1219 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1220 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1222 SetPageDoubleMap(compound_head(page)); in page_add_file_rmap()
1223 if (PageMlocked(page)) in page_add_file_rmap()
1224 clear_page_mlock(compound_head(page)); in page_add_file_rmap()
1226 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1229 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1231 unlock_page_memcg(page); in page_add_file_rmap()
1234 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1238 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1241 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1243 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1247 /* page still mapped by someone else? */ in page_remove_file_rmap()
1248 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1249 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_file_rmap()
1250 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1253 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1255 if (PageSwapBacked(page)) in page_remove_file_rmap()
1256 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_remove_file_rmap()
1258 __dec_node_page_state(page, NR_FILE_PMDMAPPED); in page_remove_file_rmap()
1260 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1269 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1271 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1272 clear_page_mlock(page); in page_remove_file_rmap()
1275 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1279 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1283 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1289 __dec_lruvec_page_state(page, NR_ANON_THPS); in page_remove_anon_compound_rmap()
1291 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1296 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_anon_compound_rmap()
1297 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1302 * Queue the page for deferred split if at least one small in page_remove_anon_compound_rmap()
1303 * page of the compound page is unmapped, but at least one in page_remove_anon_compound_rmap()
1304 * small page is still mapped. in page_remove_anon_compound_rmap()
1306 if (nr && nr < thp_nr_pages(page)) in page_remove_anon_compound_rmap()
1307 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1309 nr = thp_nr_pages(page); in page_remove_anon_compound_rmap()
1312 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1313 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1316 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1320 * page_remove_rmap - take down pte mapping from a page
1321 * @page: page to remove mapping from
1322 * @compound: uncharge the page as compound or small page
1326 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1328 lock_page_memcg(page); in page_remove_rmap()
1330 if (!PageAnon(page)) { in page_remove_rmap()
1331 page_remove_file_rmap(page, compound); in page_remove_rmap()
1336 page_remove_anon_compound_rmap(page); in page_remove_rmap()
1340 /* page still mapped by someone else? */ in page_remove_rmap()
1341 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1349 __dec_lruvec_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1351 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1352 clear_page_mlock(page); in page_remove_rmap()
1354 if (PageTransCompound(page)) in page_remove_rmap()
1355 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
1367 unlock_page_memcg(page); in page_remove_rmap()
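
page_remove_rmap() is the mirror of the add functions and runs under the pte lock, right after the pte has been cleared. A minimal sketch of an unmap-time caller, modeled loosely on zap_pte_range() (real callers defer the final put through the mmu_gather batching machinery):

    /* pte lock held; ptent was just cleared from the page table */
    if (pte_dirty(ptent))
            set_page_dirty(page);
    page_remove_rmap(page, false);  /* false: a small-page pte mapping */
    put_page(page);                 /* may free the page on last reference */
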
1373 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1378 .page = page, in try_to_unmap_one()
1383 struct page *subpage; in try_to_unmap_one()
1393 is_zone_device_page(page) && !is_device_private_page(page)) in try_to_unmap_one()
1398 flags & TTU_SPLIT_FREEZE, page); in try_to_unmap_one()
1406 * Note that the page cannot be freed in this function, as the in try_to_unmap_one()
1407 * caller of try_to_unmap() must hold a reference on the page. in try_to_unmap_one()
1411 min(vma->vm_end, address + page_size(page))); in try_to_unmap_one()
1412 if (PageHuge(page)) { in try_to_unmap_one()
1426 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in try_to_unmap_one()
1428 set_pmd_migration_entry(&pvmw, page); in try_to_unmap_one()
1434 * If the page is mlock()d, we cannot swap it out. in try_to_unmap_one()
1441 if (!PageTransCompound(page)) { in try_to_unmap_one()
1446 mlock_vma_page(page); in try_to_unmap_one()
1457 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1459 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1462 if (PageHuge(page) && !PageAnon(page)) { in try_to_unmap_one()
1472 * page. There is no way of knowing exactly in try_to_unmap_one()
1483 * The ref count of the PMD page was dropped in try_to_unmap_one()
1488 * unmap the actual page and drop map count in try_to_unmap_one()
1498 is_zone_device_page(page)) { in try_to_unmap_one()
1505 * Store the pfn of the page in a special migration in try_to_unmap_one()
1509 entry = make_migration_entry(page, 0); in try_to_unmap_one()
1513 * pteval maps a zone device page and is therefore in try_to_unmap_one()
1528 * migrated, just set it to page. This will need to be in try_to_unmap_one()
1532 subpage = page; in try_to_unmap_one()
1545 /* Nuke the page table entry. */ in try_to_unmap_one()
1550 * a remote CPU could still be writing to the page. in try_to_unmap_one()
1563 /* Move the dirty bit to the page. Now the pte is gone. */ in try_to_unmap_one()
1565 set_page_dirty(page); in try_to_unmap_one()
1570 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1572 if (PageHuge(page)) { in try_to_unmap_one()
1573 hugetlb_count_sub(compound_nr(page), mm); in try_to_unmap_one()
1578 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1584 * The guest indicated that the page content is of no in try_to_unmap_one()
1588 * page. When userfaultfd is active, we must not drop in try_to_unmap_one()
1589 * this page though, as its main user (postcopy in try_to_unmap_one()
1593 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1610 * Store the pfn of the page in a special migration in try_to_unmap_one()
1626 } else if (PageAnon(page)) { in try_to_unmap_one()
1633 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1643 /* MADV_FREE page check */ in try_to_unmap_one()
1644 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1645 if (!PageDirty(page)) { in try_to_unmap_one()
1654 * If the page was redirtied, it cannot be in try_to_unmap_one()
1655 * discarded. Remap the page to page table. in try_to_unmap_one()
1658 SetPageSwapBacked(page); in try_to_unmap_one()
1695 * This is a locked file-backed page, thus it cannot in try_to_unmap_one()
1696 * be removed from the page cache and replaced by a new in try_to_unmap_one()
1697 * page before mmu_notifier_invalidate_range_end, so no in try_to_unmap_one()
1698 * concurrent thread might update its page table to in try_to_unmap_one()
1699 * point at new page while a device still is using this in try_to_unmap_one()
1700 * page. in try_to_unmap_one()
1704 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1709 * done above for all cases requiring it to happen under page in try_to_unmap_one()
1714 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1715 put_page(page); in try_to_unmap_one()
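
In the migration case, the cleared pte is replaced by a special swap-format entry encoding the pfn and write permission, so faulting threads block until migration finishes. The core of that replacement, condensed from the matched lines (soft-dirty and error handling mostly elided):

    swp_entry_t entry;
    pte_t swp_pte;

    entry = make_migration_entry(subpage, pte_write(pteval));
    swp_pte = swp_entry_to_pte(entry);
    if (pte_soft_dirty(pteval))
            swp_pte = pte_swp_mksoft_dirty(swp_pte);
    set_pte_at(mm, address, pvmw.pte, swp_pte);
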
1728 static int page_mapcount_is_zero(struct page *page) in page_mapcount_is_zero() argument
1730 return !total_mapcount(page); in page_mapcount_is_zero()
1734 * try_to_unmap - try to remove all page table mappings to a page
1735 * @page: the page to get unmapped
1738 * Tries to remove all the page table entries which are mapping this
1739 * page, used in the pageout path. Caller must hold the page lock.
1743 bool try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1755 * page tables leading to a race where migration cannot in try_to_unmap()
1761 && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1765 rmap_walk_locked(page, &rwc); in try_to_unmap()
1767 rmap_walk(page, &rwc); in try_to_unmap()
1769 return !page_mapcount(page); in try_to_unmap()
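
The boolean result just reports whether every mapping is gone. A sketch of the pageout-path call, modeled on shrink_page_list() of this era:

    enum ttu_flags flags = TTU_BATCH_FLUSH;

    if (unlikely(PageTransHuge(page)))
            flags |= TTU_SPLIT_HUGE_PMD;
    if (!try_to_unmap(page, flags))
            goto activate_locked;   /* still mapped: cannot reclaim */
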
1772 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1774 return !page_mapped(page); in page_not_mapped()
1778 * try_to_munlock - try to munlock a page
1779 * @page: the page to be munlocked
1781 * Called from munlock code. Checks all of the VMAs mapping the page
1782 * to make sure nobody else has this page mlocked. The page will be
1786 void try_to_munlock(struct page *page) in try_to_munlock() argument
1796 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1797 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in try_to_munlock()
1799 rmap_walk(page, &rwc); in try_to_munlock()
1811 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1817 return rwc->anon_lock(page); in rmap_walk_anon_lock()
1825 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1834 * rmap_walk_anon - do something to anonymous page using the object-based rmap method
1836 * @page: the page to be handled
1839 * Find all the mappings of a page using the mapping pointer and the vma chains
1843 * where the page was found will be held for write. So, we won't recheck
1847 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
1855 anon_vma = page_anon_vma(page); in rmap_walk_anon()
1857 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
1859 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1864 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
1865 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_anon()
1869 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1876 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1878 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1887 * rmap_walk_file - do something to file page using the object-based rmap method
1888 * @page: the page to be handled
1891 * Find all the mappings of a page using the mapping pointer and the vma chains
1895 * where the page was found will be held for write. So, we won't recheck
1899 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
1902 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
1907 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
1912 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
1917 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
1918 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_file()
1923 unsigned long address = vma_address(page, vma); in rmap_walk_file()
1930 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
1932 if (rwc->done && rwc->done(page)) in rmap_walk_file()
1941 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
1943 if (unlikely(PageKsm(page))) in rmap_walk()
1944 rmap_walk_ksm(page, rwc); in rmap_walk()
1945 else if (PageAnon(page)) in rmap_walk()
1946 rmap_walk_anon(page, rwc, false); in rmap_walk()
1948 rmap_walk_file(page, rwc, false); in rmap_walk()
1952 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
1955 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
1956 if (PageAnon(page)) in rmap_walk_locked()
1957 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
1959 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
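
Every entry point above (page_referenced, page_mkclean, try_to_unmap, try_to_munlock) is a thin wrapper that fills in a struct rmap_walk_control and hands it to one of these dispatchers. A minimal hedged sketch of such a wrapper (my_rmap_one is a hypothetical callback, not a kernel symbol):

    static bool my_rmap_one(struct page *page, struct vm_area_struct *vma,
                            unsigned long address, void *arg)
    {
            /* inspect or modify the mapping of page at address in vma */
            return true;            /* true: keep walking; false: stop */
    }

    struct rmap_walk_control rwc = {
            .rmap_one = my_rmap_one,
            .done = page_not_mapped,        /* stop once fully unmapped */
            .anon_lock = page_lock_anon_vma_read,
    };

    rmap_walk(page, &rwc);
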
1968 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
1974 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
1977 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
1979 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
1982 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
1986 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
1987 if (hpage_pincount_available(page)) in hugepage_add_new_anon_rmap()
1988 atomic_set(compound_pincount_ptr(page), 0); in hugepage_add_new_anon_rmap()
1990 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()