/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}
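
/*
 * Usage sketch (assumed caller, for illustration only): anon_vma_prepare()
 * is expected to be called from an anonymous fault path before a new pte
 * is installed, roughly:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		goto oom;
 *	page = alloc_zeroed_user_highpage(vma, address);
 *
 * The surrounding fault logic shown here is an assumption; the real
 * callers live in mm/memory.c and mm/mmap.c.
 */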

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma? Also checks that
 * the page matches the vma: currently only used on anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}
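
/*
 * Worked example for vma_address() above (illustrative numbers only):
 * with 4KB pages, a vma that maps file offset 0x100000 at vm_start
 * 0xb7000000 has vm_pgoff = 0x100.  A page cached at page->index 0x105
 * is then expected at
 *
 *	address = 0xb7000000 + ((0x105 - 0x100) << PAGE_SHIFT)
 *	        = 0xb7005000
 *
 * and any result outside [vm_start, vm_end) makes vma_address()
 * return -EFAULT instead.
 */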

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
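
/*
 * Usage sketch (assumed caller, for illustration only): the page reclaim
 * scanner consults page_referenced() when deciding whether a page stays
 * active, roughly:
 *
 *	referenced = page_referenced(page, 1);
 *	if (referenced)
 *		goto activate_locked;
 *
 * The exact call sites and the surrounding policy live in mm/vmscan.c;
 * the snippet only illustrates how the returned pte-reference count is
 * consumed.
 */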

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}
	if (page_test_and_clear_dirty(page))
		ret = 1;

	return ret;
}

/**
 * page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}

/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}
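
/*
 * Usage sketch (assumed caller, for illustration only): a fault that
 * installs a brand-new anonymous page pairs the pte setup with the rmap
 * bookkeeping above, roughly:
 *
 *	inc_mm_counter(mm, anon_rss);
 *	lru_cache_add_active(page);
 *	page_add_new_anon_rmap(page, vma, address);
 *	set_pte_at(mm, address, page_table, entry);
 *
 * while a fault mapping an existing file page calls page_add_file_rmap()
 * instead.  The exact ordering and the local names (page_table, entry)
 * are assumptions modelled on the fault handlers in mm/memory.c.
 */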

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
			printk (KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk (KERN_EMERG "  page pfn = %lx\n", page_to_pfn(page));
			printk (KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk (KERN_EMERG "  page->count = %x\n", page_count(page));
			printk (KERN_EMERG "  page->mapping = %p\n", page->mapping);
			print_symbol (KERN_EMERG "  vma->vm_ops = %s\n", (unsigned long)vma->vm_ops);
			if (vma->vm_ops)
				print_symbol (KERN_EMERG "  vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage);
			if (vma->vm_file && vma->vm_file->f_op)
				print_symbol (KERN_EMERG "  vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap);
			BUG();
		}

		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_zone_page_state(page,
				PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}
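
/*
 * Sketch of the migration-entry handling above (illustrative restatement):
 * when try_to_unmap_one() runs with migration != 0, the pte is replaced by
 * a swap-like entry that encodes the page's pfn and write permission:
 *
 *	entry = make_migration_entry(page, pte_write(pteval));
 *	set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
 *
 * A thread faulting on that address meanwhile reaches do_swap_page(),
 * which recognizes the migration entry and waits for migration to finish
 * before retrying the fault (the waiting side is assumed to live in
 * mm/migrate.c).
 */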

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}
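
/*
 * Worked example for the nonlinear cluster scan above (illustrative
 * numbers, assuming 4KB pages and a 2MB PMD_SIZE): CLUSTER_SIZE is
 * min(32 * 4KB, 2MB) = 128KB, i.e. 32 ptes per call, and CLUSTER_MASK
 * is ~0x1ffff.  A cursor of 0x23000 into a vma starting at 0xb7000000
 * therefore scans
 *
 *	address = (0xb7000000 + 0x23000) & ~0x1ffff = 0xb7020000
 *	end     = 0xb7020000 + 0x20000             = 0xb7040000
 *
 * before try_to_unmap_file() advances the stored cursor by another
 * CLUSTER_SIZE.
 */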

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
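
/*
 * Usage sketch (assumed caller, for illustration only): the pageout path
 * is expected to drive try_to_unmap() and dispatch on its result, roughly:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_SUCCESS:
 *		break;
 *	}
 *
 * while page migration calls try_to_unmap(page, 1) so that migration
 * entries are installed even for mlocked vmas.  The call sites shown are
 * assumptions modelled on mm/vmscan.c and mm/migrate.c.
 */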