/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/kallsyms.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/* This must be called under the mmap_sem. */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
	    SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

static void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	spin_unlock(&anon_vma->lock);
	rcu_read_unlock();
}

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma? checking that the
 * page matches the vma: currently only used on anon pages, by unuse_vma;
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}
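/*
 * Illustrative sketch, not part of this file's code: page_check_address()
 * returns with the pte still mapped and its lock held, so a caller is
 * expected to follow the pattern below (the callers in this file,
 * page_referenced_one, page_mkclean_one and try_to_unmap_one, all do).
 *
 *	pte = page_check_address(page, mm, address, &ptl);
 *	if (!pte)
 *		goto out;			(page not mapped at address)
 *	... inspect or modify *pte under the lock ...
 *	pte_unmap_unlock(pte, ptl);
 */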
/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}
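/*
 * Illustrative sketch, not part of this file's code: page reclaim is the
 * intended consumer of page_referenced().  A caller that holds the page
 * lock might, roughly, do:
 *
 *	if (page_referenced(page, 1))
 *		keep the page active;
 *	else
 *		treat it as a reclaim candidate;
 *
 * The real policy (activation thresholds, mapped-page handling) lives in
 * mm/vmscan.c and is not reproduced here.
 */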
static int page_mkclean_one(struct page *page, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		lazy_mmu_prot_update(entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED)
			ret += page_mkclean_one(page, vma);
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping)
			ret = page_mkclean_file(mapping, page);
	}
	if (page_test_and_clear_dirty(page))
		ret = 1;

	return ret;
}
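/*
 * Illustrative sketch, not part of this file's code: page_mkclean()
 * write-protects and cleans every pte of a shared file page, so that a
 * later write through the mapping faults and redirties the page.  A
 * writeback path holding the page lock might use it roughly as:
 *
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	(fold pte dirty state into the page)
 *
 * before clearing the page's dirty flag and starting writeout.
 */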
/**
 * page_set_anon_rmap - setup new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_zone_page_state(page, NR_ANON_PAGES);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}

/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_zone_page_state(page, NR_FILE_MAPPED);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page, struct vm_area_struct *vma)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
		if (unlikely(page_mapcount(page) < 0)) {
(%d)\n", page_mapcount(page)); 5877de6b805SNick Piggin printk (KERN_EMERG " page pfn = %lx\n", page_to_pfn(page)); 588ef2bf0dcSDave Jones printk (KERN_EMERG " page->flags = %lx\n", page->flags); 589ef2bf0dcSDave Jones printk (KERN_EMERG " page->count = %x\n", page_count(page)); 590ef2bf0dcSDave Jones printk (KERN_EMERG " page->mapping = %p\n", page->mapping); 5917de6b805SNick Piggin print_symbol (KERN_EMERG " vma->vm_ops = %s\n", (unsigned long)vma->vm_ops); 5927de6b805SNick Piggin if (vma->vm_ops) 5937de6b805SNick Piggin print_symbol (KERN_EMERG " vma->vm_ops->nopage = %s\n", (unsigned long)vma->vm_ops->nopage); 5947de6b805SNick Piggin if (vma->vm_file && vma->vm_file->f_op) 5957de6b805SNick Piggin print_symbol (KERN_EMERG " vma->vm_file->f_op->mmap = %s\n", (unsigned long)vma->vm_file->f_op->mmap); 596b16bc64dSDave Jones BUG(); 597ef2bf0dcSDave Jones } 598b16bc64dSDave Jones 5991da177e4SLinus Torvalds /* 6001da177e4SLinus Torvalds * It would be tidy to reset the PageAnon mapping here, 6011da177e4SLinus Torvalds * but that might overwrite a racing page_add_anon_rmap 6021da177e4SLinus Torvalds * which increments mapcount after us but sets mapping 6031da177e4SLinus Torvalds * before us: so leave the reset to free_hot_cold_page, 6041da177e4SLinus Torvalds * and remember that it's only reliable while mapped. 6051da177e4SLinus Torvalds * Leaving it set also helps swapoff to reinstate ptes 6061da177e4SLinus Torvalds * faster for those pages still in swapcache. 6071da177e4SLinus Torvalds */ 6081da177e4SLinus Torvalds if (page_test_and_clear_dirty(page)) 6091da177e4SLinus Torvalds set_page_dirty(page); 610f3dbd344SChristoph Lameter __dec_zone_page_state(page, 611f3dbd344SChristoph Lameter PageAnon(page) ? NR_ANON_PAGES : NR_FILE_MAPPED); 6121da177e4SLinus Torvalds } 6131da177e4SLinus Torvalds } 6141da177e4SLinus Torvalds 6151da177e4SLinus Torvalds /* 6161da177e4SLinus Torvalds * Subfunctions of try_to_unmap: try_to_unmap_one called 6171da177e4SLinus Torvalds * repeatedly from either try_to_unmap_anon or try_to_unmap_file. 6181da177e4SLinus Torvalds */ 619a48d07afSChristoph Lameter static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma, 6207352349aSChristoph Lameter int migration) 6211da177e4SLinus Torvalds { 6221da177e4SLinus Torvalds struct mm_struct *mm = vma->vm_mm; 6231da177e4SLinus Torvalds unsigned long address; 6241da177e4SLinus Torvalds pte_t *pte; 6251da177e4SLinus Torvalds pte_t pteval; 626c0718806SHugh Dickins spinlock_t *ptl; 6271da177e4SLinus Torvalds int ret = SWAP_AGAIN; 6281da177e4SLinus Torvalds 6291da177e4SLinus Torvalds address = vma_address(page, vma); 6301da177e4SLinus Torvalds if (address == -EFAULT) 6311da177e4SLinus Torvalds goto out; 6321da177e4SLinus Torvalds 633c0718806SHugh Dickins pte = page_check_address(page, mm, address, &ptl); 634c0718806SHugh Dickins if (!pte) 63581b4082dSNikita Danilov goto out; 6361da177e4SLinus Torvalds 6371da177e4SLinus Torvalds /* 6381da177e4SLinus Torvalds * If the page is mlock()d, we cannot swap it out. 6391da177e4SLinus Torvalds * If it's recently referenced (perhaps page_referenced 6401da177e4SLinus Torvalds * skipped over this mm) then we should reactivate it. 
/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!migration && ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)))) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, anon_rss);
#ifdef CONFIG_MIGRATION
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
#endif
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else
#ifdef CONFIG_MIGRATION
	if (migration) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
#endif
		dec_mm_counter(mm, file_rss);


	page_remove_rmap(page, vma);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
 */
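/*
 * Worked example of the cluster arithmetic defined just below, assuming
 * 4K pages and a PMD span of at least 128K: CLUSTER_SIZE is
 * min(32 * 4K, PMD_SIZE) = 128K, so each call scans up to 32 ptes, and
 * CLUSTER_MASK rounds the cursor down to a 128K boundary so successive
 * scans stay cluster-aligned within the vma.
 */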
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page, vma);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if ((vma->vm_flags & VM_LOCKED) && !migration)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* any nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
	 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if ((vma->vm_flags & VM_LOCKED) && !migration)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while ( cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}
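/*
 * Illustrative sketch, not part of this file's code: the pageout path is
 * the intended caller of try_to_unmap().  With the page locked, reclaim
 * might proceed roughly as:
 *
 *	switch (try_to_unmap(page, 0)) {
 *	case SWAP_SUCCESS:
 *		pageout or free the now-unmapped page;
 *	case SWAP_AGAIN:
 *		keep the page and retry later;
 *	case SWAP_FAIL:
 *		reactivate the page, it cannot be unmapped;
 *	}
 *
 * Passing migration as 1 is reserved for page migration, which installs
 * special migration entries instead of swap entries in the ptes.
 */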