/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins <hugh@veritas.com> 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem
 *
 * When a page fault occurs in writing from user to file, down_read
 * of mmap_sem nests within i_mutex; in sys_msync, i_mutex nests within
 * down_read of mmap_sem; i_mutex and down_write of mmap_sem are never
 * taken together; in truncation, i_mutex is taken outermost.
 *
 * mm->mmap_sem
 *   page->flags PG_locked (lock_page)
 *     mapping->i_mmap_lock
 *       anon_vma->lock
 *         mm->page_table_lock or pte_lock
 *           zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *           swap_lock (in swap_duplicate, swap_info_get)
 *             mmlist_lock (in mmput, drain_mmlist and others)
 *             mapping->private_lock (in __set_page_dirty_buffers)
 *             inode_lock (in set_page_dirty's __mark_inode_dirty)
 *               sb_lock (within inode_lock in fs/fs-writeback.c)
 *               mapping->tree_lock (widely used, in set_page_dirty,
 *                         in arch-dependent flush_dcache_mmap_lock,
 *                         within inode_lock in __sync_single_inode)
 */

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#include <asm/tlbflush.h>

struct kmem_cache *anon_vma_cachep;

static inline void validate_anon_vma(struct vm_area_struct *find_vma)
{
#ifdef CONFIG_DEBUG_VM
	struct anon_vma *anon_vma = find_vma->anon_vma;
	struct vm_area_struct *vma;
	unsigned int mapcount = 0;
	int found = 0;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		mapcount++;
		BUG_ON(mapcount > 100000);
		if (vma == find_vma)
			found = 1;
	}
	BUG_ON(!found);
#endif
}

/*
 * This must be called under the mmap_sem.
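 *
 * Attach an anon_vma to the vma if it does not have one yet: reuse a
 * mergeable neighbour's anon_vma when possible, otherwise allocate a
 * fresh one.  Returns 0 on success, or -ENOMEM if allocation fails.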
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated, *locked;

		anon_vma = find_mergeable_anon_vma(vma);
		if (anon_vma) {
			allocated = NULL;
			locked = anon_vma;
			spin_lock(&locked->lock);
		} else {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				return -ENOMEM;
			allocated = anon_vma;
			locked = NULL;
		}

		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			list_add_tail(&vma->anon_vma_node, &anon_vma->head);
			allocated = NULL;
		}
		spin_unlock(&mm->page_table_lock);

		if (locked)
			spin_unlock(&locked->lock);
		if (unlikely(allocated))
			anon_vma_free(allocated);
	}
	return 0;
}

void __anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next)
{
	BUG_ON(vma->anon_vma != next->anon_vma);
	list_del(&next->anon_vma_node);
}

void __anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
	}
}

void anon_vma_link(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		spin_lock(&anon_vma->lock);
		list_add_tail(&vma->anon_vma_node, &anon_vma->head);
		validate_anon_vma(vma);
		spin_unlock(&anon_vma->lock);
	}
}

void anon_vma_unlink(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	int empty;

	if (!anon_vma)
		return;

	spin_lock(&anon_vma->lock);
	validate_anon_vma(vma);
	list_del(&vma->anon_vma_node);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head);
	spin_unlock(&anon_vma->lock);

	if (empty)
		anon_vma_free(anon_vma);
}

static void anon_vma_ctor(void *data, struct kmem_cache *cachep,
			  unsigned long flags)
{
	if ((flags & (SLAB_CTOR_VERIFY|SLAB_CTOR_CONSTRUCTOR)) ==
						SLAB_CTOR_CONSTRUCTOR) {
		struct anon_vma *anon_vma = data;

		spin_lock_init(&anon_vma->lock);
		INIT_LIST_HEAD(&anon_vma->head);
	}
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor, NULL);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
static struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma = NULL;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) page->mapping;
	if (!(anon_mapping & PAGE_MAPPING_ANON))
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);
out:
	rcu_read_unlock();
	return anon_vma;
}

#ifdef CONFIG_MIGRATION
/*
 * Remove an anonymous page from swap: replace its swap ptes with real
 * ptes pointing to the valid page, then release the page from the
 * swap cache.
 *
 * Must hold page lock on page and mmap_sem of one vma that contains
 * the page.
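 *
 * Walks every vma on the page's anon_vma list, calling remove_vma_swap()
 * for each, and finally deletes the page from the swap cache.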
 */
void remove_from_swap(struct page *page)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	unsigned long mapping;

	if (!PageSwapCache(page))
		return;

	mapping = (unsigned long)page->mapping;

	if (!mapping || (mapping & PAGE_MAPPING_ANON) == 0)
		return;

	/*
	 * We hold the mmap_sem lock. So no need to call page_lock_anon_vma.
	 */
	anon_vma = (struct anon_vma *) (mapping - PAGE_MAPPING_ANON);
	spin_lock(&anon_vma->lock);

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node)
		remove_vma_swap(vma, page);

	spin_unlock(&anon_vma->lock);
	delete_from_swap_cache(page);
}
EXPORT_SYMBOL(remove_from_swap);
#endif

/*
 * At what user virtual address is page expected in vma?
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within any vma from prio_tree_next */
		BUG_ON(!PageAnon(page));
		return -EFAULT;
	}
	return address;
}

/*
 * At what user virtual address is page expected in vma?
 * Also checks that the page matches the vma: currently only used on
 * anon pages, by unuse_vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page)) {
		if ((void *)vma->anon_vma !=
		    (void *)page->mapping - PAGE_MAPPING_ANON)
			return -EFAULT;
	} else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
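 *
 * Returns the number of references found in this vma: one if the pte was
 * recently young, plus one if the owning task holds the swap token and
 * appears to be in the middle of a page fault.  Also decrements *mapcount
 * so the callers can stop once every mapping has been visited.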
 */
static int page_referenced_one(struct page *page,
	struct vm_area_struct *vma, unsigned int *mapcount)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	if (ptep_clear_flush_young(vma, address, pte))
		referenced++;

	/* Pretend the page is referenced if the task has the
	   swap token and is in the middle of a page fault. */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);
out:
	return referenced;
}

static int page_referenced_anon(struct page *page)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}
	spin_unlock(&anon_vma->lock);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
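 *
 * A vma that is both VM_LOCKED and VM_MAYSHARE counts as one reference
 * immediately and ends the walk.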
 */
static int page_referenced_file(struct page *page)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if ((vma->vm_flags & (VM_LOCKED|VM_MAYSHARE))
				  == (VM_LOCKED|VM_MAYSHARE)) {
			referenced++;
			break;
		}
		referenced += page_referenced_one(page, vma, &mapcount);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
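 *
 * The count also includes the page's own PG_referenced flag and the
 * storage-key young bit (where the architecture has one), plus one
 * extra reference if the page could not be locked for the file walk.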
 */
int page_referenced(struct page *page, int is_locked)
{
	int referenced = 0;

	if (page_test_and_clear_young(page))
		referenced++;

	if (TestClearPageReferenced(page))
		referenced++;

	if (page_mapped(page) && page->mapping) {
		if (PageAnon(page))
			referenced += page_referenced_anon(page);
		else if (is_locked)
			referenced += page_referenced_file(page);
		else if (TestSetPageLocked(page))
			referenced++;
		else {
			if (page->mapping)
				referenced += page_referenced_file(page);
			unlock_page(page);
		}
	}
	return referenced;
}

/**
 * __page_set_anon_rmap - set up new anonymous rmap
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);
	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;

	page->index = linear_page_index(vma, address);

	/*
	 * nr_mapped state can be updated without turning off
	 * interrupts because it is not modified via interrupt.
	 */
	__inc_page_state(nr_mapped);
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * The caller needs to hold the pte lock.
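 *
 * Only the first mapping (mapcount going from -1 to 0) sets up the anon
 * rmap fields via __page_set_anon_rmap(); later mappings just raise the
 * mapcount.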
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__page_set_anon_rmap(page, vma, address);
	/* else checking page index and mapping is racy */
}

/*
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page: the page to add the mapping to
 * @vma: the vm area in which the mapping is added
 * @address: the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	atomic_set(&page->_mapcount, 0); /* elevate count by 1 (starts at -1) */
	__page_set_anon_rmap(page, vma, address);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount))
		__inc_page_state(nr_mapped);
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	if (atomic_add_negative(-1, &page->_mapcount)) {
#ifdef CONFIG_DEBUG_VM
		if (unlikely(page_mapcount(page) < 0)) {
			printk(KERN_EMERG "Eeek! page_mapcount(page) went negative! (%d)\n", page_mapcount(page));
			printk(KERN_EMERG "  page->flags = %lx\n", page->flags);
			printk(KERN_EMERG "  page->count = %x\n", page_count(page));
			printk(KERN_EMERG "  page->mapping = %p\n", page->mapping);
		}
#endif
		BUG_ON(page_mapcount(page) < 0);
		/*
		 * It would be tidy to reset the PageAnon mapping here,
		 * but that might overwrite a racing page_add_anon_rmap
		 * which increments mapcount after us but sets mapping
		 * before us: so leave the reset to free_hot_cold_page,
		 * and remember that it's only reliable while mapped.
		 * Leaving it set also helps swapoff to reinstate ptes
		 * faster for those pages still in swapcache.
		 */
		if (page_test_and_clear_dirty(page))
			set_page_dirty(page);
		__dec_page_state(nr_mapped);
	}
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
				int migration)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long address;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	address = vma_address(page, vma);
	if (address == -EFAULT)
		goto out;

	pte = page_check_address(page, mm, address, &ptl);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if ((vma->vm_flags & VM_LOCKED) ||
			(ptep_clear_flush_young(vma, address, pte)
				&& !migration)) {
		ret = SWAP_FAIL;
		goto out_unmap;
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			swap_duplicate(entry);
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
		} else {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(!migration);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
		dec_mm_counter(mm, anon_rss);
	} else
		dec_mm_counter(mm, file_rss);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

/*
 * objrmap doesn't work for nonlinear VMAs because the assumption that
 * offset-into-file correlates with offset-into-virtual-addresses does not hold.
 * Consequently, given a particular page and its ->index, we cannot locate the
 * ptes which are mapping that page without an exhaustive linear search.
 *
 * So what this code does is a mini "virtual scan" of each nonlinear VMA which
 * maps the file to which the target page belongs.  The ->vm_private_data field
 * holds the current cursor into that scan.  Successive searches will circulate
 * around the vma's virtual address space.
 *
 * So as more replacement pressure is applied to the pages in a nonlinear VMA,
 * more scanning pressure is placed against them as well.  Eventually pages
 * will become fully unmapped and are eligible for eviction.
 *
 * For very sparsely populated VMAs this is a little inefficient - chances are
 * there won't be many ptes located within the scan cluster.  In this case
 * maybe we could scan further - to the end of the pte page, perhaps.
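 *
 * Each call to try_to_unmap_cluster() below handles one cluster of
 * CLUSTER_SIZE bytes (32 pages, capped at one pmd's worth); the caller
 * then advances the vma's cursor by CLUSTER_SIZE.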
 */
#define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
#define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))

static void try_to_unmap_cluster(unsigned long cursor,
	unsigned int *mapcount, struct vm_area_struct *vma)
{
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	struct page *page;
	unsigned long address;
	unsigned long end;

	address = (vma->vm_start + cursor) & CLUSTER_MASK;
	end = address + CLUSTER_SIZE;
	if (address < vma->vm_start)
		address = vma->vm_start;
	if (end > vma->vm_end)
		end = vma->vm_end;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return;

	pte = pte_offset_map_lock(mm, pmd, address, &ptl);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	for (; address < end; pte++, address += PAGE_SIZE) {
		if (!pte_present(*pte))
			continue;
		page = vm_normal_page(vma, address, *pte);
		BUG_ON(!page || PageAnon(page));

		if (ptep_clear_flush_young(vma, address, pte))
			continue;

		/* Nuke the page table entry. */
		flush_cache_page(vma, address, pte_pfn(*pte));
		pteval = ptep_clear_flush(vma, address, pte);

		/* If nonlinear, store the file page offset in the pte. */
		if (page->index != linear_page_index(vma, address))
			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));

		/* Move the dirty bit to the physical page now the pte is gone. */
		if (pte_dirty(pteval))
			set_page_dirty(page);

		page_remove_rmap(page);
		page_cache_release(page);
		dec_mm_counter(mm, file_rss);
		(*mapcount)--;
	}
	pte_unmap_unlock(pte - 1, ptl);
}

static int try_to_unmap_anon(struct page *page, int migration)
{
	struct anon_vma *anon_vma;
	struct vm_area_struct *vma;
	int ret = SWAP_AGAIN;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return ret;

	list_for_each_entry(vma, &anon_vma->head, anon_vma_node) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			break;
	}
	spin_unlock(&anon_vma->lock);
	return ret;
}

/**
 * try_to_unmap_file - unmap file page using the object-based rmap method
 * @page: the page to unmap
 *
 * Find all the mappings of a page using the mapping pointer and the vma chains
 * contained in the address_space struct it points to.
 *
 * This function is only called from try_to_unmap for object-based pages.
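 *
 * Linear vmas are handled through the prio tree walk; nonlinear vmas fall
 * back to the clustered virtual scan done by try_to_unmap_cluster().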
 */
static int try_to_unmap_file(struct page *page, int migration)
{
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = SWAP_AGAIN;
	unsigned long cursor;
	unsigned long max_nl_cursor = 0;
	unsigned long max_nl_size = 0;
	unsigned int mapcount;

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		ret = try_to_unmap_one(page, vma, migration);
		if (ret == SWAP_FAIL || !page_mapped(page))
			goto out;
	}

	if (list_empty(&mapping->i_mmap_nonlinear))
		goto out;

	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
		if (vma->vm_flags & VM_LOCKED)
			continue;
		cursor = (unsigned long) vma->vm_private_data;
		if (cursor > max_nl_cursor)
			max_nl_cursor = cursor;
		cursor = vma->vm_end - vma->vm_start;
		if (cursor > max_nl_size)
			max_nl_size = cursor;
	}

	if (max_nl_size == 0) {	/* all nonlinears locked or reserved */
		ret = SWAP_FAIL;
		goto out;
	}

	/*
	 * We don't try to search for this page in the nonlinear vmas,
	 * and page_referenced wouldn't have found it anyway.  Instead
	 * just walk the nonlinear vmas trying to age and unmap some.
	 * The mapcount of the page we came in with is irrelevant,
	 * but even so use it as a guide to how hard we should try?
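 *
 * The scan below ends once the page's mapcount drops to zero or the
 * cursors in all nonlinear vmas have advanced past max_nl_size.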
 */
	mapcount = page_mapcount(page);
	if (!mapcount)
		goto out;
	cond_resched_lock(&mapping->i_mmap_lock);

	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
	if (max_nl_cursor == 0)
		max_nl_cursor = CLUSTER_SIZE;

	do {
		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
						shared.vm_set.list) {
			if (vma->vm_flags & VM_LOCKED)
				continue;
			cursor = (unsigned long) vma->vm_private_data;
			while (cursor < max_nl_cursor &&
				cursor < vma->vm_end - vma->vm_start) {
				try_to_unmap_cluster(cursor, &mapcount, vma);
				cursor += CLUSTER_SIZE;
				vma->vm_private_data = (void *) cursor;
				if ((int)mapcount <= 0)
					goto out;
			}
			vma->vm_private_data = (void *) max_nl_cursor;
		}
		cond_resched_lock(&mapping->i_mmap_lock);
		max_nl_cursor += CLUSTER_SIZE;
	} while (max_nl_cursor <= max_nl_size);

	/*
	 * Don't loop forever (perhaps all the remaining pages are
	 * in locked vmas).  Reset cursor on all unreserved nonlinear
	 * vmas, now forgetting on which ones it had fallen behind.
	 */
	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
		vma->vm_private_data = NULL;
out:
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

/**
 * try_to_unmap - try to remove all page table mappings to a page
 * @page: the page to get unmapped
 *
 * Tries to remove all the page table entries which are mapping this
 * page, used in the pageout path.  Caller must hold the page lock.
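 * When @migration is set, a recently referenced pte does not prevent the
 * unmap, and anonymous pages outside the swap cache are replaced by
 * migration ptes rather than swap ptes.
 *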
 * Return values are:
 *
 * SWAP_SUCCESS	- we succeeded in removing all mappings
 * SWAP_AGAIN	- we missed a mapping, try again later
 * SWAP_FAIL	- the page is unswappable
 */
int try_to_unmap(struct page *page, int migration)
{
	int ret;

	BUG_ON(!PageLocked(page));

	if (PageAnon(page))
		ret = try_to_unmap_anon(page, migration);
	else
		ret = try_to_unmap_file(page, migration);

	if (!page_mapped(page))
		ret = SWAP_SUCCESS;
	return ret;
}