/*
 * mm/rmap.c - physical to virtual reverse mappings
 *
 * Copyright 2001, Rik van Riel <riel@conectiva.com.br>
 * Released under the General Public License (GPL).
 *
 * Simple, low overhead reverse mapping scheme.
 * Please try to keep this thing as modular as possible.
 *
 * Provides methods for unmapping each kind of mapped page:
 * the anon methods track anonymous pages, and
 * the file methods track pages belonging to an inode.
 *
 * Original design by Rik van Riel <riel@conectiva.com.br> 2001
 * File methods by Dave McCracken <dmccr@us.ibm.com> 2003, 2004
 * Anonymous methods by Andrea Arcangeli <andrea@suse.de> 2004
 * Contributions by Hugh Dickins 2003, 2004
 */

/*
 * Lock ordering in mm:
 *
 * inode->i_mutex	(while writing or truncating, not reading or faulting)
 *   inode->i_alloc_sem (vmtruncate_range)
 *   mm->mmap_sem
 *     page->flags PG_locked (lock_page)
 *       mapping->i_mmap_lock
 *         anon_vma->lock
 *           mm->page_table_lock or pte_lock
 *             zone->lru_lock (in mark_page_accessed, isolate_lru_page)
 *             swap_lock (in swap_duplicate, swap_info_get)
 *               mmlist_lock (in mmput, drain_mmlist and others)
 *               mapping->private_lock (in __set_page_dirty_buffers)
 *               inode_lock (in set_page_dirty's __mark_inode_dirty)
 *                 sb_lock (within inode_lock in fs/fs-writeback.c)
 *                 mapping->tree_lock (widely used, in set_page_dirty,
 *                           in arch-dependent flush_dcache_mmap_lock,
 *                           within inode_lock in __sync_single_inode)
 *
 * (code doesn't rely on that order so it could be switched around)
 * ->tasklist_lock
 *   anon_vma->lock      (memory_failure, collect_procs_anon)
 *     pte map lock
 */
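
/*
 * Illustrative sketch (not part of the original file): the typical
 * nesting seen when the file-rmap walks below run under reclaim.  The
 * locks named are the real ones from the ordering above; the flow
 * itself is only an example of how they nest:
 *
 *	lock_page(page);			PG_locked
 *	  spin_lock(&mapping->i_mmap_lock);	i_mmap_lock
 *	    pte = page_check_address(...);	takes the pte lock
 *	    pte_unmap_unlock(pte, ptl);
 *	  spin_unlock(&mapping->i_mmap_lock);
 *	unlock_page(page);
 */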

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/rcupdate.h>
#include <linux/module.h>
#include <linux/memcontrol.h>
#include <linux/mmu_notifier.h>
#include <linux/migrate.h>

#include <asm/tlbflush.h>

#include "internal.h"

static struct kmem_cache *anon_vma_cachep;
static struct kmem_cache *anon_vma_chain_cachep;

static inline struct anon_vma *anon_vma_alloc(void)
{
	return kmem_cache_alloc(anon_vma_cachep, GFP_KERNEL);
}

void anon_vma_free(struct anon_vma *anon_vma)
{
	kmem_cache_free(anon_vma_cachep, anon_vma);
}

static inline struct anon_vma_chain *anon_vma_chain_alloc(void)
{
	return kmem_cache_alloc(anon_vma_chain_cachep, GFP_KERNEL);
}

void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
{
	kmem_cache_free(anon_vma_chain_cachep, anon_vma_chain);
}

/**
 * anon_vma_prepare - attach an anon_vma to a memory region
 * @vma: the memory region in question
 *
 * This makes sure the memory mapping described by 'vma' has
 * an 'anon_vma' attached to it, so that we can associate the
 * anonymous pages mapped into it with that anon_vma.
 *
 * The common case will be that we already have one, but if
 * not we either need to find an adjacent mapping that we
 * can re-use the anon_vma from (very common when the only
 * reason for splitting a vma has been mprotect()), or we
 * allocate a new one.
 *
 * Anon-vma allocations are very subtle, because we may have
 * optimistically looked up an anon_vma in page_lock_anon_vma()
 * and that may actually touch the spinlock even in the newly
 * allocated vma (it depends on RCU to make sure that the
 * anon_vma isn't actually destroyed).
 *
 * As a result, we need to do proper anon_vma locking even
 * for the new allocation. At the same time, we do not want
 * to do any locking for the common case of already having
 * an anon_vma.
 *
 * This must be called with the mmap_sem held for reading.
 */
int anon_vma_prepare(struct vm_area_struct *vma)
{
	struct anon_vma *anon_vma = vma->anon_vma;
	struct anon_vma_chain *avc;

	might_sleep();
	if (unlikely(!anon_vma)) {
		struct mm_struct *mm = vma->vm_mm;
		struct anon_vma *allocated;

		avc = anon_vma_chain_alloc();
		if (!avc)
			goto out_enomem;

		anon_vma = find_mergeable_anon_vma(vma);
		allocated = NULL;
		if (!anon_vma) {
			anon_vma = anon_vma_alloc();
			if (unlikely(!anon_vma))
				goto out_enomem_free_avc;
			allocated = anon_vma;
			/*
			 * This VMA had no anon_vma yet.  This anon_vma is
			 * the root of any anon_vma tree that might form.
			 */
			anon_vma->root = anon_vma;
		}

		anon_vma_lock(anon_vma);
		/* page_table_lock to protect against threads */
		spin_lock(&mm->page_table_lock);
		if (likely(!vma->anon_vma)) {
			vma->anon_vma = anon_vma;
			avc->anon_vma = anon_vma;
			avc->vma = vma;
			list_add(&avc->same_vma, &vma->anon_vma_chain);
			list_add_tail(&avc->same_anon_vma, &anon_vma->head);
			allocated = NULL;
			avc = NULL;
		}
		spin_unlock(&mm->page_table_lock);
		anon_vma_unlock(anon_vma);

		if (unlikely(allocated))
			anon_vma_free(allocated);
		if (unlikely(avc))
			anon_vma_chain_free(avc);
	}
	return 0;

 out_enomem_free_avc:
	anon_vma_chain_free(avc);
 out_enomem:
	return -ENOMEM;
}
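
/*
 * Illustrative sketch (not part of the original file): the anonymous
 * fault path is the typical caller.  Under mmap_sem held for read it
 * prepares the anon_vma before instantiating the first anonymous page;
 * the exact surrounding code is an assumption here:
 *
 *	if (unlikely(anon_vma_prepare(vma)))
 *		return VM_FAULT_OOM;
 *	page = alloc_zeroed_user_highpage_movable(vma, address);
 *	...
 *	page_add_new_anon_rmap(page, vma, address);
 */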

static void anon_vma_chain_link(struct vm_area_struct *vma,
				struct anon_vma_chain *avc,
				struct anon_vma *anon_vma)
{
	avc->vma = vma;
	avc->anon_vma = anon_vma;
	list_add(&avc->same_vma, &vma->anon_vma_chain);

	anon_vma_lock(anon_vma);
	list_add_tail(&avc->same_anon_vma, &anon_vma->head);
	anon_vma_unlock(anon_vma);
}

/*
 * Attach the anon_vmas from src to dst.
 * Returns 0 on success, -ENOMEM on failure.
 */
int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
{
	struct anon_vma_chain *avc, *pavc;

	list_for_each_entry_reverse(pavc, &src->anon_vma_chain, same_vma) {
		avc = anon_vma_chain_alloc();
		if (!avc)
			goto enomem_failure;
		anon_vma_chain_link(dst, avc, pavc->anon_vma);
	}
	return 0;

 enomem_failure:
	unlink_anon_vmas(dst);
	return -ENOMEM;
}

/*
 * Attach vma to its own anon_vma, as well as to the anon_vmas that
 * the corresponding VMA in the parent process is attached to.
 * Returns 0 on success, non-zero on failure.
 */
int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
{
	struct anon_vma_chain *avc;
	struct anon_vma *anon_vma;

	/* Don't bother if the parent process has no anon_vma here. */
	if (!pvma->anon_vma)
		return 0;

	/*
	 * First, attach the new VMA to the parent VMA's anon_vmas,
	 * so rmap can find non-COWed pages in child processes.
	 */
	if (anon_vma_clone(vma, pvma))
		return -ENOMEM;

	/* Then add our own anon_vma. */
	anon_vma = anon_vma_alloc();
	if (!anon_vma)
		goto out_error;
	avc = anon_vma_chain_alloc();
	if (!avc)
		goto out_error_free_anon_vma;

	/*
	 * The root anon_vma's spinlock is the lock actually used when we
	 * lock any of the anon_vmas in this anon_vma tree.
	 */
	anon_vma->root = pvma->anon_vma->root;
	/*
	 * With KSM refcounts, an anon_vma can stay around longer than the
	 * process it belongs to.  The root anon_vma needs to be pinned
	 * until this anon_vma is freed, because the lock lives in the root.
	 */
	get_anon_vma(anon_vma->root);
	/* Mark this anon_vma as the one where our new (COWed) pages go. */
	vma->anon_vma = anon_vma;
	anon_vma_chain_link(vma, avc, anon_vma);

	return 0;

 out_error_free_anon_vma:
	anon_vma_free(anon_vma);
 out_error:
	unlink_anon_vmas(vma);
	return -ENOMEM;
}
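
/*
 * Illustrative note (not from the original file): fork() is the
 * expected caller.  In a sketch of a dup_mmap()-style loop, each VMA
 * copied into the child is wired into both its parent's anon_vmas and
 * a fresh anon_vma of its own:
 *
 *	for each vma in the parent mm:
 *		tmp = copy of vma for the child;
 *		if (anon_vma_fork(tmp, vma))
 *			goto fail_nomem;
 */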

static void anon_vma_unlink(struct anon_vma_chain *anon_vma_chain)
{
	struct anon_vma *anon_vma = anon_vma_chain->anon_vma;
	int empty;

	/* If anon_vma_fork fails, we can get an empty anon_vma_chain. */
	if (!anon_vma)
		return;

	anon_vma_lock(anon_vma);
	list_del(&anon_vma_chain->same_anon_vma);

	/* We must garbage collect the anon_vma if it's empty */
	empty = list_empty(&anon_vma->head) && !anonvma_external_refcount(anon_vma);
	anon_vma_unlock(anon_vma);

	if (empty) {
		/* We no longer need the root anon_vma */
		if (anon_vma->root != anon_vma)
			drop_anon_vma(anon_vma->root);
		anon_vma_free(anon_vma);
	}
}

void unlink_anon_vmas(struct vm_area_struct *vma)
{
	struct anon_vma_chain *avc, *next;

	/*
	 * Unlink each anon_vma chained to the VMA.  This list is ordered
	 * from newest to oldest, ensuring the root anon_vma gets freed last.
	 */
	list_for_each_entry_safe(avc, next, &vma->anon_vma_chain, same_vma) {
		anon_vma_unlink(avc);
		list_del(&avc->same_vma);
		anon_vma_chain_free(avc);
	}
}

static void anon_vma_ctor(void *data)
{
	struct anon_vma *anon_vma = data;

	spin_lock_init(&anon_vma->lock);
	anonvma_external_refcount_init(anon_vma);
	INIT_LIST_HEAD(&anon_vma->head);
}

void __init anon_vma_init(void)
{
	anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
			0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
	anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
}

/*
 * Getting a lock on a stable anon_vma from a page off the LRU is
 * tricky: page_lock_anon_vma relies on RCU to guard against the races.
 */
struct anon_vma *page_lock_anon_vma(struct page *page)
{
	struct anon_vma *anon_vma;
	unsigned long anon_mapping;

	rcu_read_lock();
	anon_mapping = (unsigned long) ACCESS_ONCE(page->mapping);
	if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
		goto out;
	if (!page_mapped(page))
		goto out;

	anon_vma = (struct anon_vma *) (anon_mapping - PAGE_MAPPING_ANON);
	anon_vma_lock(anon_vma);
	return anon_vma;
out:
	rcu_read_unlock();
	return NULL;
}

void page_unlock_anon_vma(struct anon_vma *anon_vma)
{
	anon_vma_unlock(anon_vma);
	rcu_read_unlock();
}
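
/*
 * Illustrative sketch (not part of the original file): callers must
 * treat page_lock_anon_vma()/page_unlock_anon_vma() as a strict pair,
 * since the unlock also drops the RCU read lock taken on success:
 *
 *	anon_vma = page_lock_anon_vma(page);
 *	if (!anon_vma)
 *		return referenced;
 *	... walk anon_vma->head under anon_vma->lock ...
 *	page_unlock_anon_vma(anon_vma);
 */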

/*
 * At what user virtual address is page expected in @vma?
 * Returns virtual address or -EFAULT if page's index/offset is not
 * within the range mapped by @vma.
 */
static inline unsigned long
vma_address(struct page *page, struct vm_area_struct *vma)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	unsigned long address;

	address = vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
	if (unlikely(address < vma->vm_start || address >= vma->vm_end)) {
		/* page should be within @vma mapping range */
		return -EFAULT;
	}
	return address;
}
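
/*
 * Worked example (hypothetical numbers, added for illustration): with
 * 4K pages, a vma with vm_start == 0x400000 and vm_pgoff == 16, and a
 * page whose index is 20, the computation above gives
 *
 *	address = 0x400000 + ((20 - 16) << 12) = 0x404000
 *
 * If the result fell outside [vm_start, vm_end), e.g. because the vma
 * has since been split, -EFAULT is returned instead.
 */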

/*
 * At what user virtual address is page expected in vma?
 * Caller should check the page is actually part of the vma.
 */
unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma)
{
	if (PageAnon(page))
		;
	else if (page->mapping && !(vma->vm_flags & VM_NONLINEAR)) {
		if (!vma->vm_file ||
		    vma->vm_file->f_mapping != page->mapping)
			return -EFAULT;
	} else
		return -EFAULT;
	return vma_address(page, vma);
}

/*
 * Check that @page is mapped at @address into @mm.
 *
 * If @sync is false, page_check_address may perform a racy check to avoid
 * the page table lock when the pte is not present (helpful when reclaiming
 * highly shared pages).
 *
 * On success returns with pte mapped and locked.
 */
pte_t *page_check_address(struct page *page, struct mm_struct *mm,
			  unsigned long address, spinlock_t **ptlp, int sync)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;

	pgd = pgd_offset(mm, address);
	if (!pgd_present(*pgd))
		return NULL;

	pud = pud_offset(pgd, address);
	if (!pud_present(*pud))
		return NULL;

	pmd = pmd_offset(pud, address);
	if (!pmd_present(*pmd))
		return NULL;

	pte = pte_offset_map(pmd, address);
	/* Make a quick check before getting the lock */
	if (!sync && !pte_present(*pte)) {
		pte_unmap(pte);
		return NULL;
	}

	ptl = pte_lockptr(mm, pmd);
	spin_lock(ptl);
	if (pte_present(*pte) && page_to_pfn(page) == pte_pfn(*pte)) {
		*ptlp = ptl;
		return pte;
	}
	pte_unmap_unlock(pte, ptl);
	return NULL;
}

/**
 * page_mapped_in_vma - check whether a page is really mapped in a VMA
 * @page: the page to test
 * @vma: the VMA to test
 *
 * Returns 1 if the page is mapped into the page tables of the VMA, 0
 * if the page is not mapped into the page tables of this VMA.  Only
 * valid for normal file or anonymous VMAs.
 */
int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
{
	unsigned long address;
	pte_t *pte;
	spinlock_t *ptl;

	address = vma_address(page, vma);
	if (address == -EFAULT)		/* out of vma range */
		return 0;
	pte = page_check_address(page, vma->vm_mm, address, &ptl, 1);
	if (!pte)			/* the page is not in this mm */
		return 0;
	pte_unmap_unlock(pte, ptl);

	return 1;
}

/*
 * Subfunctions of page_referenced: page_referenced_one called
 * repeatedly from either page_referenced_anon or page_referenced_file.
 */
int page_referenced_one(struct page *page, struct vm_area_struct *vma,
			unsigned long address, unsigned int *mapcount,
			unsigned long *vm_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int referenced = 0;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * We don't want to elevate referenced for an mlocked page that
	 * gets this far: we want it to progress to try_to_unmap and be
	 * moved to the unevictable list.
	 */
	if (vma->vm_flags & VM_LOCKED) {
		*mapcount = 1;	/* break early from loop */
		*vm_flags |= VM_LOCKED;
		goto out_unmap;
	}

	if (ptep_clear_flush_young_notify(vma, address, pte)) {
		/*
		 * Don't treat a reference through a sequentially read
		 * mapping as such.  If the page has been used in
		 * another mapping, we will catch it; if this other
		 * mapping is already gone, the unmap path will have
		 * set PG_referenced or activated the page.
		 */
		if (likely(!VM_SequentialReadHint(vma)))
			referenced++;
	}

	/*
	 * Pretend the page is referenced if the task has the
	 * swap token and is in the middle of a page fault.
	 */
	if (mm != current->mm && has_swap_token(mm) &&
			rwsem_is_locked(&mm->mmap_sem))
		referenced++;

out_unmap:
	(*mapcount)--;
	pte_unmap_unlock(pte, ptl);

	if (referenced)
		*vm_flags |= vma->vm_flags;
out:
	return referenced;
}

static int page_referenced_anon(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct anon_vma *anon_vma;
	struct anon_vma_chain *avc;
	int referenced = 0;

	anon_vma = page_lock_anon_vma(page);
	if (!anon_vma)
		return referenced;

	mapcount = page_mapcount(page);
	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
		struct vm_area_struct *vma = avc->vma;
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting references from vmas belonging to different
		 * cgroups.
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	page_unlock_anon_vma(anon_vma);
	return referenced;
}

/**
 * page_referenced_file - referenced check for object-based rmap
 * @page: the page we're checking references on.
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags which actually referenced the page
 *
 * For an object-based mapped page, find all the places it is mapped and
 * check/clear the referenced flag.  This is done by following the page->mapping
 * pointer, then walking the chain of vmas it holds.  It returns the number
 * of references it found.
 *
 * This function is only called from page_referenced for object-based pages.
 */
static int page_referenced_file(struct page *page,
				struct mem_cgroup *mem_cont,
				unsigned long *vm_flags)
{
	unsigned int mapcount;
	struct address_space *mapping = page->mapping;
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int referenced = 0;

	/*
	 * The caller's checks on page->mapping and !PageAnon have made
	 * sure that this is a file page: the check for page->mapping
	 * excludes the case just before it gets set on an anon page.
	 */
	BUG_ON(PageAnon(page));

	/*
	 * The page lock not only makes sure that page->mapping cannot
	 * suddenly be NULLified by truncation, it makes sure that the
	 * structure at mapping cannot be freed and reused yet,
	 * so we can safely take mapping->i_mmap_lock.
	 */
	BUG_ON(!PageLocked(page));

	spin_lock(&mapping->i_mmap_lock);

	/*
	 * i_mmap_lock does not stabilize mapcount at all, but mapcount
	 * is more likely to be accurate if we note it after spinning.
	 */
	mapcount = page_mapcount(page);

	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		unsigned long address = vma_address(page, vma);
		if (address == -EFAULT)
			continue;
		/*
		 * If we are reclaiming on behalf of a cgroup, skip
		 * counting references from vmas belonging to different
		 * cgroups.
		 */
		if (mem_cont && !mm_match_cgroup(vma->vm_mm, mem_cont))
			continue;
		referenced += page_referenced_one(page, vma, address,
						  &mapcount, vm_flags);
		if (!mapcount)
			break;
	}

	spin_unlock(&mapping->i_mmap_lock);
	return referenced;
}

/**
 * page_referenced - test if the page was referenced
 * @page: the page to test
 * @is_locked: caller holds lock on the page
 * @mem_cont: target memory controller
 * @vm_flags: collect encountered vma->vm_flags which actually referenced the page
 *
 * Quick test_and_clear_referenced for all mappings to a page,
 * returns the number of ptes which referenced the page.
 */
int page_referenced(struct page *page,
		    int is_locked,
		    struct mem_cgroup *mem_cont,
		    unsigned long *vm_flags)
{
	int referenced = 0;
	int we_locked = 0;

	*vm_flags = 0;
	if (page_mapped(page) && page_rmapping(page)) {
		if (!is_locked && (!PageAnon(page) || PageKsm(page))) {
			we_locked = trylock_page(page);
			if (!we_locked) {
				referenced++;
				goto out;
			}
		}
		if (unlikely(PageKsm(page)))
			referenced += page_referenced_ksm(page, mem_cont,
								vm_flags);
		else if (PageAnon(page))
			referenced += page_referenced_anon(page, mem_cont,
								vm_flags);
		else if (page->mapping)
			referenced += page_referenced_file(page, mem_cont,
								vm_flags);
		if (we_locked)
			unlock_page(page);
	}
out:
	if (page_test_and_clear_young(page))
		referenced++;

	return referenced;
}
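
/*
 * Illustrative sketch (not from the original file): vmscan is the main
 * consumer.  In shrink_page_list()-style code (names here are only an
 * example), the return value and the collected vm_flags decide the
 * page's fate:
 *
 *	referenced = page_referenced(page, 1, sc->mem_cgroup, &vm_flags);
 *	if (vm_flags & VM_LOCKED)
 *		... cull the page to the unevictable list ...
 *	else if (referenced)
 *		... keep the page, possibly activating it ...
 */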

static int page_mkclean_one(struct page *page, struct vm_area_struct *vma,
			    unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	spinlock_t *ptl;
	int ret = 0;

	pte = page_check_address(page, mm, address, &ptl, 1);
	if (!pte)
		goto out;

	if (pte_dirty(*pte) || pte_write(*pte)) {
		pte_t entry;

		flush_cache_page(vma, address, pte_pfn(*pte));
		entry = ptep_clear_flush_notify(vma, address, pte);
		entry = pte_wrprotect(entry);
		entry = pte_mkclean(entry);
		set_pte_at(mm, address, pte, entry);
		ret = 1;
	}

	pte_unmap_unlock(pte, ptl);
out:
	return ret;
}

static int page_mkclean_file(struct address_space *mapping, struct page *page)
{
	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
	struct vm_area_struct *vma;
	struct prio_tree_iter iter;
	int ret = 0;

	BUG_ON(PageAnon(page));

	spin_lock(&mapping->i_mmap_lock);
	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
		if (vma->vm_flags & VM_SHARED) {
			unsigned long address = vma_address(page, vma);
			if (address == -EFAULT)
				continue;
			ret += page_mkclean_one(page, vma, address);
		}
	}
	spin_unlock(&mapping->i_mmap_lock);
	return ret;
}

int page_mkclean(struct page *page)
{
	int ret = 0;

	BUG_ON(!PageLocked(page));

	if (page_mapped(page)) {
		struct address_space *mapping = page_mapping(page);
		if (mapping) {
			ret = page_mkclean_file(mapping, page);
			if (page_test_dirty(page)) {
				page_clear_dirty(page);
				ret = 1;
			}
		}
	}

	return ret;
}
EXPORT_SYMBOL_GPL(page_mkclean);
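
/*
 * Illustrative sketch (not part of the original file): writeback-style
 * callers use page_mkclean() to write-protect all shared mappings of a
 * locked page before cleaning it, so that future stores fault and
 * re-dirty the page:
 *
 *	lock_page(page);
 *	if (page_mkclean(page))
 *		set_page_dirty(page);	pte dirty bits moved to the page
 *	... start writeback, then clear the page's dirty flag ...
 *	unlock_page(page);
 */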

/**
 * page_move_anon_rmap - move a page to our anon_vma
 * @page:	the page to move to our anon_vma
 * @vma:	the vma the page belongs to
 * @address:	the user virtual address mapped
 *
 * When a page belongs exclusively to one process after a COW event,
 * that page can be moved into the anon_vma that belongs to just that
 * process, so the rmap code will not search the parent or sibling
 * processes.
 */
void page_move_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(!anon_vma);
	VM_BUG_ON(page->index != linear_page_index(vma, address));

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
}

/**
 * __page_set_anon_rmap - setup new anonymous rmap
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 * @exclusive:	the page is exclusively owned by the current process
 */
static void __page_set_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address, int exclusive)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	BUG_ON(!anon_vma);

	/*
	 * If the page isn't exclusively mapped into this vma,
	 * we must use the _oldest_ possible anon_vma for the
	 * page mapping!
	 */
	if (!exclusive) {
		if (PageAnon(page))
			return;
		anon_vma = anon_vma->root;
	} else {
		/*
		 * In this case, swapped-out-but-not-discarded swap-cache
		 * is remapped, so there is no need to update page->mapping
		 * here.  We can trust that the anon_vma pointed to by
		 * page->mapping is not obsolete, because vma->anon_vma
		 * must be in the same anon_vma family.
		 */
		if (PageAnon(page))
			return;
	}

	anon_vma = (void *) anon_vma + PAGE_MAPPING_ANON;
	page->mapping = (struct address_space *) anon_vma;
	page->index = linear_page_index(vma, address);
}

/**
 * __page_check_anon_rmap - sanity check anonymous rmap addition
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 */
static void __page_check_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
#ifdef CONFIG_DEBUG_VM
	/*
	 * The page's anon-rmap details (mapping and index) are guaranteed to
	 * be set up correctly at this point.
	 *
	 * We have exclusion against page_add_anon_rmap because the caller
	 * always holds the page locked, except if called from page_dup_rmap,
	 * in which case the page is already known to be setup.
	 *
	 * We have exclusion against page_add_new_anon_rmap because those pages
	 * are initially only visible via the pagetables, and the pte is locked
	 * over the call to page_add_new_anon_rmap.
	 */
	BUG_ON(page->index != linear_page_index(vma, address));
#endif
}

/**
 * page_add_anon_rmap - add pte mapping to an anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * The caller needs to hold the pte lock, and the page must be locked in
 * the anon_vma case: to serialize mapping,index checking after setting,
 * and to ensure that PageAnon is not being upgraded racily to PageKsm
 * (but PageKsm is never downgraded to PageAnon).
 */
void page_add_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	int first = atomic_inc_and_test(&page->_mapcount);
	if (first)
		__inc_zone_page_state(page, NR_ANON_PAGES);
	if (unlikely(PageKsm(page)))
		return;

	VM_BUG_ON(!PageLocked(page));
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	if (first)
		__page_set_anon_rmap(page, vma, address, 0);
	else
		__page_check_anon_rmap(page, vma, address);
}

/**
 * page_add_new_anon_rmap - add pte mapping to a new anonymous page
 * @page:	the page to add the mapping to
 * @vma:	the vm area in which the mapping is added
 * @address:	the user virtual address mapped
 *
 * Same as page_add_anon_rmap but must only be called on *new* pages.
 * This means the inc-and-test can be bypassed.
 * Page does not have to be locked.
 */
void page_add_new_anon_rmap(struct page *page,
	struct vm_area_struct *vma, unsigned long address)
{
	VM_BUG_ON(address < vma->vm_start || address >= vma->vm_end);
	SetPageSwapBacked(page);
	atomic_set(&page->_mapcount, 0); /* increment count (starts at -1) */
	__inc_zone_page_state(page, NR_ANON_PAGES);
	__page_set_anon_rmap(page, vma, address, 1);
	if (page_evictable(page, vma))
		lru_cache_add_lru(page, LRU_ACTIVE_ANON);
	else
		add_page_to_unevictable_list(page);
}

/**
 * page_add_file_rmap - add pte mapping to a file page
 * @page: the page to add the mapping to
 *
 * The caller needs to hold the pte lock.
 */
void page_add_file_rmap(struct page *page)
{
	if (atomic_inc_and_test(&page->_mapcount)) {
		__inc_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, 1);
	}
}

/**
 * page_remove_rmap - take down pte mapping from a page
 * @page: page to remove mapping from
 *
 * The caller needs to hold the pte lock.
 */
void page_remove_rmap(struct page *page)
{
	/* page still mapped by someone else? */
	if (!atomic_add_negative(-1, &page->_mapcount))
		return;

	/*
	 * Now that the last pte has gone, s390 must transfer dirty
	 * flag from storage key to struct page.  We can usually skip
	 * this if the page is anon, so about to be freed; but perhaps
	 * not if it's in swapcache - there might be another pte slot
	 * containing the swap entry, but page not yet written to swap.
	 */
	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
		page_clear_dirty(page);
		set_page_dirty(page);
	}
	if (PageAnon(page)) {
		mem_cgroup_uncharge_page(page);
		__dec_zone_page_state(page, NR_ANON_PAGES);
	} else {
		__dec_zone_page_state(page, NR_FILE_MAPPED);
		mem_cgroup_update_file_mapped(page, -1);
	}
	/*
	 * It would be tidy to reset the PageAnon mapping here,
	 * but that might overwrite a racing page_add_anon_rmap
	 * which increments mapcount after us but sets mapping
	 * before us: so leave the reset to free_hot_cold_page,
	 * and remember that it's only reliable while mapped.
	 * Leaving it set also helps swapoff to reinstate ptes
	 * faster for those pages still in swapcache.
	 */
}

/*
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
		     unsigned long address, enum ttu_flags flags)
{
	struct mm_struct *mm = vma->vm_mm;
	pte_t *pte;
	pte_t pteval;
	spinlock_t *ptl;
	int ret = SWAP_AGAIN;

	pte = page_check_address(page, mm, address, &ptl, 0);
	if (!pte)
		goto out;

	/*
	 * If the page is mlock()d, we cannot swap it out.
	 * If it's recently referenced (perhaps page_referenced
	 * skipped over this mm) then we should reactivate it.
	 */
	if (!(flags & TTU_IGNORE_MLOCK)) {
		if (vma->vm_flags & VM_LOCKED)
			goto out_mlock;

		if (TTU_ACTION(flags) == TTU_MUNLOCK)
			goto out_unmap;
	}
	if (!(flags & TTU_IGNORE_ACCESS)) {
		if (ptep_clear_flush_young_notify(vma, address, pte)) {
			ret = SWAP_FAIL;
			goto out_unmap;
		}
	}

	/* Nuke the page table entry. */
	flush_cache_page(vma, address, page_to_pfn(page));
	pteval = ptep_clear_flush_notify(vma, address, pte);

	/* Move the dirty bit to the physical page now the pte is gone. */
	if (pte_dirty(pteval))
		set_page_dirty(page);

	/* Update high watermark before we lower rss */
	update_hiwater_rss(mm);

	if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) {
		if (PageAnon(page))
			dec_mm_counter(mm, MM_ANONPAGES);
		else
			dec_mm_counter(mm, MM_FILEPAGES);
		set_pte_at(mm, address, pte,
				swp_entry_to_pte(make_hwpoison_entry(page)));
	} else if (PageAnon(page)) {
		swp_entry_t entry = { .val = page_private(page) };

		if (PageSwapCache(page)) {
			/*
			 * Store the swap location in the pte.
			 * See handle_pte_fault() ...
			 */
			if (swap_duplicate(entry) < 0) {
				set_pte_at(mm, address, pte, pteval);
				ret = SWAP_FAIL;
				goto out_unmap;
			}
			if (list_empty(&mm->mmlist)) {
				spin_lock(&mmlist_lock);
				if (list_empty(&mm->mmlist))
					list_add(&mm->mmlist, &init_mm.mmlist);
				spin_unlock(&mmlist_lock);
			}
			dec_mm_counter(mm, MM_ANONPAGES);
			inc_mm_counter(mm, MM_SWAPENTS);
		} else if (PAGE_MIGRATION) {
			/*
			 * Store the pfn of the page in a special migration
			 * pte. do_swap_page() will wait until the migration
			 * pte is removed and then restart fault handling.
			 */
			BUG_ON(TTU_ACTION(flags) != TTU_MIGRATION);
			entry = make_migration_entry(page, pte_write(pteval));
		}
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
		BUG_ON(pte_file(*pte));
	} else if (PAGE_MIGRATION && (TTU_ACTION(flags) == TTU_MIGRATION)) {
		/* Establish migration entry for a file page */
		swp_entry_t entry;
		entry = make_migration_entry(page, pte_write(pteval));
		set_pte_at(mm, address, pte, swp_entry_to_pte(entry));
	} else
		dec_mm_counter(mm, MM_FILEPAGES);

	page_remove_rmap(page);
	page_cache_release(page);

out_unmap:
	pte_unmap_unlock(pte, ptl);
out:
	return ret;

out_mlock:
	pte_unmap_unlock(pte, ptl);

	/*
	 * We need mmap_sem locking: otherwise the VM_LOCKED check gives an
	 * unstable, racy result.  We also can't wait here, because we
	 * currently hold anon_vma->lock or mapping->i_mmap_lock.  If the
	 * trylock fails, the page remains on the evictable lru, and vmscan
	 * may later retry moving it to the unevictable lru if the page is
	 * actually mlocked.
	 */
	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
		if (vma->vm_flags & VM_LOCKED) {
			mlock_vma_page(page);
			ret = SWAP_MLOCK;
		}
		up_read(&vma->vm_mm->mmap_sem);
	}
	return ret;
}
10471da177e4SLinus Torvalds 
10481da177e4SLinus Torvalds /*
10491da177e4SLinus Torvalds  * objrmap doesn't work for nonlinear VMAs because the assumption that
10501da177e4SLinus Torvalds  * offset-into-file correlates with offset-into-virtual-addresses does not hold.
10511da177e4SLinus Torvalds  * Consequently, given a particular page and its ->index, we cannot locate the
10521da177e4SLinus Torvalds  * ptes which are mapping that page without an exhaustive linear search.
10531da177e4SLinus Torvalds  *
10541da177e4SLinus Torvalds  * So what this code does is a mini "virtual scan" of each nonlinear VMA which
10551da177e4SLinus Torvalds  * maps the file to which the target page belongs.  The ->vm_private_data field
10561da177e4SLinus Torvalds  * holds the current cursor into that scan.  Successive searches will circulate
10571da177e4SLinus Torvalds  * around the vma's virtual address space.
10581da177e4SLinus Torvalds  *
10591da177e4SLinus Torvalds  * So as more replacement pressure is applied to the pages in a nonlinear VMA,
10601da177e4SLinus Torvalds  * more scanning pressure is placed against them as well.  Eventually pages
10611da177e4SLinus Torvalds  * will become fully unmapped and be eligible for eviction.
10621da177e4SLinus Torvalds  *
10631da177e4SLinus Torvalds  * For very sparsely populated VMAs this is a little inefficient - chances are
10641da177e4SLinus Torvalds  * there won't be many ptes located within the scan cluster.  In this case
10651da177e4SLinus Torvalds  * maybe we could scan further - to the end of the pte page, perhaps.
1066b291f000SNick Piggin  *
1067b291f000SNick Piggin  * Mlocked pages:  check VM_LOCKED under mmap_sem held for read, if we can
1068b291f000SNick Piggin  * acquire it without blocking.  If the vma is locked, mlock the pages in the
1069b291f000SNick Piggin  * cluster rather than unmapping them.  If we encounter the "check_page" that
1070b291f000SNick Piggin  * vmscan is trying to unmap, return SWAP_MLOCK; otherwise default to SWAP_AGAIN.
10711da177e4SLinus Torvalds  */
10721da177e4SLinus Torvalds #define CLUSTER_SIZE	min(32*PAGE_SIZE, PMD_SIZE)
10731da177e4SLinus Torvalds #define CLUSTER_MASK	(~(CLUSTER_SIZE - 1))
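
/*
 * Worked example (illustrative, not in the original source): with 4K
 * pages and 2M pmds, as on x86_64, CLUSTER_SIZE = min(32 * 4K, 2M) =
 * 128K and CLUSTER_MASK = ~0x1ffff, so one try_to_unmap_cluster() call
 * scans at most 32 ptes:
 *
 *	cursor  = 0x13000, vma->vm_start = 0x400000
 *	address = (0x400000 + 0x13000) & ~0x1ffff = 0x400000
 *	end     = 0x400000 + 0x20000              = 0x420000
 */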
10741da177e4SLinus Torvalds 
1075b291f000SNick Piggin static int try_to_unmap_cluster(unsigned long cursor, unsigned int *mapcount,
1076b291f000SNick Piggin 		struct vm_area_struct *vma, struct page *check_page)
10771da177e4SLinus Torvalds {
10781da177e4SLinus Torvalds 	struct mm_struct *mm = vma->vm_mm;
10791da177e4SLinus Torvalds 	pgd_t *pgd;
10801da177e4SLinus Torvalds 	pud_t *pud;
10811da177e4SLinus Torvalds 	pmd_t *pmd;
1082c0718806SHugh Dickins 	pte_t *pte;
10831da177e4SLinus Torvalds 	pte_t pteval;
1084c0718806SHugh Dickins 	spinlock_t *ptl;
10851da177e4SLinus Torvalds 	struct page *page;
10861da177e4SLinus Torvalds 	unsigned long address;
10871da177e4SLinus Torvalds 	unsigned long end;
1088b291f000SNick Piggin 	int ret = SWAP_AGAIN;
1089b291f000SNick Piggin 	int locked_vma = 0;
10901da177e4SLinus Torvalds 
10911da177e4SLinus Torvalds 	address = (vma->vm_start + cursor) & CLUSTER_MASK;
10921da177e4SLinus Torvalds 	end = address + CLUSTER_SIZE;
10931da177e4SLinus Torvalds 	if (address < vma->vm_start)
10941da177e4SLinus Torvalds 		address = vma->vm_start;
10951da177e4SLinus Torvalds 	if (end > vma->vm_end)
10961da177e4SLinus Torvalds 		end = vma->vm_end;
10971da177e4SLinus Torvalds 
10981da177e4SLinus Torvalds 	pgd = pgd_offset(mm, address);
10991da177e4SLinus Torvalds 	if (!pgd_present(*pgd))
1100b291f000SNick Piggin 		return ret;
11011da177e4SLinus Torvalds 
11021da177e4SLinus Torvalds 	pud = pud_offset(pgd, address);
11031da177e4SLinus Torvalds 	if (!pud_present(*pud))
1104b291f000SNick Piggin 		return ret;
11051da177e4SLinus Torvalds 
11061da177e4SLinus Torvalds 	pmd = pmd_offset(pud, address);
11071da177e4SLinus Torvalds 	if (!pmd_present(*pmd))
1108b291f000SNick Piggin 		return ret;
1109b291f000SNick Piggin 
1110b291f000SNick Piggin 	/*
1111af8e3354SHugh Dickins 	 * If we can acquire the mmap_sem for read and the vma is VM_LOCKED,
1112b291f000SNick Piggin 	 * keep the sem held while scanning the cluster so the pages can be mlocked.
1113b291f000SNick Piggin 	 */
1114af8e3354SHugh Dickins 	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
1115b291f000SNick Piggin 		locked_vma = (vma->vm_flags & VM_LOCKED);
1116b291f000SNick Piggin 		if (!locked_vma)
1117b291f000SNick Piggin 			up_read(&vma->vm_mm->mmap_sem); /* don't need it */
1118b291f000SNick Piggin 	}
1119c0718806SHugh Dickins 
1120c0718806SHugh Dickins 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
11211da177e4SLinus Torvalds 
1122365e9c87SHugh Dickins 	/* Update high watermark before we lower rss */
1123365e9c87SHugh Dickins 	update_hiwater_rss(mm);
1124365e9c87SHugh Dickins 
1125c0718806SHugh Dickins 	for (; address < end; pte++, address += PAGE_SIZE) {
11261da177e4SLinus Torvalds 		if (!pte_present(*pte))
11271da177e4SLinus Torvalds 			continue;
11286aab341eSLinus Torvalds 		page = vm_normal_page(vma, address, *pte);
11296aab341eSLinus Torvalds 		BUG_ON(!page || PageAnon(page));
11301da177e4SLinus Torvalds 
1131b291f000SNick Piggin 		if (locked_vma) {
1132b291f000SNick Piggin 			mlock_vma_page(page);   /* no-op if already mlocked */
1133b291f000SNick Piggin 			if (page == check_page)
1134b291f000SNick Piggin 				ret = SWAP_MLOCK;
1135b291f000SNick Piggin 			continue;	/* don't unmap */
1136b291f000SNick Piggin 		}
1137b291f000SNick Piggin 
1138cddb8a5cSAndrea Arcangeli 		if (ptep_clear_flush_young_notify(vma, address, pte))
11391da177e4SLinus Torvalds 			continue;
11401da177e4SLinus Torvalds 
11411da177e4SLinus Torvalds 		/* Nuke the page table entry. */
1142eca35133SBen Collins 		flush_cache_page(vma, address, pte_pfn(*pte));
1143cddb8a5cSAndrea Arcangeli 		pteval = ptep_clear_flush_notify(vma, address, pte);
11441da177e4SLinus Torvalds 
11451da177e4SLinus Torvalds 		/* If nonlinear, store the file page offset in the pte. */
11461da177e4SLinus Torvalds 		if (page->index != linear_page_index(vma, address))
11471da177e4SLinus Torvalds 			set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
11481da177e4SLinus Torvalds 
11491da177e4SLinus Torvalds 		/* Move the dirty bit to the physical page now that the pte is gone. */
11501da177e4SLinus Torvalds 		if (pte_dirty(pteval))
11511da177e4SLinus Torvalds 			set_page_dirty(page);
11521da177e4SLinus Torvalds 
1153edc315fdSHugh Dickins 		page_remove_rmap(page);
11541da177e4SLinus Torvalds 		page_cache_release(page);
1155d559db08SKAMEZAWA Hiroyuki 		dec_mm_counter(mm, MM_FILEPAGES);
11561da177e4SLinus Torvalds 		(*mapcount)--;
11571da177e4SLinus Torvalds 	}
1158c0718806SHugh Dickins 	pte_unmap_unlock(pte - 1, ptl);
1159b291f000SNick Piggin 	if (locked_vma)
1160b291f000SNick Piggin 		up_read(&vma->vm_mm->mmap_sem);
1161b291f000SNick Piggin 	return ret;
11621da177e4SLinus Torvalds }
11631da177e4SLinus Torvalds 
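/*
 * During exec a temporary stack VMA is set up and later moved; see the
 * comment in try_to_unmap_anon() below.  Such a partially set up stack
 * is recognizable by VM_STACK_INCOMPLETE_SETUP in its vm_flags.
 */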
1164a8bef8ffSMel Gorman static bool is_vma_temporary_stack(struct vm_area_struct *vma)
1165a8bef8ffSMel Gorman {
1166a8bef8ffSMel Gorman 	int maybe_stack = vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP);
1167a8bef8ffSMel Gorman 
1168a8bef8ffSMel Gorman 	if (!maybe_stack)
1169a8bef8ffSMel Gorman 		return false;
1170a8bef8ffSMel Gorman 
1171a8bef8ffSMel Gorman 	if ((vma->vm_flags & VM_STACK_INCOMPLETE_SETUP) ==
1172a8bef8ffSMel Gorman 						VM_STACK_INCOMPLETE_SETUP)
1173a8bef8ffSMel Gorman 		return true;
1174a8bef8ffSMel Gorman 
1175a8bef8ffSMel Gorman 	return false;
1176a8bef8ffSMel Gorman }
1177a8bef8ffSMel Gorman 
1178b291f000SNick Piggin /**
1179b291f000SNick Piggin  * try_to_unmap_anon - unmap or unlock anonymous page using the object-based
1180b291f000SNick Piggin  * rmap method
1181b291f000SNick Piggin  * @page: the page to unmap/unlock
11828051be5eSHuang Shijie  * @flags: action and flags
1183b291f000SNick Piggin  *
1184b291f000SNick Piggin  * Find all the mappings of a page using the mapping pointer and the vma chains
1185b291f000SNick Piggin  * contained in the anon_vma struct it points to.
1186b291f000SNick Piggin  *
1187b291f000SNick Piggin  * This function is only called from try_to_unmap/try_to_munlock for
1188b291f000SNick Piggin  * anonymous pages.
1189b291f000SNick Piggin  * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1190b291f000SNick Piggin  * where the page was found will be held for write.  So, we won't recheck
1191b291f000SNick Piggin  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1192b291f000SNick Piggin  * VM_LOCKED.
1193b291f000SNick Piggin  */
119414fa31b8SAndi Kleen static int try_to_unmap_anon(struct page *page, enum ttu_flags flags)
11951da177e4SLinus Torvalds {
11961da177e4SLinus Torvalds 	struct anon_vma *anon_vma;
11975beb4930SRik van Riel 	struct anon_vma_chain *avc;
11981da177e4SLinus Torvalds 	int ret = SWAP_AGAIN;
1199b291f000SNick Piggin 
12001da177e4SLinus Torvalds 	anon_vma = page_lock_anon_vma(page);
12011da177e4SLinus Torvalds 	if (!anon_vma)
12021da177e4SLinus Torvalds 		return ret;
12031da177e4SLinus Torvalds 
12045beb4930SRik van Riel 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
12055beb4930SRik van Riel 		struct vm_area_struct *vma = avc->vma;
1206a8bef8ffSMel Gorman 		unsigned long address;
1207a8bef8ffSMel Gorman 
1208a8bef8ffSMel Gorman 		/*
1209a8bef8ffSMel Gorman 		 * During exec, a temporary VMA is set up and later moved.
1210a8bef8ffSMel Gorman 		 * The VMA is moved under the anon_vma lock but not the
1211a8bef8ffSMel Gorman 		 * page tables, leading to a race where migration cannot
1212a8bef8ffSMel Gorman 		 * find the migration ptes. Rather than increasing the
1213a8bef8ffSMel Gorman 		 * locking requirements of exec(), migration skips
1214a8bef8ffSMel Gorman 		 * temporary VMAs until after exec() completes.
1215a8bef8ffSMel Gorman 		 */
1216a8bef8ffSMel Gorman 		if (PAGE_MIGRATION && (flags & TTU_MIGRATION) &&
1217a8bef8ffSMel Gorman 				is_vma_temporary_stack(vma))
1218a8bef8ffSMel Gorman 			continue;
1219a8bef8ffSMel Gorman 
1220a8bef8ffSMel Gorman 		address = vma_address(page, vma);
12211cb1729bSHugh Dickins 		if (address == -EFAULT)
12221cb1729bSHugh Dickins 			continue;
12231cb1729bSHugh Dickins 		ret = try_to_unmap_one(page, vma, address, flags);
122453f79acbSHugh Dickins 		if (ret != SWAP_AGAIN || !page_mapped(page))
12251da177e4SLinus Torvalds 			break;
12261da177e4SLinus Torvalds 	}
122734bbd704SOleg Nesterov 
122834bbd704SOleg Nesterov 	page_unlock_anon_vma(anon_vma);
12291da177e4SLinus Torvalds 	return ret;
12301da177e4SLinus Torvalds }
12311da177e4SLinus Torvalds 
12321da177e4SLinus Torvalds /**
1233b291f000SNick Piggin  * try_to_unmap_file - unmap/unlock file page using the object-based rmap method
1234b291f000SNick Piggin  * @page: the page to unmap/unlock
123514fa31b8SAndi Kleen  * @flags: action and flags
12361da177e4SLinus Torvalds  *
12371da177e4SLinus Torvalds  * Find all the mappings of a page using the mapping pointer and the vma chains
12381da177e4SLinus Torvalds  * contained in the address_space struct it points to.
12391da177e4SLinus Torvalds  *
1240b291f000SNick Piggin  * This function is only called from try_to_unmap/try_to_munlock for
1241b291f000SNick Piggin  * object-based pages.
1242b291f000SNick Piggin  * When called from try_to_munlock(), the mmap_sem of the mm containing the vma
1243b291f000SNick Piggin  * where the page was found will be held for write.  So, we won't recheck
1244b291f000SNick Piggin  * vm_flags for that VMA.  That should be OK, because that vma shouldn't be
1245b291f000SNick Piggin  * VM_LOCKED.
12461da177e4SLinus Torvalds  */
124714fa31b8SAndi Kleen static int try_to_unmap_file(struct page *page, enum ttu_flags flags)
12481da177e4SLinus Torvalds {
12491da177e4SLinus Torvalds 	struct address_space *mapping = page->mapping;
12501da177e4SLinus Torvalds 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
12511da177e4SLinus Torvalds 	struct vm_area_struct *vma;
12521da177e4SLinus Torvalds 	struct prio_tree_iter iter;
12531da177e4SLinus Torvalds 	int ret = SWAP_AGAIN;
12541da177e4SLinus Torvalds 	unsigned long cursor;
12551da177e4SLinus Torvalds 	unsigned long max_nl_cursor = 0;
12561da177e4SLinus Torvalds 	unsigned long max_nl_size = 0;
12571da177e4SLinus Torvalds 	unsigned int mapcount;
12581da177e4SLinus Torvalds 
12591da177e4SLinus Torvalds 	spin_lock(&mapping->i_mmap_lock);
12601da177e4SLinus Torvalds 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
12611cb1729bSHugh Dickins 		unsigned long address = vma_address(page, vma);
12621cb1729bSHugh Dickins 		if (address == -EFAULT)
12631cb1729bSHugh Dickins 			continue;
12641cb1729bSHugh Dickins 		ret = try_to_unmap_one(page, vma, address, flags);
126553f79acbSHugh Dickins 		if (ret != SWAP_AGAIN || !page_mapped(page))
12661da177e4SLinus Torvalds 			goto out;
12671da177e4SLinus Torvalds 	}
1268b291f000SNick Piggin 
12691da177e4SLinus Torvalds 	if (list_empty(&mapping->i_mmap_nonlinear))
12701da177e4SLinus Torvalds 		goto out;
12711da177e4SLinus Torvalds 
127253f79acbSHugh Dickins 	/*
127353f79acbSHugh Dickins 	 * We don't bother trying to find the munlocked page in nonlinears.
127453f79acbSHugh Dickins 	 * It's costly. Instead, later, page reclaim logic may call
127553f79acbSHugh Dickins 	 * try_to_unmap(TTU_MUNLOCK) and recover PG_mlocked lazily.
127653f79acbSHugh Dickins 	 */
127753f79acbSHugh Dickins 	if (TTU_ACTION(flags) == TTU_MUNLOCK)
127853f79acbSHugh Dickins 		goto out;
127953f79acbSHugh Dickins 
12801da177e4SLinus Torvalds 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
12811da177e4SLinus Torvalds 						shared.vm_set.list) {
12821da177e4SLinus Torvalds 		cursor = (unsigned long) vma->vm_private_data;
12831da177e4SLinus Torvalds 		if (cursor > max_nl_cursor)
12841da177e4SLinus Torvalds 			max_nl_cursor = cursor;
12851da177e4SLinus Torvalds 		cursor = vma->vm_end - vma->vm_start;
12861da177e4SLinus Torvalds 		if (cursor > max_nl_size)
12871da177e4SLinus Torvalds 			max_nl_size = cursor;
12881da177e4SLinus Torvalds 	}
12891da177e4SLinus Torvalds 
1290b291f000SNick Piggin 	if (max_nl_size == 0) {	/* all nonlinears locked or reserved? */
12911da177e4SLinus Torvalds 		ret = SWAP_FAIL;
12921da177e4SLinus Torvalds 		goto out;
12931da177e4SLinus Torvalds 	}
12941da177e4SLinus Torvalds 
12951da177e4SLinus Torvalds 	/*
12961da177e4SLinus Torvalds 	 * We don't try to search for this page in the nonlinear vmas,
12971da177e4SLinus Torvalds 	 * and page_referenced wouldn't have found it anyway.  Instead
12981da177e4SLinus Torvalds 	 * just walk the nonlinear vmas trying to age and unmap some.
12991da177e4SLinus Torvalds 	 * The mapcount of the page we came in with is irrelevant,
13001da177e4SLinus Torvalds 	 * but even so use it as a guide to how hard we should try?
13011da177e4SLinus Torvalds 	 * but even so, use it as a guide to how hard we should try.
13021da177e4SLinus Torvalds 	mapcount = page_mapcount(page);
13031da177e4SLinus Torvalds 	if (!mapcount)
13041da177e4SLinus Torvalds 		goto out;
13051da177e4SLinus Torvalds 	cond_resched_lock(&mapping->i_mmap_lock);
13061da177e4SLinus Torvalds 
13071da177e4SLinus Torvalds 	max_nl_size = (max_nl_size + CLUSTER_SIZE - 1) & CLUSTER_MASK;
13081da177e4SLinus Torvalds 	if (max_nl_cursor == 0)
13091da177e4SLinus Torvalds 		max_nl_cursor = CLUSTER_SIZE;
13101da177e4SLinus Torvalds 
13111da177e4SLinus Torvalds 	do {
13121da177e4SLinus Torvalds 		list_for_each_entry(vma, &mapping->i_mmap_nonlinear,
13131da177e4SLinus Torvalds 						shared.vm_set.list) {
13141da177e4SLinus Torvalds 			cursor = (unsigned long) vma->vm_private_data;
1315839b9685SHugh Dickins 			while (cursor < max_nl_cursor &&
13161da177e4SLinus Torvalds 				cursor < vma->vm_end - vma->vm_start) {
131753f79acbSHugh Dickins 				if (try_to_unmap_cluster(cursor, &mapcount,
131853f79acbSHugh Dickins 						vma, page) == SWAP_MLOCK)
131953f79acbSHugh Dickins 					ret = SWAP_MLOCK;
13201da177e4SLinus Torvalds 				cursor += CLUSTER_SIZE;
13211da177e4SLinus Torvalds 				vma->vm_private_data = (void *) cursor;
13221da177e4SLinus Torvalds 				if ((int)mapcount <= 0)
13231da177e4SLinus Torvalds 					goto out;
13241da177e4SLinus Torvalds 			}
13251da177e4SLinus Torvalds 			vma->vm_private_data = (void *) max_nl_cursor;
13261da177e4SLinus Torvalds 		}
13271da177e4SLinus Torvalds 		cond_resched_lock(&mapping->i_mmap_lock);
13281da177e4SLinus Torvalds 		max_nl_cursor += CLUSTER_SIZE;
13291da177e4SLinus Torvalds 	} while (max_nl_cursor <= max_nl_size);
13301da177e4SLinus Torvalds 
13311da177e4SLinus Torvalds 	/*
13321da177e4SLinus Torvalds 	 * Don't loop forever (perhaps all the remaining pages are
13331da177e4SLinus Torvalds 	 * in locked vmas).  Reset cursor on all unreserved nonlinear
13341da177e4SLinus Torvalds 	 * vmas, now forgetting on which ones it had fallen behind.
13351da177e4SLinus Torvalds 	 */
1336101d2be7SHugh Dickins 	list_for_each_entry(vma, &mapping->i_mmap_nonlinear, shared.vm_set.list)
13371da177e4SLinus Torvalds 		vma->vm_private_data = NULL;
13381da177e4SLinus Torvalds out:
13391da177e4SLinus Torvalds 	spin_unlock(&mapping->i_mmap_lock);
13401da177e4SLinus Torvalds 	return ret;
13411da177e4SLinus Torvalds }
13421da177e4SLinus Torvalds 
13431da177e4SLinus Torvalds /**
13441da177e4SLinus Torvalds  * try_to_unmap - try to remove all page table mappings to a page
13451da177e4SLinus Torvalds  * @page: the page to get unmapped
134614fa31b8SAndi Kleen  * @flags: action and flags
13471da177e4SLinus Torvalds  *
13481da177e4SLinus Torvalds  * Tries to remove all the page table entries which are mapping this
13491da177e4SLinus Torvalds  * page, used in the pageout path.  Caller must hold the page lock.
13501da177e4SLinus Torvalds  * Return values are:
13511da177e4SLinus Torvalds  *
13521da177e4SLinus Torvalds  * SWAP_SUCCESS	- we succeeded in removing all mappings
13531da177e4SLinus Torvalds  * SWAP_AGAIN	- we missed a mapping, try again later
13541da177e4SLinus Torvalds  * SWAP_FAIL	- the page is unswappable
1355b291f000SNick Piggin  * SWAP_MLOCK	- page is mlocked.
13561da177e4SLinus Torvalds  */
135714fa31b8SAndi Kleen int try_to_unmap(struct page *page, enum ttu_flags flags)
13581da177e4SLinus Torvalds {
13591da177e4SLinus Torvalds 	int ret;
13601da177e4SLinus Torvalds 
13611da177e4SLinus Torvalds 	BUG_ON(!PageLocked(page));
13621da177e4SLinus Torvalds 
13635ad64688SHugh Dickins 	if (unlikely(PageKsm(page)))
13645ad64688SHugh Dickins 		ret = try_to_unmap_ksm(page, flags);
13655ad64688SHugh Dickins 	else if (PageAnon(page))
136614fa31b8SAndi Kleen 		ret = try_to_unmap_anon(page, flags);
13671da177e4SLinus Torvalds 	else
136814fa31b8SAndi Kleen 		ret = try_to_unmap_file(page, flags);
1369b291f000SNick Piggin 	if (ret != SWAP_MLOCK && !page_mapped(page))
13701da177e4SLinus Torvalds 		ret = SWAP_SUCCESS;
13711da177e4SLinus Torvalds 	return ret;
13721da177e4SLinus Torvalds }
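
/*
 * Illustrative caller sketch (not part of this file; shrink_page_list()
 * in mm/vmscan.c does roughly this for a mapped page):
 *
 *	switch (try_to_unmap(page, TTU_UNMAP)) {
 *	case SWAP_FAIL:
 *		goto activate_locked;
 *	case SWAP_AGAIN:
 *		goto keep_locked;
 *	case SWAP_MLOCK:
 *		goto cull_mlocked;
 *	case SWAP_SUCCESS:
 *		;	(the page is now unmapped: try to free it below)
 *	}
 */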
137381b4082dSNikita Danilov 
1374b291f000SNick Piggin /**
1375b291f000SNick Piggin  * try_to_munlock - try to munlock a page
1376b291f000SNick Piggin  * @page: the page to be munlocked
1377b291f000SNick Piggin  *
1378b291f000SNick Piggin  * Called from munlock code.  Checks all of the VMAs mapping the page
1379b291f000SNick Piggin  * to make sure nobody else has this page mlocked. The page will be
1380b291f000SNick Piggin  * returned with PG_mlocked cleared if no other vmas have it mlocked.
1381b291f000SNick Piggin  *
1382b291f000SNick Piggin  * Return values are:
1383b291f000SNick Piggin  *
138453f79acbSHugh Dickins  * SWAP_AGAIN	- no vma holds the page mlocked, or,
1385b291f000SNick Piggin  * SWAP_AGAIN	- page mapped in mlocked vma -- couldn't acquire mmap sem
13865ad64688SHugh Dickins  * SWAP_FAIL	- page cannot be located at present
1387b291f000SNick Piggin  * SWAP_MLOCK	- page is now mlocked.
1388b291f000SNick Piggin  */
1389b291f000SNick Piggin int try_to_munlock(struct page *page)
1390b291f000SNick Piggin {
1391b291f000SNick Piggin 	VM_BUG_ON(!PageLocked(page) || PageLRU(page));
1392b291f000SNick Piggin 
13935ad64688SHugh Dickins 	if (unlikely(PageKsm(page)))
13945ad64688SHugh Dickins 		return try_to_unmap_ksm(page, TTU_MUNLOCK);
13955ad64688SHugh Dickins 	else if (PageAnon(page))
139614fa31b8SAndi Kleen 		return try_to_unmap_anon(page, TTU_MUNLOCK);
1397b291f000SNick Piggin 	else
139814fa31b8SAndi Kleen 		return try_to_unmap_file(page, TTU_MUNLOCK);
1399b291f000SNick Piggin }
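
/*
 * Illustrative caller sketch (hedged; munlock_vma_page() in mm/mlock.c
 * does roughly this):
 *
 *	if (TestClearPageMlocked(page)) {
 *		dec_zone_page_state(page, NR_MLOCK);
 *		if (!isolate_lru_page(page)) {
 *			if (try_to_munlock(page) == SWAP_MLOCK)
 *				;	(some other vma still holds it mlocked)
 *			putback_lru_page(page);
 *		}
 *	}
 */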
1400e9995ef9SHugh Dickins 
140176545066SRik van Riel #if defined(CONFIG_KSM) || defined(CONFIG_MIGRATION)
140276545066SRik van Riel /*
140376545066SRik van Riel  * Drop an anon_vma refcount, freeing the anon_vma and anon_vma->root
140476545066SRik van Riel  * if necessary.  Be careful to do all the tests under the lock.  Once
140576545066SRik van Riel  * we know we are the last user, nobody else can get a reference and we
140676545066SRik van Riel  * can do the freeing without the lock.
140776545066SRik van Riel  */
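/*
 * Note, added for clarity: atomic_dec_and_lock(cnt, lock) decrements
 * cnt and returns true with the spinlock held only when the count
 * reaches zero, so the body below runs exactly once, for the final
 * reference, under anon_vma->root->lock.
 */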
140876545066SRik van Riel void drop_anon_vma(struct anon_vma *anon_vma)
140976545066SRik van Riel {
141076545066SRik van Riel 	if (atomic_dec_and_lock(&anon_vma->external_refcount, &anon_vma->root->lock)) {
141176545066SRik van Riel 		struct anon_vma *root = anon_vma->root;
141276545066SRik van Riel 		int empty = list_empty(&anon_vma->head);
141376545066SRik van Riel 		int last_root_user = 0;
141476545066SRik van Riel 		int root_empty = 0;
141576545066SRik van Riel 
141676545066SRik van Riel 		/*
141776545066SRik van Riel 		 * The refcount on a non-root anon_vma got dropped.  Drop
141876545066SRik van Riel 		 * the refcount on the root and check if we need to free it.
141976545066SRik van Riel 		 */
142076545066SRik van Riel 		if (empty && anon_vma != root) {
142176545066SRik van Riel 			last_root_user = atomic_dec_and_test(&root->external_refcount);
142276545066SRik van Riel 			root_empty = list_empty(&root->head);
142376545066SRik van Riel 		}
142476545066SRik van Riel 		anon_vma_unlock(anon_vma);
142576545066SRik van Riel 
142676545066SRik van Riel 		if (empty) {
142776545066SRik van Riel 			anon_vma_free(anon_vma);
142876545066SRik van Riel 			if (root_empty && last_root_user)
142976545066SRik van Riel 				anon_vma_free(root);
143076545066SRik van Riel 		}
143176545066SRik van Riel 	}
143276545066SRik van Riel }
143376545066SRik van Riel #endif
143476545066SRik van Riel 
1435e9995ef9SHugh Dickins #ifdef CONFIG_MIGRATION
1436e9995ef9SHugh Dickins /*
1437e9995ef9SHugh Dickins  * rmap_walk() and its helpers rmap_walk_anon() and rmap_walk_file():
1438e9995ef9SHugh Dickins  * Called by migrate.c to remove migration ptes, but might be used more later.
1439e9995ef9SHugh Dickins  */
1440e9995ef9SHugh Dickins static int rmap_walk_anon(struct page *page, int (*rmap_one)(struct page *,
1441e9995ef9SHugh Dickins 		struct vm_area_struct *, unsigned long, void *), void *arg)
1442e9995ef9SHugh Dickins {
1443e9995ef9SHugh Dickins 	struct anon_vma *anon_vma;
14445beb4930SRik van Riel 	struct anon_vma_chain *avc;
1445e9995ef9SHugh Dickins 	int ret = SWAP_AGAIN;
1446e9995ef9SHugh Dickins 
1447e9995ef9SHugh Dickins 	/*
1448e9995ef9SHugh Dickins 	 * Note: remove_migration_ptes() cannot use page_lock_anon_vma()
1449e9995ef9SHugh Dickins 	 * because that depends on page_mapped(); but not all of its callers
14503f6c8272SMel Gorman 	 * hold mmap_sem.  Callers without mmap_sem are required to take a
14513f6c8272SMel Gorman 	 * reference count to prevent the anon_vma from disappearing.
1452e9995ef9SHugh Dickins 	 */
1453e9995ef9SHugh Dickins 	anon_vma = page_anon_vma(page);
1454e9995ef9SHugh Dickins 	if (!anon_vma)
1455e9995ef9SHugh Dickins 		return ret;
1456cba48b98SRik van Riel 	anon_vma_lock(anon_vma);
14575beb4930SRik van Riel 	list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
14585beb4930SRik van Riel 		struct vm_area_struct *vma = avc->vma;
1459e9995ef9SHugh Dickins 		unsigned long address = vma_address(page, vma);
1460e9995ef9SHugh Dickins 		if (address == -EFAULT)
1461e9995ef9SHugh Dickins 			continue;
1462e9995ef9SHugh Dickins 		ret = rmap_one(page, vma, address, arg);
1463e9995ef9SHugh Dickins 		if (ret != SWAP_AGAIN)
1464e9995ef9SHugh Dickins 			break;
1465e9995ef9SHugh Dickins 	}
1466cba48b98SRik van Riel 	anon_vma_unlock(anon_vma);
1467e9995ef9SHugh Dickins 	return ret;
1468e9995ef9SHugh Dickins }
1469e9995ef9SHugh Dickins 
1470e9995ef9SHugh Dickins static int rmap_walk_file(struct page *page, int (*rmap_one)(struct page *,
1471e9995ef9SHugh Dickins 		struct vm_area_struct *, unsigned long, void *), void *arg)
1472e9995ef9SHugh Dickins {
1473e9995ef9SHugh Dickins 	struct address_space *mapping = page->mapping;
1474e9995ef9SHugh Dickins 	pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
1475e9995ef9SHugh Dickins 	struct vm_area_struct *vma;
1476e9995ef9SHugh Dickins 	struct prio_tree_iter iter;
1477e9995ef9SHugh Dickins 	int ret = SWAP_AGAIN;
1478e9995ef9SHugh Dickins 
1479e9995ef9SHugh Dickins 	if (!mapping)
1480e9995ef9SHugh Dickins 		return ret;
1481e9995ef9SHugh Dickins 	spin_lock(&mapping->i_mmap_lock);
1482e9995ef9SHugh Dickins 	vma_prio_tree_foreach(vma, &iter, &mapping->i_mmap, pgoff, pgoff) {
1483e9995ef9SHugh Dickins 		unsigned long address = vma_address(page, vma);
1484e9995ef9SHugh Dickins 		if (address == -EFAULT)
1485e9995ef9SHugh Dickins 			continue;
1486e9995ef9SHugh Dickins 		ret = rmap_one(page, vma, address, arg);
1487e9995ef9SHugh Dickins 		if (ret != SWAP_AGAIN)
1488e9995ef9SHugh Dickins 			break;
1489e9995ef9SHugh Dickins 	}
1490e9995ef9SHugh Dickins 	/*
1491e9995ef9SHugh Dickins 	 * No nonlinear handling: being always shared, nonlinear vmas
1492e9995ef9SHugh Dickins 	 * never contain migration ptes.  Decide what to do about this
1493e9995ef9SHugh Dickins 	 * limitation to linear when we need rmap_walk() on nonlinear.
1494e9995ef9SHugh Dickins 	 */
1495e9995ef9SHugh Dickins 	spin_unlock(&mapping->i_mmap_lock);
1496e9995ef9SHugh Dickins 	return ret;
1497e9995ef9SHugh Dickins }
1498e9995ef9SHugh Dickins 
1499e9995ef9SHugh Dickins int rmap_walk(struct page *page, int (*rmap_one)(struct page *,
1500e9995ef9SHugh Dickins 		struct vm_area_struct *, unsigned long, void *), void *arg)
1501e9995ef9SHugh Dickins {
1502e9995ef9SHugh Dickins 	VM_BUG_ON(!PageLocked(page));
1503e9995ef9SHugh Dickins 
1504e9995ef9SHugh Dickins 	if (unlikely(PageKsm(page)))
1505e9995ef9SHugh Dickins 		return rmap_walk_ksm(page, rmap_one, arg);
1506e9995ef9SHugh Dickins 	else if (PageAnon(page))
1507e9995ef9SHugh Dickins 		return rmap_walk_anon(page, rmap_one, arg);
1508e9995ef9SHugh Dickins 	else
1509e9995ef9SHugh Dickins 		return rmap_walk_file(page, rmap_one, arg);
1510e9995ef9SHugh Dickins }
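
/*
 * Illustrative caller sketch (hedged; see remove_migration_ptes() in
 * mm/migrate.c): after copying 'old' into 'new', migration restores
 * the ptes with
 *
 *	rmap_walk(new, remove_migration_pte, old);
 *
 * where remove_migration_pte(new, vma, address, old) converts the
 * migration entry found at 'address' back into a real pte mapping 'new'.
 */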
1511e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */
1512