/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hash.h>
#include <linux/freezer.h>

#include <asm/tlbflush.h>
#include "internal.h"

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching
 * this tree is fully reliable (except when pages are unmapped), and therefore
 * this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 */

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
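 *
 * For orientation: none of this scanning happens by default.  An area is
 * only considered after the owning process has opted it in with
 * madvise(MADV_MERGEABLE), and ksmd only runs once it is switched on
 * through sysfs.  Roughly, from userspace:
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	madvise(buf, len, MADV_MERGEABLE);
 *
 * and then, as root:
 *
 *	echo 1 > /sys/kernel/mm/ksm/run
 *
 * The pages_to_scan and sleep_millisecs files next to "run" correspond to
 * the ksm_thread_pages_to_scan and ksm_thread_sleep_millisecs knobs below.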
10431dbd01fSIzik Eidus */ 10531dbd01fSIzik Eidus struct ksm_scan { 10631dbd01fSIzik Eidus struct mm_slot *mm_slot; 10731dbd01fSIzik Eidus unsigned long address; 1086514d511SHugh Dickins struct rmap_item **rmap_list; 10931dbd01fSIzik Eidus unsigned long seqnr; 11031dbd01fSIzik Eidus }; 11131dbd01fSIzik Eidus 11231dbd01fSIzik Eidus /** 1137b6ba2c7SHugh Dickins * struct stable_node - node of the stable rbtree 1147b6ba2c7SHugh Dickins * @node: rb node of this ksm page in the stable tree 1157b6ba2c7SHugh Dickins * @hlist: hlist head of rmap_items using this ksm page 11662b61f61SHugh Dickins * @kpfn: page frame number of this ksm page 1177b6ba2c7SHugh Dickins */ 1187b6ba2c7SHugh Dickins struct stable_node { 1197b6ba2c7SHugh Dickins struct rb_node node; 1207b6ba2c7SHugh Dickins struct hlist_head hlist; 12162b61f61SHugh Dickins unsigned long kpfn; 1227b6ba2c7SHugh Dickins }; 1237b6ba2c7SHugh Dickins 1247b6ba2c7SHugh Dickins /** 12531dbd01fSIzik Eidus * struct rmap_item - reverse mapping item for virtual addresses 1266514d511SHugh Dickins * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list 127db114b83SHugh Dickins * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree 12831dbd01fSIzik Eidus * @mm: the memory structure this rmap_item is pointing into 12931dbd01fSIzik Eidus * @address: the virtual address this rmap_item tracks (+ flags in low bits) 13031dbd01fSIzik Eidus * @oldchecksum: previous checksum of the page at that virtual address 1317b6ba2c7SHugh Dickins * @node: rb node of this rmap_item in the unstable tree 1327b6ba2c7SHugh Dickins * @head: pointer to stable_node heading this list in the stable tree 1337b6ba2c7SHugh Dickins * @hlist: link into hlist of rmap_items hanging off that stable_node 13431dbd01fSIzik Eidus */ 13531dbd01fSIzik Eidus struct rmap_item { 1366514d511SHugh Dickins struct rmap_item *rmap_list; 137db114b83SHugh Dickins struct anon_vma *anon_vma; /* when stable */ 13831dbd01fSIzik Eidus struct mm_struct *mm; 13931dbd01fSIzik Eidus unsigned long address; /* + low bits used for flags below */ 14031dbd01fSIzik Eidus unsigned int oldchecksum; /* when unstable */ 14131dbd01fSIzik Eidus union { 1427b6ba2c7SHugh Dickins struct rb_node node; /* when node of unstable tree */ 1437b6ba2c7SHugh Dickins struct { /* when listed from stable tree */ 1447b6ba2c7SHugh Dickins struct stable_node *head; 1457b6ba2c7SHugh Dickins struct hlist_node hlist; 1467b6ba2c7SHugh Dickins }; 14731dbd01fSIzik Eidus }; 14831dbd01fSIzik Eidus }; 14931dbd01fSIzik Eidus 15031dbd01fSIzik Eidus #define SEQNR_MASK 0x0ff /* low bits of unstable tree seqnr */ 1517b6ba2c7SHugh Dickins #define UNSTABLE_FLAG 0x100 /* is a node of the unstable tree */ 1527b6ba2c7SHugh Dickins #define STABLE_FLAG 0x200 /* is listed from the stable tree */ 15331dbd01fSIzik Eidus 15431dbd01fSIzik Eidus /* The stable and unstable tree heads */ 15531dbd01fSIzik Eidus static struct rb_root root_stable_tree = RB_ROOT; 15631dbd01fSIzik Eidus static struct rb_root root_unstable_tree = RB_ROOT; 15731dbd01fSIzik Eidus 158d9f8984cSLai Jiangshan #define MM_SLOTS_HASH_SHIFT 10 159d9f8984cSLai Jiangshan #define MM_SLOTS_HASH_HEADS (1 << MM_SLOTS_HASH_SHIFT) 160d9f8984cSLai Jiangshan static struct hlist_head mm_slots_hash[MM_SLOTS_HASH_HEADS]; 16131dbd01fSIzik Eidus 16231dbd01fSIzik Eidus static struct mm_slot ksm_mm_head = { 16331dbd01fSIzik Eidus .mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list), 16431dbd01fSIzik Eidus }; 16531dbd01fSIzik Eidus static struct ksm_scan ksm_scan = { 16631dbd01fSIzik Eidus .mm_slot = 
&ksm_mm_head, 16731dbd01fSIzik Eidus }; 16831dbd01fSIzik Eidus 16931dbd01fSIzik Eidus static struct kmem_cache *rmap_item_cache; 1707b6ba2c7SHugh Dickins static struct kmem_cache *stable_node_cache; 17131dbd01fSIzik Eidus static struct kmem_cache *mm_slot_cache; 17231dbd01fSIzik Eidus 17331dbd01fSIzik Eidus /* The number of nodes in the stable tree */ 174b4028260SHugh Dickins static unsigned long ksm_pages_shared; 17531dbd01fSIzik Eidus 176e178dfdeSHugh Dickins /* The number of page slots additionally sharing those nodes */ 177b4028260SHugh Dickins static unsigned long ksm_pages_sharing; 17831dbd01fSIzik Eidus 179473b0ce4SHugh Dickins /* The number of nodes in the unstable tree */ 180473b0ce4SHugh Dickins static unsigned long ksm_pages_unshared; 181473b0ce4SHugh Dickins 182473b0ce4SHugh Dickins /* The number of rmap_items in use: to calculate pages_volatile */ 183473b0ce4SHugh Dickins static unsigned long ksm_rmap_items; 184473b0ce4SHugh Dickins 18531dbd01fSIzik Eidus /* Number of pages ksmd should scan in one batch */ 1862c6854fdSIzik Eidus static unsigned int ksm_thread_pages_to_scan = 100; 18731dbd01fSIzik Eidus 18831dbd01fSIzik Eidus /* Milliseconds ksmd should sleep between batches */ 1892ffd8679SHugh Dickins static unsigned int ksm_thread_sleep_millisecs = 20; 19031dbd01fSIzik Eidus 19131dbd01fSIzik Eidus #define KSM_RUN_STOP 0 19231dbd01fSIzik Eidus #define KSM_RUN_MERGE 1 19331dbd01fSIzik Eidus #define KSM_RUN_UNMERGE 2 1942c6854fdSIzik Eidus static unsigned int ksm_run = KSM_RUN_STOP; 19531dbd01fSIzik Eidus 19631dbd01fSIzik Eidus static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait); 19731dbd01fSIzik Eidus static DEFINE_MUTEX(ksm_thread_mutex); 19831dbd01fSIzik Eidus static DEFINE_SPINLOCK(ksm_mmlist_lock); 19931dbd01fSIzik Eidus 20031dbd01fSIzik Eidus #define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\ 20131dbd01fSIzik Eidus sizeof(struct __struct), __alignof__(struct __struct),\ 20231dbd01fSIzik Eidus (__flags), NULL) 20331dbd01fSIzik Eidus 20431dbd01fSIzik Eidus static int __init ksm_slab_init(void) 20531dbd01fSIzik Eidus { 20631dbd01fSIzik Eidus rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0); 20731dbd01fSIzik Eidus if (!rmap_item_cache) 20831dbd01fSIzik Eidus goto out; 20931dbd01fSIzik Eidus 2107b6ba2c7SHugh Dickins stable_node_cache = KSM_KMEM_CACHE(stable_node, 0); 2117b6ba2c7SHugh Dickins if (!stable_node_cache) 2127b6ba2c7SHugh Dickins goto out_free1; 2137b6ba2c7SHugh Dickins 21431dbd01fSIzik Eidus mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0); 21531dbd01fSIzik Eidus if (!mm_slot_cache) 2167b6ba2c7SHugh Dickins goto out_free2; 21731dbd01fSIzik Eidus 21831dbd01fSIzik Eidus return 0; 21931dbd01fSIzik Eidus 2207b6ba2c7SHugh Dickins out_free2: 2217b6ba2c7SHugh Dickins kmem_cache_destroy(stable_node_cache); 2227b6ba2c7SHugh Dickins out_free1: 22331dbd01fSIzik Eidus kmem_cache_destroy(rmap_item_cache); 22431dbd01fSIzik Eidus out: 22531dbd01fSIzik Eidus return -ENOMEM; 22631dbd01fSIzik Eidus } 22731dbd01fSIzik Eidus 22831dbd01fSIzik Eidus static void __init ksm_slab_free(void) 22931dbd01fSIzik Eidus { 23031dbd01fSIzik Eidus kmem_cache_destroy(mm_slot_cache); 2317b6ba2c7SHugh Dickins kmem_cache_destroy(stable_node_cache); 23231dbd01fSIzik Eidus kmem_cache_destroy(rmap_item_cache); 23331dbd01fSIzik Eidus mm_slot_cache = NULL; 23431dbd01fSIzik Eidus } 23531dbd01fSIzik Eidus 23631dbd01fSIzik Eidus static inline struct rmap_item *alloc_rmap_item(void) 23731dbd01fSIzik Eidus { 238473b0ce4SHugh Dickins struct rmap_item *rmap_item; 239473b0ce4SHugh Dickins 
240473b0ce4SHugh Dickins rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL); 241473b0ce4SHugh Dickins if (rmap_item) 242473b0ce4SHugh Dickins ksm_rmap_items++; 243473b0ce4SHugh Dickins return rmap_item; 24431dbd01fSIzik Eidus } 24531dbd01fSIzik Eidus 24631dbd01fSIzik Eidus static inline void free_rmap_item(struct rmap_item *rmap_item) 24731dbd01fSIzik Eidus { 248473b0ce4SHugh Dickins ksm_rmap_items--; 24931dbd01fSIzik Eidus rmap_item->mm = NULL; /* debug safety */ 25031dbd01fSIzik Eidus kmem_cache_free(rmap_item_cache, rmap_item); 25131dbd01fSIzik Eidus } 25231dbd01fSIzik Eidus 2537b6ba2c7SHugh Dickins static inline struct stable_node *alloc_stable_node(void) 2547b6ba2c7SHugh Dickins { 2557b6ba2c7SHugh Dickins return kmem_cache_alloc(stable_node_cache, GFP_KERNEL); 2567b6ba2c7SHugh Dickins } 2577b6ba2c7SHugh Dickins 2587b6ba2c7SHugh Dickins static inline void free_stable_node(struct stable_node *stable_node) 2597b6ba2c7SHugh Dickins { 2607b6ba2c7SHugh Dickins kmem_cache_free(stable_node_cache, stable_node); 2617b6ba2c7SHugh Dickins } 2627b6ba2c7SHugh Dickins 26331dbd01fSIzik Eidus static inline struct mm_slot *alloc_mm_slot(void) 26431dbd01fSIzik Eidus { 26531dbd01fSIzik Eidus if (!mm_slot_cache) /* initialization failed */ 26631dbd01fSIzik Eidus return NULL; 26731dbd01fSIzik Eidus return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL); 26831dbd01fSIzik Eidus } 26931dbd01fSIzik Eidus 27031dbd01fSIzik Eidus static inline void free_mm_slot(struct mm_slot *mm_slot) 27131dbd01fSIzik Eidus { 27231dbd01fSIzik Eidus kmem_cache_free(mm_slot_cache, mm_slot); 27331dbd01fSIzik Eidus } 27431dbd01fSIzik Eidus 27531dbd01fSIzik Eidus static struct mm_slot *get_mm_slot(struct mm_struct *mm) 27631dbd01fSIzik Eidus { 27731dbd01fSIzik Eidus struct mm_slot *mm_slot; 27831dbd01fSIzik Eidus struct hlist_head *bucket; 27931dbd01fSIzik Eidus struct hlist_node *node; 28031dbd01fSIzik Eidus 281d9f8984cSLai Jiangshan bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)]; 28231dbd01fSIzik Eidus hlist_for_each_entry(mm_slot, node, bucket, link) { 28331dbd01fSIzik Eidus if (mm == mm_slot->mm) 28431dbd01fSIzik Eidus return mm_slot; 28531dbd01fSIzik Eidus } 28631dbd01fSIzik Eidus return NULL; 28731dbd01fSIzik Eidus } 28831dbd01fSIzik Eidus 28931dbd01fSIzik Eidus static void insert_to_mm_slots_hash(struct mm_struct *mm, 29031dbd01fSIzik Eidus struct mm_slot *mm_slot) 29131dbd01fSIzik Eidus { 29231dbd01fSIzik Eidus struct hlist_head *bucket; 29331dbd01fSIzik Eidus 294d9f8984cSLai Jiangshan bucket = &mm_slots_hash[hash_ptr(mm, MM_SLOTS_HASH_SHIFT)]; 29531dbd01fSIzik Eidus mm_slot->mm = mm; 29631dbd01fSIzik Eidus hlist_add_head(&mm_slot->link, bucket); 29731dbd01fSIzik Eidus } 29831dbd01fSIzik Eidus 29931dbd01fSIzik Eidus static inline int in_stable_tree(struct rmap_item *rmap_item) 30031dbd01fSIzik Eidus { 30131dbd01fSIzik Eidus return rmap_item->address & STABLE_FLAG; 30231dbd01fSIzik Eidus } 30331dbd01fSIzik Eidus 304db114b83SHugh Dickins static void hold_anon_vma(struct rmap_item *rmap_item, 305db114b83SHugh Dickins struct anon_vma *anon_vma) 306db114b83SHugh Dickins { 307db114b83SHugh Dickins rmap_item->anon_vma = anon_vma; 30876545066SRik van Riel get_anon_vma(anon_vma); 309db114b83SHugh Dickins } 310db114b83SHugh Dickins 31176545066SRik van Riel static void ksm_drop_anon_vma(struct rmap_item *rmap_item) 312db114b83SHugh Dickins { 313db114b83SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 314db114b83SHugh Dickins 31576545066SRik van Riel drop_anon_vma(anon_vma); 316db114b83SHugh Dickins } 
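
/*
 * The two helpers above pin the anon_vma of the area an rmap_item was
 * merged from, for as long as that rmap_item sits in the stable tree:
 * the ksm rmap walking helpers further down this file use it to reach
 * that vma's ptes when the shared ksm page is being reclaimed or unmapped.
 * Correspondingly, break_cow() and the tree-removal paths below drop the
 * reference as soon as the rmap_item leaves the stable tree.
 */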
317db114b83SHugh Dickins 31831dbd01fSIzik Eidus /* 319a913e182SHugh Dickins * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's 320a913e182SHugh Dickins * page tables after it has passed through ksm_exit() - which, if necessary, 321a913e182SHugh Dickins * takes mmap_sem briefly to serialize against them. ksm_exit() does not set 322a913e182SHugh Dickins * a special flag: they can just back out as soon as mm_users goes to zero. 323a913e182SHugh Dickins * ksm_test_exit() is used throughout to make this test for exit: in some 324a913e182SHugh Dickins * places for correctness, in some places just to avoid unnecessary work. 325a913e182SHugh Dickins */ 326a913e182SHugh Dickins static inline bool ksm_test_exit(struct mm_struct *mm) 327a913e182SHugh Dickins { 328a913e182SHugh Dickins return atomic_read(&mm->mm_users) == 0; 329a913e182SHugh Dickins } 330a913e182SHugh Dickins 331a913e182SHugh Dickins /* 33231dbd01fSIzik Eidus * We use break_ksm to break COW on a ksm page: it's a stripped down 33331dbd01fSIzik Eidus * 33431dbd01fSIzik Eidus * if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1) 33531dbd01fSIzik Eidus * put_page(page); 33631dbd01fSIzik Eidus * 33731dbd01fSIzik Eidus * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma, 33831dbd01fSIzik Eidus * in case the application has unmapped and remapped mm,addr meanwhile. 33931dbd01fSIzik Eidus * Could a ksm page appear anywhere else? Actually yes, in a VM_PFNMAP 34031dbd01fSIzik Eidus * mmap of /dev/mem or /dev/kmem, where we would not want to touch it. 34131dbd01fSIzik Eidus */ 342d952b791SHugh Dickins static int break_ksm(struct vm_area_struct *vma, unsigned long addr) 34331dbd01fSIzik Eidus { 34431dbd01fSIzik Eidus struct page *page; 345d952b791SHugh Dickins int ret = 0; 34631dbd01fSIzik Eidus 34731dbd01fSIzik Eidus do { 34831dbd01fSIzik Eidus cond_resched(); 34931dbd01fSIzik Eidus page = follow_page(vma, addr, FOLL_GET); 35022eccdd7SDan Carpenter if (IS_ERR_OR_NULL(page)) 35131dbd01fSIzik Eidus break; 35231dbd01fSIzik Eidus if (PageKsm(page)) 35331dbd01fSIzik Eidus ret = handle_mm_fault(vma->vm_mm, vma, addr, 35431dbd01fSIzik Eidus FAULT_FLAG_WRITE); 35531dbd01fSIzik Eidus else 35631dbd01fSIzik Eidus ret = VM_FAULT_WRITE; 35731dbd01fSIzik Eidus put_page(page); 358d952b791SHugh Dickins } while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM))); 359d952b791SHugh Dickins /* 360d952b791SHugh Dickins * We must loop because handle_mm_fault() may back out if there's 361d952b791SHugh Dickins * any difficulty e.g. if pte accessed bit gets updated concurrently. 362d952b791SHugh Dickins * 363d952b791SHugh Dickins * VM_FAULT_WRITE is what we have been hoping for: it indicates that 364d952b791SHugh Dickins * COW has been broken, even if the vma does not permit VM_WRITE; 365d952b791SHugh Dickins * but note that a concurrent fault might break PageKsm for us. 366d952b791SHugh Dickins * 367d952b791SHugh Dickins * VM_FAULT_SIGBUS could occur if we race with truncation of the 368d952b791SHugh Dickins * backing file, which also invalidates anonymous pages: that's 369d952b791SHugh Dickins * okay, that truncation will have unmapped the PageKsm for us. 
370d952b791SHugh Dickins * 371d952b791SHugh Dickins * VM_FAULT_OOM: at the time of writing (late July 2009), setting 372d952b791SHugh Dickins * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the 373d952b791SHugh Dickins * current task has TIF_MEMDIE set, and will be OOM killed on return 374d952b791SHugh Dickins * to user; and ksmd, having no mm, would never be chosen for that. 375d952b791SHugh Dickins * 376d952b791SHugh Dickins * But if the mm is in a limited mem_cgroup, then the fault may fail 377d952b791SHugh Dickins * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and 378d952b791SHugh Dickins * even ksmd can fail in this way - though it's usually breaking ksm 379d952b791SHugh Dickins * just to undo a merge it made a moment before, so unlikely to oom. 380d952b791SHugh Dickins * 381d952b791SHugh Dickins * That's a pity: we might therefore have more kernel pages allocated 382d952b791SHugh Dickins * than we're counting as nodes in the stable tree; but ksm_do_scan 383d952b791SHugh Dickins * will retry to break_cow on each pass, so should recover the page 384d952b791SHugh Dickins * in due course. The important thing is to not let VM_MERGEABLE 385d952b791SHugh Dickins * be cleared while any such pages might remain in the area. 386d952b791SHugh Dickins */ 387d952b791SHugh Dickins return (ret & VM_FAULT_OOM) ? -ENOMEM : 0; 38831dbd01fSIzik Eidus } 38931dbd01fSIzik Eidus 3908dd3557aSHugh Dickins static void break_cow(struct rmap_item *rmap_item) 39131dbd01fSIzik Eidus { 3928dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 3938dd3557aSHugh Dickins unsigned long addr = rmap_item->address; 39431dbd01fSIzik Eidus struct vm_area_struct *vma; 39531dbd01fSIzik Eidus 3964035c07aSHugh Dickins /* 3974035c07aSHugh Dickins * It is not an accident that whenever we want to break COW 3984035c07aSHugh Dickins * to undo, we also need to drop a reference to the anon_vma. 3994035c07aSHugh Dickins */ 40076545066SRik van Riel ksm_drop_anon_vma(rmap_item); 4014035c07aSHugh Dickins 40281464e30SHugh Dickins down_read(&mm->mmap_sem); 4039ba69294SHugh Dickins if (ksm_test_exit(mm)) 4049ba69294SHugh Dickins goto out; 40531dbd01fSIzik Eidus vma = find_vma(mm, addr); 40631dbd01fSIzik Eidus if (!vma || vma->vm_start > addr) 40781464e30SHugh Dickins goto out; 40831dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 40981464e30SHugh Dickins goto out; 41031dbd01fSIzik Eidus break_ksm(vma, addr); 41181464e30SHugh Dickins out: 41231dbd01fSIzik Eidus up_read(&mm->mmap_sem); 41331dbd01fSIzik Eidus } 41431dbd01fSIzik Eidus 41529ad768cSAndrea Arcangeli static struct page *page_trans_compound_anon(struct page *page) 41629ad768cSAndrea Arcangeli { 41729ad768cSAndrea Arcangeli if (PageTransCompound(page)) { 41822e5c47eSAndrea Arcangeli struct page *head = compound_trans_head(page); 41929ad768cSAndrea Arcangeli /* 42022e5c47eSAndrea Arcangeli * head may actually be splitted and freed from under 42122e5c47eSAndrea Arcangeli * us but it's ok here. 
42229ad768cSAndrea Arcangeli */ 42329ad768cSAndrea Arcangeli if (PageAnon(head)) 42429ad768cSAndrea Arcangeli return head; 42529ad768cSAndrea Arcangeli } 42629ad768cSAndrea Arcangeli return NULL; 42729ad768cSAndrea Arcangeli } 42829ad768cSAndrea Arcangeli 42931dbd01fSIzik Eidus static struct page *get_mergeable_page(struct rmap_item *rmap_item) 43031dbd01fSIzik Eidus { 43131dbd01fSIzik Eidus struct mm_struct *mm = rmap_item->mm; 43231dbd01fSIzik Eidus unsigned long addr = rmap_item->address; 43331dbd01fSIzik Eidus struct vm_area_struct *vma; 43431dbd01fSIzik Eidus struct page *page; 43531dbd01fSIzik Eidus 43631dbd01fSIzik Eidus down_read(&mm->mmap_sem); 4379ba69294SHugh Dickins if (ksm_test_exit(mm)) 4389ba69294SHugh Dickins goto out; 43931dbd01fSIzik Eidus vma = find_vma(mm, addr); 44031dbd01fSIzik Eidus if (!vma || vma->vm_start > addr) 44131dbd01fSIzik Eidus goto out; 44231dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 44331dbd01fSIzik Eidus goto out; 44431dbd01fSIzik Eidus 44531dbd01fSIzik Eidus page = follow_page(vma, addr, FOLL_GET); 44622eccdd7SDan Carpenter if (IS_ERR_OR_NULL(page)) 44731dbd01fSIzik Eidus goto out; 44829ad768cSAndrea Arcangeli if (PageAnon(page) || page_trans_compound_anon(page)) { 44931dbd01fSIzik Eidus flush_anon_page(vma, page, addr); 45031dbd01fSIzik Eidus flush_dcache_page(page); 45131dbd01fSIzik Eidus } else { 45231dbd01fSIzik Eidus put_page(page); 45331dbd01fSIzik Eidus out: page = NULL; 45431dbd01fSIzik Eidus } 45531dbd01fSIzik Eidus up_read(&mm->mmap_sem); 45631dbd01fSIzik Eidus return page; 45731dbd01fSIzik Eidus } 45831dbd01fSIzik Eidus 4594035c07aSHugh Dickins static void remove_node_from_stable_tree(struct stable_node *stable_node) 4604035c07aSHugh Dickins { 4614035c07aSHugh Dickins struct rmap_item *rmap_item; 4624035c07aSHugh Dickins struct hlist_node *hlist; 4634035c07aSHugh Dickins 4644035c07aSHugh Dickins hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 4654035c07aSHugh Dickins if (rmap_item->hlist.next) 4664035c07aSHugh Dickins ksm_pages_sharing--; 4674035c07aSHugh Dickins else 4684035c07aSHugh Dickins ksm_pages_shared--; 46976545066SRik van Riel ksm_drop_anon_vma(rmap_item); 4704035c07aSHugh Dickins rmap_item->address &= PAGE_MASK; 4714035c07aSHugh Dickins cond_resched(); 4724035c07aSHugh Dickins } 4734035c07aSHugh Dickins 4744035c07aSHugh Dickins rb_erase(&stable_node->node, &root_stable_tree); 4754035c07aSHugh Dickins free_stable_node(stable_node); 4764035c07aSHugh Dickins } 4774035c07aSHugh Dickins 4784035c07aSHugh Dickins /* 4794035c07aSHugh Dickins * get_ksm_page: checks if the page indicated by the stable node 4804035c07aSHugh Dickins * is still its ksm page, despite having held no reference to it. 4814035c07aSHugh Dickins * In which case we can trust the content of the page, and it 4824035c07aSHugh Dickins * returns the gotten page; but if the page has now been zapped, 4834035c07aSHugh Dickins * remove the stale node from the stable tree and return NULL. 4844035c07aSHugh Dickins * 4854035c07aSHugh Dickins * You would expect the stable_node to hold a reference to the ksm page. 4864035c07aSHugh Dickins * But if it increments the page's count, swapping out has to wait for 4874035c07aSHugh Dickins * ksmd to come around again before it can free the page, which may take 4884035c07aSHugh Dickins * seconds or even minutes: much too unresponsive. 
So instead we use a 4894035c07aSHugh Dickins * "keyhole reference": access to the ksm page from the stable node peeps 4904035c07aSHugh Dickins * out through its keyhole to see if that page still holds the right key, 4914035c07aSHugh Dickins * pointing back to this stable node. This relies on freeing a PageAnon 4924035c07aSHugh Dickins * page to reset its page->mapping to NULL, and relies on no other use of 4934035c07aSHugh Dickins * a page to put something that might look like our key in page->mapping. 4944035c07aSHugh Dickins * 4954035c07aSHugh Dickins * include/linux/pagemap.h page_cache_get_speculative() is a good reference, 4964035c07aSHugh Dickins * but this is different - made simpler by ksm_thread_mutex being held, but 4974035c07aSHugh Dickins * interesting for assuming that no other use of the struct page could ever 4984035c07aSHugh Dickins * put our expected_mapping into page->mapping (or a field of the union which 4994035c07aSHugh Dickins * coincides with page->mapping). The RCU calls are not for KSM at all, but 5004035c07aSHugh Dickins * to keep the page_count protocol described with page_cache_get_speculative. 5014035c07aSHugh Dickins * 5024035c07aSHugh Dickins * Note: it is possible that get_ksm_page() will return NULL one moment, 5034035c07aSHugh Dickins * then page the next, if the page is in between page_freeze_refs() and 5044035c07aSHugh Dickins * page_unfreeze_refs(): this shouldn't be a problem anywhere, the page 5054035c07aSHugh Dickins * is on its way to being freed; but it is an anomaly to bear in mind. 5064035c07aSHugh Dickins */ 5074035c07aSHugh Dickins static struct page *get_ksm_page(struct stable_node *stable_node) 5084035c07aSHugh Dickins { 5094035c07aSHugh Dickins struct page *page; 5104035c07aSHugh Dickins void *expected_mapping; 5114035c07aSHugh Dickins 51262b61f61SHugh Dickins page = pfn_to_page(stable_node->kpfn); 5134035c07aSHugh Dickins expected_mapping = (void *)stable_node + 5144035c07aSHugh Dickins (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM); 5154035c07aSHugh Dickins rcu_read_lock(); 5164035c07aSHugh Dickins if (page->mapping != expected_mapping) 5174035c07aSHugh Dickins goto stale; 5184035c07aSHugh Dickins if (!get_page_unless_zero(page)) 5194035c07aSHugh Dickins goto stale; 5204035c07aSHugh Dickins if (page->mapping != expected_mapping) { 5214035c07aSHugh Dickins put_page(page); 5224035c07aSHugh Dickins goto stale; 5234035c07aSHugh Dickins } 5244035c07aSHugh Dickins rcu_read_unlock(); 5254035c07aSHugh Dickins return page; 5264035c07aSHugh Dickins stale: 5274035c07aSHugh Dickins rcu_read_unlock(); 5284035c07aSHugh Dickins remove_node_from_stable_tree(stable_node); 5294035c07aSHugh Dickins return NULL; 5304035c07aSHugh Dickins } 5314035c07aSHugh Dickins 53231dbd01fSIzik Eidus /* 53331dbd01fSIzik Eidus * Removing rmap_item from stable or unstable tree. 53431dbd01fSIzik Eidus * This function will clean the information from the stable/unstable tree. 
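 *
 * Concretely that means: unhook the rmap_item from its stable node's hlist
 * or erase its node from the unstable rbtree, fix up the pages_shared/
 * pages_sharing or pages_unshared counts, release the anon_vma held while
 * it was in the stable tree, and clear the flag bits so that only the page
 * address is left in rmap_item->address.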
53531dbd01fSIzik Eidus */ 53631dbd01fSIzik Eidus static void remove_rmap_item_from_tree(struct rmap_item *rmap_item) 53731dbd01fSIzik Eidus { 5387b6ba2c7SHugh Dickins if (rmap_item->address & STABLE_FLAG) { 5397b6ba2c7SHugh Dickins struct stable_node *stable_node; 5405ad64688SHugh Dickins struct page *page; 54131dbd01fSIzik Eidus 5427b6ba2c7SHugh Dickins stable_node = rmap_item->head; 5434035c07aSHugh Dickins page = get_ksm_page(stable_node); 5444035c07aSHugh Dickins if (!page) 5454035c07aSHugh Dickins goto out; 5465ad64688SHugh Dickins 5474035c07aSHugh Dickins lock_page(page); 5487b6ba2c7SHugh Dickins hlist_del(&rmap_item->hlist); 5495ad64688SHugh Dickins unlock_page(page); 5505ad64688SHugh Dickins put_page(page); 55108beca44SHugh Dickins 5524035c07aSHugh Dickins if (stable_node->hlist.first) 5534035c07aSHugh Dickins ksm_pages_sharing--; 5544035c07aSHugh Dickins else 555b4028260SHugh Dickins ksm_pages_shared--; 55631dbd01fSIzik Eidus 55776545066SRik van Riel ksm_drop_anon_vma(rmap_item); 55893d17715SHugh Dickins rmap_item->address &= PAGE_MASK; 55931dbd01fSIzik Eidus 5607b6ba2c7SHugh Dickins } else if (rmap_item->address & UNSTABLE_FLAG) { 56131dbd01fSIzik Eidus unsigned char age; 56231dbd01fSIzik Eidus /* 5639ba69294SHugh Dickins * Usually ksmd can and must skip the rb_erase, because 56431dbd01fSIzik Eidus * root_unstable_tree was already reset to RB_ROOT. 5659ba69294SHugh Dickins * But be careful when an mm is exiting: do the rb_erase 5669ba69294SHugh Dickins * if this rmap_item was inserted by this scan, rather 5679ba69294SHugh Dickins * than left over from before. 56831dbd01fSIzik Eidus */ 56931dbd01fSIzik Eidus age = (unsigned char)(ksm_scan.seqnr - rmap_item->address); 570cd551f97SHugh Dickins BUG_ON(age > 1); 57131dbd01fSIzik Eidus if (!age) 57231dbd01fSIzik Eidus rb_erase(&rmap_item->node, &root_unstable_tree); 57331dbd01fSIzik Eidus 57493d17715SHugh Dickins ksm_pages_unshared--; 57531dbd01fSIzik Eidus rmap_item->address &= PAGE_MASK; 57693d17715SHugh Dickins } 5774035c07aSHugh Dickins out: 57831dbd01fSIzik Eidus cond_resched(); /* we're called from many long loops */ 57931dbd01fSIzik Eidus } 58031dbd01fSIzik Eidus 58131dbd01fSIzik Eidus static void remove_trailing_rmap_items(struct mm_slot *mm_slot, 5826514d511SHugh Dickins struct rmap_item **rmap_list) 58331dbd01fSIzik Eidus { 5846514d511SHugh Dickins while (*rmap_list) { 5856514d511SHugh Dickins struct rmap_item *rmap_item = *rmap_list; 5866514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 58731dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 58831dbd01fSIzik Eidus free_rmap_item(rmap_item); 58931dbd01fSIzik Eidus } 59031dbd01fSIzik Eidus } 59131dbd01fSIzik Eidus 59231dbd01fSIzik Eidus /* 59331dbd01fSIzik Eidus * Though it's very tempting to unmerge in_stable_tree(rmap_item)s rather 59431dbd01fSIzik Eidus * than check every pte of a given vma, the locking doesn't quite work for 59531dbd01fSIzik Eidus * that - an rmap_item is assigned to the stable tree after inserting ksm 59631dbd01fSIzik Eidus * page and upping mmap_sem. Nor does it fit with the way we skip dup'ing 59731dbd01fSIzik Eidus * rmap_items from parent to child at fork time (so as not to waste time 59831dbd01fSIzik Eidus * if exit comes before the next scan reaches it). 
59981464e30SHugh Dickins * 60081464e30SHugh Dickins * Similarly, although we'd like to remove rmap_items (so updating counts 60181464e30SHugh Dickins * and freeing memory) when unmerging an area, it's easier to leave that 60281464e30SHugh Dickins * to the next pass of ksmd - consider, for example, how ksmd might be 60381464e30SHugh Dickins * in cmp_and_merge_page on one of the rmap_items we would be removing. 60431dbd01fSIzik Eidus */ 605d952b791SHugh Dickins static int unmerge_ksm_pages(struct vm_area_struct *vma, 60631dbd01fSIzik Eidus unsigned long start, unsigned long end) 60731dbd01fSIzik Eidus { 60831dbd01fSIzik Eidus unsigned long addr; 609d952b791SHugh Dickins int err = 0; 61031dbd01fSIzik Eidus 611d952b791SHugh Dickins for (addr = start; addr < end && !err; addr += PAGE_SIZE) { 6129ba69294SHugh Dickins if (ksm_test_exit(vma->vm_mm)) 6139ba69294SHugh Dickins break; 614d952b791SHugh Dickins if (signal_pending(current)) 615d952b791SHugh Dickins err = -ERESTARTSYS; 616d952b791SHugh Dickins else 617d952b791SHugh Dickins err = break_ksm(vma, addr); 618d952b791SHugh Dickins } 619d952b791SHugh Dickins return err; 62031dbd01fSIzik Eidus } 62131dbd01fSIzik Eidus 6222ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 6232ffd8679SHugh Dickins /* 6242ffd8679SHugh Dickins * Only called through the sysfs control interface: 6252ffd8679SHugh Dickins */ 626d952b791SHugh Dickins static int unmerge_and_remove_all_rmap_items(void) 62731dbd01fSIzik Eidus { 62831dbd01fSIzik Eidus struct mm_slot *mm_slot; 62931dbd01fSIzik Eidus struct mm_struct *mm; 63031dbd01fSIzik Eidus struct vm_area_struct *vma; 631d952b791SHugh Dickins int err = 0; 63231dbd01fSIzik Eidus 633d952b791SHugh Dickins spin_lock(&ksm_mmlist_lock); 6349ba69294SHugh Dickins ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next, 635d952b791SHugh Dickins struct mm_slot, mm_list); 636d952b791SHugh Dickins spin_unlock(&ksm_mmlist_lock); 637d952b791SHugh Dickins 6389ba69294SHugh Dickins for (mm_slot = ksm_scan.mm_slot; 6399ba69294SHugh Dickins mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) { 64031dbd01fSIzik Eidus mm = mm_slot->mm; 64131dbd01fSIzik Eidus down_read(&mm->mmap_sem); 64231dbd01fSIzik Eidus for (vma = mm->mmap; vma; vma = vma->vm_next) { 6439ba69294SHugh Dickins if (ksm_test_exit(mm)) 6449ba69294SHugh Dickins break; 64531dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma) 64631dbd01fSIzik Eidus continue; 647d952b791SHugh Dickins err = unmerge_ksm_pages(vma, 648d952b791SHugh Dickins vma->vm_start, vma->vm_end); 6499ba69294SHugh Dickins if (err) 6509ba69294SHugh Dickins goto error; 651d952b791SHugh Dickins } 6529ba69294SHugh Dickins 6536514d511SHugh Dickins remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list); 65431dbd01fSIzik Eidus 65531dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 6569ba69294SHugh Dickins ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next, 657d952b791SHugh Dickins struct mm_slot, mm_list); 6589ba69294SHugh Dickins if (ksm_test_exit(mm)) { 6599ba69294SHugh Dickins hlist_del(&mm_slot->link); 6609ba69294SHugh Dickins list_del(&mm_slot->mm_list); 66131dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 6629ba69294SHugh Dickins 6639ba69294SHugh Dickins free_mm_slot(mm_slot); 6649ba69294SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 6659ba69294SHugh Dickins up_read(&mm->mmap_sem); 6669ba69294SHugh Dickins mmdrop(mm); 6679ba69294SHugh Dickins } else { 6689ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 6699ba69294SHugh Dickins up_read(&mm->mmap_sem); 6709ba69294SHugh Dickins } 
67131dbd01fSIzik Eidus } 67231dbd01fSIzik Eidus 673d952b791SHugh Dickins ksm_scan.seqnr = 0; 6749ba69294SHugh Dickins return 0; 6759ba69294SHugh Dickins 6769ba69294SHugh Dickins error: 6779ba69294SHugh Dickins up_read(&mm->mmap_sem); 678d952b791SHugh Dickins spin_lock(&ksm_mmlist_lock); 679d952b791SHugh Dickins ksm_scan.mm_slot = &ksm_mm_head; 680d952b791SHugh Dickins spin_unlock(&ksm_mmlist_lock); 681d952b791SHugh Dickins return err; 682d952b791SHugh Dickins } 6832ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 684d952b791SHugh Dickins 68531dbd01fSIzik Eidus static u32 calc_checksum(struct page *page) 68631dbd01fSIzik Eidus { 68731dbd01fSIzik Eidus u32 checksum; 68831dbd01fSIzik Eidus void *addr = kmap_atomic(page, KM_USER0); 68931dbd01fSIzik Eidus checksum = jhash2(addr, PAGE_SIZE / 4, 17); 69031dbd01fSIzik Eidus kunmap_atomic(addr, KM_USER0); 69131dbd01fSIzik Eidus return checksum; 69231dbd01fSIzik Eidus } 69331dbd01fSIzik Eidus 69431dbd01fSIzik Eidus static int memcmp_pages(struct page *page1, struct page *page2) 69531dbd01fSIzik Eidus { 69631dbd01fSIzik Eidus char *addr1, *addr2; 69731dbd01fSIzik Eidus int ret; 69831dbd01fSIzik Eidus 69931dbd01fSIzik Eidus addr1 = kmap_atomic(page1, KM_USER0); 70031dbd01fSIzik Eidus addr2 = kmap_atomic(page2, KM_USER1); 70131dbd01fSIzik Eidus ret = memcmp(addr1, addr2, PAGE_SIZE); 70231dbd01fSIzik Eidus kunmap_atomic(addr2, KM_USER1); 70331dbd01fSIzik Eidus kunmap_atomic(addr1, KM_USER0); 70431dbd01fSIzik Eidus return ret; 70531dbd01fSIzik Eidus } 70631dbd01fSIzik Eidus 70731dbd01fSIzik Eidus static inline int pages_identical(struct page *page1, struct page *page2) 70831dbd01fSIzik Eidus { 70931dbd01fSIzik Eidus return !memcmp_pages(page1, page2); 71031dbd01fSIzik Eidus } 71131dbd01fSIzik Eidus 71231dbd01fSIzik Eidus static int write_protect_page(struct vm_area_struct *vma, struct page *page, 71331dbd01fSIzik Eidus pte_t *orig_pte) 71431dbd01fSIzik Eidus { 71531dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 71631dbd01fSIzik Eidus unsigned long addr; 71731dbd01fSIzik Eidus pte_t *ptep; 71831dbd01fSIzik Eidus spinlock_t *ptl; 71931dbd01fSIzik Eidus int swapped; 72031dbd01fSIzik Eidus int err = -EFAULT; 72131dbd01fSIzik Eidus 72231dbd01fSIzik Eidus addr = page_address_in_vma(page, vma); 72331dbd01fSIzik Eidus if (addr == -EFAULT) 72431dbd01fSIzik Eidus goto out; 72531dbd01fSIzik Eidus 72629ad768cSAndrea Arcangeli BUG_ON(PageTransCompound(page)); 72731dbd01fSIzik Eidus ptep = page_check_address(page, mm, addr, &ptl, 0); 72831dbd01fSIzik Eidus if (!ptep) 72931dbd01fSIzik Eidus goto out; 73031dbd01fSIzik Eidus 7314e31635cSHugh Dickins if (pte_write(*ptep) || pte_dirty(*ptep)) { 73231dbd01fSIzik Eidus pte_t entry; 73331dbd01fSIzik Eidus 73431dbd01fSIzik Eidus swapped = PageSwapCache(page); 73531dbd01fSIzik Eidus flush_cache_page(vma, addr, page_to_pfn(page)); 73631dbd01fSIzik Eidus /* 73731dbd01fSIzik Eidus * Ok this is tricky, when get_user_pages_fast() run it doesnt 73831dbd01fSIzik Eidus * take any lock, therefore the check that we are going to make 73931dbd01fSIzik Eidus * with the pagecount against the mapcount is racey and 74031dbd01fSIzik Eidus * O_DIRECT can happen right after the check. 74131dbd01fSIzik Eidus * So we clear the pte and flush the tlb before the check 74231dbd01fSIzik Eidus * this assure us that no O_DIRECT can happen after the check 74331dbd01fSIzik Eidus * or in the middle of the check. 
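		 *
		 * For example, an anonymous page mapped only by this pte and
		 * not in swap cache should show page_count() == 2 here: one
		 * reference for the mapping and one held by our caller, which
		 * found the page via follow_page(FOLL_GET).  Any extra
		 * reference (say, a transient gup/O_DIRECT user) makes the
		 * sums disagree, and we restore the pte and give up.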
74431dbd01fSIzik Eidus */ 74531dbd01fSIzik Eidus entry = ptep_clear_flush(vma, addr, ptep); 74631dbd01fSIzik Eidus /* 74731dbd01fSIzik Eidus * Check that no O_DIRECT or similar I/O is in progress on the 74831dbd01fSIzik Eidus * page 74931dbd01fSIzik Eidus */ 75031e855eaSHugh Dickins if (page_mapcount(page) + 1 + swapped != page_count(page)) { 751cb532375SRobin Holt set_pte_at(mm, addr, ptep, entry); 75231dbd01fSIzik Eidus goto out_unlock; 75331dbd01fSIzik Eidus } 7544e31635cSHugh Dickins if (pte_dirty(entry)) 7554e31635cSHugh Dickins set_page_dirty(page); 7564e31635cSHugh Dickins entry = pte_mkclean(pte_wrprotect(entry)); 75731dbd01fSIzik Eidus set_pte_at_notify(mm, addr, ptep, entry); 75831dbd01fSIzik Eidus } 75931dbd01fSIzik Eidus *orig_pte = *ptep; 76031dbd01fSIzik Eidus err = 0; 76131dbd01fSIzik Eidus 76231dbd01fSIzik Eidus out_unlock: 76331dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 76431dbd01fSIzik Eidus out: 76531dbd01fSIzik Eidus return err; 76631dbd01fSIzik Eidus } 76731dbd01fSIzik Eidus 76831dbd01fSIzik Eidus /** 76931dbd01fSIzik Eidus * replace_page - replace page in vma by new ksm page 7708dd3557aSHugh Dickins * @vma: vma that holds the pte pointing to page 7718dd3557aSHugh Dickins * @page: the page we are replacing by kpage 7728dd3557aSHugh Dickins * @kpage: the ksm page we replace page by 77331dbd01fSIzik Eidus * @orig_pte: the original value of the pte 77431dbd01fSIzik Eidus * 77531dbd01fSIzik Eidus * Returns 0 on success, -EFAULT on failure. 77631dbd01fSIzik Eidus */ 7778dd3557aSHugh Dickins static int replace_page(struct vm_area_struct *vma, struct page *page, 7788dd3557aSHugh Dickins struct page *kpage, pte_t orig_pte) 77931dbd01fSIzik Eidus { 78031dbd01fSIzik Eidus struct mm_struct *mm = vma->vm_mm; 78131dbd01fSIzik Eidus pgd_t *pgd; 78231dbd01fSIzik Eidus pud_t *pud; 78331dbd01fSIzik Eidus pmd_t *pmd; 78431dbd01fSIzik Eidus pte_t *ptep; 78531dbd01fSIzik Eidus spinlock_t *ptl; 78631dbd01fSIzik Eidus unsigned long addr; 78731dbd01fSIzik Eidus int err = -EFAULT; 78831dbd01fSIzik Eidus 7898dd3557aSHugh Dickins addr = page_address_in_vma(page, vma); 79031dbd01fSIzik Eidus if (addr == -EFAULT) 79131dbd01fSIzik Eidus goto out; 79231dbd01fSIzik Eidus 79331dbd01fSIzik Eidus pgd = pgd_offset(mm, addr); 79431dbd01fSIzik Eidus if (!pgd_present(*pgd)) 79531dbd01fSIzik Eidus goto out; 79631dbd01fSIzik Eidus 79731dbd01fSIzik Eidus pud = pud_offset(pgd, addr); 79831dbd01fSIzik Eidus if (!pud_present(*pud)) 79931dbd01fSIzik Eidus goto out; 80031dbd01fSIzik Eidus 80131dbd01fSIzik Eidus pmd = pmd_offset(pud, addr); 80229ad768cSAndrea Arcangeli BUG_ON(pmd_trans_huge(*pmd)); 80331dbd01fSIzik Eidus if (!pmd_present(*pmd)) 80431dbd01fSIzik Eidus goto out; 80531dbd01fSIzik Eidus 80631dbd01fSIzik Eidus ptep = pte_offset_map_lock(mm, pmd, addr, &ptl); 80731dbd01fSIzik Eidus if (!pte_same(*ptep, orig_pte)) { 80831dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 80931dbd01fSIzik Eidus goto out; 81031dbd01fSIzik Eidus } 81131dbd01fSIzik Eidus 8128dd3557aSHugh Dickins get_page(kpage); 8135ad64688SHugh Dickins page_add_anon_rmap(kpage, vma, addr); 81431dbd01fSIzik Eidus 81531dbd01fSIzik Eidus flush_cache_page(vma, addr, pte_pfn(*ptep)); 81631dbd01fSIzik Eidus ptep_clear_flush(vma, addr, ptep); 8178dd3557aSHugh Dickins set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot)); 81831dbd01fSIzik Eidus 8198dd3557aSHugh Dickins page_remove_rmap(page); 820ae52a2adSHugh Dickins if (!page_mapped(page)) 821ae52a2adSHugh Dickins try_to_free_swap(page); 8228dd3557aSHugh Dickins put_page(page); 
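
	/*
	 * The faulting address now maps kpage instead: page_remove_rmap()
	 * took the old page out of the reverse map, the put_page() above
	 * dropped the reference its pte mapping represented, and if that was
	 * its last mapping we have also tried to free its swap slot.
	 */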
82331dbd01fSIzik Eidus 82431dbd01fSIzik Eidus pte_unmap_unlock(ptep, ptl); 82531dbd01fSIzik Eidus err = 0; 82631dbd01fSIzik Eidus out: 82731dbd01fSIzik Eidus return err; 82831dbd01fSIzik Eidus } 82931dbd01fSIzik Eidus 83029ad768cSAndrea Arcangeli static int page_trans_compound_anon_split(struct page *page) 83129ad768cSAndrea Arcangeli { 83229ad768cSAndrea Arcangeli int ret = 0; 83329ad768cSAndrea Arcangeli struct page *transhuge_head = page_trans_compound_anon(page); 83429ad768cSAndrea Arcangeli if (transhuge_head) { 83529ad768cSAndrea Arcangeli /* Get the reference on the head to split it. */ 83629ad768cSAndrea Arcangeli if (get_page_unless_zero(transhuge_head)) { 83729ad768cSAndrea Arcangeli /* 83829ad768cSAndrea Arcangeli * Recheck we got the reference while the head 83929ad768cSAndrea Arcangeli * was still anonymous. 84029ad768cSAndrea Arcangeli */ 84129ad768cSAndrea Arcangeli if (PageAnon(transhuge_head)) 84229ad768cSAndrea Arcangeli ret = split_huge_page(transhuge_head); 84329ad768cSAndrea Arcangeli else 84429ad768cSAndrea Arcangeli /* 84529ad768cSAndrea Arcangeli * Retry later if split_huge_page run 84629ad768cSAndrea Arcangeli * from under us. 84729ad768cSAndrea Arcangeli */ 84829ad768cSAndrea Arcangeli ret = 1; 84929ad768cSAndrea Arcangeli put_page(transhuge_head); 85029ad768cSAndrea Arcangeli } else 85129ad768cSAndrea Arcangeli /* Retry later if split_huge_page run from under us. */ 85229ad768cSAndrea Arcangeli ret = 1; 85329ad768cSAndrea Arcangeli } 85429ad768cSAndrea Arcangeli return ret; 85529ad768cSAndrea Arcangeli } 85629ad768cSAndrea Arcangeli 85731dbd01fSIzik Eidus /* 85831dbd01fSIzik Eidus * try_to_merge_one_page - take two pages and merge them into one 8598dd3557aSHugh Dickins * @vma: the vma that holds the pte pointing to page 8608dd3557aSHugh Dickins * @page: the PageAnon page that we want to replace with kpage 86180e14822SHugh Dickins * @kpage: the PageKsm page that we want to map instead of page, 86280e14822SHugh Dickins * or NULL the first time when we want to use page as kpage. 86331dbd01fSIzik Eidus * 86431dbd01fSIzik Eidus * This function returns 0 if the pages were merged, -EFAULT otherwise. 86531dbd01fSIzik Eidus */ 86631dbd01fSIzik Eidus static int try_to_merge_one_page(struct vm_area_struct *vma, 8678dd3557aSHugh Dickins struct page *page, struct page *kpage) 86831dbd01fSIzik Eidus { 86931dbd01fSIzik Eidus pte_t orig_pte = __pte(0); 87031dbd01fSIzik Eidus int err = -EFAULT; 87131dbd01fSIzik Eidus 872db114b83SHugh Dickins if (page == kpage) /* ksm page forked */ 873db114b83SHugh Dickins return 0; 874db114b83SHugh Dickins 87531dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 87631dbd01fSIzik Eidus goto out; 87729ad768cSAndrea Arcangeli if (PageTransCompound(page) && page_trans_compound_anon_split(page)) 87829ad768cSAndrea Arcangeli goto out; 87929ad768cSAndrea Arcangeli BUG_ON(PageTransCompound(page)); 8808dd3557aSHugh Dickins if (!PageAnon(page)) 88131dbd01fSIzik Eidus goto out; 88231dbd01fSIzik Eidus 88331dbd01fSIzik Eidus /* 88431dbd01fSIzik Eidus * We need the page lock to read a stable PageSwapCache in 88531dbd01fSIzik Eidus * write_protect_page(). We use trylock_page() instead of 88631dbd01fSIzik Eidus * lock_page() because we don't want to wait here - we 88731dbd01fSIzik Eidus * prefer to continue scanning and merging different pages, 88831dbd01fSIzik Eidus * then come back to this page when it is unlocked. 
88931dbd01fSIzik Eidus */ 8908dd3557aSHugh Dickins if (!trylock_page(page)) 89131e855eaSHugh Dickins goto out; 89231dbd01fSIzik Eidus /* 89331dbd01fSIzik Eidus * If this anonymous page is mapped only here, its pte may need 89431dbd01fSIzik Eidus * to be write-protected. If it's mapped elsewhere, all of its 89531dbd01fSIzik Eidus * ptes are necessarily already write-protected. But in either 89631dbd01fSIzik Eidus * case, we need to lock and check page_count is not raised. 89731dbd01fSIzik Eidus */ 89880e14822SHugh Dickins if (write_protect_page(vma, page, &orig_pte) == 0) { 89980e14822SHugh Dickins if (!kpage) { 90080e14822SHugh Dickins /* 90180e14822SHugh Dickins * While we hold page lock, upgrade page from 90280e14822SHugh Dickins * PageAnon+anon_vma to PageKsm+NULL stable_node: 90380e14822SHugh Dickins * stable_tree_insert() will update stable_node. 90480e14822SHugh Dickins */ 90580e14822SHugh Dickins set_page_stable_node(page, NULL); 90680e14822SHugh Dickins mark_page_accessed(page); 90780e14822SHugh Dickins err = 0; 90880e14822SHugh Dickins } else if (pages_identical(page, kpage)) 9098dd3557aSHugh Dickins err = replace_page(vma, page, kpage, orig_pte); 91080e14822SHugh Dickins } 91131dbd01fSIzik Eidus 91280e14822SHugh Dickins if ((vma->vm_flags & VM_LOCKED) && kpage && !err) { 91373848b46SHugh Dickins munlock_vma_page(page); 9145ad64688SHugh Dickins if (!PageMlocked(kpage)) { 9155ad64688SHugh Dickins unlock_page(page); 9165ad64688SHugh Dickins lock_page(kpage); 9175ad64688SHugh Dickins mlock_vma_page(kpage); 9185ad64688SHugh Dickins page = kpage; /* for final unlock */ 9195ad64688SHugh Dickins } 9205ad64688SHugh Dickins } 92173848b46SHugh Dickins 9228dd3557aSHugh Dickins unlock_page(page); 92331dbd01fSIzik Eidus out: 92431dbd01fSIzik Eidus return err; 92531dbd01fSIzik Eidus } 92631dbd01fSIzik Eidus 92731dbd01fSIzik Eidus /* 92881464e30SHugh Dickins * try_to_merge_with_ksm_page - like try_to_merge_two_pages, 92981464e30SHugh Dickins * but no new kernel page is allocated: kpage must already be a ksm page. 9308dd3557aSHugh Dickins * 9318dd3557aSHugh Dickins * This function returns 0 if the pages were merged, -EFAULT otherwise. 
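 *
 * (The one exception to "kpage must already be a ksm page": when called from
 * try_to_merge_two_pages() with kpage == NULL, try_to_merge_one_page()
 * upgrades page itself into the ksm page.)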
93281464e30SHugh Dickins */ 9338dd3557aSHugh Dickins static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item, 9348dd3557aSHugh Dickins struct page *page, struct page *kpage) 93581464e30SHugh Dickins { 9368dd3557aSHugh Dickins struct mm_struct *mm = rmap_item->mm; 93781464e30SHugh Dickins struct vm_area_struct *vma; 93881464e30SHugh Dickins int err = -EFAULT; 93981464e30SHugh Dickins 9408dd3557aSHugh Dickins down_read(&mm->mmap_sem); 9418dd3557aSHugh Dickins if (ksm_test_exit(mm)) 9428dd3557aSHugh Dickins goto out; 9438dd3557aSHugh Dickins vma = find_vma(mm, rmap_item->address); 9448dd3557aSHugh Dickins if (!vma || vma->vm_start > rmap_item->address) 9459ba69294SHugh Dickins goto out; 9469ba69294SHugh Dickins 9478dd3557aSHugh Dickins err = try_to_merge_one_page(vma, page, kpage); 948db114b83SHugh Dickins if (err) 949db114b83SHugh Dickins goto out; 950db114b83SHugh Dickins 951db114b83SHugh Dickins /* Must get reference to anon_vma while still holding mmap_sem */ 952db114b83SHugh Dickins hold_anon_vma(rmap_item, vma->anon_vma); 95381464e30SHugh Dickins out: 9548dd3557aSHugh Dickins up_read(&mm->mmap_sem); 95581464e30SHugh Dickins return err; 95681464e30SHugh Dickins } 95781464e30SHugh Dickins 95881464e30SHugh Dickins /* 95931dbd01fSIzik Eidus * try_to_merge_two_pages - take two identical pages and prepare them 96031dbd01fSIzik Eidus * to be merged into one page. 96131dbd01fSIzik Eidus * 9628dd3557aSHugh Dickins * This function returns the kpage if we successfully merged two identical 9638dd3557aSHugh Dickins * pages into one ksm page, NULL otherwise. 96431dbd01fSIzik Eidus * 96580e14822SHugh Dickins * Note that this function upgrades page to ksm page: if one of the pages 96631dbd01fSIzik Eidus * is already a ksm page, try_to_merge_with_ksm_page should be used. 96731dbd01fSIzik Eidus */ 9688dd3557aSHugh Dickins static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, 9698dd3557aSHugh Dickins struct page *page, 9708dd3557aSHugh Dickins struct rmap_item *tree_rmap_item, 9718dd3557aSHugh Dickins struct page *tree_page) 97231dbd01fSIzik Eidus { 97380e14822SHugh Dickins int err; 97431dbd01fSIzik Eidus 97580e14822SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, NULL); 97631dbd01fSIzik Eidus if (!err) { 9778dd3557aSHugh Dickins err = try_to_merge_with_ksm_page(tree_rmap_item, 97880e14822SHugh Dickins tree_page, page); 97931dbd01fSIzik Eidus /* 98081464e30SHugh Dickins * If that fails, we have a ksm page with only one pte 98181464e30SHugh Dickins * pointing to it: so break it. 98231dbd01fSIzik Eidus */ 9834035c07aSHugh Dickins if (err) 9848dd3557aSHugh Dickins break_cow(rmap_item); 98531dbd01fSIzik Eidus } 98680e14822SHugh Dickins return err ? NULL : page; 98731dbd01fSIzik Eidus } 98831dbd01fSIzik Eidus 98931dbd01fSIzik Eidus /* 9908dd3557aSHugh Dickins * stable_tree_search - search for page inside the stable tree 99131dbd01fSIzik Eidus * 99231dbd01fSIzik Eidus * This function checks if there is a page inside the stable tree 99331dbd01fSIzik Eidus * with identical content to the page that we are scanning right now. 99431dbd01fSIzik Eidus * 9957b6ba2c7SHugh Dickins * This function returns the stable tree node of identical content if found, 99631dbd01fSIzik Eidus * NULL otherwise. 
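 *
 * Note that what is handed back is the ksm page itself, looked up via that
 * stable tree node, with a reference already taken for the caller: the
 * caller is expected to put_page() it when finished, as cmp_and_merge_page()
 * does.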
99731dbd01fSIzik Eidus */ 99862b61f61SHugh Dickins static struct page *stable_tree_search(struct page *page) 99931dbd01fSIzik Eidus { 100031dbd01fSIzik Eidus struct rb_node *node = root_stable_tree.rb_node; 10017b6ba2c7SHugh Dickins struct stable_node *stable_node; 100231dbd01fSIzik Eidus 100308beca44SHugh Dickins stable_node = page_stable_node(page); 100408beca44SHugh Dickins if (stable_node) { /* ksm page forked */ 100508beca44SHugh Dickins get_page(page); 100662b61f61SHugh Dickins return page; 100708beca44SHugh Dickins } 100808beca44SHugh Dickins 100931dbd01fSIzik Eidus while (node) { 10104035c07aSHugh Dickins struct page *tree_page; 101131dbd01fSIzik Eidus int ret; 101231dbd01fSIzik Eidus 101331dbd01fSIzik Eidus cond_resched(); 101408beca44SHugh Dickins stable_node = rb_entry(node, struct stable_node, node); 10154035c07aSHugh Dickins tree_page = get_ksm_page(stable_node); 10164035c07aSHugh Dickins if (!tree_page) 10174035c07aSHugh Dickins return NULL; 101831dbd01fSIzik Eidus 10194035c07aSHugh Dickins ret = memcmp_pages(page, tree_page); 102031dbd01fSIzik Eidus 10214035c07aSHugh Dickins if (ret < 0) { 10224035c07aSHugh Dickins put_page(tree_page); 102331dbd01fSIzik Eidus node = node->rb_left; 10244035c07aSHugh Dickins } else if (ret > 0) { 10254035c07aSHugh Dickins put_page(tree_page); 102631dbd01fSIzik Eidus node = node->rb_right; 10274035c07aSHugh Dickins } else 102862b61f61SHugh Dickins return tree_page; 102931dbd01fSIzik Eidus } 103031dbd01fSIzik Eidus 103131dbd01fSIzik Eidus return NULL; 103231dbd01fSIzik Eidus } 103331dbd01fSIzik Eidus 103431dbd01fSIzik Eidus /* 103531dbd01fSIzik Eidus * stable_tree_insert - insert rmap_item pointing to new ksm page 103631dbd01fSIzik Eidus * into the stable tree. 103731dbd01fSIzik Eidus * 10387b6ba2c7SHugh Dickins * This function returns the stable tree node just allocated on success, 10397b6ba2c7SHugh Dickins * NULL otherwise. 104031dbd01fSIzik Eidus */ 10417b6ba2c7SHugh Dickins static struct stable_node *stable_tree_insert(struct page *kpage) 104231dbd01fSIzik Eidus { 104331dbd01fSIzik Eidus struct rb_node **new = &root_stable_tree.rb_node; 104431dbd01fSIzik Eidus struct rb_node *parent = NULL; 10457b6ba2c7SHugh Dickins struct stable_node *stable_node; 104631dbd01fSIzik Eidus 104731dbd01fSIzik Eidus while (*new) { 10484035c07aSHugh Dickins struct page *tree_page; 104931dbd01fSIzik Eidus int ret; 105031dbd01fSIzik Eidus 105131dbd01fSIzik Eidus cond_resched(); 105208beca44SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 10534035c07aSHugh Dickins tree_page = get_ksm_page(stable_node); 10544035c07aSHugh Dickins if (!tree_page) 10554035c07aSHugh Dickins return NULL; 105631dbd01fSIzik Eidus 10574035c07aSHugh Dickins ret = memcmp_pages(kpage, tree_page); 10584035c07aSHugh Dickins put_page(tree_page); 105931dbd01fSIzik Eidus 106031dbd01fSIzik Eidus parent = *new; 106131dbd01fSIzik Eidus if (ret < 0) 106231dbd01fSIzik Eidus new = &parent->rb_left; 106331dbd01fSIzik Eidus else if (ret > 0) 106431dbd01fSIzik Eidus new = &parent->rb_right; 106531dbd01fSIzik Eidus else { 106631dbd01fSIzik Eidus /* 106731dbd01fSIzik Eidus * It is not a bug that stable_tree_search() didn't 106831dbd01fSIzik Eidus * find this node: because at that time our page was 106931dbd01fSIzik Eidus * not yet write-protected, so may have changed since. 
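			 *
			 * Returning NULL simply means the page is not
			 * inserted into the stable tree on this pass; the
			 * area will come around again on a later scan.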
107031dbd01fSIzik Eidus */ 107131dbd01fSIzik Eidus return NULL; 107231dbd01fSIzik Eidus } 107331dbd01fSIzik Eidus } 107431dbd01fSIzik Eidus 10757b6ba2c7SHugh Dickins stable_node = alloc_stable_node(); 10767b6ba2c7SHugh Dickins if (!stable_node) 10777b6ba2c7SHugh Dickins return NULL; 107831dbd01fSIzik Eidus 10797b6ba2c7SHugh Dickins rb_link_node(&stable_node->node, parent, new); 10807b6ba2c7SHugh Dickins rb_insert_color(&stable_node->node, &root_stable_tree); 10817b6ba2c7SHugh Dickins 10827b6ba2c7SHugh Dickins INIT_HLIST_HEAD(&stable_node->hlist); 10837b6ba2c7SHugh Dickins 108462b61f61SHugh Dickins stable_node->kpfn = page_to_pfn(kpage); 108508beca44SHugh Dickins set_page_stable_node(kpage, stable_node); 108608beca44SHugh Dickins 10877b6ba2c7SHugh Dickins return stable_node; 108831dbd01fSIzik Eidus } 108931dbd01fSIzik Eidus 109031dbd01fSIzik Eidus /* 10918dd3557aSHugh Dickins * unstable_tree_search_insert - search for identical page, 10928dd3557aSHugh Dickins * else insert rmap_item into the unstable tree. 109331dbd01fSIzik Eidus * 109431dbd01fSIzik Eidus * This function searches for a page in the unstable tree identical to the 109531dbd01fSIzik Eidus * page currently being scanned; and if no identical page is found in the 109631dbd01fSIzik Eidus * tree, we insert rmap_item as a new object into the unstable tree. 109731dbd01fSIzik Eidus * 109831dbd01fSIzik Eidus * This function returns pointer to rmap_item found to be identical 109931dbd01fSIzik Eidus * to the currently scanned page, NULL otherwise. 110031dbd01fSIzik Eidus * 110131dbd01fSIzik Eidus * This function does both searching and inserting, because they share 110231dbd01fSIzik Eidus * the same walking algorithm in an rbtree. 110331dbd01fSIzik Eidus */ 11048dd3557aSHugh Dickins static 11058dd3557aSHugh Dickins struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, 11068dd3557aSHugh Dickins struct page *page, 11078dd3557aSHugh Dickins struct page **tree_pagep) 11088dd3557aSHugh Dickins 110931dbd01fSIzik Eidus { 111031dbd01fSIzik Eidus struct rb_node **new = &root_unstable_tree.rb_node; 111131dbd01fSIzik Eidus struct rb_node *parent = NULL; 111231dbd01fSIzik Eidus 111331dbd01fSIzik Eidus while (*new) { 111431dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 11158dd3557aSHugh Dickins struct page *tree_page; 111631dbd01fSIzik Eidus int ret; 111731dbd01fSIzik Eidus 1118d178f27fSHugh Dickins cond_resched(); 111931dbd01fSIzik Eidus tree_rmap_item = rb_entry(*new, struct rmap_item, node); 11208dd3557aSHugh Dickins tree_page = get_mergeable_page(tree_rmap_item); 112122eccdd7SDan Carpenter if (IS_ERR_OR_NULL(tree_page)) 112231dbd01fSIzik Eidus return NULL; 112331dbd01fSIzik Eidus 112431dbd01fSIzik Eidus /* 11258dd3557aSHugh Dickins * Don't substitute a ksm page for a forked page. 
112631dbd01fSIzik Eidus */ 11278dd3557aSHugh Dickins if (page == tree_page) { 11288dd3557aSHugh Dickins put_page(tree_page); 112931dbd01fSIzik Eidus return NULL; 113031dbd01fSIzik Eidus } 113131dbd01fSIzik Eidus 11328dd3557aSHugh Dickins ret = memcmp_pages(page, tree_page); 113331dbd01fSIzik Eidus 113431dbd01fSIzik Eidus parent = *new; 113531dbd01fSIzik Eidus if (ret < 0) { 11368dd3557aSHugh Dickins put_page(tree_page); 113731dbd01fSIzik Eidus new = &parent->rb_left; 113831dbd01fSIzik Eidus } else if (ret > 0) { 11398dd3557aSHugh Dickins put_page(tree_page); 114031dbd01fSIzik Eidus new = &parent->rb_right; 114131dbd01fSIzik Eidus } else { 11428dd3557aSHugh Dickins *tree_pagep = tree_page; 114331dbd01fSIzik Eidus return tree_rmap_item; 114431dbd01fSIzik Eidus } 114531dbd01fSIzik Eidus } 114631dbd01fSIzik Eidus 11477b6ba2c7SHugh Dickins rmap_item->address |= UNSTABLE_FLAG; 114831dbd01fSIzik Eidus rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 114931dbd01fSIzik Eidus rb_link_node(&rmap_item->node, parent, new); 115031dbd01fSIzik Eidus rb_insert_color(&rmap_item->node, &root_unstable_tree); 115131dbd01fSIzik Eidus 1152473b0ce4SHugh Dickins ksm_pages_unshared++; 115331dbd01fSIzik Eidus return NULL; 115431dbd01fSIzik Eidus } 115531dbd01fSIzik Eidus 115631dbd01fSIzik Eidus /* 115731dbd01fSIzik Eidus * stable_tree_append - add another rmap_item to the linked list of 115831dbd01fSIzik Eidus * rmap_items hanging off a given node of the stable tree, all sharing 115931dbd01fSIzik Eidus * the same ksm page. 116031dbd01fSIzik Eidus */ 116131dbd01fSIzik Eidus static void stable_tree_append(struct rmap_item *rmap_item, 11627b6ba2c7SHugh Dickins struct stable_node *stable_node) 116331dbd01fSIzik Eidus { 11647b6ba2c7SHugh Dickins rmap_item->head = stable_node; 116531dbd01fSIzik Eidus rmap_item->address |= STABLE_FLAG; 11667b6ba2c7SHugh Dickins hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 1167e178dfdeSHugh Dickins 11687b6ba2c7SHugh Dickins if (rmap_item->hlist.next) 1169e178dfdeSHugh Dickins ksm_pages_sharing++; 11707b6ba2c7SHugh Dickins else 11717b6ba2c7SHugh Dickins ksm_pages_shared++; 117231dbd01fSIzik Eidus } 117331dbd01fSIzik Eidus 117431dbd01fSIzik Eidus /* 117581464e30SHugh Dickins * cmp_and_merge_page - first see if page can be merged into the stable tree; 117681464e30SHugh Dickins * if not, compare checksum to previous and if it's the same, see if page can 117781464e30SHugh Dickins * be inserted into the unstable tree, or merged with a page already there and 117881464e30SHugh Dickins * both transferred to the stable tree. 117931dbd01fSIzik Eidus * 118031dbd01fSIzik Eidus * @page: the page that we are searching identical page to. 
118131dbd01fSIzik Eidus * @rmap_item: the reverse mapping into the virtual address of this page 118231dbd01fSIzik Eidus */ 118331dbd01fSIzik Eidus static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) 118431dbd01fSIzik Eidus { 118531dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 11868dd3557aSHugh Dickins struct page *tree_page = NULL; 11877b6ba2c7SHugh Dickins struct stable_node *stable_node; 11888dd3557aSHugh Dickins struct page *kpage; 118931dbd01fSIzik Eidus unsigned int checksum; 119031dbd01fSIzik Eidus int err; 119131dbd01fSIzik Eidus 119231dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 119331dbd01fSIzik Eidus 119431dbd01fSIzik Eidus /* We first start with searching the page inside the stable tree */ 119562b61f61SHugh Dickins kpage = stable_tree_search(page); 119662b61f61SHugh Dickins if (kpage) { 119708beca44SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, kpage); 119831dbd01fSIzik Eidus if (!err) { 119931dbd01fSIzik Eidus /* 120031dbd01fSIzik Eidus * The page was successfully merged: 120131dbd01fSIzik Eidus * add its rmap_item to the stable tree. 120231dbd01fSIzik Eidus */ 12035ad64688SHugh Dickins lock_page(kpage); 120462b61f61SHugh Dickins stable_tree_append(rmap_item, page_stable_node(kpage)); 12055ad64688SHugh Dickins unlock_page(kpage); 120631dbd01fSIzik Eidus } 12078dd3557aSHugh Dickins put_page(kpage); 120831dbd01fSIzik Eidus return; 120931dbd01fSIzik Eidus } 121031dbd01fSIzik Eidus 121131dbd01fSIzik Eidus /* 12124035c07aSHugh Dickins * If the hash value of the page has changed from the last time 12134035c07aSHugh Dickins * we calculated it, this page is changing frequently: therefore we 12144035c07aSHugh Dickins * don't want to insert it in the unstable tree, and we don't want 12154035c07aSHugh Dickins * to waste our time searching for something identical to it there. 121631dbd01fSIzik Eidus */ 121731dbd01fSIzik Eidus checksum = calc_checksum(page); 121831dbd01fSIzik Eidus if (rmap_item->oldchecksum != checksum) { 121931dbd01fSIzik Eidus rmap_item->oldchecksum = checksum; 122031dbd01fSIzik Eidus return; 122131dbd01fSIzik Eidus } 122231dbd01fSIzik Eidus 12238dd3557aSHugh Dickins tree_rmap_item = 12248dd3557aSHugh Dickins unstable_tree_search_insert(rmap_item, page, &tree_page); 122531dbd01fSIzik Eidus if (tree_rmap_item) { 12268dd3557aSHugh Dickins kpage = try_to_merge_two_pages(rmap_item, page, 12278dd3557aSHugh Dickins tree_rmap_item, tree_page); 12288dd3557aSHugh Dickins put_page(tree_page); 122931dbd01fSIzik Eidus /* 123031dbd01fSIzik Eidus * As soon as we merge this page, we want to remove the 123131dbd01fSIzik Eidus * rmap_item of the page we have merged with from the unstable 123231dbd01fSIzik Eidus * tree, and insert it instead as new node in the stable tree. 
123331dbd01fSIzik Eidus */ 12348dd3557aSHugh Dickins if (kpage) { 123593d17715SHugh Dickins remove_rmap_item_from_tree(tree_rmap_item); 1236473b0ce4SHugh Dickins 12375ad64688SHugh Dickins lock_page(kpage); 12387b6ba2c7SHugh Dickins stable_node = stable_tree_insert(kpage); 12397b6ba2c7SHugh Dickins if (stable_node) { 12407b6ba2c7SHugh Dickins stable_tree_append(tree_rmap_item, stable_node); 12417b6ba2c7SHugh Dickins stable_tree_append(rmap_item, stable_node); 12427b6ba2c7SHugh Dickins } 12435ad64688SHugh Dickins unlock_page(kpage); 12447b6ba2c7SHugh Dickins 124531dbd01fSIzik Eidus /* 124631dbd01fSIzik Eidus * If we fail to insert the page into the stable tree, 124731dbd01fSIzik Eidus * we will have 2 virtual addresses that are pointing 124831dbd01fSIzik Eidus * to a ksm page left outside the stable tree, 124931dbd01fSIzik Eidus * in which case we need to break_cow on both. 125031dbd01fSIzik Eidus */ 12517b6ba2c7SHugh Dickins if (!stable_node) { 12528dd3557aSHugh Dickins break_cow(tree_rmap_item); 12538dd3557aSHugh Dickins break_cow(rmap_item); 125431dbd01fSIzik Eidus } 125531dbd01fSIzik Eidus } 125631dbd01fSIzik Eidus } 125731dbd01fSIzik Eidus } 125831dbd01fSIzik Eidus 125931dbd01fSIzik Eidus static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, 12606514d511SHugh Dickins struct rmap_item **rmap_list, 126131dbd01fSIzik Eidus unsigned long addr) 126231dbd01fSIzik Eidus { 126331dbd01fSIzik Eidus struct rmap_item *rmap_item; 126431dbd01fSIzik Eidus 12656514d511SHugh Dickins while (*rmap_list) { 12666514d511SHugh Dickins rmap_item = *rmap_list; 126793d17715SHugh Dickins if ((rmap_item->address & PAGE_MASK) == addr) 126831dbd01fSIzik Eidus return rmap_item; 126931dbd01fSIzik Eidus if (rmap_item->address > addr) 127031dbd01fSIzik Eidus break; 12716514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 127231dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 127331dbd01fSIzik Eidus free_rmap_item(rmap_item); 127431dbd01fSIzik Eidus } 127531dbd01fSIzik Eidus 127631dbd01fSIzik Eidus rmap_item = alloc_rmap_item(); 127731dbd01fSIzik Eidus if (rmap_item) { 127831dbd01fSIzik Eidus /* It has already been zeroed */ 127931dbd01fSIzik Eidus rmap_item->mm = mm_slot->mm; 128031dbd01fSIzik Eidus rmap_item->address = addr; 12816514d511SHugh Dickins rmap_item->rmap_list = *rmap_list; 12826514d511SHugh Dickins *rmap_list = rmap_item; 128331dbd01fSIzik Eidus } 128431dbd01fSIzik Eidus return rmap_item; 128531dbd01fSIzik Eidus } 128631dbd01fSIzik Eidus 128731dbd01fSIzik Eidus static struct rmap_item *scan_get_next_rmap_item(struct page **page) 128831dbd01fSIzik Eidus { 128931dbd01fSIzik Eidus struct mm_struct *mm; 129031dbd01fSIzik Eidus struct mm_slot *slot; 129131dbd01fSIzik Eidus struct vm_area_struct *vma; 129231dbd01fSIzik Eidus struct rmap_item *rmap_item; 129331dbd01fSIzik Eidus 129431dbd01fSIzik Eidus if (list_empty(&ksm_mm_head.mm_list)) 129531dbd01fSIzik Eidus return NULL; 129631dbd01fSIzik Eidus 129731dbd01fSIzik Eidus slot = ksm_scan.mm_slot; 129831dbd01fSIzik Eidus if (slot == &ksm_mm_head) { 1299*2919bfd0SHugh Dickins /* 1300*2919bfd0SHugh Dickins * A number of pages can hang around indefinitely on per-cpu 1301*2919bfd0SHugh Dickins * pagevecs, raised page count preventing write_protect_page 1302*2919bfd0SHugh Dickins * from merging them. 
Though it doesn't really matter much, 1303*2919bfd0SHugh Dickins * it is puzzling to see some stuck in pages_volatile until 1304*2919bfd0SHugh Dickins * other activity jostles them out, and they also prevented 1305*2919bfd0SHugh Dickins * LTP's KSM test from succeeding deterministically; so drain 1306*2919bfd0SHugh Dickins * them here (here rather than on entry to ksm_do_scan(), 1307*2919bfd0SHugh Dickins * so we don't IPI too often when pages_to_scan is set low). 1308*2919bfd0SHugh Dickins */ 1309*2919bfd0SHugh Dickins lru_add_drain_all(); 1310*2919bfd0SHugh Dickins 131131dbd01fSIzik Eidus root_unstable_tree = RB_ROOT; 131231dbd01fSIzik Eidus 131331dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 131431dbd01fSIzik Eidus slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 131531dbd01fSIzik Eidus ksm_scan.mm_slot = slot; 131631dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 131731dbd01fSIzik Eidus next_mm: 131831dbd01fSIzik Eidus ksm_scan.address = 0; 13196514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 132031dbd01fSIzik Eidus } 132131dbd01fSIzik Eidus 132231dbd01fSIzik Eidus mm = slot->mm; 132331dbd01fSIzik Eidus down_read(&mm->mmap_sem); 13249ba69294SHugh Dickins if (ksm_test_exit(mm)) 13259ba69294SHugh Dickins vma = NULL; 13269ba69294SHugh Dickins else 13279ba69294SHugh Dickins vma = find_vma(mm, ksm_scan.address); 13289ba69294SHugh Dickins 13299ba69294SHugh Dickins for (; vma; vma = vma->vm_next) { 133031dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 133131dbd01fSIzik Eidus continue; 133231dbd01fSIzik Eidus if (ksm_scan.address < vma->vm_start) 133331dbd01fSIzik Eidus ksm_scan.address = vma->vm_start; 133431dbd01fSIzik Eidus if (!vma->anon_vma) 133531dbd01fSIzik Eidus ksm_scan.address = vma->vm_end; 133631dbd01fSIzik Eidus 133731dbd01fSIzik Eidus while (ksm_scan.address < vma->vm_end) { 13389ba69294SHugh Dickins if (ksm_test_exit(mm)) 13399ba69294SHugh Dickins break; 134031dbd01fSIzik Eidus *page = follow_page(vma, ksm_scan.address, FOLL_GET); 134121ae5b01SAndrea Arcangeli if (IS_ERR_OR_NULL(*page)) { 134221ae5b01SAndrea Arcangeli ksm_scan.address += PAGE_SIZE; 134321ae5b01SAndrea Arcangeli cond_resched(); 134421ae5b01SAndrea Arcangeli continue; 134521ae5b01SAndrea Arcangeli } 134629ad768cSAndrea Arcangeli if (PageAnon(*page) || 134729ad768cSAndrea Arcangeli page_trans_compound_anon(*page)) { 134831dbd01fSIzik Eidus flush_anon_page(vma, *page, ksm_scan.address); 134931dbd01fSIzik Eidus flush_dcache_page(*page); 135031dbd01fSIzik Eidus rmap_item = get_next_rmap_item(slot, 13516514d511SHugh Dickins ksm_scan.rmap_list, ksm_scan.address); 135231dbd01fSIzik Eidus if (rmap_item) { 13536514d511SHugh Dickins ksm_scan.rmap_list = 13546514d511SHugh Dickins &rmap_item->rmap_list; 135531dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 135631dbd01fSIzik Eidus } else 135731dbd01fSIzik Eidus put_page(*page); 135831dbd01fSIzik Eidus up_read(&mm->mmap_sem); 135931dbd01fSIzik Eidus return rmap_item; 136031dbd01fSIzik Eidus } 136131dbd01fSIzik Eidus put_page(*page); 136231dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 136331dbd01fSIzik Eidus cond_resched(); 136431dbd01fSIzik Eidus } 136531dbd01fSIzik Eidus } 136631dbd01fSIzik Eidus 13679ba69294SHugh Dickins if (ksm_test_exit(mm)) { 13689ba69294SHugh Dickins ksm_scan.address = 0; 13696514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 13709ba69294SHugh Dickins } 137131dbd01fSIzik Eidus /* 137231dbd01fSIzik Eidus * Nuke all the rmap_items that are above this current rmap: 137331dbd01fSIzik Eidus * because there were 
no VM_MERGEABLE vmas with such addresses. 137431dbd01fSIzik Eidus */ 13756514d511SHugh Dickins remove_trailing_rmap_items(slot, ksm_scan.rmap_list); 137631dbd01fSIzik Eidus 137731dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 1378cd551f97SHugh Dickins ksm_scan.mm_slot = list_entry(slot->mm_list.next, 1379cd551f97SHugh Dickins struct mm_slot, mm_list); 1380cd551f97SHugh Dickins if (ksm_scan.address == 0) { 1381cd551f97SHugh Dickins /* 1382cd551f97SHugh Dickins * We've completed a full scan of all vmas, holding mmap_sem 1383cd551f97SHugh Dickins * throughout, and found no VM_MERGEABLE: so do the same as 1384cd551f97SHugh Dickins * __ksm_exit does to remove this mm from all our lists now. 13859ba69294SHugh Dickins * This applies either when cleaning up after __ksm_exit 13869ba69294SHugh Dickins * (but beware: we can reach here even before __ksm_exit), 13879ba69294SHugh Dickins * or when all VM_MERGEABLE areas have been unmapped (and 13889ba69294SHugh Dickins * mmap_sem then protects against race with MADV_MERGEABLE). 1389cd551f97SHugh Dickins */ 1390cd551f97SHugh Dickins hlist_del(&slot->link); 1391cd551f97SHugh Dickins list_del(&slot->mm_list); 13929ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 13939ba69294SHugh Dickins 1394cd551f97SHugh Dickins free_mm_slot(slot); 1395cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 13969ba69294SHugh Dickins up_read(&mm->mmap_sem); 13979ba69294SHugh Dickins mmdrop(mm); 13989ba69294SHugh Dickins } else { 139931dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 1400cd551f97SHugh Dickins up_read(&mm->mmap_sem); 14019ba69294SHugh Dickins } 140231dbd01fSIzik Eidus 140331dbd01fSIzik Eidus /* Repeat until we've completed scanning the whole list */ 1404cd551f97SHugh Dickins slot = ksm_scan.mm_slot; 140531dbd01fSIzik Eidus if (slot != &ksm_mm_head) 140631dbd01fSIzik Eidus goto next_mm; 140731dbd01fSIzik Eidus 140831dbd01fSIzik Eidus ksm_scan.seqnr++; 140931dbd01fSIzik Eidus return NULL; 141031dbd01fSIzik Eidus } 141131dbd01fSIzik Eidus 141231dbd01fSIzik Eidus /** 141331dbd01fSIzik Eidus * ksm_do_scan - the ksm scanner main worker function. 141431dbd01fSIzik Eidus * @scan_npages - number of pages we want to scan before we return. 
141531dbd01fSIzik Eidus */ 141631dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages) 141731dbd01fSIzik Eidus { 141831dbd01fSIzik Eidus struct rmap_item *rmap_item; 141922eccdd7SDan Carpenter struct page *uninitialized_var(page); 142031dbd01fSIzik Eidus 1421878aee7dSAndrea Arcangeli while (scan_npages-- && likely(!freezing(current))) { 142231dbd01fSIzik Eidus cond_resched(); 142331dbd01fSIzik Eidus rmap_item = scan_get_next_rmap_item(&page); 142431dbd01fSIzik Eidus if (!rmap_item) 142531dbd01fSIzik Eidus return; 142631dbd01fSIzik Eidus if (!PageKsm(page) || !in_stable_tree(rmap_item)) 142731dbd01fSIzik Eidus cmp_and_merge_page(page, rmap_item); 142831dbd01fSIzik Eidus put_page(page); 142931dbd01fSIzik Eidus } 143031dbd01fSIzik Eidus } 143131dbd01fSIzik Eidus 14326e158384SHugh Dickins static int ksmd_should_run(void) 14336e158384SHugh Dickins { 14346e158384SHugh Dickins return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); 14356e158384SHugh Dickins } 14366e158384SHugh Dickins 143731dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing) 143831dbd01fSIzik Eidus { 1439878aee7dSAndrea Arcangeli set_freezable(); 1440339aa624SIzik Eidus set_user_nice(current, 5); 144131dbd01fSIzik Eidus 144231dbd01fSIzik Eidus while (!kthread_should_stop()) { 144331dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 14446e158384SHugh Dickins if (ksmd_should_run()) 144531dbd01fSIzik Eidus ksm_do_scan(ksm_thread_pages_to_scan); 144631dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 14476e158384SHugh Dickins 1448878aee7dSAndrea Arcangeli try_to_freeze(); 1449878aee7dSAndrea Arcangeli 14506e158384SHugh Dickins if (ksmd_should_run()) { 145131dbd01fSIzik Eidus schedule_timeout_interruptible( 145231dbd01fSIzik Eidus msecs_to_jiffies(ksm_thread_sleep_millisecs)); 145331dbd01fSIzik Eidus } else { 1454878aee7dSAndrea Arcangeli wait_event_freezable(ksm_thread_wait, 14556e158384SHugh Dickins ksmd_should_run() || kthread_should_stop()); 145631dbd01fSIzik Eidus } 145731dbd01fSIzik Eidus } 145831dbd01fSIzik Eidus return 0; 145931dbd01fSIzik Eidus } 146031dbd01fSIzik Eidus 1461f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 1462f8af4da3SHugh Dickins unsigned long end, int advice, unsigned long *vm_flags) 1463f8af4da3SHugh Dickins { 1464f8af4da3SHugh Dickins struct mm_struct *mm = vma->vm_mm; 1465d952b791SHugh Dickins int err; 1466f8af4da3SHugh Dickins 1467f8af4da3SHugh Dickins switch (advice) { 1468f8af4da3SHugh Dickins case MADV_MERGEABLE: 1469f8af4da3SHugh Dickins /* 1470f8af4da3SHugh Dickins * Be somewhat over-protective for now! 
1471f8af4da3SHugh Dickins */ 1472f8af4da3SHugh Dickins if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 1473f8af4da3SHugh Dickins VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1474f8af4da3SHugh Dickins VM_RESERVED | VM_HUGETLB | VM_INSERTPAGE | 14755ad64688SHugh Dickins VM_NONLINEAR | VM_MIXEDMAP | VM_SAO)) 1476f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 1477f8af4da3SHugh Dickins 1478d952b791SHugh Dickins if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 1479d952b791SHugh Dickins err = __ksm_enter(mm); 1480d952b791SHugh Dickins if (err) 1481d952b791SHugh Dickins return err; 1482d952b791SHugh Dickins } 1483f8af4da3SHugh Dickins 1484f8af4da3SHugh Dickins *vm_flags |= VM_MERGEABLE; 1485f8af4da3SHugh Dickins break; 1486f8af4da3SHugh Dickins 1487f8af4da3SHugh Dickins case MADV_UNMERGEABLE: 1488f8af4da3SHugh Dickins if (!(*vm_flags & VM_MERGEABLE)) 1489f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 1490f8af4da3SHugh Dickins 1491d952b791SHugh Dickins if (vma->anon_vma) { 1492d952b791SHugh Dickins err = unmerge_ksm_pages(vma, start, end); 1493d952b791SHugh Dickins if (err) 1494d952b791SHugh Dickins return err; 1495d952b791SHugh Dickins } 1496f8af4da3SHugh Dickins 1497f8af4da3SHugh Dickins *vm_flags &= ~VM_MERGEABLE; 1498f8af4da3SHugh Dickins break; 1499f8af4da3SHugh Dickins } 1500f8af4da3SHugh Dickins 1501f8af4da3SHugh Dickins return 0; 1502f8af4da3SHugh Dickins } 1503f8af4da3SHugh Dickins 1504f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm) 1505f8af4da3SHugh Dickins { 15066e158384SHugh Dickins struct mm_slot *mm_slot; 15076e158384SHugh Dickins int needs_wakeup; 15086e158384SHugh Dickins 15096e158384SHugh Dickins mm_slot = alloc_mm_slot(); 151031dbd01fSIzik Eidus if (!mm_slot) 151131dbd01fSIzik Eidus return -ENOMEM; 151231dbd01fSIzik Eidus 15136e158384SHugh Dickins /* Check ksm_run too? Would need tighter locking */ 15146e158384SHugh Dickins needs_wakeup = list_empty(&ksm_mm_head.mm_list); 15156e158384SHugh Dickins 151631dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 151731dbd01fSIzik Eidus insert_to_mm_slots_hash(mm, mm_slot); 151831dbd01fSIzik Eidus /* 151931dbd01fSIzik Eidus * Insert just behind the scanning cursor, to let the area settle 152031dbd01fSIzik Eidus * down a little; when fork is followed by immediate exec, we don't 152131dbd01fSIzik Eidus * want ksmd to waste time setting up and tearing down an rmap_list. 152231dbd01fSIzik Eidus */ 152331dbd01fSIzik Eidus list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); 152431dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 152531dbd01fSIzik Eidus 1526f8af4da3SHugh Dickins set_bit(MMF_VM_MERGEABLE, &mm->flags); 15279ba69294SHugh Dickins atomic_inc(&mm->mm_count); 15286e158384SHugh Dickins 15296e158384SHugh Dickins if (needs_wakeup) 15306e158384SHugh Dickins wake_up_interruptible(&ksm_thread_wait); 15316e158384SHugh Dickins 1532f8af4da3SHugh Dickins return 0; 1533f8af4da3SHugh Dickins } 1534f8af4da3SHugh Dickins 15351c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm) 1536f8af4da3SHugh Dickins { 1537cd551f97SHugh Dickins struct mm_slot *mm_slot; 15389ba69294SHugh Dickins int easy_to_free = 0; 1539cd551f97SHugh Dickins 154031dbd01fSIzik Eidus /* 15419ba69294SHugh Dickins * This process is exiting: if it's straightforward (as is the 15429ba69294SHugh Dickins * case when ksmd was never running), free mm_slot immediately. 
15439ba69294SHugh Dickins * But if it's at the cursor or has rmap_items linked to it, use 15449ba69294SHugh Dickins * mmap_sem to synchronize with any break_cows before pagetables 15459ba69294SHugh Dickins * are freed, and leave the mm_slot on the list for ksmd to free. 15469ba69294SHugh Dickins * Beware: ksm may already have noticed it exiting and freed the slot. 154731dbd01fSIzik Eidus */ 15489ba69294SHugh Dickins 1549cd551f97SHugh Dickins spin_lock(&ksm_mmlist_lock); 1550cd551f97SHugh Dickins mm_slot = get_mm_slot(mm); 15519ba69294SHugh Dickins if (mm_slot && ksm_scan.mm_slot != mm_slot) { 15526514d511SHugh Dickins if (!mm_slot->rmap_list) { 1553cd551f97SHugh Dickins hlist_del(&mm_slot->link); 1554cd551f97SHugh Dickins list_del(&mm_slot->mm_list); 15559ba69294SHugh Dickins easy_to_free = 1; 15569ba69294SHugh Dickins } else { 15579ba69294SHugh Dickins list_move(&mm_slot->mm_list, 15589ba69294SHugh Dickins &ksm_scan.mm_slot->mm_list); 15599ba69294SHugh Dickins } 15609ba69294SHugh Dickins } 1561cd551f97SHugh Dickins spin_unlock(&ksm_mmlist_lock); 1562cd551f97SHugh Dickins 15639ba69294SHugh Dickins if (easy_to_free) { 1564cd551f97SHugh Dickins free_mm_slot(mm_slot); 1565cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 15669ba69294SHugh Dickins mmdrop(mm); 15679ba69294SHugh Dickins } else if (mm_slot) { 15689ba69294SHugh Dickins down_write(&mm->mmap_sem); 15699ba69294SHugh Dickins up_write(&mm->mmap_sem); 15709ba69294SHugh Dickins } 1571f8af4da3SHugh Dickins } 157231dbd01fSIzik Eidus 15735ad64688SHugh Dickins struct page *ksm_does_need_to_copy(struct page *page, 15745ad64688SHugh Dickins struct vm_area_struct *vma, unsigned long address) 15755ad64688SHugh Dickins { 15765ad64688SHugh Dickins struct page *new_page; 15775ad64688SHugh Dickins 15785ad64688SHugh Dickins new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 15795ad64688SHugh Dickins if (new_page) { 15805ad64688SHugh Dickins copy_user_highpage(new_page, page, address, vma); 15815ad64688SHugh Dickins 15825ad64688SHugh Dickins SetPageDirty(new_page); 15835ad64688SHugh Dickins __SetPageUptodate(new_page); 15845ad64688SHugh Dickins SetPageSwapBacked(new_page); 15855ad64688SHugh Dickins __set_page_locked(new_page); 15865ad64688SHugh Dickins 15875ad64688SHugh Dickins if (page_evictable(new_page, vma)) 15885ad64688SHugh Dickins lru_cache_add_lru(new_page, LRU_ACTIVE_ANON); 15895ad64688SHugh Dickins else 15905ad64688SHugh Dickins add_page_to_unevictable_list(new_page); 15915ad64688SHugh Dickins } 15925ad64688SHugh Dickins 15935ad64688SHugh Dickins return new_page; 15945ad64688SHugh Dickins } 15955ad64688SHugh Dickins 15965ad64688SHugh Dickins int page_referenced_ksm(struct page *page, struct mem_cgroup *memcg, 15975ad64688SHugh Dickins unsigned long *vm_flags) 15985ad64688SHugh Dickins { 15995ad64688SHugh Dickins struct stable_node *stable_node; 16005ad64688SHugh Dickins struct rmap_item *rmap_item; 16015ad64688SHugh Dickins struct hlist_node *hlist; 16025ad64688SHugh Dickins unsigned int mapcount = page_mapcount(page); 16035ad64688SHugh Dickins int referenced = 0; 1604db114b83SHugh Dickins int search_new_forks = 0; 16055ad64688SHugh Dickins 16065ad64688SHugh Dickins VM_BUG_ON(!PageKsm(page)); 16075ad64688SHugh Dickins VM_BUG_ON(!PageLocked(page)); 16085ad64688SHugh Dickins 16095ad64688SHugh Dickins stable_node = page_stable_node(page); 16105ad64688SHugh Dickins if (!stable_node) 16115ad64688SHugh Dickins return 0; 1612db114b83SHugh Dickins again: 16135ad64688SHugh Dickins hlist_for_each_entry(rmap_item, hlist, 
&stable_node->hlist, hlist) { 1614db114b83SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 16155beb4930SRik van Riel struct anon_vma_chain *vmac; 1616db114b83SHugh Dickins struct vm_area_struct *vma; 1617db114b83SHugh Dickins 1618cba48b98SRik van Riel anon_vma_lock(anon_vma); 16195beb4930SRik van Riel list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { 16205beb4930SRik van Riel vma = vmac->vma; 1621db114b83SHugh Dickins if (rmap_item->address < vma->vm_start || 1622db114b83SHugh Dickins rmap_item->address >= vma->vm_end) 1623db114b83SHugh Dickins continue; 1624db114b83SHugh Dickins /* 1625db114b83SHugh Dickins * Initially we examine only the vma which covers this 1626db114b83SHugh Dickins * rmap_item; but later, if there is still work to do, 1627db114b83SHugh Dickins * we examine covering vmas in other mms: in case they 1628db114b83SHugh Dickins * were forked from the original since ksmd passed. 1629db114b83SHugh Dickins */ 1630db114b83SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 16315ad64688SHugh Dickins continue; 16325ad64688SHugh Dickins 1633db114b83SHugh Dickins if (memcg && !mm_match_cgroup(vma->vm_mm, memcg)) 1634db114b83SHugh Dickins continue; 16355ad64688SHugh Dickins 16365ad64688SHugh Dickins referenced += page_referenced_one(page, vma, 16375ad64688SHugh Dickins rmap_item->address, &mapcount, vm_flags); 1638db114b83SHugh Dickins if (!search_new_forks || !mapcount) 1639db114b83SHugh Dickins break; 1640db114b83SHugh Dickins } 1641cba48b98SRik van Riel anon_vma_unlock(anon_vma); 16425ad64688SHugh Dickins if (!mapcount) 16435ad64688SHugh Dickins goto out; 16445ad64688SHugh Dickins } 1645db114b83SHugh Dickins if (!search_new_forks++) 1646db114b83SHugh Dickins goto again; 16475ad64688SHugh Dickins out: 16485ad64688SHugh Dickins return referenced; 16495ad64688SHugh Dickins } 16505ad64688SHugh Dickins 16515ad64688SHugh Dickins int try_to_unmap_ksm(struct page *page, enum ttu_flags flags) 16525ad64688SHugh Dickins { 16535ad64688SHugh Dickins struct stable_node *stable_node; 16545ad64688SHugh Dickins struct hlist_node *hlist; 16555ad64688SHugh Dickins struct rmap_item *rmap_item; 16565ad64688SHugh Dickins int ret = SWAP_AGAIN; 1657db114b83SHugh Dickins int search_new_forks = 0; 16585ad64688SHugh Dickins 16595ad64688SHugh Dickins VM_BUG_ON(!PageKsm(page)); 16605ad64688SHugh Dickins VM_BUG_ON(!PageLocked(page)); 16615ad64688SHugh Dickins 16625ad64688SHugh Dickins stable_node = page_stable_node(page); 16635ad64688SHugh Dickins if (!stable_node) 16645ad64688SHugh Dickins return SWAP_FAIL; 1665db114b83SHugh Dickins again: 16665ad64688SHugh Dickins hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1667db114b83SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 16685beb4930SRik van Riel struct anon_vma_chain *vmac; 1669db114b83SHugh Dickins struct vm_area_struct *vma; 16705ad64688SHugh Dickins 1671cba48b98SRik van Riel anon_vma_lock(anon_vma); 16725beb4930SRik van Riel list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { 16735beb4930SRik van Riel vma = vmac->vma; 1674db114b83SHugh Dickins if (rmap_item->address < vma->vm_start || 1675db114b83SHugh Dickins rmap_item->address >= vma->vm_end) 1676db114b83SHugh Dickins continue; 1677db114b83SHugh Dickins /* 1678db114b83SHugh Dickins * Initially we examine only the vma which covers this 1679db114b83SHugh Dickins * rmap_item; but later, if there is still work to do, 1680db114b83SHugh Dickins * we examine covering vmas in other mms: in case they 1681db114b83SHugh Dickins * 
were forked from the original since ksmd passed. 1682db114b83SHugh Dickins */ 1683db114b83SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 1684db114b83SHugh Dickins continue; 1685db114b83SHugh Dickins 1686db114b83SHugh Dickins ret = try_to_unmap_one(page, vma, 1687db114b83SHugh Dickins rmap_item->address, flags); 1688db114b83SHugh Dickins if (ret != SWAP_AGAIN || !page_mapped(page)) { 1689cba48b98SRik van Riel anon_vma_unlock(anon_vma); 16905ad64688SHugh Dickins goto out; 16915ad64688SHugh Dickins } 1692db114b83SHugh Dickins } 1693cba48b98SRik van Riel anon_vma_unlock(anon_vma); 1694db114b83SHugh Dickins } 1695db114b83SHugh Dickins if (!search_new_forks++) 1696db114b83SHugh Dickins goto again; 16975ad64688SHugh Dickins out: 16985ad64688SHugh Dickins return ret; 16995ad64688SHugh Dickins } 17005ad64688SHugh Dickins 1701e9995ef9SHugh Dickins #ifdef CONFIG_MIGRATION 1702e9995ef9SHugh Dickins int rmap_walk_ksm(struct page *page, int (*rmap_one)(struct page *, 1703e9995ef9SHugh Dickins struct vm_area_struct *, unsigned long, void *), void *arg) 1704e9995ef9SHugh Dickins { 1705e9995ef9SHugh Dickins struct stable_node *stable_node; 1706e9995ef9SHugh Dickins struct hlist_node *hlist; 1707e9995ef9SHugh Dickins struct rmap_item *rmap_item; 1708e9995ef9SHugh Dickins int ret = SWAP_AGAIN; 1709e9995ef9SHugh Dickins int search_new_forks = 0; 1710e9995ef9SHugh Dickins 1711e9995ef9SHugh Dickins VM_BUG_ON(!PageKsm(page)); 1712e9995ef9SHugh Dickins VM_BUG_ON(!PageLocked(page)); 1713e9995ef9SHugh Dickins 1714e9995ef9SHugh Dickins stable_node = page_stable_node(page); 1715e9995ef9SHugh Dickins if (!stable_node) 1716e9995ef9SHugh Dickins return ret; 1717e9995ef9SHugh Dickins again: 1718e9995ef9SHugh Dickins hlist_for_each_entry(rmap_item, hlist, &stable_node->hlist, hlist) { 1719e9995ef9SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 17205beb4930SRik van Riel struct anon_vma_chain *vmac; 1721e9995ef9SHugh Dickins struct vm_area_struct *vma; 1722e9995ef9SHugh Dickins 1723cba48b98SRik van Riel anon_vma_lock(anon_vma); 17245beb4930SRik van Riel list_for_each_entry(vmac, &anon_vma->head, same_anon_vma) { 17255beb4930SRik van Riel vma = vmac->vma; 1726e9995ef9SHugh Dickins if (rmap_item->address < vma->vm_start || 1727e9995ef9SHugh Dickins rmap_item->address >= vma->vm_end) 1728e9995ef9SHugh Dickins continue; 1729e9995ef9SHugh Dickins /* 1730e9995ef9SHugh Dickins * Initially we examine only the vma which covers this 1731e9995ef9SHugh Dickins * rmap_item; but later, if there is still work to do, 1732e9995ef9SHugh Dickins * we examine covering vmas in other mms: in case they 1733e9995ef9SHugh Dickins * were forked from the original since ksmd passed. 
1734e9995ef9SHugh Dickins */ 1735e9995ef9SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 1736e9995ef9SHugh Dickins continue; 1737e9995ef9SHugh Dickins 1738e9995ef9SHugh Dickins ret = rmap_one(page, vma, rmap_item->address, arg); 1739e9995ef9SHugh Dickins if (ret != SWAP_AGAIN) { 1740cba48b98SRik van Riel anon_vma_unlock(anon_vma); 1741e9995ef9SHugh Dickins goto out; 1742e9995ef9SHugh Dickins } 1743e9995ef9SHugh Dickins } 1744cba48b98SRik van Riel anon_vma_unlock(anon_vma); 1745e9995ef9SHugh Dickins } 1746e9995ef9SHugh Dickins if (!search_new_forks++) 1747e9995ef9SHugh Dickins goto again; 1748e9995ef9SHugh Dickins out: 1749e9995ef9SHugh Dickins return ret; 1750e9995ef9SHugh Dickins } 1751e9995ef9SHugh Dickins 1752e9995ef9SHugh Dickins void ksm_migrate_page(struct page *newpage, struct page *oldpage) 1753e9995ef9SHugh Dickins { 1754e9995ef9SHugh Dickins struct stable_node *stable_node; 1755e9995ef9SHugh Dickins 1756e9995ef9SHugh Dickins VM_BUG_ON(!PageLocked(oldpage)); 1757e9995ef9SHugh Dickins VM_BUG_ON(!PageLocked(newpage)); 1758e9995ef9SHugh Dickins VM_BUG_ON(newpage->mapping != oldpage->mapping); 1759e9995ef9SHugh Dickins 1760e9995ef9SHugh Dickins stable_node = page_stable_node(newpage); 1761e9995ef9SHugh Dickins if (stable_node) { 176262b61f61SHugh Dickins VM_BUG_ON(stable_node->kpfn != page_to_pfn(oldpage)); 176362b61f61SHugh Dickins stable_node->kpfn = page_to_pfn(newpage); 1764e9995ef9SHugh Dickins } 1765e9995ef9SHugh Dickins } 1766e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */ 1767e9995ef9SHugh Dickins 176862b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 176962b61f61SHugh Dickins static struct stable_node *ksm_check_stable_tree(unsigned long start_pfn, 177062b61f61SHugh Dickins unsigned long end_pfn) 177162b61f61SHugh Dickins { 177262b61f61SHugh Dickins struct rb_node *node; 177362b61f61SHugh Dickins 177462b61f61SHugh Dickins for (node = rb_first(&root_stable_tree); node; node = rb_next(node)) { 177562b61f61SHugh Dickins struct stable_node *stable_node; 177662b61f61SHugh Dickins 177762b61f61SHugh Dickins stable_node = rb_entry(node, struct stable_node, node); 177862b61f61SHugh Dickins if (stable_node->kpfn >= start_pfn && 177962b61f61SHugh Dickins stable_node->kpfn < end_pfn) 178062b61f61SHugh Dickins return stable_node; 178162b61f61SHugh Dickins } 178262b61f61SHugh Dickins return NULL; 178362b61f61SHugh Dickins } 178462b61f61SHugh Dickins 178562b61f61SHugh Dickins static int ksm_memory_callback(struct notifier_block *self, 178662b61f61SHugh Dickins unsigned long action, void *arg) 178762b61f61SHugh Dickins { 178862b61f61SHugh Dickins struct memory_notify *mn = arg; 178962b61f61SHugh Dickins struct stable_node *stable_node; 179062b61f61SHugh Dickins 179162b61f61SHugh Dickins switch (action) { 179262b61f61SHugh Dickins case MEM_GOING_OFFLINE: 179362b61f61SHugh Dickins /* 179462b61f61SHugh Dickins * Keep it very simple for now: just lock out ksmd and 179562b61f61SHugh Dickins * MADV_UNMERGEABLE while any memory is going offline. 1796a0b0f58cSKOSAKI Motohiro * mutex_lock_nested() is necessary because lockdep was alarmed 1797a0b0f58cSKOSAKI Motohiro * that here we take ksm_thread_mutex inside notifier chain 1798a0b0f58cSKOSAKI Motohiro * mutex, and later take notifier chain mutex inside 1799a0b0f58cSKOSAKI Motohiro * ksm_thread_mutex to unlock it. But that's safe because both 1800a0b0f58cSKOSAKI Motohiro * are inside mem_hotplug_mutex. 
180162b61f61SHugh Dickins */ 1802a0b0f58cSKOSAKI Motohiro mutex_lock_nested(&ksm_thread_mutex, SINGLE_DEPTH_NESTING); 180362b61f61SHugh Dickins break; 180462b61f61SHugh Dickins 180562b61f61SHugh Dickins case MEM_OFFLINE: 180662b61f61SHugh Dickins /* 180762b61f61SHugh Dickins * Most of the work is done by page migration; but there might 180862b61f61SHugh Dickins * be a few stable_nodes left over, still pointing to struct 180962b61f61SHugh Dickins * pages which have been offlined: prune those from the tree. 181062b61f61SHugh Dickins */ 181162b61f61SHugh Dickins while ((stable_node = ksm_check_stable_tree(mn->start_pfn, 181262b61f61SHugh Dickins mn->start_pfn + mn->nr_pages)) != NULL) 181362b61f61SHugh Dickins remove_node_from_stable_tree(stable_node); 181462b61f61SHugh Dickins /* fallthrough */ 181562b61f61SHugh Dickins 181662b61f61SHugh Dickins case MEM_CANCEL_OFFLINE: 181762b61f61SHugh Dickins mutex_unlock(&ksm_thread_mutex); 181862b61f61SHugh Dickins break; 181962b61f61SHugh Dickins } 182062b61f61SHugh Dickins return NOTIFY_OK; 182162b61f61SHugh Dickins } 182262b61f61SHugh Dickins #endif /* CONFIG_MEMORY_HOTREMOVE */ 182362b61f61SHugh Dickins 18242ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 18252ffd8679SHugh Dickins /* 18262ffd8679SHugh Dickins * This all compiles without CONFIG_SYSFS, but is a waste of space. 18272ffd8679SHugh Dickins */ 18282ffd8679SHugh Dickins 182931dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \ 183031dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 183131dbd01fSIzik Eidus #define KSM_ATTR(_name) \ 183231dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = \ 183331dbd01fSIzik Eidus __ATTR(_name, 0644, _name##_show, _name##_store) 183431dbd01fSIzik Eidus 183531dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj, 183631dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 183731dbd01fSIzik Eidus { 183831dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); 183931dbd01fSIzik Eidus } 184031dbd01fSIzik Eidus 184131dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj, 184231dbd01fSIzik Eidus struct kobj_attribute *attr, 184331dbd01fSIzik Eidus const char *buf, size_t count) 184431dbd01fSIzik Eidus { 184531dbd01fSIzik Eidus unsigned long msecs; 184631dbd01fSIzik Eidus int err; 184731dbd01fSIzik Eidus 184831dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &msecs); 184931dbd01fSIzik Eidus if (err || msecs > UINT_MAX) 185031dbd01fSIzik Eidus return -EINVAL; 185131dbd01fSIzik Eidus 185231dbd01fSIzik Eidus ksm_thread_sleep_millisecs = msecs; 185331dbd01fSIzik Eidus 185431dbd01fSIzik Eidus return count; 185531dbd01fSIzik Eidus } 185631dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs); 185731dbd01fSIzik Eidus 185831dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj, 185931dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 186031dbd01fSIzik Eidus { 186131dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); 186231dbd01fSIzik Eidus } 186331dbd01fSIzik Eidus 186431dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj, 186531dbd01fSIzik Eidus struct kobj_attribute *attr, 186631dbd01fSIzik Eidus const char *buf, size_t count) 186731dbd01fSIzik Eidus { 186831dbd01fSIzik Eidus int err; 186931dbd01fSIzik Eidus unsigned long nr_pages; 187031dbd01fSIzik Eidus 187131dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &nr_pages); 187231dbd01fSIzik Eidus if (err || nr_pages > UINT_MAX) 187331dbd01fSIzik Eidus return -EINVAL; 
187431dbd01fSIzik Eidus 187531dbd01fSIzik Eidus ksm_thread_pages_to_scan = nr_pages; 187631dbd01fSIzik Eidus 187731dbd01fSIzik Eidus return count; 187831dbd01fSIzik Eidus } 187931dbd01fSIzik Eidus KSM_ATTR(pages_to_scan); 188031dbd01fSIzik Eidus 188131dbd01fSIzik Eidus static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 188231dbd01fSIzik Eidus char *buf) 188331dbd01fSIzik Eidus { 188431dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_run); 188531dbd01fSIzik Eidus } 188631dbd01fSIzik Eidus 188731dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 188831dbd01fSIzik Eidus const char *buf, size_t count) 188931dbd01fSIzik Eidus { 189031dbd01fSIzik Eidus int err; 189131dbd01fSIzik Eidus unsigned long flags; 189231dbd01fSIzik Eidus 189331dbd01fSIzik Eidus err = strict_strtoul(buf, 10, &flags); 189431dbd01fSIzik Eidus if (err || flags > UINT_MAX) 189531dbd01fSIzik Eidus return -EINVAL; 189631dbd01fSIzik Eidus if (flags > KSM_RUN_UNMERGE) 189731dbd01fSIzik Eidus return -EINVAL; 189831dbd01fSIzik Eidus 189931dbd01fSIzik Eidus /* 190031dbd01fSIzik Eidus * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 190131dbd01fSIzik Eidus * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 1902d0f209f6SHugh Dickins * breaking COW to free the pages_shared (but leaves mm_slots 1903d0f209f6SHugh Dickins * on the list for when ksmd may be set running again). 190431dbd01fSIzik Eidus */ 190531dbd01fSIzik Eidus 190631dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 190731dbd01fSIzik Eidus if (ksm_run != flags) { 190831dbd01fSIzik Eidus ksm_run = flags; 1909d952b791SHugh Dickins if (flags & KSM_RUN_UNMERGE) { 191035451beeSHugh Dickins current->flags |= PF_OOM_ORIGIN; 1911d952b791SHugh Dickins err = unmerge_and_remove_all_rmap_items(); 191235451beeSHugh Dickins current->flags &= ~PF_OOM_ORIGIN; 1913d952b791SHugh Dickins if (err) { 1914d952b791SHugh Dickins ksm_run = KSM_RUN_STOP; 1915d952b791SHugh Dickins count = err; 1916d952b791SHugh Dickins } 1917d952b791SHugh Dickins } 191831dbd01fSIzik Eidus } 191931dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 192031dbd01fSIzik Eidus 192131dbd01fSIzik Eidus if (flags & KSM_RUN_MERGE) 192231dbd01fSIzik Eidus wake_up_interruptible(&ksm_thread_wait); 192331dbd01fSIzik Eidus 192431dbd01fSIzik Eidus return count; 192531dbd01fSIzik Eidus } 192631dbd01fSIzik Eidus KSM_ATTR(run); 192731dbd01fSIzik Eidus 1928b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj, 1929b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 1930b4028260SHugh Dickins { 1931b4028260SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_shared); 1932b4028260SHugh Dickins } 1933b4028260SHugh Dickins KSM_ATTR_RO(pages_shared); 1934b4028260SHugh Dickins 1935b4028260SHugh Dickins static ssize_t pages_sharing_show(struct kobject *kobj, 1936b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 1937b4028260SHugh Dickins { 1938e178dfdeSHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_sharing); 1939b4028260SHugh Dickins } 1940b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing); 1941b4028260SHugh Dickins 1942473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj, 1943473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 1944473b0ce4SHugh Dickins { 1945473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_unshared); 1946473b0ce4SHugh Dickins } 1947473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared); 1948473b0ce4SHugh Dickins 1949473b0ce4SHugh Dickins static ssize_t 
pages_volatile_show(struct kobject *kobj, 1950473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 1951473b0ce4SHugh Dickins { 1952473b0ce4SHugh Dickins long ksm_pages_volatile; 1953473b0ce4SHugh Dickins 1954473b0ce4SHugh Dickins ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 1955473b0ce4SHugh Dickins - ksm_pages_sharing - ksm_pages_unshared; 1956473b0ce4SHugh Dickins /* 1957473b0ce4SHugh Dickins * It was not worth any locking to calculate that statistic, 1958473b0ce4SHugh Dickins * but it might therefore sometimes be negative: conceal that. 1959473b0ce4SHugh Dickins */ 1960473b0ce4SHugh Dickins if (ksm_pages_volatile < 0) 1961473b0ce4SHugh Dickins ksm_pages_volatile = 0; 1962473b0ce4SHugh Dickins return sprintf(buf, "%ld\n", ksm_pages_volatile); 1963473b0ce4SHugh Dickins } 1964473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile); 1965473b0ce4SHugh Dickins 1966473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj, 1967473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 1968473b0ce4SHugh Dickins { 1969473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_scan.seqnr); 1970473b0ce4SHugh Dickins } 1971473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans); 1972473b0ce4SHugh Dickins 197331dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = { 197431dbd01fSIzik Eidus &sleep_millisecs_attr.attr, 197531dbd01fSIzik Eidus &pages_to_scan_attr.attr, 197631dbd01fSIzik Eidus &run_attr.attr, 1977b4028260SHugh Dickins &pages_shared_attr.attr, 1978b4028260SHugh Dickins &pages_sharing_attr.attr, 1979473b0ce4SHugh Dickins &pages_unshared_attr.attr, 1980473b0ce4SHugh Dickins &pages_volatile_attr.attr, 1981473b0ce4SHugh Dickins &full_scans_attr.attr, 198231dbd01fSIzik Eidus NULL, 198331dbd01fSIzik Eidus }; 198431dbd01fSIzik Eidus 198531dbd01fSIzik Eidus static struct attribute_group ksm_attr_group = { 198631dbd01fSIzik Eidus .attrs = ksm_attrs, 198731dbd01fSIzik Eidus .name = "ksm", 198831dbd01fSIzik Eidus }; 19892ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 199031dbd01fSIzik Eidus 199131dbd01fSIzik Eidus static int __init ksm_init(void) 199231dbd01fSIzik Eidus { 199331dbd01fSIzik Eidus struct task_struct *ksm_thread; 199431dbd01fSIzik Eidus int err; 199531dbd01fSIzik Eidus 199631dbd01fSIzik Eidus err = ksm_slab_init(); 199731dbd01fSIzik Eidus if (err) 199831dbd01fSIzik Eidus goto out; 199931dbd01fSIzik Eidus 200031dbd01fSIzik Eidus ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 200131dbd01fSIzik Eidus if (IS_ERR(ksm_thread)) { 200231dbd01fSIzik Eidus printk(KERN_ERR "ksm: creating kthread failed\n"); 200331dbd01fSIzik Eidus err = PTR_ERR(ksm_thread); 2004d9f8984cSLai Jiangshan goto out_free; 200531dbd01fSIzik Eidus } 200631dbd01fSIzik Eidus 20072ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 200831dbd01fSIzik Eidus err = sysfs_create_group(mm_kobj, &ksm_attr_group); 200931dbd01fSIzik Eidus if (err) { 201031dbd01fSIzik Eidus printk(KERN_ERR "ksm: register sysfs failed\n"); 20112ffd8679SHugh Dickins kthread_stop(ksm_thread); 2012d9f8984cSLai Jiangshan goto out_free; 201331dbd01fSIzik Eidus } 2014c73602adSHugh Dickins #else 2015c73602adSHugh Dickins ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 2016c73602adSHugh Dickins 20172ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 201831dbd01fSIzik Eidus 201962b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 202062b61f61SHugh Dickins /* 202162b61f61SHugh Dickins * Choose a high priority since the callback takes ksm_thread_mutex: 202262b61f61SHugh Dickins * later callbacks could only be taking locks which nest 
within that. 202362b61f61SHugh Dickins */ 202462b61f61SHugh Dickins hotplug_memory_notifier(ksm_memory_callback, 100); 202562b61f61SHugh Dickins #endif 202631dbd01fSIzik Eidus return 0; 202731dbd01fSIzik Eidus 2028d9f8984cSLai Jiangshan out_free: 202931dbd01fSIzik Eidus ksm_slab_free(); 203031dbd01fSIzik Eidus out: 203131dbd01fSIzik Eidus return err; 203231dbd01fSIzik Eidus } 203331dbd01fSIzik Eidus module_init(ksm_init) 2034