/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork()
 *
 * Copyright (C) 2008-2009 Red Hat, Inc.
 * Authors:
 *	Izik Eidus
 *	Andrea Arcangeli
 *	Chris Wright
 *	Hugh Dickins
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>
#include <linux/spinlock.h>
#include <linux/jhash.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/memory.h>
#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/ksm.h>
#include <linux/hashtable.h>
#include <linux/freezer.h>
#include <linux/oom.h>
#include <linux/numa.h>

#include <asm/tlbflush.h>
#include "internal.h"

#ifdef CONFIG_NUMA
#define NUMA(x)		(x)
#define DO_NUMA(x)	do { (x); } while (0)
#else
#define NUMA(x)		(0)
#define DO_NUMA(x)	do { } while (0)
#endif

/*
 * A few notes about the KSM scanning process,
 * to make it easier to understand the data structures below:
 *
 * In order to reduce excessive scanning, KSM sorts the memory pages by their
 * contents into a data structure that holds pointers to the pages' locations.
 *
 * Since the contents of the pages may change at any moment, KSM cannot just
 * insert the pages into a normal sorted tree and expect it to find anything.
 * Therefore KSM uses two data structures - the stable and the unstable tree.
 *
 * The stable tree holds pointers to all the merged pages (ksm pages), sorted
 * by their contents.  Because each such page is write-protected, searching on
 * this tree is fully assured to be working (except when pages are unmapped),
 * and therefore this tree is called the stable tree.
 *
 * In addition to the stable tree, KSM uses a second data structure called the
 * unstable tree: this tree holds pointers to pages which have been found to
 * be "unchanged for a period of time".  The unstable tree sorts these pages
 * by their contents, but since they are not write-protected, KSM cannot rely
 * upon the unstable tree to work correctly - the unstable tree is liable to
 * be corrupted as its contents are modified, and so it is called unstable.
 *
 * KSM solves this problem by several techniques:
 *
 * 1) The unstable tree is flushed every time KSM completes scanning all
 *    memory areas, and then the tree is rebuilt again from the beginning.
 * 2) KSM will only insert into the unstable tree, pages whose hash value
 *    has not changed since the previous scan of all memory areas.
 * 3) The unstable tree is a red-black tree - so its balancing is based on the
 *    colors of the nodes and not on their contents, assuring that even when
 *    the tree gets "corrupted" it won't get out of balance, so scanning time
 *    remains the same (also, searching and inserting nodes in an rbtree uses
 *    the same algorithm, so we have no overhead when we flush and rebuild).
 * 4) KSM never flushes the stable tree, which means that even if it were to
 *    take 10 attempts to find a page in the unstable tree, once it is found,
 *    it is secured in the stable tree.  (When we scan a new page, we first
 *    compare it against the stable tree, and then against the unstable tree.)
 *
 * If the merge_across_nodes tunable is unset, then KSM maintains multiple
 * stable trees and multiple unstable trees: one of each for each NUMA node.
 */
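
/*
 * For orientation, a minimal userspace sketch of how an area becomes
 * eligible for the scanning described above (illustrative only, not part
 * of the kernel code in this file): the application opts the area in with
 * madvise(2), which is what ends up setting VM_MERGEABLE on the vma, and
 * ksmd is then controlled through /sys/kernel/mm/ksm/ (see ksm_run and the
 * ksm_thread_* tunables below).
 *
 *	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	if (buf != MAP_FAILED)
 *		madvise(buf, len, MADV_MERGEABLE);
 *
 * Writing 1 to /sys/kernel/mm/ksm/run then lets ksmd merge identical pages
 * in such areas into single write-protected ksm pages.
 */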

/**
 * struct mm_slot - ksm information per mm that is being scanned
 * @link: link to the mm_slots hash list
 * @mm_list: link into the mm_slots list, rooted in ksm_mm_head
 * @rmap_list: head for this mm_slot's singly-linked list of rmap_items
 * @mm: the mm that this information is valid for
 */
struct mm_slot {
	struct hlist_node link;
	struct list_head mm_list;
	struct rmap_item *rmap_list;
	struct mm_struct *mm;
};

/**
 * struct ksm_scan - cursor for scanning
 * @mm_slot: the current mm_slot we are scanning
 * @address: the next address inside that to be scanned
 * @rmap_list: link to the next rmap to be scanned in the rmap_list
 * @seqnr: count of completed full scans (needed when removing unstable node)
 *
 * There is only the one ksm_scan instance of this cursor structure.
 */
struct ksm_scan {
	struct mm_slot *mm_slot;
	unsigned long address;
	struct rmap_item **rmap_list;
	unsigned long seqnr;
};

/**
 * struct stable_node - node of the stable rbtree
 * @node: rb node of this ksm page in the stable tree
 * @head: (overlaying parent) &migrate_nodes indicates temporarily on that list
 * @list: linked into migrate_nodes, pending placement in the proper node tree
 * @hlist: hlist head of rmap_items using this ksm page
 * @kpfn: page frame number of this ksm page (perhaps temporarily on wrong nid)
 * @nid: NUMA node id of stable tree in which linked (may not match kpfn)
 */
struct stable_node {
	union {
		struct rb_node node;	/* when node of stable tree */
		struct {		/* when listed for migration */
			struct list_head *head;
			struct list_head list;
		};
	};
	struct hlist_head hlist;
	unsigned long kpfn;
#ifdef CONFIG_NUMA
	int nid;
#endif
};

/**
 * struct rmap_item - reverse mapping item for virtual addresses
 * @rmap_list: next rmap_item in mm_slot's singly-linked rmap_list
 * @anon_vma: pointer to anon_vma for this mm,address, when in stable tree
 * @nid: NUMA node id of unstable tree in which linked (may not match page)
 * @mm: the memory structure this rmap_item is pointing into
 * @address: the virtual address this rmap_item tracks (+ flags in low bits)
 * @oldchecksum: previous checksum of the page at that virtual address
 * @node: rb node of this rmap_item in the unstable tree
 * @head: pointer to stable_node heading this list in the stable tree
 * @hlist: link into hlist of rmap_items hanging off that stable_node
 */
struct rmap_item {
	struct rmap_item *rmap_list;
	union {
		struct anon_vma *anon_vma;	/* when stable */
#ifdef CONFIG_NUMA
		int nid;		/* when node of unstable tree */
#endif
	};
	struct mm_struct *mm;
	unsigned long address;		/* + low bits used for flags below */
	unsigned int oldchecksum;	/* when unstable */
	union {
		struct rb_node node;	/* when node of unstable tree */
		struct {		/* when listed from stable tree */
			struct stable_node *head;
			struct hlist_node hlist;
		};
	};
};
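
/*
 * Note on the flags below: they live in the low bits of rmap_item->address,
 * so the page-aligned virtual address is recovered with (address & PAGE_MASK).
 * SEQNR_MASK holds the scan seqnr at which an unstable item was inserted
 * (used to compute its "age"), UNSTABLE_FLAG marks a node of the unstable
 * tree, and STABLE_FLAG marks an item hanging off a stable_node.
 */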

#define SEQNR_MASK	0x0ff	/* low bits of unstable tree seqnr */
#define UNSTABLE_FLAG	0x100	/* is a node of the unstable tree */
#define STABLE_FLAG	0x200	/* is listed from the stable tree */

/* The stable and unstable tree heads */
static struct rb_root one_stable_tree[1] = { RB_ROOT };
static struct rb_root one_unstable_tree[1] = { RB_ROOT };
static struct rb_root *root_stable_tree = one_stable_tree;
static struct rb_root *root_unstable_tree = one_unstable_tree;

/* Recently migrated nodes of stable tree, pending proper placement */
static LIST_HEAD(migrate_nodes);

#define MM_SLOTS_HASH_BITS 10
static DEFINE_HASHTABLE(mm_slots_hash, MM_SLOTS_HASH_BITS);

static struct mm_slot ksm_mm_head = {
	.mm_list = LIST_HEAD_INIT(ksm_mm_head.mm_list),
};
static struct ksm_scan ksm_scan = {
	.mm_slot = &ksm_mm_head,
};

static struct kmem_cache *rmap_item_cache;
static struct kmem_cache *stable_node_cache;
static struct kmem_cache *mm_slot_cache;

/* The number of nodes in the stable tree */
static unsigned long ksm_pages_shared;

/* The number of page slots additionally sharing those nodes */
static unsigned long ksm_pages_sharing;

/* The number of nodes in the unstable tree */
static unsigned long ksm_pages_unshared;

/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

/* Milliseconds ksmd should sleep between batches */
static unsigned int ksm_thread_sleep_millisecs = 20;

#ifdef CONFIG_NUMA
/* Zeroed when merging across nodes is not allowed */
static unsigned int ksm_merge_across_nodes = 1;
static int ksm_nr_node_ids = 1;
#else
#define ksm_merge_across_nodes	1U
#define ksm_nr_node_ids		1
#endif

#define KSM_RUN_STOP	0
#define KSM_RUN_MERGE	1
#define KSM_RUN_UNMERGE	2
#define KSM_RUN_OFFLINE	4
static unsigned long ksm_run = KSM_RUN_STOP;
static void wait_while_offlining(void);

static DECLARE_WAIT_QUEUE_HEAD(ksm_thread_wait);
static DEFINE_MUTEX(ksm_thread_mutex);
static DEFINE_SPINLOCK(ksm_mmlist_lock);

#define KSM_KMEM_CACHE(__struct, __flags) kmem_cache_create("ksm_"#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

static int __init ksm_slab_init(void)
{
	rmap_item_cache = KSM_KMEM_CACHE(rmap_item, 0);
	if (!rmap_item_cache)
		goto out;

	stable_node_cache = KSM_KMEM_CACHE(stable_node, 0);
	if (!stable_node_cache)
		goto out_free1;

	mm_slot_cache = KSM_KMEM_CACHE(mm_slot, 0);
	if (!mm_slot_cache)
		goto out_free2;

	return 0;

out_free2:
	kmem_cache_destroy(stable_node_cache);
out_free1:
	kmem_cache_destroy(rmap_item_cache);
out:
	return -ENOMEM;
}

static void __init ksm_slab_free(void)
{
	kmem_cache_destroy(mm_slot_cache);
	kmem_cache_destroy(stable_node_cache);
	kmem_cache_destroy(rmap_item_cache);
	mm_slot_cache = NULL;
}

static inline struct rmap_item *alloc_rmap_item(void)
{
	struct rmap_item *rmap_item;

	rmap_item = kmem_cache_zalloc(rmap_item_cache, GFP_KERNEL);
	if (rmap_item)
		ksm_rmap_items++;
	return rmap_item;
}

static inline void free_rmap_item(struct rmap_item *rmap_item)
{
	ksm_rmap_items--;
	rmap_item->mm = NULL;	/* debug safety */
	kmem_cache_free(rmap_item_cache, rmap_item);
}

static inline struct stable_node *alloc_stable_node(void)
{
	return kmem_cache_alloc(stable_node_cache, GFP_KERNEL);
}

static inline void free_stable_node(struct stable_node *stable_node)
{
	kmem_cache_free(stable_node_cache, stable_node);
}

static inline struct mm_slot *alloc_mm_slot(void)
{
	if (!mm_slot_cache)	/* initialization failed */
		return NULL;
	return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}

static inline void free_mm_slot(struct mm_slot *mm_slot)
{
	kmem_cache_free(mm_slot_cache, mm_slot);
}
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
	struct mm_slot *slot;

	hash_for_each_possible(mm_slots_hash, slot, link, (unsigned long)mm)
		if (slot->mm == mm)
			return slot;

	return NULL;
}

static void insert_to_mm_slots_hash(struct mm_struct *mm,
				    struct mm_slot *mm_slot)
{
	mm_slot->mm = mm;
	hash_add(mm_slots_hash, &mm_slot->link, (unsigned long)mm);
}

/*
 * ksmd, and unmerge_and_remove_all_rmap_items(), must not touch an mm's
 * page tables after it has passed through ksm_exit() - which, if necessary,
 * takes mmap_sem briefly to serialize against them.  ksm_exit() does not set
 * a special flag: they can just back out as soon as mm_users goes to zero.
 * ksm_test_exit() is used throughout to make this test for exit: in some
 * places for correctness, in some places just to avoid unnecessary work.
 */
static inline bool ksm_test_exit(struct mm_struct *mm)
{
	return atomic_read(&mm->mm_users) == 0;
}

/*
 * We use break_ksm to break COW on a ksm page: it's a stripped down
 *
 *	if (get_user_pages(current, mm, addr, 1, 1, 1, &page, NULL) == 1)
 *		put_page(page);
 *
 * but taking great care only to touch a ksm page, in a VM_MERGEABLE vma,
 * in case the application has unmapped and remapped mm,addr meanwhile.
 * Could a ksm page appear anywhere else?  Actually yes, in a VM_PFNMAP
 * mmap of /dev/mem or /dev/kmem, where we would not want to touch it.
 */
static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	int ret = 0;

	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET | FOLL_MIGRATION);
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
							FAULT_FLAG_WRITE);
		else
			ret = VM_FAULT_WRITE;
		put_page(page);
	} while (!(ret & (VM_FAULT_WRITE | VM_FAULT_SIGBUS | VM_FAULT_OOM)));
	/*
	 * We must loop because handle_mm_fault() may back out if there's
	 * any difficulty e.g. if pte accessed bit gets updated concurrently.
	 *
	 * VM_FAULT_WRITE is what we have been hoping for: it indicates that
	 * COW has been broken, even if the vma does not permit VM_WRITE;
	 * but note that a concurrent fault might break PageKsm for us.
	 *
	 * VM_FAULT_SIGBUS could occur if we race with truncation of the
	 * backing file, which also invalidates anonymous pages: that's
	 * okay, that truncation will have unmapped the PageKsm for us.
	 *
	 * VM_FAULT_OOM: at the time of writing (late July 2009), setting
	 * aside mem_cgroup limits, VM_FAULT_OOM would only be set if the
	 * current task has TIF_MEMDIE set, and will be OOM killed on return
	 * to user; and ksmd, having no mm, would never be chosen for that.
	 *
	 * But if the mm is in a limited mem_cgroup, then the fault may fail
	 * with VM_FAULT_OOM even if the current task is not TIF_MEMDIE; and
	 * even ksmd can fail in this way - though it's usually breaking ksm
	 * just to undo a merge it made a moment before, so unlikely to oom.
	 *
	 * That's a pity: we might therefore have more kernel pages allocated
	 * than we're counting as nodes in the stable tree; but ksm_do_scan
	 * will retry to break_cow on each pass, so should recover the page
	 * in due course.  The important thing is to not let VM_MERGEABLE
	 * be cleared while any such pages might remain in the area.
	 */
	return (ret & VM_FAULT_OOM) ? -ENOMEM : 0;
}

static struct vm_area_struct *find_mergeable_vma(struct mm_struct *mm,
		unsigned long addr)
{
	struct vm_area_struct *vma;
	if (ksm_test_exit(mm))
		return NULL;
	vma = find_vma(mm, addr);
	if (!vma || vma->vm_start > addr)
		return NULL;
	if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
		return NULL;
	return vma;
}

static void break_cow(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;

	/*
	 * It is not an accident that whenever we want to break COW
	 * to undo, we also need to drop a reference to the anon_vma.
	 */
	put_anon_vma(rmap_item->anon_vma);

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (vma)
		break_ksm(vma, addr);
	up_read(&mm->mmap_sem);
}

static struct page *page_trans_compound_anon(struct page *page)
{
	if (PageTransCompound(page)) {
		struct page *head = compound_head(page);
		/*
		 * head may actually be split and freed from under
		 * us but it's ok here.
		 */
		if (PageAnon(head))
			return head;
	}
	return NULL;
}

static struct page *get_mergeable_page(struct rmap_item *rmap_item)
{
	struct mm_struct *mm = rmap_item->mm;
	unsigned long addr = rmap_item->address;
	struct vm_area_struct *vma;
	struct page *page;

	down_read(&mm->mmap_sem);
	vma = find_mergeable_vma(mm, addr);
	if (!vma)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page) || page_trans_compound_anon(page)) {
		flush_anon_page(vma, page, addr);
		flush_dcache_page(page);
	} else {
		put_page(page);
out:		page = NULL;
	}
	up_read(&mm->mmap_sem);
	return page;
}

/*
 * This helper is used for getting the right index into the array of tree
 * roots.  When the merge_across_nodes knob is set to 1, there are only two
 * rb-trees for stable and unstable pages from all nodes, with roots at
 * index 0.  Otherwise, every node has its own stable and unstable tree.
 */
static inline int get_kpfn_nid(unsigned long kpfn)
{
	return ksm_merge_across_nodes ? 0 : NUMA(pfn_to_nid(kpfn));
}

static void remove_node_from_stable_tree(struct stable_node *stable_node)
{
	struct rmap_item *rmap_item;

	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
		if (rmap_item->hlist.next)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;
		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;
		cond_resched();
	}

	if (stable_node->head == &migrate_nodes)
		list_del(&stable_node->list);
	else
		rb_erase(&stable_node->node,
			 root_stable_tree + NUMA(stable_node->nid));
	free_stable_node(stable_node);
}

/*
 * get_ksm_page: checks if the page indicated by the stable node
 * is still its ksm page, despite having held no reference to it.
 * In which case we can trust the content of the page, and it
 * returns the gotten page; but if the page has now been zapped,
 * remove the stale node from the stable tree and return NULL.
 * But beware, the stable node's page might be being migrated.
 *
 * You would expect the stable_node to hold a reference to the ksm page.
 * But if it increments the page's count, swapping out has to wait for
 * ksmd to come around again before it can free the page, which may take
 * seconds or even minutes: much too unresponsive.  So instead we use a
 * "keyhole reference": access to the ksm page from the stable node peeps
 * out through its keyhole to see if that page still holds the right key,
 * pointing back to this stable node.  This relies on freeing a PageAnon
 * page to reset its page->mapping to NULL, and relies on no other use of
 * a page to put something that might look like our key in page->mapping.
 * A page that is on its way to being freed may briefly still show our key;
 * but it is an anomaly to bear in mind.
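 *
 * In code terms (see the function below): read stable_node->kpfn, peep at
 * that page's ->mapping to check it still points back at this stable_node,
 * only then try to take a reference with get_page_unless_zero(), and
 * re-check ->mapping once the reference (and, if asked, the page lock)
 * is held.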
 */
static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
{
	struct page *page;
	void *expected_mapping;
	unsigned long kpfn;

	expected_mapping = (void *)stable_node +
				(PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
again:
	kpfn = ACCESS_ONCE(stable_node->kpfn);
	page = pfn_to_page(kpfn);

	/*
	 * page is computed from kpfn, so on most architectures reading
	 * page->mapping is naturally ordered after reading node->kpfn,
	 * but on Alpha we need to be more careful.
	 */
	smp_read_barrier_depends();
	if (ACCESS_ONCE(page->mapping) != expected_mapping)
		goto stale;

	/*
	 * We cannot do anything with the page while its refcount is 0.
	 * Usually 0 means free, or tail of a higher-order page: in which
	 * case this node is no longer referenced, and should be freed;
	 * however, it might mean that the page is under page_freeze_refs().
	 * The __remove_mapping() case is easy, again the node is now stale;
	 * but if page is swapcache in migrate_page_move_mapping(), it might
	 * still be our page, in which case it's essential to keep the node.
	 */
	while (!get_page_unless_zero(page)) {
		/*
		 * Another check for page->mapping != expected_mapping would
		 * work here too.  We have chosen the !PageSwapCache test to
		 * optimize the common case, when the page is or is about to
		 * be freed: PageSwapCache is cleared (under spin_lock_irq)
		 * in the freeze_refs section of __remove_mapping(); but Anon
		 * page->mapping reset to NULL later, in free_pages_prepare().
		 */
		if (!PageSwapCache(page))
			goto stale;
		cpu_relax();
	}

	if (ACCESS_ONCE(page->mapping) != expected_mapping) {
		put_page(page);
		goto stale;
	}

	if (lock_it) {
		lock_page(page);
		if (ACCESS_ONCE(page->mapping) != expected_mapping) {
			unlock_page(page);
			put_page(page);
			goto stale;
		}
	}
	return page;

stale:
	/*
	 * We come here from above when page->mapping or !PageSwapCache
	 * suggests that the node is stale; but it might be under migration.
	 * We need smp_rmb(), matching the smp_wmb() in ksm_migrate_page(),
	 * before checking whether node->kpfn has been changed.
	 */
	smp_rmb();
	if (ACCESS_ONCE(stable_node->kpfn) != kpfn)
		goto again;
	remove_node_from_stable_tree(stable_node);
	return NULL;
}

/*
 * Removing rmap_item from stable or unstable tree.
 * This function will clean the information from the stable/unstable tree.
 */
static void remove_rmap_item_from_tree(struct rmap_item *rmap_item)
{
	if (rmap_item->address & STABLE_FLAG) {
		struct stable_node *stable_node;
		struct page *page;

		stable_node = rmap_item->head;
		page = get_ksm_page(stable_node, true);
		if (!page)
			goto out;

		hlist_del(&rmap_item->hlist);
		unlock_page(page);
		put_page(page);

		if (stable_node->hlist.first)
			ksm_pages_sharing--;
		else
			ksm_pages_shared--;

		put_anon_vma(rmap_item->anon_vma);
		rmap_item->address &= PAGE_MASK;

	} else if (rmap_item->address & UNSTABLE_FLAG) {
		unsigned char age;
		/*
		 * Usually ksmd can and must skip the rb_erase, because
		 * root_unstable_tree was already reset to RB_ROOT.
		 * But be careful when an mm is exiting: do the rb_erase
		 * if this rmap_item was inserted by this scan, rather
		 * than left over from before.
		 */
		age = (unsigned char)(ksm_scan.seqnr - rmap_item->address);
		BUG_ON(age > 1);
		if (!age)
			rb_erase(&rmap_item->node,
				 root_unstable_tree + NUMA(rmap_item->nid));
		ksm_pages_unshared--;
		rmap_item->address &= PAGE_MASK;
	}
out:
	cond_resched();		/* we're called from many long loops */
}

static void remove_trailing_rmap_items(struct mm_slot *mm_slot,
				       struct rmap_item **rmap_list)
{
	while (*rmap_list) {
		struct rmap_item *rmap_item = *rmap_list;
		*rmap_list = rmap_item->rmap_list;
		remove_rmap_item_from_tree(rmap_item);
		free_rmap_item(rmap_item);
	}
}

/*
 * Though it's very tempting to unmerge rmap_items from stable tree rather
 * than check every pte of a given vma, the locking doesn't quite work for
 * that - an rmap_item is assigned to the stable tree after inserting ksm
 * page and upping mmap_sem.  Nor does it fit with the way we skip dup'ing
 * rmap_items from parent to child at fork time (so as not to waste time
 * if exit comes before the next scan reaches it).
 *
 * Similarly, although we'd like to remove rmap_items (so updating counts
 * and freeing memory) when unmerging an area, it's easier to leave that
 * to the next pass of ksmd - consider, for example, how ksmd might be
 * in cmp_and_merge_page on one of the rmap_items we would be removing.
 */
static int unmerge_ksm_pages(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;
	int err = 0;

	for (addr = start; addr < end && !err; addr += PAGE_SIZE) {
		if (ksm_test_exit(vma->vm_mm))
			break;
		if (signal_pending(current))
			err = -ERESTARTSYS;
		else
			err = break_ksm(vma, addr);
	}
	return err;
}

#ifdef CONFIG_SYSFS
/*
 * Only called through the sysfs control interface:
 */
static int remove_stable_node(struct stable_node *stable_node)
{
	struct page *page;
	int err;

	page = get_ksm_page(stable_node, true);
	if (!page) {
		/*
		 * get_ksm_page did remove_node_from_stable_tree itself.
		 */
		return 0;
	}

	if (WARN_ON_ONCE(page_mapped(page))) {
		/*
		 * This should not happen: but if it does, just refuse to let
		 * merge_across_nodes be switched - there is no need to panic.
		 */
		err = -EBUSY;
	} else {
		/*
		 * The stable node did not yet appear stale to get_ksm_page(),
		 * since that allows for an unmapped ksm page to be recognized
		 * right up until it is freed; but the node is safe to remove.
		 * This page might be in a pagevec waiting to be freed,
		 * or it might be PageSwapCache (perhaps under writeback),
		 * or it might have been removed from swapcache a moment ago.
		 */
		set_page_stable_node(page, NULL);
		remove_node_from_stable_tree(stable_node);
		err = 0;
	}

	unlock_page(page);
	put_page(page);
	return err;
}

static int remove_all_stable_nodes(void)
{
	struct stable_node *stable_node;
	struct list_head *this, *next;
	int nid;
	int err = 0;

	for (nid = 0; nid < ksm_nr_node_ids; nid++) {
		while (root_stable_tree[nid].rb_node) {
			stable_node = rb_entry(root_stable_tree[nid].rb_node,
						struct stable_node, node);
			if (remove_stable_node(stable_node)) {
				err = -EBUSY;
				break;	/* proceed to next nid */
			}
			cond_resched();
		}
	}
	list_for_each_safe(this, next, &migrate_nodes) {
		stable_node = list_entry(this, struct stable_node, list);
		if (remove_stable_node(stable_node))
			err = -EBUSY;
		cond_resched();
	}
	return err;
}

static int unmerge_and_remove_all_rmap_items(void)
{
	struct mm_slot *mm_slot;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	int err = 0;

	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = list_entry(ksm_mm_head.mm_list.next,
						struct mm_slot, mm_list);
	spin_unlock(&ksm_mmlist_lock);

	for (mm_slot = ksm_scan.mm_slot;
			mm_slot != &ksm_mm_head; mm_slot = ksm_scan.mm_slot) {
		mm = mm_slot->mm;
		down_read(&mm->mmap_sem);
		for (vma = mm->mmap; vma; vma = vma->vm_next) {
			if (ksm_test_exit(mm))
				break;
			if (!(vma->vm_flags & VM_MERGEABLE) || !vma->anon_vma)
				continue;
			err = unmerge_ksm_pages(vma,
						vma->vm_start, vma->vm_end);
			if (err)
				goto error;
		}

		remove_trailing_rmap_items(mm_slot, &mm_slot->rmap_list);

		spin_lock(&ksm_mmlist_lock);
		ksm_scan.mm_slot = list_entry(mm_slot->mm_list.next,
						struct mm_slot, mm_list);
		if (ksm_test_exit(mm)) {
			hash_del(&mm_slot->link);
			list_del(&mm_slot->mm_list);
			spin_unlock(&ksm_mmlist_lock);

			free_mm_slot(mm_slot);
			clear_bit(MMF_VM_MERGEABLE, &mm->flags);
			up_read(&mm->mmap_sem);
			mmdrop(mm);
		} else {
			spin_unlock(&ksm_mmlist_lock);
			up_read(&mm->mmap_sem);
		}
	}

	/* Clean up stable nodes, but don't worry if some are still busy */
	remove_all_stable_nodes();
	ksm_scan.seqnr = 0;
	return 0;

error:
	up_read(&mm->mmap_sem);
	spin_lock(&ksm_mmlist_lock);
	ksm_scan.mm_slot = &ksm_mm_head;
	spin_unlock(&ksm_mmlist_lock);
	return err;
}
#endif /* CONFIG_SYSFS */

static u32 calc_checksum(struct page *page)
{
	u32 checksum;
	void *addr = kmap_atomic(page);
	checksum = jhash2(addr, PAGE_SIZE / 4, 17);
	kunmap_atomic(addr);
	return checksum;
}

static int memcmp_pages(struct page *page1, struct page *page2)
{
	char *addr1, *addr2;
	int ret;

	addr1 = kmap_atomic(page1);
	addr2 = kmap_atomic(page2);
	ret = memcmp(addr1, addr2, PAGE_SIZE);
	kunmap_atomic(addr2);
	kunmap_atomic(addr1);
	return ret;
}

static inline int pages_identical(struct page *page1, struct page *page2)
{
	return !memcmp_pages(page1, page2);
}

static int write_protect_page(struct vm_area_struct *vma, struct page *page,
			      pte_t *orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	pte_t *ptep;
	spinlock_t *ptl;
	int swapped;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	BUG_ON(PageTransCompound(page));

	mmun_start = addr;
	mmun_end   = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = page_check_address(page, mm, addr, &ptl, 0);
	if (!ptep)
		goto out_mn;

	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
		flush_cache_page(vma, addr, page_to_pfn(page));
		/*
		 * Ok this is tricky, when get_user_pages_fast() runs it doesn't
		 * take any lock, therefore the check that we are going to make
		 * with the pagecount against the mapcount is racy and
		 * O_DIRECT can happen right after the check.
		 * So we clear the pte and flush the tlb before the check;
		 * this assures us that no O_DIRECT can happen after the check
		 * or in the middle of the check.
		 */
		entry = ptep_clear_flush_notify(vma, addr, ptep);
		/*
		 * Check that no O_DIRECT or similar I/O is in progress on the
		 * page
		 */
		if (page_mapcount(page) + 1 + swapped != page_count(page)) {
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
	err = 0;

out_unlock:
	pte_unmap_unlock(ptep, ptl);
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

/**
 * replace_page - replace page in vma by new ksm page
 * @vma:      vma that holds the pte pointing to page
 * @page:     the page we are replacing by kpage
 * @kpage:    the ksm page we replace page by
 * @orig_pte: the original value of the pte
 *
 * Returns 0 on success, -EFAULT on failure.
 */
static int replace_page(struct vm_area_struct *vma, struct page *page,
			struct page *kpage, pte_t orig_pte)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *ptep;
	spinlock_t *ptl;
	unsigned long addr;
	int err = -EFAULT;
	unsigned long mmun_start;	/* For mmu_notifiers */
	unsigned long mmun_end;		/* For mmu_notifiers */

	addr = page_address_in_vma(page, vma);
	if (addr == -EFAULT)
		goto out;

	pmd = mm_find_pmd(mm, addr);
	if (!pmd)
		goto out;

	mmun_start = addr;
	mmun_end   = addr + PAGE_SIZE;
	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);

	ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!pte_same(*ptep, orig_pte)) {
		pte_unmap_unlock(ptep, ptl);
		goto out_mn;
	}

	get_page(kpage);
	page_add_anon_rmap(kpage, vma, addr);

	flush_cache_page(vma, addr, pte_pfn(*ptep));
	ptep_clear_flush_notify(vma, addr, ptep);
	set_pte_at_notify(mm, addr, ptep, mk_pte(kpage, vma->vm_page_prot));

	page_remove_rmap(page);
	if (!page_mapped(page))
		try_to_free_swap(page);
	put_page(page);

	pte_unmap_unlock(ptep, ptl);
	err = 0;
out_mn:
	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
out:
	return err;
}

static int page_trans_compound_anon_split(struct page *page)
{
	int ret = 0;
	struct page *transhuge_head = page_trans_compound_anon(page);
	if (transhuge_head) {
		/* Get the reference on the head to split it. */
		if (get_page_unless_zero(transhuge_head)) {
			/*
			 * Recheck we got the reference while the head
			 * was still anonymous.
			 */
			if (PageAnon(transhuge_head))
				ret = split_huge_page(transhuge_head);
			else
				/*
				 * Retry later if split_huge_page ran
				 * from under us.
				 */
				ret = 1;
			put_page(transhuge_head);
		} else
			/* Retry later if split_huge_page ran from under us. */
			ret = 1;
	}
	return ret;
}

/*
 * try_to_merge_one_page - take two pages and merge them into one
 * @vma: the vma that holds the pte pointing to page
 * @page: the PageAnon page that we want to replace with kpage
 * @kpage: the PageKsm page that we want to map instead of page,
 *         or NULL the first time when we want to use page as kpage.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_one_page(struct vm_area_struct *vma,
				 struct page *page, struct page *kpage)
{
	pte_t orig_pte = __pte(0);
	int err = -EFAULT;

	if (page == kpage)			/* ksm page forked */
		return 0;

	if (!(vma->vm_flags & VM_MERGEABLE))
		goto out;
	if (PageTransCompound(page) && page_trans_compound_anon_split(page))
		goto out;
	BUG_ON(PageTransCompound(page));
	if (!PageAnon(page))
		goto out;

	/*
	 * We need the page lock to read a stable PageSwapCache in
	 * write_protect_page().  We use trylock_page() instead of
	 * lock_page() because we don't want to wait here - we
	 * prefer to continue scanning and merging different pages,
	 * then come back to this page when it is unlocked.
	 */
	if (!trylock_page(page))
		goto out;
	/*
	 * If this anonymous page is mapped only here, its pte may need
	 * to be write-protected.  If it's mapped elsewhere, all of its
	 * ptes are necessarily already write-protected.  But in either
	 * case, we need to lock and check page_count is not raised.
	 */
	if (write_protect_page(vma, page, &orig_pte) == 0) {
		if (!kpage) {
			/*
			 * While we hold page lock, upgrade page from
			 * PageAnon+anon_vma to PageKsm+NULL stable_node:
			 * stable_tree_insert() will update stable_node.
			 */
			set_page_stable_node(page, NULL);
			mark_page_accessed(page);
			err = 0;
		} else if (pages_identical(page, kpage))
			err = replace_page(vma, page, kpage, orig_pte);
	}

	if ((vma->vm_flags & VM_LOCKED) && kpage && !err) {
		munlock_vma_page(page);
		if (!PageMlocked(kpage)) {
			unlock_page(page);
			lock_page(kpage);
			mlock_vma_page(kpage);
			page = kpage;		/* for final unlock */
		}
	}

	unlock_page(page);
out:
	return err;
}

/*
 * try_to_merge_with_ksm_page - like try_to_merge_two_pages,
 * but no new kernel page is allocated: kpage must already be a ksm page.
 *
 * This function returns 0 if the pages were merged, -EFAULT otherwise.
 */
static int try_to_merge_with_ksm_page(struct rmap_item *rmap_item,
				      struct page *page, struct page *kpage)
{
	struct mm_struct *mm = rmap_item->mm;
	struct vm_area_struct *vma;
	int err = -EFAULT;

	down_read(&mm->mmap_sem);
	if (ksm_test_exit(mm))
		goto out;
	vma = find_vma(mm, rmap_item->address);
	if (!vma || vma->vm_start > rmap_item->address)
		goto out;

	err = try_to_merge_one_page(vma, page, kpage);
	if (err)
		goto out;

	/* Unstable nid is in union with stable anon_vma: remove first */
	remove_rmap_item_from_tree(rmap_item);

	/* Must get reference to anon_vma while still holding mmap_sem */
	rmap_item->anon_vma = vma->anon_vma;
	get_anon_vma(vma->anon_vma);
out:
	up_read(&mm->mmap_sem);
	return err;
}

/*
 * try_to_merge_two_pages - take two identical pages and prepare them
 * to be merged into one page.
 *
 * This function returns the kpage if we successfully merged two identical
 * pages into one ksm page, NULL otherwise.
 *
 * Note that this function upgrades page to ksm page: if one of the pages
 * is already a ksm page, try_to_merge_with_ksm_page should be used.
112031dbd01fSIzik Eidus */ 11218dd3557aSHugh Dickins static struct page *try_to_merge_two_pages(struct rmap_item *rmap_item, 11228dd3557aSHugh Dickins struct page *page, 11238dd3557aSHugh Dickins struct rmap_item *tree_rmap_item, 11248dd3557aSHugh Dickins struct page *tree_page) 112531dbd01fSIzik Eidus { 112680e14822SHugh Dickins int err; 112731dbd01fSIzik Eidus 112880e14822SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, NULL); 112931dbd01fSIzik Eidus if (!err) { 11308dd3557aSHugh Dickins err = try_to_merge_with_ksm_page(tree_rmap_item, 113180e14822SHugh Dickins tree_page, page); 113231dbd01fSIzik Eidus /* 113381464e30SHugh Dickins * If that fails, we have a ksm page with only one pte 113481464e30SHugh Dickins * pointing to it: so break it. 113531dbd01fSIzik Eidus */ 11364035c07aSHugh Dickins if (err) 11378dd3557aSHugh Dickins break_cow(rmap_item); 113831dbd01fSIzik Eidus } 113980e14822SHugh Dickins return err ? NULL : page; 114031dbd01fSIzik Eidus } 114131dbd01fSIzik Eidus 114231dbd01fSIzik Eidus /* 11438dd3557aSHugh Dickins * stable_tree_search - search for page inside the stable tree 114431dbd01fSIzik Eidus * 114531dbd01fSIzik Eidus * This function checks if there is a page inside the stable tree 114631dbd01fSIzik Eidus * with identical content to the page that we are scanning right now. 114731dbd01fSIzik Eidus * 11487b6ba2c7SHugh Dickins * This function returns the stable tree node of identical content if found, 114931dbd01fSIzik Eidus * NULL otherwise. 115031dbd01fSIzik Eidus */ 115162b61f61SHugh Dickins static struct page *stable_tree_search(struct page *page) 115231dbd01fSIzik Eidus { 115390bd6fd3SPetr Holasek int nid; 1154ef53d16cSHugh Dickins struct rb_root *root; 11554146d2d6SHugh Dickins struct rb_node **new; 11564146d2d6SHugh Dickins struct rb_node *parent; 11574146d2d6SHugh Dickins struct stable_node *stable_node; 11584146d2d6SHugh Dickins struct stable_node *page_node; 115931dbd01fSIzik Eidus 11604146d2d6SHugh Dickins page_node = page_stable_node(page); 11614146d2d6SHugh Dickins if (page_node && page_node->head != &migrate_nodes) { 11624146d2d6SHugh Dickins /* ksm page forked */ 116308beca44SHugh Dickins get_page(page); 116462b61f61SHugh Dickins return page; 116508beca44SHugh Dickins } 116608beca44SHugh Dickins 116790bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1168ef53d16cSHugh Dickins root = root_stable_tree + nid; 11694146d2d6SHugh Dickins again: 1170ef53d16cSHugh Dickins new = &root->rb_node; 11714146d2d6SHugh Dickins parent = NULL; 117290bd6fd3SPetr Holasek 11734146d2d6SHugh Dickins while (*new) { 11744035c07aSHugh Dickins struct page *tree_page; 117531dbd01fSIzik Eidus int ret; 117631dbd01fSIzik Eidus 117731dbd01fSIzik Eidus cond_resched(); 11784146d2d6SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 11798aafa6a4SHugh Dickins tree_page = get_ksm_page(stable_node, false); 11804035c07aSHugh Dickins if (!tree_page) 11814035c07aSHugh Dickins return NULL; 118231dbd01fSIzik Eidus 11834035c07aSHugh Dickins ret = memcmp_pages(page, tree_page); 1184c8d6553bSHugh Dickins put_page(tree_page); 118531dbd01fSIzik Eidus 11864146d2d6SHugh Dickins parent = *new; 1187c8d6553bSHugh Dickins if (ret < 0) 11884146d2d6SHugh Dickins new = &parent->rb_left; 1189c8d6553bSHugh Dickins else if (ret > 0) 11904146d2d6SHugh Dickins new = &parent->rb_right; 1191c8d6553bSHugh Dickins else { 1192c8d6553bSHugh Dickins /* 1193c8d6553bSHugh Dickins * Lock and unlock the stable_node's page (which 1194c8d6553bSHugh Dickins * might already have been migrated) 
so that page 1195c8d6553bSHugh Dickins * migration is sure to notice its raised count. 1196c8d6553bSHugh Dickins * It would be more elegant to return stable_node 1197c8d6553bSHugh Dickins * than kpage, but that involves more changes. 1198c8d6553bSHugh Dickins */ 1199c8d6553bSHugh Dickins tree_page = get_ksm_page(stable_node, true); 12004146d2d6SHugh Dickins if (tree_page) { 1201c8d6553bSHugh Dickins unlock_page(tree_page); 12024146d2d6SHugh Dickins if (get_kpfn_nid(stable_node->kpfn) != 12034146d2d6SHugh Dickins NUMA(stable_node->nid)) { 12044146d2d6SHugh Dickins put_page(tree_page); 12054146d2d6SHugh Dickins goto replace; 12064146d2d6SHugh Dickins } 120762b61f61SHugh Dickins return tree_page; 120831dbd01fSIzik Eidus } 12094146d2d6SHugh Dickins /* 12104146d2d6SHugh Dickins * There is now a place for page_node, but the tree may 12114146d2d6SHugh Dickins * have been rebalanced, so re-evaluate parent and new. 12124146d2d6SHugh Dickins */ 12134146d2d6SHugh Dickins if (page_node) 12144146d2d6SHugh Dickins goto again; 12154146d2d6SHugh Dickins return NULL; 12164146d2d6SHugh Dickins } 1217c8d6553bSHugh Dickins } 121831dbd01fSIzik Eidus 12194146d2d6SHugh Dickins if (!page_node) 122031dbd01fSIzik Eidus return NULL; 12214146d2d6SHugh Dickins 12224146d2d6SHugh Dickins list_del(&page_node->list); 12234146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 12244146d2d6SHugh Dickins rb_link_node(&page_node->node, parent, new); 1225ef53d16cSHugh Dickins rb_insert_color(&page_node->node, root); 12264146d2d6SHugh Dickins get_page(page); 12274146d2d6SHugh Dickins return page; 12284146d2d6SHugh Dickins 12294146d2d6SHugh Dickins replace: 12304146d2d6SHugh Dickins if (page_node) { 12314146d2d6SHugh Dickins list_del(&page_node->list); 12324146d2d6SHugh Dickins DO_NUMA(page_node->nid = nid); 1233ef53d16cSHugh Dickins rb_replace_node(&stable_node->node, &page_node->node, root); 12344146d2d6SHugh Dickins get_page(page); 12354146d2d6SHugh Dickins } else { 1236ef53d16cSHugh Dickins rb_erase(&stable_node->node, root); 12374146d2d6SHugh Dickins page = NULL; 12384146d2d6SHugh Dickins } 12394146d2d6SHugh Dickins stable_node->head = &migrate_nodes; 12404146d2d6SHugh Dickins list_add(&stable_node->list, stable_node->head); 12414146d2d6SHugh Dickins return page; 124231dbd01fSIzik Eidus } 124331dbd01fSIzik Eidus 124431dbd01fSIzik Eidus /* 1245e850dcf5SHugh Dickins * stable_tree_insert - insert stable tree node pointing to new ksm page 124631dbd01fSIzik Eidus * into the stable tree. 124731dbd01fSIzik Eidus * 12487b6ba2c7SHugh Dickins * This function returns the stable tree node just allocated on success, 12497b6ba2c7SHugh Dickins * NULL otherwise. 
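 * (Its sole caller, cmp_and_merge_page(), holds kpage locked across this
 * insert and the stable_tree_append() calls that follow it.)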
125031dbd01fSIzik Eidus */ 12517b6ba2c7SHugh Dickins static struct stable_node *stable_tree_insert(struct page *kpage) 125231dbd01fSIzik Eidus { 125390bd6fd3SPetr Holasek int nid; 125490bd6fd3SPetr Holasek unsigned long kpfn; 1255ef53d16cSHugh Dickins struct rb_root *root; 125690bd6fd3SPetr Holasek struct rb_node **new; 125731dbd01fSIzik Eidus struct rb_node *parent = NULL; 12587b6ba2c7SHugh Dickins struct stable_node *stable_node; 125931dbd01fSIzik Eidus 126090bd6fd3SPetr Holasek kpfn = page_to_pfn(kpage); 126190bd6fd3SPetr Holasek nid = get_kpfn_nid(kpfn); 1262ef53d16cSHugh Dickins root = root_stable_tree + nid; 1263ef53d16cSHugh Dickins new = &root->rb_node; 126490bd6fd3SPetr Holasek 126531dbd01fSIzik Eidus while (*new) { 12664035c07aSHugh Dickins struct page *tree_page; 126731dbd01fSIzik Eidus int ret; 126831dbd01fSIzik Eidus 126931dbd01fSIzik Eidus cond_resched(); 127008beca44SHugh Dickins stable_node = rb_entry(*new, struct stable_node, node); 12718aafa6a4SHugh Dickins tree_page = get_ksm_page(stable_node, false); 12724035c07aSHugh Dickins if (!tree_page) 12734035c07aSHugh Dickins return NULL; 127431dbd01fSIzik Eidus 12754035c07aSHugh Dickins ret = memcmp_pages(kpage, tree_page); 12764035c07aSHugh Dickins put_page(tree_page); 127731dbd01fSIzik Eidus 127831dbd01fSIzik Eidus parent = *new; 127931dbd01fSIzik Eidus if (ret < 0) 128031dbd01fSIzik Eidus new = &parent->rb_left; 128131dbd01fSIzik Eidus else if (ret > 0) 128231dbd01fSIzik Eidus new = &parent->rb_right; 128331dbd01fSIzik Eidus else { 128431dbd01fSIzik Eidus /* 128531dbd01fSIzik Eidus * It is not a bug that stable_tree_search() didn't 128631dbd01fSIzik Eidus * find this node: because at that time our page was 128731dbd01fSIzik Eidus * not yet write-protected, so may have changed since. 128831dbd01fSIzik Eidus */ 128931dbd01fSIzik Eidus return NULL; 129031dbd01fSIzik Eidus } 129131dbd01fSIzik Eidus } 129231dbd01fSIzik Eidus 12937b6ba2c7SHugh Dickins stable_node = alloc_stable_node(); 12947b6ba2c7SHugh Dickins if (!stable_node) 12957b6ba2c7SHugh Dickins return NULL; 129631dbd01fSIzik Eidus 12977b6ba2c7SHugh Dickins INIT_HLIST_HEAD(&stable_node->hlist); 129890bd6fd3SPetr Holasek stable_node->kpfn = kpfn; 129908beca44SHugh Dickins set_page_stable_node(kpage, stable_node); 13004146d2d6SHugh Dickins DO_NUMA(stable_node->nid = nid); 1301e850dcf5SHugh Dickins rb_link_node(&stable_node->node, parent, new); 1302ef53d16cSHugh Dickins rb_insert_color(&stable_node->node, root); 130308beca44SHugh Dickins 13047b6ba2c7SHugh Dickins return stable_node; 130531dbd01fSIzik Eidus } 130631dbd01fSIzik Eidus 130731dbd01fSIzik Eidus /* 13088dd3557aSHugh Dickins * unstable_tree_search_insert - search for identical page, 13098dd3557aSHugh Dickins * else insert rmap_item into the unstable tree. 131031dbd01fSIzik Eidus * 131131dbd01fSIzik Eidus * This function searches for a page in the unstable tree identical to the 131231dbd01fSIzik Eidus * page currently being scanned; and if no identical page is found in the 131331dbd01fSIzik Eidus * tree, we insert rmap_item as a new object into the unstable tree. 131431dbd01fSIzik Eidus * 131531dbd01fSIzik Eidus * This function returns pointer to rmap_item found to be identical 131631dbd01fSIzik Eidus * to the currently scanned page, NULL otherwise. 131731dbd01fSIzik Eidus * 131831dbd01fSIzik Eidus * This function does both searching and inserting, because they share 131931dbd01fSIzik Eidus * the same walking algorithm in an rbtree. 
132031dbd01fSIzik Eidus */ 13218dd3557aSHugh Dickins static 13228dd3557aSHugh Dickins struct rmap_item *unstable_tree_search_insert(struct rmap_item *rmap_item, 13238dd3557aSHugh Dickins struct page *page, 13248dd3557aSHugh Dickins struct page **tree_pagep) 132531dbd01fSIzik Eidus { 132690bd6fd3SPetr Holasek struct rb_node **new; 132790bd6fd3SPetr Holasek struct rb_root *root; 132831dbd01fSIzik Eidus struct rb_node *parent = NULL; 132990bd6fd3SPetr Holasek int nid; 133090bd6fd3SPetr Holasek 133190bd6fd3SPetr Holasek nid = get_kpfn_nid(page_to_pfn(page)); 1332ef53d16cSHugh Dickins root = root_unstable_tree + nid; 133390bd6fd3SPetr Holasek new = &root->rb_node; 133431dbd01fSIzik Eidus 133531dbd01fSIzik Eidus while (*new) { 133631dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 13378dd3557aSHugh Dickins struct page *tree_page; 133831dbd01fSIzik Eidus int ret; 133931dbd01fSIzik Eidus 1340d178f27fSHugh Dickins cond_resched(); 134131dbd01fSIzik Eidus tree_rmap_item = rb_entry(*new, struct rmap_item, node); 13428dd3557aSHugh Dickins tree_page = get_mergeable_page(tree_rmap_item); 134322eccdd7SDan Carpenter if (IS_ERR_OR_NULL(tree_page)) 134431dbd01fSIzik Eidus return NULL; 134531dbd01fSIzik Eidus 134631dbd01fSIzik Eidus /* 13478dd3557aSHugh Dickins * Don't substitute a ksm page for a forked page. 134831dbd01fSIzik Eidus */ 13498dd3557aSHugh Dickins if (page == tree_page) { 13508dd3557aSHugh Dickins put_page(tree_page); 135131dbd01fSIzik Eidus return NULL; 135231dbd01fSIzik Eidus } 135331dbd01fSIzik Eidus 13548dd3557aSHugh Dickins ret = memcmp_pages(page, tree_page); 135531dbd01fSIzik Eidus 135631dbd01fSIzik Eidus parent = *new; 135731dbd01fSIzik Eidus if (ret < 0) { 13588dd3557aSHugh Dickins put_page(tree_page); 135931dbd01fSIzik Eidus new = &parent->rb_left; 136031dbd01fSIzik Eidus } else if (ret > 0) { 13618dd3557aSHugh Dickins put_page(tree_page); 136231dbd01fSIzik Eidus new = &parent->rb_right; 1363b599cbdfSHugh Dickins } else if (!ksm_merge_across_nodes && 1364b599cbdfSHugh Dickins page_to_nid(tree_page) != nid) { 1365b599cbdfSHugh Dickins /* 1366b599cbdfSHugh Dickins * If tree_page has been migrated to another NUMA node, 1367b599cbdfSHugh Dickins * it will be flushed out and put in the right unstable 1368b599cbdfSHugh Dickins * tree next time: only merge with it when across_nodes. 1369b599cbdfSHugh Dickins */ 1370b599cbdfSHugh Dickins put_page(tree_page); 1371b599cbdfSHugh Dickins return NULL; 137231dbd01fSIzik Eidus } else { 13738dd3557aSHugh Dickins *tree_pagep = tree_page; 137431dbd01fSIzik Eidus return tree_rmap_item; 137531dbd01fSIzik Eidus } 137631dbd01fSIzik Eidus } 137731dbd01fSIzik Eidus 13787b6ba2c7SHugh Dickins rmap_item->address |= UNSTABLE_FLAG; 137931dbd01fSIzik Eidus rmap_item->address |= (ksm_scan.seqnr & SEQNR_MASK); 1380e850dcf5SHugh Dickins DO_NUMA(rmap_item->nid = nid); 138131dbd01fSIzik Eidus rb_link_node(&rmap_item->node, parent, new); 138290bd6fd3SPetr Holasek rb_insert_color(&rmap_item->node, root); 138331dbd01fSIzik Eidus 1384473b0ce4SHugh Dickins ksm_pages_unshared++; 138531dbd01fSIzik Eidus return NULL; 138631dbd01fSIzik Eidus } 138731dbd01fSIzik Eidus 138831dbd01fSIzik Eidus /* 138931dbd01fSIzik Eidus * stable_tree_append - add another rmap_item to the linked list of 139031dbd01fSIzik Eidus * rmap_items hanging off a given node of the stable tree, all sharing 139131dbd01fSIzik Eidus * the same ksm page. 
139231dbd01fSIzik Eidus */ 139331dbd01fSIzik Eidus static void stable_tree_append(struct rmap_item *rmap_item, 13947b6ba2c7SHugh Dickins struct stable_node *stable_node) 139531dbd01fSIzik Eidus { 13967b6ba2c7SHugh Dickins rmap_item->head = stable_node; 139731dbd01fSIzik Eidus rmap_item->address |= STABLE_FLAG; 13987b6ba2c7SHugh Dickins hlist_add_head(&rmap_item->hlist, &stable_node->hlist); 1399e178dfdeSHugh Dickins 14007b6ba2c7SHugh Dickins if (rmap_item->hlist.next) 1401e178dfdeSHugh Dickins ksm_pages_sharing++; 14027b6ba2c7SHugh Dickins else 14037b6ba2c7SHugh Dickins ksm_pages_shared++; 140431dbd01fSIzik Eidus } 140531dbd01fSIzik Eidus 140631dbd01fSIzik Eidus /* 140781464e30SHugh Dickins * cmp_and_merge_page - first see if page can be merged into the stable tree; 140881464e30SHugh Dickins * if not, compare checksum to previous and if it's the same, see if page can 140981464e30SHugh Dickins * be inserted into the unstable tree, or merged with a page already there and 141081464e30SHugh Dickins * both transferred to the stable tree. 141131dbd01fSIzik Eidus * 141231dbd01fSIzik Eidus * @page: the page that we are searching identical page to. 141331dbd01fSIzik Eidus * @rmap_item: the reverse mapping into the virtual address of this page 141431dbd01fSIzik Eidus */ 141531dbd01fSIzik Eidus static void cmp_and_merge_page(struct page *page, struct rmap_item *rmap_item) 141631dbd01fSIzik Eidus { 141731dbd01fSIzik Eidus struct rmap_item *tree_rmap_item; 14188dd3557aSHugh Dickins struct page *tree_page = NULL; 14197b6ba2c7SHugh Dickins struct stable_node *stable_node; 14208dd3557aSHugh Dickins struct page *kpage; 142131dbd01fSIzik Eidus unsigned int checksum; 142231dbd01fSIzik Eidus int err; 142331dbd01fSIzik Eidus 14244146d2d6SHugh Dickins stable_node = page_stable_node(page); 14254146d2d6SHugh Dickins if (stable_node) { 14264146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 14274146d2d6SHugh Dickins get_kpfn_nid(stable_node->kpfn) != NUMA(stable_node->nid)) { 14284146d2d6SHugh Dickins rb_erase(&stable_node->node, 1429ef53d16cSHugh Dickins root_stable_tree + NUMA(stable_node->nid)); 14304146d2d6SHugh Dickins stable_node->head = &migrate_nodes; 14314146d2d6SHugh Dickins list_add(&stable_node->list, stable_node->head); 14324146d2d6SHugh Dickins } 14334146d2d6SHugh Dickins if (stable_node->head != &migrate_nodes && 14344146d2d6SHugh Dickins rmap_item->head == stable_node) 14354146d2d6SHugh Dickins return; 14364146d2d6SHugh Dickins } 143731dbd01fSIzik Eidus 143831dbd01fSIzik Eidus /* We first start with searching the page inside the stable tree */ 143962b61f61SHugh Dickins kpage = stable_tree_search(page); 14404146d2d6SHugh Dickins if (kpage == page && rmap_item->head == stable_node) { 14414146d2d6SHugh Dickins put_page(kpage); 14424146d2d6SHugh Dickins return; 14434146d2d6SHugh Dickins } 14444146d2d6SHugh Dickins 14454146d2d6SHugh Dickins remove_rmap_item_from_tree(rmap_item); 14464146d2d6SHugh Dickins 144762b61f61SHugh Dickins if (kpage) { 144808beca44SHugh Dickins err = try_to_merge_with_ksm_page(rmap_item, page, kpage); 144931dbd01fSIzik Eidus if (!err) { 145031dbd01fSIzik Eidus /* 145131dbd01fSIzik Eidus * The page was successfully merged: 145231dbd01fSIzik Eidus * add its rmap_item to the stable tree. 
145331dbd01fSIzik Eidus */ 14545ad64688SHugh Dickins lock_page(kpage); 145562b61f61SHugh Dickins stable_tree_append(rmap_item, page_stable_node(kpage)); 14565ad64688SHugh Dickins unlock_page(kpage); 145731dbd01fSIzik Eidus } 14588dd3557aSHugh Dickins put_page(kpage); 145931dbd01fSIzik Eidus return; 146031dbd01fSIzik Eidus } 146131dbd01fSIzik Eidus 146231dbd01fSIzik Eidus /* 14634035c07aSHugh Dickins * If the hash value of the page has changed from the last time 14644035c07aSHugh Dickins * we calculated it, this page is changing frequently: therefore we 14654035c07aSHugh Dickins * don't want to insert it in the unstable tree, and we don't want 14664035c07aSHugh Dickins * to waste our time searching for something identical to it there. 146731dbd01fSIzik Eidus */ 146831dbd01fSIzik Eidus checksum = calc_checksum(page); 146931dbd01fSIzik Eidus if (rmap_item->oldchecksum != checksum) { 147031dbd01fSIzik Eidus rmap_item->oldchecksum = checksum; 147131dbd01fSIzik Eidus return; 147231dbd01fSIzik Eidus } 147331dbd01fSIzik Eidus 14748dd3557aSHugh Dickins tree_rmap_item = 14758dd3557aSHugh Dickins unstable_tree_search_insert(rmap_item, page, &tree_page); 147631dbd01fSIzik Eidus if (tree_rmap_item) { 14778dd3557aSHugh Dickins kpage = try_to_merge_two_pages(rmap_item, page, 14788dd3557aSHugh Dickins tree_rmap_item, tree_page); 14798dd3557aSHugh Dickins put_page(tree_page); 14808dd3557aSHugh Dickins if (kpage) { 1481bc56620bSHugh Dickins /* 1482bc56620bSHugh Dickins * The pages were successfully merged: insert new 1483bc56620bSHugh Dickins * node in the stable tree and add both rmap_items. 1484bc56620bSHugh Dickins */ 14855ad64688SHugh Dickins lock_page(kpage); 14867b6ba2c7SHugh Dickins stable_node = stable_tree_insert(kpage); 14877b6ba2c7SHugh Dickins if (stable_node) { 14887b6ba2c7SHugh Dickins stable_tree_append(tree_rmap_item, stable_node); 14897b6ba2c7SHugh Dickins stable_tree_append(rmap_item, stable_node); 14907b6ba2c7SHugh Dickins } 14915ad64688SHugh Dickins unlock_page(kpage); 14927b6ba2c7SHugh Dickins 149331dbd01fSIzik Eidus /* 149431dbd01fSIzik Eidus * If we fail to insert the page into the stable tree, 149531dbd01fSIzik Eidus * we will have 2 virtual addresses that are pointing 149631dbd01fSIzik Eidus * to a ksm page left outside the stable tree, 149731dbd01fSIzik Eidus * in which case we need to break_cow on both. 
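 * (break_cow() write-faults the address so that each mm regains its own
 * private, writable copy of the page.)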
149831dbd01fSIzik Eidus */ 14997b6ba2c7SHugh Dickins if (!stable_node) { 15008dd3557aSHugh Dickins break_cow(tree_rmap_item); 15018dd3557aSHugh Dickins break_cow(rmap_item); 150231dbd01fSIzik Eidus } 150331dbd01fSIzik Eidus } 150431dbd01fSIzik Eidus } 150531dbd01fSIzik Eidus } 150631dbd01fSIzik Eidus 150731dbd01fSIzik Eidus static struct rmap_item *get_next_rmap_item(struct mm_slot *mm_slot, 15086514d511SHugh Dickins struct rmap_item **rmap_list, 150931dbd01fSIzik Eidus unsigned long addr) 151031dbd01fSIzik Eidus { 151131dbd01fSIzik Eidus struct rmap_item *rmap_item; 151231dbd01fSIzik Eidus 15136514d511SHugh Dickins while (*rmap_list) { 15146514d511SHugh Dickins rmap_item = *rmap_list; 151593d17715SHugh Dickins if ((rmap_item->address & PAGE_MASK) == addr) 151631dbd01fSIzik Eidus return rmap_item; 151731dbd01fSIzik Eidus if (rmap_item->address > addr) 151831dbd01fSIzik Eidus break; 15196514d511SHugh Dickins *rmap_list = rmap_item->rmap_list; 152031dbd01fSIzik Eidus remove_rmap_item_from_tree(rmap_item); 152131dbd01fSIzik Eidus free_rmap_item(rmap_item); 152231dbd01fSIzik Eidus } 152331dbd01fSIzik Eidus 152431dbd01fSIzik Eidus rmap_item = alloc_rmap_item(); 152531dbd01fSIzik Eidus if (rmap_item) { 152631dbd01fSIzik Eidus /* It has already been zeroed */ 152731dbd01fSIzik Eidus rmap_item->mm = mm_slot->mm; 152831dbd01fSIzik Eidus rmap_item->address = addr; 15296514d511SHugh Dickins rmap_item->rmap_list = *rmap_list; 15306514d511SHugh Dickins *rmap_list = rmap_item; 153131dbd01fSIzik Eidus } 153231dbd01fSIzik Eidus return rmap_item; 153331dbd01fSIzik Eidus } 153431dbd01fSIzik Eidus 153531dbd01fSIzik Eidus static struct rmap_item *scan_get_next_rmap_item(struct page **page) 153631dbd01fSIzik Eidus { 153731dbd01fSIzik Eidus struct mm_struct *mm; 153831dbd01fSIzik Eidus struct mm_slot *slot; 153931dbd01fSIzik Eidus struct vm_area_struct *vma; 154031dbd01fSIzik Eidus struct rmap_item *rmap_item; 154190bd6fd3SPetr Holasek int nid; 154231dbd01fSIzik Eidus 154331dbd01fSIzik Eidus if (list_empty(&ksm_mm_head.mm_list)) 154431dbd01fSIzik Eidus return NULL; 154531dbd01fSIzik Eidus 154631dbd01fSIzik Eidus slot = ksm_scan.mm_slot; 154731dbd01fSIzik Eidus if (slot == &ksm_mm_head) { 15482919bfd0SHugh Dickins /* 15492919bfd0SHugh Dickins * A number of pages can hang around indefinitely on per-cpu 15502919bfd0SHugh Dickins * pagevecs, raised page count preventing write_protect_page 15512919bfd0SHugh Dickins * from merging them. Though it doesn't really matter much, 15522919bfd0SHugh Dickins * it is puzzling to see some stuck in pages_volatile until 15532919bfd0SHugh Dickins * other activity jostles them out, and they also prevented 15542919bfd0SHugh Dickins * LTP's KSM test from succeeding deterministically; so drain 15552919bfd0SHugh Dickins * them here (here rather than on entry to ksm_do_scan(), 15562919bfd0SHugh Dickins * so we don't IPI too often when pages_to_scan is set low). 15572919bfd0SHugh Dickins */ 15582919bfd0SHugh Dickins lru_add_drain_all(); 15592919bfd0SHugh Dickins 15604146d2d6SHugh Dickins /* 15614146d2d6SHugh Dickins * Whereas stale stable_nodes on the stable_tree itself 15624146d2d6SHugh Dickins * get pruned in the regular course of stable_tree_search(), 15634146d2d6SHugh Dickins * those moved out to the migrate_nodes list can accumulate: 15644146d2d6SHugh Dickins * so prune them once before each full scan. 
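 * A plain get_ksm_page()/put_page() walk is enough for that: get_ksm_page()
 * itself removes and frees any stable_node it finds to be stale.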
15654146d2d6SHugh Dickins */ 15664146d2d6SHugh Dickins if (!ksm_merge_across_nodes) { 15674146d2d6SHugh Dickins struct stable_node *stable_node; 15684146d2d6SHugh Dickins struct list_head *this, *next; 15694146d2d6SHugh Dickins struct page *page; 15704146d2d6SHugh Dickins 15714146d2d6SHugh Dickins list_for_each_safe(this, next, &migrate_nodes) { 15724146d2d6SHugh Dickins stable_node = list_entry(this, 15734146d2d6SHugh Dickins struct stable_node, list); 15744146d2d6SHugh Dickins page = get_ksm_page(stable_node, false); 15754146d2d6SHugh Dickins if (page) 15764146d2d6SHugh Dickins put_page(page); 15774146d2d6SHugh Dickins cond_resched(); 15784146d2d6SHugh Dickins } 15794146d2d6SHugh Dickins } 15804146d2d6SHugh Dickins 1581ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) 158290bd6fd3SPetr Holasek root_unstable_tree[nid] = RB_ROOT; 158331dbd01fSIzik Eidus 158431dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 158531dbd01fSIzik Eidus slot = list_entry(slot->mm_list.next, struct mm_slot, mm_list); 158631dbd01fSIzik Eidus ksm_scan.mm_slot = slot; 158731dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 15882b472611SHugh Dickins /* 15892b472611SHugh Dickins * Although we tested list_empty() above, a racing __ksm_exit 15902b472611SHugh Dickins * of the last mm on the list may have removed it since then. 15912b472611SHugh Dickins */ 15922b472611SHugh Dickins if (slot == &ksm_mm_head) 15932b472611SHugh Dickins return NULL; 159431dbd01fSIzik Eidus next_mm: 159531dbd01fSIzik Eidus ksm_scan.address = 0; 15966514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 159731dbd01fSIzik Eidus } 159831dbd01fSIzik Eidus 159931dbd01fSIzik Eidus mm = slot->mm; 160031dbd01fSIzik Eidus down_read(&mm->mmap_sem); 16019ba69294SHugh Dickins if (ksm_test_exit(mm)) 16029ba69294SHugh Dickins vma = NULL; 16039ba69294SHugh Dickins else 16049ba69294SHugh Dickins vma = find_vma(mm, ksm_scan.address); 16059ba69294SHugh Dickins 16069ba69294SHugh Dickins for (; vma; vma = vma->vm_next) { 160731dbd01fSIzik Eidus if (!(vma->vm_flags & VM_MERGEABLE)) 160831dbd01fSIzik Eidus continue; 160931dbd01fSIzik Eidus if (ksm_scan.address < vma->vm_start) 161031dbd01fSIzik Eidus ksm_scan.address = vma->vm_start; 161131dbd01fSIzik Eidus if (!vma->anon_vma) 161231dbd01fSIzik Eidus ksm_scan.address = vma->vm_end; 161331dbd01fSIzik Eidus 161431dbd01fSIzik Eidus while (ksm_scan.address < vma->vm_end) { 16159ba69294SHugh Dickins if (ksm_test_exit(mm)) 16169ba69294SHugh Dickins break; 161731dbd01fSIzik Eidus *page = follow_page(vma, ksm_scan.address, FOLL_GET); 161821ae5b01SAndrea Arcangeli if (IS_ERR_OR_NULL(*page)) { 161921ae5b01SAndrea Arcangeli ksm_scan.address += PAGE_SIZE; 162021ae5b01SAndrea Arcangeli cond_resched(); 162121ae5b01SAndrea Arcangeli continue; 162221ae5b01SAndrea Arcangeli } 162329ad768cSAndrea Arcangeli if (PageAnon(*page) || 162429ad768cSAndrea Arcangeli page_trans_compound_anon(*page)) { 162531dbd01fSIzik Eidus flush_anon_page(vma, *page, ksm_scan.address); 162631dbd01fSIzik Eidus flush_dcache_page(*page); 162731dbd01fSIzik Eidus rmap_item = get_next_rmap_item(slot, 16286514d511SHugh Dickins ksm_scan.rmap_list, ksm_scan.address); 162931dbd01fSIzik Eidus if (rmap_item) { 16306514d511SHugh Dickins ksm_scan.rmap_list = 16316514d511SHugh Dickins &rmap_item->rmap_list; 163231dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 163331dbd01fSIzik Eidus } else 163431dbd01fSIzik Eidus put_page(*page); 163531dbd01fSIzik Eidus up_read(&mm->mmap_sem); 163631dbd01fSIzik Eidus return rmap_item; 163731dbd01fSIzik Eidus } 
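			/*
			 * Page is not anonymous (and not an anonymous
			 * transparent hugepage): not a KSM candidate, so
			 * drop the reference and advance to the next address.
			 */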
163831dbd01fSIzik Eidus put_page(*page); 163931dbd01fSIzik Eidus ksm_scan.address += PAGE_SIZE; 164031dbd01fSIzik Eidus cond_resched(); 164131dbd01fSIzik Eidus } 164231dbd01fSIzik Eidus } 164331dbd01fSIzik Eidus 16449ba69294SHugh Dickins if (ksm_test_exit(mm)) { 16459ba69294SHugh Dickins ksm_scan.address = 0; 16466514d511SHugh Dickins ksm_scan.rmap_list = &slot->rmap_list; 16479ba69294SHugh Dickins } 164831dbd01fSIzik Eidus /* 164931dbd01fSIzik Eidus * Nuke all the rmap_items that are above this current rmap: 165031dbd01fSIzik Eidus * because there were no VM_MERGEABLE vmas with such addresses. 165131dbd01fSIzik Eidus */ 16526514d511SHugh Dickins remove_trailing_rmap_items(slot, ksm_scan.rmap_list); 165331dbd01fSIzik Eidus 165431dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 1655cd551f97SHugh Dickins ksm_scan.mm_slot = list_entry(slot->mm_list.next, 1656cd551f97SHugh Dickins struct mm_slot, mm_list); 1657cd551f97SHugh Dickins if (ksm_scan.address == 0) { 1658cd551f97SHugh Dickins /* 1659cd551f97SHugh Dickins * We've completed a full scan of all vmas, holding mmap_sem 1660cd551f97SHugh Dickins * throughout, and found no VM_MERGEABLE: so do the same as 1661cd551f97SHugh Dickins * __ksm_exit does to remove this mm from all our lists now. 16629ba69294SHugh Dickins * This applies either when cleaning up after __ksm_exit 16639ba69294SHugh Dickins * (but beware: we can reach here even before __ksm_exit), 16649ba69294SHugh Dickins * or when all VM_MERGEABLE areas have been unmapped (and 16659ba69294SHugh Dickins * mmap_sem then protects against race with MADV_MERGEABLE). 1666cd551f97SHugh Dickins */ 16674ca3a69bSSasha Levin hash_del(&slot->link); 1668cd551f97SHugh Dickins list_del(&slot->mm_list); 16699ba69294SHugh Dickins spin_unlock(&ksm_mmlist_lock); 16709ba69294SHugh Dickins 1671cd551f97SHugh Dickins free_mm_slot(slot); 1672cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 16739ba69294SHugh Dickins up_read(&mm->mmap_sem); 16749ba69294SHugh Dickins mmdrop(mm); 16759ba69294SHugh Dickins } else { 167631dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 1677cd551f97SHugh Dickins up_read(&mm->mmap_sem); 16789ba69294SHugh Dickins } 167931dbd01fSIzik Eidus 168031dbd01fSIzik Eidus /* Repeat until we've completed scanning the whole list */ 1681cd551f97SHugh Dickins slot = ksm_scan.mm_slot; 168231dbd01fSIzik Eidus if (slot != &ksm_mm_head) 168331dbd01fSIzik Eidus goto next_mm; 168431dbd01fSIzik Eidus 168531dbd01fSIzik Eidus ksm_scan.seqnr++; 168631dbd01fSIzik Eidus return NULL; 168731dbd01fSIzik Eidus } 168831dbd01fSIzik Eidus 168931dbd01fSIzik Eidus /** 169031dbd01fSIzik Eidus * ksm_do_scan - the ksm scanner main worker function. 169131dbd01fSIzik Eidus * @scan_npages - number of pages we want to scan before we return. 
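 * Returns once scan_npages pages have been scanned, or earlier if there is
 * nothing left to scan or the task is freezing.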
169231dbd01fSIzik Eidus */ 169331dbd01fSIzik Eidus static void ksm_do_scan(unsigned int scan_npages) 169431dbd01fSIzik Eidus { 169531dbd01fSIzik Eidus struct rmap_item *rmap_item; 169622eccdd7SDan Carpenter struct page *uninitialized_var(page); 169731dbd01fSIzik Eidus 1698878aee7dSAndrea Arcangeli while (scan_npages-- && likely(!freezing(current))) { 169931dbd01fSIzik Eidus cond_resched(); 170031dbd01fSIzik Eidus rmap_item = scan_get_next_rmap_item(&page); 170131dbd01fSIzik Eidus if (!rmap_item) 170231dbd01fSIzik Eidus return; 170331dbd01fSIzik Eidus cmp_and_merge_page(page, rmap_item); 170431dbd01fSIzik Eidus put_page(page); 170531dbd01fSIzik Eidus } 170631dbd01fSIzik Eidus } 170731dbd01fSIzik Eidus 17086e158384SHugh Dickins static int ksmd_should_run(void) 17096e158384SHugh Dickins { 17106e158384SHugh Dickins return (ksm_run & KSM_RUN_MERGE) && !list_empty(&ksm_mm_head.mm_list); 17116e158384SHugh Dickins } 17126e158384SHugh Dickins 171331dbd01fSIzik Eidus static int ksm_scan_thread(void *nothing) 171431dbd01fSIzik Eidus { 1715878aee7dSAndrea Arcangeli set_freezable(); 1716339aa624SIzik Eidus set_user_nice(current, 5); 171731dbd01fSIzik Eidus 171831dbd01fSIzik Eidus while (!kthread_should_stop()) { 171931dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 1720ef4d43a8SHugh Dickins wait_while_offlining(); 17216e158384SHugh Dickins if (ksmd_should_run()) 172231dbd01fSIzik Eidus ksm_do_scan(ksm_thread_pages_to_scan); 172331dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 17246e158384SHugh Dickins 1725878aee7dSAndrea Arcangeli try_to_freeze(); 1726878aee7dSAndrea Arcangeli 17276e158384SHugh Dickins if (ksmd_should_run()) { 172831dbd01fSIzik Eidus schedule_timeout_interruptible( 172931dbd01fSIzik Eidus msecs_to_jiffies(ksm_thread_sleep_millisecs)); 173031dbd01fSIzik Eidus } else { 1731878aee7dSAndrea Arcangeli wait_event_freezable(ksm_thread_wait, 17326e158384SHugh Dickins ksmd_should_run() || kthread_should_stop()); 173331dbd01fSIzik Eidus } 173431dbd01fSIzik Eidus } 173531dbd01fSIzik Eidus return 0; 173631dbd01fSIzik Eidus } 173731dbd01fSIzik Eidus 1738f8af4da3SHugh Dickins int ksm_madvise(struct vm_area_struct *vma, unsigned long start, 1739f8af4da3SHugh Dickins unsigned long end, int advice, unsigned long *vm_flags) 1740f8af4da3SHugh Dickins { 1741f8af4da3SHugh Dickins struct mm_struct *mm = vma->vm_mm; 1742d952b791SHugh Dickins int err; 1743f8af4da3SHugh Dickins 1744f8af4da3SHugh Dickins switch (advice) { 1745f8af4da3SHugh Dickins case MADV_MERGEABLE: 1746f8af4da3SHugh Dickins /* 1747f8af4da3SHugh Dickins * Be somewhat over-protective for now! 
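 * Silently ignore the advice for VMAs that are already mergeable, or whose
 * pages KSM could not sensibly merge (shared, PFN/IO and mixed mappings,
 * hugetlb, nonlinear, ...).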
1748f8af4da3SHugh Dickins */ 1749f8af4da3SHugh Dickins if (*vm_flags & (VM_MERGEABLE | VM_SHARED | VM_MAYSHARE | 1750f8af4da3SHugh Dickins VM_PFNMAP | VM_IO | VM_DONTEXPAND | 1751314e51b9SKonstantin Khlebnikov VM_HUGETLB | VM_NONLINEAR | VM_MIXEDMAP)) 1752f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 1753f8af4da3SHugh Dickins 1754cc2383ecSKonstantin Khlebnikov #ifdef VM_SAO 1755cc2383ecSKonstantin Khlebnikov if (*vm_flags & VM_SAO) 1756cc2383ecSKonstantin Khlebnikov return 0; 1757cc2383ecSKonstantin Khlebnikov #endif 1758cc2383ecSKonstantin Khlebnikov 1759d952b791SHugh Dickins if (!test_bit(MMF_VM_MERGEABLE, &mm->flags)) { 1760d952b791SHugh Dickins err = __ksm_enter(mm); 1761d952b791SHugh Dickins if (err) 1762d952b791SHugh Dickins return err; 1763d952b791SHugh Dickins } 1764f8af4da3SHugh Dickins 1765f8af4da3SHugh Dickins *vm_flags |= VM_MERGEABLE; 1766f8af4da3SHugh Dickins break; 1767f8af4da3SHugh Dickins 1768f8af4da3SHugh Dickins case MADV_UNMERGEABLE: 1769f8af4da3SHugh Dickins if (!(*vm_flags & VM_MERGEABLE)) 1770f8af4da3SHugh Dickins return 0; /* just ignore the advice */ 1771f8af4da3SHugh Dickins 1772d952b791SHugh Dickins if (vma->anon_vma) { 1773d952b791SHugh Dickins err = unmerge_ksm_pages(vma, start, end); 1774d952b791SHugh Dickins if (err) 1775d952b791SHugh Dickins return err; 1776d952b791SHugh Dickins } 1777f8af4da3SHugh Dickins 1778f8af4da3SHugh Dickins *vm_flags &= ~VM_MERGEABLE; 1779f8af4da3SHugh Dickins break; 1780f8af4da3SHugh Dickins } 1781f8af4da3SHugh Dickins 1782f8af4da3SHugh Dickins return 0; 1783f8af4da3SHugh Dickins } 1784f8af4da3SHugh Dickins 1785f8af4da3SHugh Dickins int __ksm_enter(struct mm_struct *mm) 1786f8af4da3SHugh Dickins { 17876e158384SHugh Dickins struct mm_slot *mm_slot; 17886e158384SHugh Dickins int needs_wakeup; 17896e158384SHugh Dickins 17906e158384SHugh Dickins mm_slot = alloc_mm_slot(); 179131dbd01fSIzik Eidus if (!mm_slot) 179231dbd01fSIzik Eidus return -ENOMEM; 179331dbd01fSIzik Eidus 17946e158384SHugh Dickins /* Check ksm_run too? Would need tighter locking */ 17956e158384SHugh Dickins needs_wakeup = list_empty(&ksm_mm_head.mm_list); 17966e158384SHugh Dickins 179731dbd01fSIzik Eidus spin_lock(&ksm_mmlist_lock); 179831dbd01fSIzik Eidus insert_to_mm_slots_hash(mm, mm_slot); 179931dbd01fSIzik Eidus /* 1800cbf86cfeSHugh Dickins * When KSM_RUN_MERGE (or KSM_RUN_STOP), 1801cbf86cfeSHugh Dickins * insert just behind the scanning cursor, to let the area settle 180231dbd01fSIzik Eidus * down a little; when fork is followed by immediate exec, we don't 180331dbd01fSIzik Eidus * want ksmd to waste time setting up and tearing down an rmap_list. 1804cbf86cfeSHugh Dickins * 1805cbf86cfeSHugh Dickins * But when KSM_RUN_UNMERGE, it's important to insert ahead of its 1806cbf86cfeSHugh Dickins * scanning cursor, otherwise KSM pages in newly forked mms will be 1807cbf86cfeSHugh Dickins * missed: then we might as well insert at the end of the list. 
180831dbd01fSIzik Eidus */ 1809cbf86cfeSHugh Dickins if (ksm_run & KSM_RUN_UNMERGE) 1810cbf86cfeSHugh Dickins list_add_tail(&mm_slot->mm_list, &ksm_mm_head.mm_list); 1811cbf86cfeSHugh Dickins else 181231dbd01fSIzik Eidus list_add_tail(&mm_slot->mm_list, &ksm_scan.mm_slot->mm_list); 181331dbd01fSIzik Eidus spin_unlock(&ksm_mmlist_lock); 181431dbd01fSIzik Eidus 1815f8af4da3SHugh Dickins set_bit(MMF_VM_MERGEABLE, &mm->flags); 18169ba69294SHugh Dickins atomic_inc(&mm->mm_count); 18176e158384SHugh Dickins 18186e158384SHugh Dickins if (needs_wakeup) 18196e158384SHugh Dickins wake_up_interruptible(&ksm_thread_wait); 18206e158384SHugh Dickins 1821f8af4da3SHugh Dickins return 0; 1822f8af4da3SHugh Dickins } 1823f8af4da3SHugh Dickins 18241c2fb7a4SAndrea Arcangeli void __ksm_exit(struct mm_struct *mm) 1825f8af4da3SHugh Dickins { 1826cd551f97SHugh Dickins struct mm_slot *mm_slot; 18279ba69294SHugh Dickins int easy_to_free = 0; 1828cd551f97SHugh Dickins 182931dbd01fSIzik Eidus /* 18309ba69294SHugh Dickins * This process is exiting: if it's straightforward (as is the 18319ba69294SHugh Dickins * case when ksmd was never running), free mm_slot immediately. 18329ba69294SHugh Dickins * But if it's at the cursor or has rmap_items linked to it, use 18339ba69294SHugh Dickins * mmap_sem to synchronize with any break_cows before pagetables 18349ba69294SHugh Dickins * are freed, and leave the mm_slot on the list for ksmd to free. 18359ba69294SHugh Dickins * Beware: ksm may already have noticed it exiting and freed the slot. 183631dbd01fSIzik Eidus */ 18379ba69294SHugh Dickins 1838cd551f97SHugh Dickins spin_lock(&ksm_mmlist_lock); 1839cd551f97SHugh Dickins mm_slot = get_mm_slot(mm); 18409ba69294SHugh Dickins if (mm_slot && ksm_scan.mm_slot != mm_slot) { 18416514d511SHugh Dickins if (!mm_slot->rmap_list) { 18424ca3a69bSSasha Levin hash_del(&mm_slot->link); 1843cd551f97SHugh Dickins list_del(&mm_slot->mm_list); 18449ba69294SHugh Dickins easy_to_free = 1; 18459ba69294SHugh Dickins } else { 18469ba69294SHugh Dickins list_move(&mm_slot->mm_list, 18479ba69294SHugh Dickins &ksm_scan.mm_slot->mm_list); 18489ba69294SHugh Dickins } 18499ba69294SHugh Dickins } 1850cd551f97SHugh Dickins spin_unlock(&ksm_mmlist_lock); 1851cd551f97SHugh Dickins 18529ba69294SHugh Dickins if (easy_to_free) { 1853cd551f97SHugh Dickins free_mm_slot(mm_slot); 1854cd551f97SHugh Dickins clear_bit(MMF_VM_MERGEABLE, &mm->flags); 18559ba69294SHugh Dickins mmdrop(mm); 18569ba69294SHugh Dickins } else if (mm_slot) { 18579ba69294SHugh Dickins down_write(&mm->mmap_sem); 18589ba69294SHugh Dickins up_write(&mm->mmap_sem); 18599ba69294SHugh Dickins } 1860f8af4da3SHugh Dickins } 186131dbd01fSIzik Eidus 1862cbf86cfeSHugh Dickins struct page *ksm_might_need_to_copy(struct page *page, 18635ad64688SHugh Dickins struct vm_area_struct *vma, unsigned long address) 18645ad64688SHugh Dickins { 1865cbf86cfeSHugh Dickins struct anon_vma *anon_vma = page_anon_vma(page); 18665ad64688SHugh Dickins struct page *new_page; 18675ad64688SHugh Dickins 1868cbf86cfeSHugh Dickins if (PageKsm(page)) { 1869cbf86cfeSHugh Dickins if (page_stable_node(page) && 1870cbf86cfeSHugh Dickins !(ksm_run & KSM_RUN_UNMERGE)) 1871cbf86cfeSHugh Dickins return page; /* no need to copy it */ 1872cbf86cfeSHugh Dickins } else if (!anon_vma) { 1873cbf86cfeSHugh Dickins return page; /* no need to copy it */ 1874cbf86cfeSHugh Dickins } else if (anon_vma->root == vma->anon_vma->root && 1875cbf86cfeSHugh Dickins page->index == linear_page_index(vma, address)) { 1876cbf86cfeSHugh Dickins return page; /* still no 
need to copy it */ 1877cbf86cfeSHugh Dickins } 1878cbf86cfeSHugh Dickins if (!PageUptodate(page)) 1879cbf86cfeSHugh Dickins return page; /* let do_swap_page report the error */ 1880cbf86cfeSHugh Dickins 18815ad64688SHugh Dickins new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address); 18825ad64688SHugh Dickins if (new_page) { 18835ad64688SHugh Dickins copy_user_highpage(new_page, page, address, vma); 18845ad64688SHugh Dickins 18855ad64688SHugh Dickins SetPageDirty(new_page); 18865ad64688SHugh Dickins __SetPageUptodate(new_page); 18875ad64688SHugh Dickins __set_page_locked(new_page); 18885ad64688SHugh Dickins } 18895ad64688SHugh Dickins 18905ad64688SHugh Dickins return new_page; 18915ad64688SHugh Dickins } 18925ad64688SHugh Dickins 1893051ac83aSJoonsoo Kim int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc) 1894e9995ef9SHugh Dickins { 1895e9995ef9SHugh Dickins struct stable_node *stable_node; 1896e9995ef9SHugh Dickins struct rmap_item *rmap_item; 1897e9995ef9SHugh Dickins int ret = SWAP_AGAIN; 1898e9995ef9SHugh Dickins int search_new_forks = 0; 1899e9995ef9SHugh Dickins 1900309381feSSasha Levin VM_BUG_ON_PAGE(!PageKsm(page), page); 19019f32624bSJoonsoo Kim 19029f32624bSJoonsoo Kim /* 19039f32624bSJoonsoo Kim * Rely on the page lock to protect against concurrent modifications 19049f32624bSJoonsoo Kim * to that page's node of the stable tree. 19059f32624bSJoonsoo Kim */ 1906309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(page), page); 1907e9995ef9SHugh Dickins 1908e9995ef9SHugh Dickins stable_node = page_stable_node(page); 1909e9995ef9SHugh Dickins if (!stable_node) 1910e9995ef9SHugh Dickins return ret; 1911e9995ef9SHugh Dickins again: 1912b67bfe0dSSasha Levin hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) { 1913e9995ef9SHugh Dickins struct anon_vma *anon_vma = rmap_item->anon_vma; 19145beb4930SRik van Riel struct anon_vma_chain *vmac; 1915e9995ef9SHugh Dickins struct vm_area_struct *vma; 1916e9995ef9SHugh Dickins 1917b6b19f25SHugh Dickins anon_vma_lock_read(anon_vma); 1918bf181b9fSMichel Lespinasse anon_vma_interval_tree_foreach(vmac, &anon_vma->rb_root, 1919bf181b9fSMichel Lespinasse 0, ULONG_MAX) { 19205beb4930SRik van Riel vma = vmac->vma; 1921e9995ef9SHugh Dickins if (rmap_item->address < vma->vm_start || 1922e9995ef9SHugh Dickins rmap_item->address >= vma->vm_end) 1923e9995ef9SHugh Dickins continue; 1924e9995ef9SHugh Dickins /* 1925e9995ef9SHugh Dickins * Initially we examine only the vma which covers this 1926e9995ef9SHugh Dickins * rmap_item; but later, if there is still work to do, 1927e9995ef9SHugh Dickins * we examine covering vmas in other mms: in case they 1928e9995ef9SHugh Dickins * were forked from the original since ksmd passed. 
1929e9995ef9SHugh Dickins */ 1930e9995ef9SHugh Dickins if ((rmap_item->mm == vma->vm_mm) == search_new_forks) 1931e9995ef9SHugh Dickins continue; 1932e9995ef9SHugh Dickins 19330dd1c7bbSJoonsoo Kim if (rwc->invalid_vma && rwc->invalid_vma(vma, rwc->arg)) 19340dd1c7bbSJoonsoo Kim continue; 19350dd1c7bbSJoonsoo Kim 1936051ac83aSJoonsoo Kim ret = rwc->rmap_one(page, vma, 1937051ac83aSJoonsoo Kim rmap_item->address, rwc->arg); 1938e9995ef9SHugh Dickins if (ret != SWAP_AGAIN) { 1939b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 1940e9995ef9SHugh Dickins goto out; 1941e9995ef9SHugh Dickins } 19420dd1c7bbSJoonsoo Kim if (rwc->done && rwc->done(page)) { 19430dd1c7bbSJoonsoo Kim anon_vma_unlock_read(anon_vma); 19440dd1c7bbSJoonsoo Kim goto out; 19450dd1c7bbSJoonsoo Kim } 1946e9995ef9SHugh Dickins } 1947b6b19f25SHugh Dickins anon_vma_unlock_read(anon_vma); 1948e9995ef9SHugh Dickins } 1949e9995ef9SHugh Dickins if (!search_new_forks++) 1950e9995ef9SHugh Dickins goto again; 1951e9995ef9SHugh Dickins out: 1952e9995ef9SHugh Dickins return ret; 1953e9995ef9SHugh Dickins } 1954e9995ef9SHugh Dickins 195552629506SJoonsoo Kim #ifdef CONFIG_MIGRATION 1956e9995ef9SHugh Dickins void ksm_migrate_page(struct page *newpage, struct page *oldpage) 1957e9995ef9SHugh Dickins { 1958e9995ef9SHugh Dickins struct stable_node *stable_node; 1959e9995ef9SHugh Dickins 1960309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(oldpage), oldpage); 1961309381feSSasha Levin VM_BUG_ON_PAGE(!PageLocked(newpage), newpage); 1962309381feSSasha Levin VM_BUG_ON_PAGE(newpage->mapping != oldpage->mapping, newpage); 1963e9995ef9SHugh Dickins 1964e9995ef9SHugh Dickins stable_node = page_stable_node(newpage); 1965e9995ef9SHugh Dickins if (stable_node) { 1966309381feSSasha Levin VM_BUG_ON_PAGE(stable_node->kpfn != page_to_pfn(oldpage), oldpage); 196762b61f61SHugh Dickins stable_node->kpfn = page_to_pfn(newpage); 1968c8d6553bSHugh Dickins /* 1969c8d6553bSHugh Dickins * newpage->mapping was set in advance; now we need smp_wmb() 1970c8d6553bSHugh Dickins * to make sure that the new stable_node->kpfn is visible 1971c8d6553bSHugh Dickins * to get_ksm_page() before it can see that oldpage->mapping 1972c8d6553bSHugh Dickins * has gone stale (or that PageSwapCache has been cleared). 
1973c8d6553bSHugh Dickins */ 1974c8d6553bSHugh Dickins smp_wmb(); 1975c8d6553bSHugh Dickins set_page_stable_node(oldpage, NULL); 1976e9995ef9SHugh Dickins } 1977e9995ef9SHugh Dickins } 1978e9995ef9SHugh Dickins #endif /* CONFIG_MIGRATION */ 1979e9995ef9SHugh Dickins 198062b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 1981ef4d43a8SHugh Dickins static void wait_while_offlining(void) 1982ef4d43a8SHugh Dickins { 1983ef4d43a8SHugh Dickins while (ksm_run & KSM_RUN_OFFLINE) { 1984ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 1985ef4d43a8SHugh Dickins wait_on_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE), 198674316201SNeilBrown TASK_UNINTERRUPTIBLE); 1987ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 1988ef4d43a8SHugh Dickins } 1989ef4d43a8SHugh Dickins } 1990ef4d43a8SHugh Dickins 1991ee0ea59cSHugh Dickins static void ksm_check_stable_tree(unsigned long start_pfn, 199262b61f61SHugh Dickins unsigned long end_pfn) 199362b61f61SHugh Dickins { 1994ee0ea59cSHugh Dickins struct stable_node *stable_node; 19954146d2d6SHugh Dickins struct list_head *this, *next; 199662b61f61SHugh Dickins struct rb_node *node; 199790bd6fd3SPetr Holasek int nid; 199862b61f61SHugh Dickins 1999ef53d16cSHugh Dickins for (nid = 0; nid < ksm_nr_node_ids; nid++) { 2000ef53d16cSHugh Dickins node = rb_first(root_stable_tree + nid); 2001ee0ea59cSHugh Dickins while (node) { 200262b61f61SHugh Dickins stable_node = rb_entry(node, struct stable_node, node); 200362b61f61SHugh Dickins if (stable_node->kpfn >= start_pfn && 2004ee0ea59cSHugh Dickins stable_node->kpfn < end_pfn) { 2005ee0ea59cSHugh Dickins /* 2006ee0ea59cSHugh Dickins * Don't get_ksm_page, page has already gone: 2007ee0ea59cSHugh Dickins * which is why we keep kpfn instead of page* 2008ee0ea59cSHugh Dickins */ 2009ee0ea59cSHugh Dickins remove_node_from_stable_tree(stable_node); 2010ef53d16cSHugh Dickins node = rb_first(root_stable_tree + nid); 2011ee0ea59cSHugh Dickins } else 2012ee0ea59cSHugh Dickins node = rb_next(node); 2013ee0ea59cSHugh Dickins cond_resched(); 201462b61f61SHugh Dickins } 2015ee0ea59cSHugh Dickins } 20164146d2d6SHugh Dickins list_for_each_safe(this, next, &migrate_nodes) { 20174146d2d6SHugh Dickins stable_node = list_entry(this, struct stable_node, list); 20184146d2d6SHugh Dickins if (stable_node->kpfn >= start_pfn && 20194146d2d6SHugh Dickins stable_node->kpfn < end_pfn) 20204146d2d6SHugh Dickins remove_node_from_stable_tree(stable_node); 20214146d2d6SHugh Dickins cond_resched(); 20224146d2d6SHugh Dickins } 202362b61f61SHugh Dickins } 202462b61f61SHugh Dickins 202562b61f61SHugh Dickins static int ksm_memory_callback(struct notifier_block *self, 202662b61f61SHugh Dickins unsigned long action, void *arg) 202762b61f61SHugh Dickins { 202862b61f61SHugh Dickins struct memory_notify *mn = arg; 202962b61f61SHugh Dickins 203062b61f61SHugh Dickins switch (action) { 203162b61f61SHugh Dickins case MEM_GOING_OFFLINE: 203262b61f61SHugh Dickins /* 2033ef4d43a8SHugh Dickins * Prevent ksm_do_scan(), unmerge_and_remove_all_rmap_items() 2034ef4d43a8SHugh Dickins * and remove_all_stable_nodes() while memory is going offline: 2035ef4d43a8SHugh Dickins * it is unsafe for them to touch the stable tree at this time. 2036ef4d43a8SHugh Dickins * But unmerge_ksm_pages(), rmap lookups and other entry points 2037ef4d43a8SHugh Dickins * which do not need the ksm_thread_mutex are all safe. 
203862b61f61SHugh Dickins */ 2039ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2040ef4d43a8SHugh Dickins ksm_run |= KSM_RUN_OFFLINE; 2041ef4d43a8SHugh Dickins mutex_unlock(&ksm_thread_mutex); 204262b61f61SHugh Dickins break; 204362b61f61SHugh Dickins 204462b61f61SHugh Dickins case MEM_OFFLINE: 204562b61f61SHugh Dickins /* 204662b61f61SHugh Dickins * Most of the work is done by page migration; but there might 204762b61f61SHugh Dickins * be a few stable_nodes left over, still pointing to struct 2048ee0ea59cSHugh Dickins * pages which have been offlined: prune those from the tree, 2049ee0ea59cSHugh Dickins * otherwise get_ksm_page() might later try to access a 2050ee0ea59cSHugh Dickins * non-existent struct page. 205162b61f61SHugh Dickins */ 2052ee0ea59cSHugh Dickins ksm_check_stable_tree(mn->start_pfn, 2053ee0ea59cSHugh Dickins mn->start_pfn + mn->nr_pages); 205462b61f61SHugh Dickins /* fallthrough */ 205562b61f61SHugh Dickins 205662b61f61SHugh Dickins case MEM_CANCEL_OFFLINE: 2057ef4d43a8SHugh Dickins mutex_lock(&ksm_thread_mutex); 2058ef4d43a8SHugh Dickins ksm_run &= ~KSM_RUN_OFFLINE; 205962b61f61SHugh Dickins mutex_unlock(&ksm_thread_mutex); 2060ef4d43a8SHugh Dickins 2061ef4d43a8SHugh Dickins smp_mb(); /* wake_up_bit advises this */ 2062ef4d43a8SHugh Dickins wake_up_bit(&ksm_run, ilog2(KSM_RUN_OFFLINE)); 206362b61f61SHugh Dickins break; 206462b61f61SHugh Dickins } 206562b61f61SHugh Dickins return NOTIFY_OK; 206662b61f61SHugh Dickins } 2067ef4d43a8SHugh Dickins #else 2068ef4d43a8SHugh Dickins static void wait_while_offlining(void) 2069ef4d43a8SHugh Dickins { 2070ef4d43a8SHugh Dickins } 207162b61f61SHugh Dickins #endif /* CONFIG_MEMORY_HOTREMOVE */ 207262b61f61SHugh Dickins 20732ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 20742ffd8679SHugh Dickins /* 20752ffd8679SHugh Dickins * This all compiles without CONFIG_SYSFS, but is a waste of space. 
20762ffd8679SHugh Dickins */ 20772ffd8679SHugh Dickins 207831dbd01fSIzik Eidus #define KSM_ATTR_RO(_name) \ 207931dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = __ATTR_RO(_name) 208031dbd01fSIzik Eidus #define KSM_ATTR(_name) \ 208131dbd01fSIzik Eidus static struct kobj_attribute _name##_attr = \ 208231dbd01fSIzik Eidus __ATTR(_name, 0644, _name##_show, _name##_store) 208331dbd01fSIzik Eidus 208431dbd01fSIzik Eidus static ssize_t sleep_millisecs_show(struct kobject *kobj, 208531dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 208631dbd01fSIzik Eidus { 208731dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_sleep_millisecs); 208831dbd01fSIzik Eidus } 208931dbd01fSIzik Eidus 209031dbd01fSIzik Eidus static ssize_t sleep_millisecs_store(struct kobject *kobj, 209131dbd01fSIzik Eidus struct kobj_attribute *attr, 209231dbd01fSIzik Eidus const char *buf, size_t count) 209331dbd01fSIzik Eidus { 209431dbd01fSIzik Eidus unsigned long msecs; 209531dbd01fSIzik Eidus int err; 209631dbd01fSIzik Eidus 20973dbb95f7SJingoo Han err = kstrtoul(buf, 10, &msecs); 209831dbd01fSIzik Eidus if (err || msecs > UINT_MAX) 209931dbd01fSIzik Eidus return -EINVAL; 210031dbd01fSIzik Eidus 210131dbd01fSIzik Eidus ksm_thread_sleep_millisecs = msecs; 210231dbd01fSIzik Eidus 210331dbd01fSIzik Eidus return count; 210431dbd01fSIzik Eidus } 210531dbd01fSIzik Eidus KSM_ATTR(sleep_millisecs); 210631dbd01fSIzik Eidus 210731dbd01fSIzik Eidus static ssize_t pages_to_scan_show(struct kobject *kobj, 210831dbd01fSIzik Eidus struct kobj_attribute *attr, char *buf) 210931dbd01fSIzik Eidus { 211031dbd01fSIzik Eidus return sprintf(buf, "%u\n", ksm_thread_pages_to_scan); 211131dbd01fSIzik Eidus } 211231dbd01fSIzik Eidus 211331dbd01fSIzik Eidus static ssize_t pages_to_scan_store(struct kobject *kobj, 211431dbd01fSIzik Eidus struct kobj_attribute *attr, 211531dbd01fSIzik Eidus const char *buf, size_t count) 211631dbd01fSIzik Eidus { 211731dbd01fSIzik Eidus int err; 211831dbd01fSIzik Eidus unsigned long nr_pages; 211931dbd01fSIzik Eidus 21203dbb95f7SJingoo Han err = kstrtoul(buf, 10, &nr_pages); 212131dbd01fSIzik Eidus if (err || nr_pages > UINT_MAX) 212231dbd01fSIzik Eidus return -EINVAL; 212331dbd01fSIzik Eidus 212431dbd01fSIzik Eidus ksm_thread_pages_to_scan = nr_pages; 212531dbd01fSIzik Eidus 212631dbd01fSIzik Eidus return count; 212731dbd01fSIzik Eidus } 212831dbd01fSIzik Eidus KSM_ATTR(pages_to_scan); 212931dbd01fSIzik Eidus 213031dbd01fSIzik Eidus static ssize_t run_show(struct kobject *kobj, struct kobj_attribute *attr, 213131dbd01fSIzik Eidus char *buf) 213231dbd01fSIzik Eidus { 2133ef4d43a8SHugh Dickins return sprintf(buf, "%lu\n", ksm_run); 213431dbd01fSIzik Eidus } 213531dbd01fSIzik Eidus 213631dbd01fSIzik Eidus static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr, 213731dbd01fSIzik Eidus const char *buf, size_t count) 213831dbd01fSIzik Eidus { 213931dbd01fSIzik Eidus int err; 214031dbd01fSIzik Eidus unsigned long flags; 214131dbd01fSIzik Eidus 21423dbb95f7SJingoo Han err = kstrtoul(buf, 10, &flags); 214331dbd01fSIzik Eidus if (err || flags > UINT_MAX) 214431dbd01fSIzik Eidus return -EINVAL; 214531dbd01fSIzik Eidus if (flags > KSM_RUN_UNMERGE) 214631dbd01fSIzik Eidus return -EINVAL; 214731dbd01fSIzik Eidus 214831dbd01fSIzik Eidus /* 214931dbd01fSIzik Eidus * KSM_RUN_MERGE sets ksmd running, and 0 stops it running. 
215031dbd01fSIzik Eidus * KSM_RUN_UNMERGE stops it running and unmerges all rmap_items, 2151d0f209f6SHugh Dickins * breaking COW to free the pages_shared (but leaves mm_slots 2152d0f209f6SHugh Dickins * on the list for when ksmd may be set running again). 215331dbd01fSIzik Eidus */ 215431dbd01fSIzik Eidus 215531dbd01fSIzik Eidus mutex_lock(&ksm_thread_mutex); 2156ef4d43a8SHugh Dickins wait_while_offlining(); 215731dbd01fSIzik Eidus if (ksm_run != flags) { 215831dbd01fSIzik Eidus ksm_run = flags; 2159d952b791SHugh Dickins if (flags & KSM_RUN_UNMERGE) { 2160e1e12d2fSDavid Rientjes set_current_oom_origin(); 2161d952b791SHugh Dickins err = unmerge_and_remove_all_rmap_items(); 2162e1e12d2fSDavid Rientjes clear_current_oom_origin(); 2163d952b791SHugh Dickins if (err) { 2164d952b791SHugh Dickins ksm_run = KSM_RUN_STOP; 2165d952b791SHugh Dickins count = err; 2166d952b791SHugh Dickins } 2167d952b791SHugh Dickins } 216831dbd01fSIzik Eidus } 216931dbd01fSIzik Eidus mutex_unlock(&ksm_thread_mutex); 217031dbd01fSIzik Eidus 217131dbd01fSIzik Eidus if (flags & KSM_RUN_MERGE) 217231dbd01fSIzik Eidus wake_up_interruptible(&ksm_thread_wait); 217331dbd01fSIzik Eidus 217431dbd01fSIzik Eidus return count; 217531dbd01fSIzik Eidus } 217631dbd01fSIzik Eidus KSM_ATTR(run); 217731dbd01fSIzik Eidus 217890bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 217990bd6fd3SPetr Holasek static ssize_t merge_across_nodes_show(struct kobject *kobj, 218090bd6fd3SPetr Holasek struct kobj_attribute *attr, char *buf) 218190bd6fd3SPetr Holasek { 218290bd6fd3SPetr Holasek return sprintf(buf, "%u\n", ksm_merge_across_nodes); 218390bd6fd3SPetr Holasek } 218490bd6fd3SPetr Holasek 218590bd6fd3SPetr Holasek static ssize_t merge_across_nodes_store(struct kobject *kobj, 218690bd6fd3SPetr Holasek struct kobj_attribute *attr, 218790bd6fd3SPetr Holasek const char *buf, size_t count) 218890bd6fd3SPetr Holasek { 218990bd6fd3SPetr Holasek int err; 219090bd6fd3SPetr Holasek unsigned long knob; 219190bd6fd3SPetr Holasek 219290bd6fd3SPetr Holasek err = kstrtoul(buf, 10, &knob); 219390bd6fd3SPetr Holasek if (err) 219490bd6fd3SPetr Holasek return err; 219590bd6fd3SPetr Holasek if (knob > 1) 219690bd6fd3SPetr Holasek return -EINVAL; 219790bd6fd3SPetr Holasek 219890bd6fd3SPetr Holasek mutex_lock(&ksm_thread_mutex); 2199ef4d43a8SHugh Dickins wait_while_offlining(); 220090bd6fd3SPetr Holasek if (ksm_merge_across_nodes != knob) { 2201cbf86cfeSHugh Dickins if (ksm_pages_shared || remove_all_stable_nodes()) 220290bd6fd3SPetr Holasek err = -EBUSY; 2203ef53d16cSHugh Dickins else if (root_stable_tree == one_stable_tree) { 2204ef53d16cSHugh Dickins struct rb_root *buf; 2205ef53d16cSHugh Dickins /* 2206ef53d16cSHugh Dickins * This is the first time that we switch away from the 2207ef53d16cSHugh Dickins * default of merging across nodes: must now allocate 2208ef53d16cSHugh Dickins * a buffer to hold as many roots as may be needed. 2209ef53d16cSHugh Dickins * Allocate stable and unstable together: 2210ef53d16cSHugh Dickins * MAXSMP NODES_SHIFT 10 will use 16kB. 
2211ef53d16cSHugh Dickins */ 2212bafe1e14SJoe Perches buf = kcalloc(nr_node_ids + nr_node_ids, sizeof(*buf), 2213bafe1e14SJoe Perches GFP_KERNEL); 2214ef53d16cSHugh Dickins /* Let us assume that RB_ROOT is NULL is zero */ 2215ef53d16cSHugh Dickins if (!buf) 2216ef53d16cSHugh Dickins err = -ENOMEM; 2217ef53d16cSHugh Dickins else { 2218ef53d16cSHugh Dickins root_stable_tree = buf; 2219ef53d16cSHugh Dickins root_unstable_tree = buf + nr_node_ids; 2220ef53d16cSHugh Dickins /* Stable tree is empty but not the unstable */ 2221ef53d16cSHugh Dickins root_unstable_tree[0] = one_unstable_tree[0]; 2222ef53d16cSHugh Dickins } 2223ef53d16cSHugh Dickins } 2224ef53d16cSHugh Dickins if (!err) { 222590bd6fd3SPetr Holasek ksm_merge_across_nodes = knob; 2226ef53d16cSHugh Dickins ksm_nr_node_ids = knob ? 1 : nr_node_ids; 2227ef53d16cSHugh Dickins } 222890bd6fd3SPetr Holasek } 222990bd6fd3SPetr Holasek mutex_unlock(&ksm_thread_mutex); 223090bd6fd3SPetr Holasek 223190bd6fd3SPetr Holasek return err ? err : count; 223290bd6fd3SPetr Holasek } 223390bd6fd3SPetr Holasek KSM_ATTR(merge_across_nodes); 223490bd6fd3SPetr Holasek #endif 223590bd6fd3SPetr Holasek 2236b4028260SHugh Dickins static ssize_t pages_shared_show(struct kobject *kobj, 2237b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 2238b4028260SHugh Dickins { 2239b4028260SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_shared); 2240b4028260SHugh Dickins } 2241b4028260SHugh Dickins KSM_ATTR_RO(pages_shared); 2242b4028260SHugh Dickins 2243b4028260SHugh Dickins static ssize_t pages_sharing_show(struct kobject *kobj, 2244b4028260SHugh Dickins struct kobj_attribute *attr, char *buf) 2245b4028260SHugh Dickins { 2246e178dfdeSHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_sharing); 2247b4028260SHugh Dickins } 2248b4028260SHugh Dickins KSM_ATTR_RO(pages_sharing); 2249b4028260SHugh Dickins 2250473b0ce4SHugh Dickins static ssize_t pages_unshared_show(struct kobject *kobj, 2251473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 2252473b0ce4SHugh Dickins { 2253473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_pages_unshared); 2254473b0ce4SHugh Dickins } 2255473b0ce4SHugh Dickins KSM_ATTR_RO(pages_unshared); 2256473b0ce4SHugh Dickins 2257473b0ce4SHugh Dickins static ssize_t pages_volatile_show(struct kobject *kobj, 2258473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 2259473b0ce4SHugh Dickins { 2260473b0ce4SHugh Dickins long ksm_pages_volatile; 2261473b0ce4SHugh Dickins 2262473b0ce4SHugh Dickins ksm_pages_volatile = ksm_rmap_items - ksm_pages_shared 2263473b0ce4SHugh Dickins - ksm_pages_sharing - ksm_pages_unshared; 2264473b0ce4SHugh Dickins /* 2265473b0ce4SHugh Dickins * It was not worth any locking to calculate that statistic, 2266473b0ce4SHugh Dickins * but it might therefore sometimes be negative: conceal that. 
2267473b0ce4SHugh Dickins */ 2268473b0ce4SHugh Dickins if (ksm_pages_volatile < 0) 2269473b0ce4SHugh Dickins ksm_pages_volatile = 0; 2270473b0ce4SHugh Dickins return sprintf(buf, "%ld\n", ksm_pages_volatile); 2271473b0ce4SHugh Dickins } 2272473b0ce4SHugh Dickins KSM_ATTR_RO(pages_volatile); 2273473b0ce4SHugh Dickins 2274473b0ce4SHugh Dickins static ssize_t full_scans_show(struct kobject *kobj, 2275473b0ce4SHugh Dickins struct kobj_attribute *attr, char *buf) 2276473b0ce4SHugh Dickins { 2277473b0ce4SHugh Dickins return sprintf(buf, "%lu\n", ksm_scan.seqnr); 2278473b0ce4SHugh Dickins } 2279473b0ce4SHugh Dickins KSM_ATTR_RO(full_scans); 2280473b0ce4SHugh Dickins 228131dbd01fSIzik Eidus static struct attribute *ksm_attrs[] = { 228231dbd01fSIzik Eidus &sleep_millisecs_attr.attr, 228331dbd01fSIzik Eidus &pages_to_scan_attr.attr, 228431dbd01fSIzik Eidus &run_attr.attr, 2285b4028260SHugh Dickins &pages_shared_attr.attr, 2286b4028260SHugh Dickins &pages_sharing_attr.attr, 2287473b0ce4SHugh Dickins &pages_unshared_attr.attr, 2288473b0ce4SHugh Dickins &pages_volatile_attr.attr, 2289473b0ce4SHugh Dickins &full_scans_attr.attr, 229090bd6fd3SPetr Holasek #ifdef CONFIG_NUMA 229190bd6fd3SPetr Holasek &merge_across_nodes_attr.attr, 229290bd6fd3SPetr Holasek #endif 229331dbd01fSIzik Eidus NULL, 229431dbd01fSIzik Eidus }; 229531dbd01fSIzik Eidus 229631dbd01fSIzik Eidus static struct attribute_group ksm_attr_group = { 229731dbd01fSIzik Eidus .attrs = ksm_attrs, 229831dbd01fSIzik Eidus .name = "ksm", 229931dbd01fSIzik Eidus }; 23002ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 230131dbd01fSIzik Eidus 230231dbd01fSIzik Eidus static int __init ksm_init(void) 230331dbd01fSIzik Eidus { 230431dbd01fSIzik Eidus struct task_struct *ksm_thread; 230531dbd01fSIzik Eidus int err; 230631dbd01fSIzik Eidus 230731dbd01fSIzik Eidus err = ksm_slab_init(); 230831dbd01fSIzik Eidus if (err) 230931dbd01fSIzik Eidus goto out; 231031dbd01fSIzik Eidus 231131dbd01fSIzik Eidus ksm_thread = kthread_run(ksm_scan_thread, NULL, "ksmd"); 231231dbd01fSIzik Eidus if (IS_ERR(ksm_thread)) { 231325acde31SPaul McQuade pr_err("ksm: creating kthread failed\n"); 231431dbd01fSIzik Eidus err = PTR_ERR(ksm_thread); 2315d9f8984cSLai Jiangshan goto out_free; 231631dbd01fSIzik Eidus } 231731dbd01fSIzik Eidus 23182ffd8679SHugh Dickins #ifdef CONFIG_SYSFS 231931dbd01fSIzik Eidus err = sysfs_create_group(mm_kobj, &ksm_attr_group); 232031dbd01fSIzik Eidus if (err) { 232125acde31SPaul McQuade pr_err("ksm: register sysfs failed\n"); 23222ffd8679SHugh Dickins kthread_stop(ksm_thread); 2323d9f8984cSLai Jiangshan goto out_free; 232431dbd01fSIzik Eidus } 2325c73602adSHugh Dickins #else 2326c73602adSHugh Dickins ksm_run = KSM_RUN_MERGE; /* no way for user to start it */ 2327c73602adSHugh Dickins 23282ffd8679SHugh Dickins #endif /* CONFIG_SYSFS */ 232931dbd01fSIzik Eidus 233062b61f61SHugh Dickins #ifdef CONFIG_MEMORY_HOTREMOVE 2331ef4d43a8SHugh Dickins /* There is no significance to this priority 100 */ 233262b61f61SHugh Dickins hotplug_memory_notifier(ksm_memory_callback, 100); 233362b61f61SHugh Dickins #endif 233431dbd01fSIzik Eidus return 0; 233531dbd01fSIzik Eidus 2336d9f8984cSLai Jiangshan out_free: 233731dbd01fSIzik Eidus ksm_slab_free(); 233831dbd01fSIzik Eidus out: 233931dbd01fSIzik Eidus return err; 234031dbd01fSIzik Eidus } 2341*a64fb3cdSPaul Gortmaker subsys_initcall(ksm_init); 2342
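
/*
 * Illustrative userspace sketch, not part of this file: one way the
 * MADV_MERGEABLE advice and the /sys/kernel/mm/ksm/ interface above are
 * typically exercised.  Assumes ksmd has been enabled with
 * "echo 1 > /sys/kernel/mm/ksm/run"; the buffer size and fill byte are
 * arbitrary choices for the example.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 256 * sysconf(_SC_PAGESIZE);	/* 256 identical pages */
	char *buf;

	buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;

	memset(buf, 0x5a, len);		/* same contents, page after page */

	/* Reaches ksm_madvise() above and sets VM_MERGEABLE on the vma */
	if (madvise(buf, len, MADV_MERGEABLE))
		perror("madvise(MADV_MERGEABLE)");

	/*
	 * After a few ksmd scan cycles, pages_sharing in
	 * /sys/kernel/mm/ksm/ should rise as these pages come to map a
	 * single write-protected ksm page.
	 */
	getchar();	/* keep the mapping alive while inspecting sysfs */

	madvise(buf, len, MADV_UNMERGEABLE);	/* break COW back to private pages */
	return 0;
}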