Lines Matching full:migrate
3 * Memory Migration functionality - linux/mm/migrate.c
16 #include <linux/migrate.h>
56 #include <trace/events/migrate.h>
70 * drained them. Those pages will fail to migrate like other in migrate_prep()
692 * Common logic to directly migrate a single LRU page suitable for
998 * cannot try to migrate this page. in move_to_new_page()
1165 * Obtain the lock on page, remove all ptes and migrate the page
1211 * Compaction can also migrate non-LRU pages which are in unmap_and_move()
1266 * This means that when we try to migrate a hugepage whose subpages are
1396 * migrate_pages - migrate the pages specified in a list, to the free pages
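migrate_pages() takes a list of already-isolated pages plus an allocation callback that supplies destination pages. Below is a minimal sketch of a kernel-internal caller, modelled on the do_move_pages_to_node() pattern; the new_page() callback and migrate_list_to_node() helper are illustrative names, and only base (non-huge) pages are handled:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/migrate.h>

/* Illustrative new_page_t callback: @private carries the target node id. */
static struct page *new_page(struct page *page, unsigned long private)
{
        return __alloc_pages_node((int)private,
                                  GFP_HIGHUSER_MOVABLE | __GFP_THISNODE, 0);
}

/* Illustrative helper: @pagelist holds pages isolated with isolate_lru_page(). */
static int migrate_list_to_node(struct list_head *pagelist, int nid)
{
        int err;

        if (list_empty(pagelist))
                return 0;

        err = migrate_pages(pagelist, new_page, NULL, nid,
                            MIGRATE_SYNC, MR_SYSCALL);
        if (err)
                putback_movable_pages(pagelist);  /* failed pages go back to the LRU */
        return err;
}

migrate_pages() returns 0 on full success, otherwise the number of pages it could not migrate or an errno, so any non-zero value is treated as failure here.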
1699 * pages to migrate. Since we are going to in move_pages_and_store_status()
1713 * Migrate an array of page addresses onto an array of nodes and fill
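The do_pages_move() path behind this comment backs the move_pages(2) system call. For context, a minimal userspace sketch of that interface (libnuma's <numaif.h> wrapper, compiled with -lnuma; the buffer and target node 1 are purely illustrative):

#include <numaif.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
        long psz = sysconf(_SC_PAGESIZE);
        void *buf, *pages[1];
        int nodes[1] = { 1 };           /* illustrative destination node */
        int status[1];

        if (posix_memalign(&buf, psz, psz))
                return 1;
        ((char *)buf)[0] = 0;           /* fault the page in before moving it */

        pages[0] = buf;
        /* pid 0 means the calling process; status[] gets the node or a -errno. */
        if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) == 0)
                printf("page is now on node %d\n", status[0]);
        return 0;
}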
2069 * Attempt to migrate a misplaced page to the specified destination
2082 * Don't migrate file pages that are mapped in multiple processes in migrate_misplaced_page()
2090 * Also do not migrate dirty pages as not all filesystems can move in migrate_misplaced_page()
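migrate_misplaced_page() is the entry point used by automatic NUMA balancing: a NUMA hinting fault picks a target node and asks for the page to be pulled there. A condensed sketch of that calling pattern follows; fixup_misplaced_page() is an illustrative name, not a kernel function:

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/numa.h>

/*
 * Illustrative helper: after a NUMA hinting fault on @page in @vma, try to
 * move the page to @target_nid and report the node it ends up on.
 * migrate_misplaced_page() consumes the caller's reference to @page and
 * returns non-zero only if the page was actually moved.
 */
static int fixup_misplaced_page(struct page *page, struct vm_area_struct *vma,
                                int page_nid, int target_nid)
{
        if (target_nid != NUMA_NO_NODE &&
            migrate_misplaced_page(page, vma, target_nid))
                return target_nid;

        return page_nid;
}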
2265 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_hole() local
2271 migrate->src[migrate->npages] = 0; in migrate_vma_collect_hole()
2272 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
2273 migrate->npages++; in migrate_vma_collect_hole()
2279 migrate->src[migrate->npages] = MIGRATE_PFN_MIGRATE; in migrate_vma_collect_hole()
2280 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_hole()
2281 migrate->npages++; in migrate_vma_collect_hole()
2282 migrate->cpages++; in migrate_vma_collect_hole()
2292 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_skip() local
2296 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_skip()
2297 migrate->src[migrate->npages++] = 0; in migrate_vma_collect_skip()
2308 struct migrate_vma *migrate = walk->private; in migrate_vma_collect_pmd() local
2372 migrate->cpages++; in migrate_vma_collect_pmd()
2388 if (!(migrate->flags & in migrate_vma_collect_pmd()
2390 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
2398 if (!(migrate->flags & MIGRATE_VMA_SELECT_SYSTEM)) in migrate_vma_collect_pmd()
2403 migrate->cpages++; in migrate_vma_collect_pmd()
2406 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2427 migrate->cpages++; in migrate_vma_collect_pmd()
2470 migrate->dst[migrate->npages] = 0; in migrate_vma_collect_pmd()
2471 migrate->src[migrate->npages++] = mpfn; in migrate_vma_collect_pmd()
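The mpfn value stored into src[] here is an unsigned long that packs the page frame number together with MIGRATE_PFN_* status bits from <linux/migrate.h>. A condensed sketch of how such entries are encoded and decoded (the helper names are illustrative):

#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/types.h>

/* Encode a migratable system page, optionally writable, into an entry. */
static unsigned long encode_migrate_entry(struct page *page, bool writable)
{
        unsigned long mpfn = migrate_pfn(page_to_pfn(page)) |
                             MIGRATE_PFN_MIGRATE;

        if (writable)
                mpfn |= MIGRATE_PFN_WRITE;
        return mpfn;            /* migrate_pfn() also sets MIGRATE_PFN_VALID */
}

/* Decode an entry; returns NULL if the page is absent or not migratable. */
static struct page *decode_migrate_entry(unsigned long mpfn)
{
        if (!(mpfn & MIGRATE_PFN_MIGRATE))
                return NULL;
        return migrate_pfn_to_page(mpfn);
}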
2490 * @migrate: migrate struct containing all migration information
2496 static void migrate_vma_collect(struct migrate_vma *migrate) in migrate_vma_collect() argument
2505 mmu_notifier_range_init_migrate(&range, 0, migrate->vma, in migrate_vma_collect()
2506 migrate->vma->vm_mm, migrate->start, migrate->end, in migrate_vma_collect()
2507 migrate->pgmap_owner); in migrate_vma_collect()
2510 walk_page_range(migrate->vma->vm_mm, migrate->start, migrate->end, in migrate_vma_collect()
2511 &migrate_vma_walk_ops, migrate); in migrate_vma_collect()
2514 migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT); in migrate_vma_collect()
2550 * avoid 2 racing threads trying to migrate back to CPU to enter in migrate_vma_check_page()
2572 * @migrate: migrate struct containing all migration information
2579 static void migrate_vma_prepare(struct migrate_vma *migrate) in migrate_vma_prepare() argument
2581 const unsigned long npages = migrate->npages; in migrate_vma_prepare()
2582 const unsigned long start = migrate->start; in migrate_vma_prepare()
2588 for (i = 0; (i < npages) && migrate->cpages; i++) { in migrate_vma_prepare()
2589 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare()
2595 if (!(migrate->src[i] & MIGRATE_PFN_LOCKED)) { in migrate_vma_prepare()
2605 migrate->src[i] = 0; in migrate_vma_prepare()
2606 migrate->cpages--; in migrate_vma_prepare()
2611 migrate->src[i] |= MIGRATE_PFN_LOCKED; in migrate_vma_prepare()
2624 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_prepare()
2625 migrate->cpages--; in migrate_vma_prepare()
2628 migrate->src[i] = 0; in migrate_vma_prepare()
2630 migrate->cpages--; in migrate_vma_prepare()
2642 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_prepare()
2643 migrate->cpages--; in migrate_vma_prepare()
2651 migrate->src[i] = 0; in migrate_vma_prepare()
2653 migrate->cpages--; in migrate_vma_prepare()
2664 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare()
2666 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2669 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2671 migrate->src[i] = 0; in migrate_vma_prepare()
2680 * @migrate: migrate struct containing all migration information
2684 * cannot migrate them.
2689 static void migrate_vma_unmap(struct migrate_vma *migrate) in migrate_vma_unmap() argument
2692 const unsigned long npages = migrate->npages; in migrate_vma_unmap()
2693 const unsigned long start = migrate->start; in migrate_vma_unmap()
2697 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap()
2699 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2712 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_unmap()
2713 migrate->cpages--; in migrate_vma_unmap()
2718 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap()
2720 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2725 migrate->src[i] = 0; in migrate_vma_unmap()
2737 * migrate_vma_setup() - prepare to migrate a range of memory
2743 * Prepare to migrate a virtual address range by collecting all
2758 * Note that the caller does not have to migrate all the pages that are marked
2760 * device memory to system memory. If the caller cannot migrate a device page
2777 * then migrate_vma_pages() to migrate struct page information from the source
2778 * struct page to the destination struct page. If it fails to migrate the
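Pulling the documented flow together, here is a minimal sketch of a caller that migrates a single system page within a VMA to a newly allocated system page. It assumes the mmap read lock is held for the VMA's mm and uses this kernel's MIGRATE_PFN_LOCKED convention for the locked destination page; the demo_migrate_one() name is illustrative, and a real device driver would allocate device memory and DMA the contents instead of using copy_highpage():

#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/migrate.h>
#include <linux/mm.h>
#include <linux/pagemap.h>

static int demo_migrate_one(struct vm_area_struct *vma, unsigned long addr)
{
        unsigned long src = 0, dst = 0;
        struct migrate_vma args = {
                .vma    = vma,
                .start  = addr,
                .end    = addr + PAGE_SIZE,
                .src    = &src,
                .dst    = &dst,
                .flags  = MIGRATE_VMA_SELECT_SYSTEM,
        };
        struct page *spage, *dpage = NULL;
        int ret;

        ret = migrate_vma_setup(&args);         /* collect, lock and unmap the source */
        if (ret)
                return ret;

        if (src & MIGRATE_PFN_MIGRATE) {
                spage = migrate_pfn_to_page(src);
                dpage = alloc_page(GFP_HIGHUSER_MOVABLE);
                if (dpage) {
                        lock_page(dpage);
                        if (spage)
                                copy_highpage(dpage, spage);    /* device DMA in real drivers */
                        dst = migrate_pfn(page_to_pfn(dpage)) | MIGRATE_PFN_LOCKED;
                }
        }

        /* If dst is still 0 the entry is treated as failed and the source is restored. */
        migrate_vma_pages(&args);               /* point the page table at the new page */
        migrate_vma_finalize(&args);            /* unlock pages and drop the extra refs */
        return dpage ? 0 : -EAGAIN;
}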
2848 static void migrate_vma_insert_page(struct migrate_vma *migrate, in migrate_vma_insert_page() argument
2854 struct vm_area_struct *vma = migrate->vma; in migrate_vma_insert_page()
2974 * migrate_vma_pages() - migrate meta-data from src page to dst page
2975 * @migrate: migrate struct containing all migration information
2981 void migrate_vma_pages(struct migrate_vma *migrate) in migrate_vma_pages() argument
2983 const unsigned long npages = migrate->npages; in migrate_vma_pages()
2984 const unsigned long start = migrate->start; in migrate_vma_pages()
2990 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2991 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages()
2996 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_pages()
3001 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_pages()
3009 migrate->vma->vm_mm, in migrate_vma_pages()
3010 addr, migrate->end); in migrate_vma_pages()
3013 migrate_vma_insert_page(migrate, addr, newpage, in migrate_vma_pages()
3014 &migrate->src[i], in migrate_vma_pages()
3015 &migrate->dst[i]); in migrate_vma_pages()
3028 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_pages()
3036 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_pages()
3043 migrate->src[i] &= ~MIGRATE_PFN_MIGRATE; in migrate_vma_pages()
3058 * @migrate: migrate struct containing all migration information
3067 void migrate_vma_finalize(struct migrate_vma *migrate) in migrate_vma_finalize() argument
3069 const unsigned long npages = migrate->npages; in migrate_vma_finalize()
3073 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
3074 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize()
3084 if (!(migrate->src[i] & MIGRATE_PFN_MIGRATE) || !newpage) { in migrate_vma_finalize()