/linux/mm/

page_isolation.c
     33  static struct page *has_unmovable_pages(unsigned long start_pfn, unsigned long end_pfn,    [in has_unmovable_pages(), argument]
     36  struct page *page = pfn_to_page(start_pfn);    [in has_unmovable_pages()]
     40  VM_BUG_ON(pageblock_start_pfn(start_pfn) !=    [in has_unmovable_pages()]
     55  for (pfn = start_pfn; pfn < end_pfn; pfn++) {    [in has_unmovable_pages()]
    155  unsigned long start_pfn, unsigned long end_pfn)    [in set_migratetype_isolate(), argument]
    184  check_unmovable_start = max(page_to_pfn(page), start_pfn);    [in set_migratetype_isolate()]
    317  unsigned long start_pfn;    [in isolate_single_pageblock(), local]
    337  start_pfn = max(ALIGN_DOWN(isolate_pageblock, MAX_ORDER_NR_PAGES),    [in isolate_single_pageblock()]
    371  for (pfn = start_pfn; pfn < boundary_pfn;) {    [in isolate_single_pageblock()]
    475  int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,    [in start_isolate_page_range(), argument]
    [all …]
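The page_isolation.c hits above share one pattern: the caller passes a [start_pfn, end_pfn) window, the code asserts the window starts on a pageblock boundary (line 40), then walks it one pfn at a time (line 55). A minimal userspace sketch of that arithmetic, assuming an order-9 pageblock (512 pages, as with 4 KiB pages and 2 MiB pageblocks); the constants, pfn values, and empty loop body are illustrative stand-ins, not the kernel's definitions:

#include <assert.h>
#include <stdio.h>

#define PAGEBLOCK_ORDER    9UL
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

/* Round a pfn down to the first pfn of its pageblock, like pageblock_start_pfn(). */
static unsigned long pageblock_start_pfn(unsigned long pfn)
{
        return pfn & ~(PAGEBLOCK_NR_PAGES - 1);
}

int main(void)
{
        unsigned long start_pfn = 0x1200;       /* invented, pageblock aligned */
        unsigned long end_pfn = start_pfn + PAGEBLOCK_NR_PAGES;

        /* Mirrors the VM_BUG_ON() at line 40: the range must start a pageblock. */
        assert(pageblock_start_pfn(start_pfn) == start_pfn);

        /* Mirrors the scan loop at line 55. */
        for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn++)
                ;       /* the kernel checks each pfn_to_page(pfn) for movability */

        printf("scanned %lu pfns\n", end_pfn - start_pfn);
        return 0;
}
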
mm_init.c
    317  unsigned long start_pfn, end_pfn;    [in early_calculate_totalpages(), local]
    320  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {    [in early_calculate_totalpages()]
    321  unsigned long pages = end_pfn - start_pfn;    [in early_calculate_totalpages()]
    479  unsigned long start_pfn, end_pfn;    [in find_zone_movable_pfns_for_nodes(), local]
    497  for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {    [in find_zone_movable_pfns_for_nodes()]
    500  start_pfn = max(start_pfn, zone_movable_pfn[nid]);    [in find_zone_movable_pfns_for_nodes()]
    501  if (start_pfn >= end_pfn)    [in find_zone_movable_pfns_for_nodes()]
    505  if (start_pfn < usable_startpfn) {    [in find_zone_movable_pfns_for_nodes()]
    508  - start_pfn;    [in find_zone_movable_pfns_for_nodes()]
    527  start_pfn = usable_startpfn;    [in find_zone_movable_pfns_for_nodes()]
    [all …]
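Both functions here iterate every (start_pfn, end_pfn) memblock range via for_each_mem_pfn_range() and accumulate end_pfn - start_pfn. A sketch of the accumulation in early_calculate_totalpages(), with a hard-coded range table standing in for memblock; the ranges are invented:

#include <stdio.h>

/* Invented range table standing in for the memblock ranges that
 * for_each_mem_pfn_range() would hand out. */
struct pfn_range { unsigned long start_pfn, end_pfn; };

static const struct pfn_range ranges[] = {
        { 0x0001, 0x00a0 },
        { 0x0100, 0x8000 },
};

int main(void)
{
        unsigned long totalpages = 0;

        /* Mirrors the loop at mm_init.c:320-321. */
        for (unsigned i = 0; i < sizeof(ranges) / sizeof(ranges[0]); i++)
                totalpages += ranges[i].end_pfn - ranges[i].start_pfn;

        printf("totalpages = %lu\n", totalpages);
        return 0;
}
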
memory_hotplug.c
    435  unsigned long start_pfn,    [in find_smallest_section_pfn(), argument]
    438  for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {    [in find_smallest_section_pfn()]
    439  if (unlikely(!pfn_to_online_page(start_pfn)))    [in find_smallest_section_pfn()]
    442  if (unlikely(pfn_to_nid(start_pfn) != nid))    [in find_smallest_section_pfn()]
    445  if (zone != page_zone(pfn_to_page(start_pfn)))    [in find_smallest_section_pfn()]
    448  return start_pfn;    [in find_smallest_section_pfn()]
    456  unsigned long start_pfn,    [in find_biggest_section_pfn(), argument]
    463  for (; pfn >= start_pfn; pfn -= PAGES_PER_SUBSECTION) {    [in find_biggest_section_pfn()]
    479  static void shrink_zone_span(struct zone *zone, unsigned long start_pfn,    [in shrink_zone_span(), argument]
    485  if (zone->zone_start_pfn == start_pfn) {    [in shrink_zone_span()]
    [all …]
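find_smallest_section_pfn() steps forward one subsection at a time until a pfn passes the online, nid, and zone checks; shrink_zone_span() uses the result as the zone's new start when pages are removed at its front. A sketch of the forward scan, assuming 512-page subsections and an invented is_online() predicate in place of pfn_to_online_page() and the nid/zone tests:

#include <stdio.h>

#define PAGES_PER_SUBSECTION 512UL      /* assumption: 2 MiB subsections */

/* Invented predicate standing in for pfn_to_online_page() plus the
 * nid and zone checks; pretend everything below 0x2000 was offlined. */
static int is_online(unsigned long pfn)
{
        return pfn >= 0x2000;
}

/* Mirrors find_smallest_section_pfn(): first surviving pfn, or 0. */
static unsigned long find_smallest_online_pfn(unsigned long start_pfn,
                                              unsigned long end_pfn)
{
        for (; start_pfn < end_pfn; start_pfn += PAGES_PER_SUBSECTION) {
                if (!is_online(start_pfn))
                        continue;
                return start_pfn;
        }
        return 0;       /* nothing online: the caller empties the zone span */
}

int main(void)
{
        printf("new zone_start_pfn: %#lx\n",
               find_smallest_online_pfn(0x1000, 0x4000));
        return 0;
}
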
page_ext.c
    371  static int __meminit online_page_ext(unsigned long start_pfn,    [in online_page_ext(), argument]
    374  int nid = pfn_to_nid(start_pfn);    [in online_page_ext()]
    378  start = SECTION_ALIGN_DOWN(start_pfn);    [in online_page_ext()]
    379  end = SECTION_ALIGN_UP(start_pfn + nr_pages);    [in online_page_ext()]
    394  static void __meminit offline_page_ext(unsigned long start_pfn,    [in offline_page_ext(), argument]
    399  start = SECTION_ALIGN_DOWN(start_pfn);    [in offline_page_ext()]
    400  end = SECTION_ALIGN_UP(start_pfn + nr_pages);    [in offline_page_ext()]
    428  ret = online_page_ext(mn->start_pfn, mn->nr_pages);    [in page_ext_callback()]
    431  offline_page_ext(mn->start_pfn,    [in page_ext_callback()]
    435  offline_page_ext(mn->start_pfn,    [in page_ext_callback()]
    [all …]
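online_page_ext() and offline_page_ext() first widen the requested window to whole memory sections, rounding start_pfn down and start_pfn + nr_pages up. A sketch of that rounding, assuming 2^15-page sections (128 MiB with 4 KiB pages); the request values are invented:

#include <stdio.h>

#define PAGES_PER_SECTION (1UL << 15)

#define SECTION_ALIGN_DOWN(pfn) ((pfn) & ~(PAGES_PER_SECTION - 1))
#define SECTION_ALIGN_UP(pfn) \
        (((pfn) + PAGES_PER_SECTION - 1) & ~(PAGES_PER_SECTION - 1))

int main(void)
{
        unsigned long start_pfn = 0x8123;       /* invented, unaligned */
        unsigned long nr_pages = 100;

        /* Mirrors page_ext.c:378-379. */
        unsigned long start = SECTION_ALIGN_DOWN(start_pfn);
        unsigned long end = SECTION_ALIGN_UP(start_pfn + nr_pages);

        printf("section-aligned range: [%#lx, %#lx)\n", start, end);
        return 0;
}
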
shuffle.c
     83  unsigned long start_pfn = z->zone_start_pfn;    [in __shuffle_zone(), local]
     89  start_pfn = ALIGN(start_pfn, order_pages);    [in __shuffle_zone()]
     90  for (i = start_pfn; i < end_pfn; i += order_pages) {    [in __shuffle_zone()]
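The shuffle walk aligns the zone's start_pfn up to a free-list order boundary, then visits one page group per step. A sketch with an assumed order-10 group size (the kernel derives order_pages from its maximum allocator order) and invented zone bounds:

#include <stdio.h>

#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long order_pages = 1UL << 10;  /* assumed shuffle order */
        unsigned long start_pfn = 0x10003;      /* invented zone start */
        unsigned long end_pfn = 0x14000;        /* invented zone end */
        unsigned long groups = 0;

        /* Mirrors shuffle.c:89-90: align up, then stride by the group size. */
        start_pfn = ALIGN(start_pfn, order_pages);
        for (unsigned long i = start_pfn; i < end_pfn; i += order_pages)
                groups++;       /* the kernel swaps two random groups here */

        printf("%lu shuffle candidates\n", groups);
        return 0;
}
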
sparse.c
    129  static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,    [in mminit_validate_memmodel_limits(), argument]
    138  if (*start_pfn > max_sparsemem_pfn) {    [in mminit_validate_memmodel_limits()]
    141  *start_pfn, *end_pfn, max_sparsemem_pfn);    [in mminit_validate_memmodel_limits()]
    143  *start_pfn = max_sparsemem_pfn;    [in mminit_validate_memmodel_limits()]
    148  *start_pfn, *end_pfn, max_sparsemem_pfn);    [in mminit_validate_memmodel_limits()]
    630  void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)    [in online_mem_sections(), argument]
    634  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {    [in online_mem_sections()]
    648  void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)    [in offline_mem_sections(), argument]
    652  for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {    [in offline_mem_sections()]
    933  int __meminit sparse_add_section(int nid, unsigned long start_pfn,    [in sparse_add_section(), argument]
    [all …]
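online_mem_sections() and offline_mem_sections() advance by PAGES_PER_SECTION and flag each section by number, i.e. pfn >> PFN_SECTION_SHIFT, the same mapping pfn_to_section_nr() performs in bootmem_info.c below. A sketch, again assuming 2^15 pages per section; the pfn bounds are invented:

#include <stdio.h>

#define PFN_SECTION_SHIFT 15
#define PAGES_PER_SECTION (1UL << PFN_SECTION_SHIFT)

/* Mirrors pfn_to_section_nr(). */
static unsigned long pfn_to_section_nr(unsigned long pfn)
{
        return pfn >> PFN_SECTION_SHIFT;
}

int main(void)
{
        unsigned long start_pfn = 0x10000, end_pfn = 0x28000;

        /* Mirrors the per-section loop at sparse.c:634. */
        for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION)
                printf("marking section %lu online\n", pfn_to_section_nr(pfn));
        return 0;
}
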
bootmem_info.c
     44  static void __init register_page_bootmem_info_section(unsigned long start_pfn)    [in register_page_bootmem_info_section(), argument]
     51  section_nr = pfn_to_section_nr(start_pfn);    [in register_page_bootmem_info_section()]
     79  static void __init register_page_bootmem_info_section(unsigned long start_pfn)    [in register_page_bootmem_info_section(), argument]
     86  section_nr = pfn_to_section_nr(start_pfn);    [in register_page_bootmem_info_section()]
/linux/arch/x86/xen/

setup.c
     83  static void __init xen_del_extra_mem(unsigned long start_pfn,    [in xen_del_extra_mem(), argument]
     90  start_r = xen_extra_mem[i].start_pfn;    [in xen_del_extra_mem()]
     94  if (start_r == start_pfn) {    [in xen_del_extra_mem()]
     96  xen_extra_mem[i].start_pfn += n_pfns;    [in xen_del_extra_mem()]
    101  if (start_r + size_r == start_pfn + n_pfns) {    [in xen_del_extra_mem()]
    107  if (start_pfn > start_r && start_pfn < start_r + size_r) {    [in xen_del_extra_mem()]
    108  BUG_ON(start_pfn + n_pfns > start_r + size_r);    [in xen_del_extra_mem()]
    109  xen_extra_mem[i].n_pfns = start_pfn - start_r;    [in xen_del_extra_mem()]
    111  xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r -    [in xen_del_extra_mem()]
    112  (start_pfn + n_pfns));    [in xen_del_extra_mem()]
    [all …]
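The three branches in xen_del_extra_mem() are the classic range-subtraction cases: the deleted window sits at the front of an existing range (line 94), at its back (line 101), or strictly inside it (line 107), in which case the head is truncated and a new range is registered for the tail. A self-contained sketch of the same logic; struct range and the tail variable are simplified stand-ins for xen_extra_mem[] and xen_add_extra_mem():

#include <assert.h>
#include <stdio.h>

struct range { unsigned long start_pfn, n_pfns; };

static struct range tail;       /* stand-in for xen_add_extra_mem() */

static void del_range(struct range *r, unsigned long start_pfn,
                      unsigned long n_pfns)
{
        unsigned long start_r = r->start_pfn, size_r = r->n_pfns;

        if (start_r == start_pfn) {                     /* trim the front */
                r->start_pfn += n_pfns;
                r->n_pfns -= n_pfns;
        } else if (start_r + size_r == start_pfn + n_pfns) {    /* trim the back */
                r->n_pfns -= n_pfns;
        } else if (start_pfn > start_r && start_pfn < start_r + size_r) {
                assert(start_pfn + n_pfns <= start_r + size_r); /* the BUG_ON() */
                r->n_pfns = start_pfn - start_r;        /* keep the head */
                tail.start_pfn = start_pfn + n_pfns;    /* register the tail */
                tail.n_pfns = start_r + size_r - (start_pfn + n_pfns);
        }
}

int main(void)
{
        struct range r = { .start_pfn = 0x100, .n_pfns = 0x100 };

        del_range(&r, 0x140, 0x20);     /* punch a hole in the middle */
        printf("head [%#lx, +%#lx), tail [%#lx, +%#lx)\n",
               r.start_pfn, r.n_pfns, tail.start_pfn, tail.n_pfns);
        return 0;
}
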
enlighten.c
    414  void __init xen_add_extra_mem(unsigned long start_pfn, unsigned long n_pfns)    [in xen_add_extra_mem(), argument]
    425  xen_extra_mem[i].start_pfn = start_pfn;    [in xen_add_extra_mem()]
    430  if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns ==    [in xen_add_extra_mem()]
    431  start_pfn) {    [in xen_add_extra_mem()]
    439  memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns));    [in xen_add_extra_mem()]
    462  pfn_to_page(xen_extra_mem[i].start_pfn + j);    [in arch_xen_unpopulated_init()]
/linux/include/trace/events/

page_isolation.h
     13  unsigned long start_pfn,
     17  TP_ARGS(start_pfn, end_pfn, fin_pfn),
     20  __field(unsigned long, start_pfn)
     26  __entry->start_pfn = start_pfn;
     32  __entry->start_pfn, __entry->end_pfn, __entry->fin_pfn,
compaction.h
     17  unsigned long start_pfn,
     22  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken),
     25  __field(unsigned long, start_pfn)
     32  __entry->start_pfn = start_pfn;
     39  __entry->start_pfn,
     48  unsigned long start_pfn,
     53  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
     59  unsigned long start_pfn,
     64  TP_ARGS(start_pfn, end_pfn, nr_scanned, nr_taken)
     70  unsigned long start_pfn,
    [all …]
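Both trace headers follow the usual TRACE_EVENT shape: the pfn bounds arrive as TP_ARGS, TP_fast_assign() copies them into fixed __field slots, and TP_printk() later formats the saved entry from the ring buffer. A userspace sketch of that capture-then-format split; the struct and helper are invented stand-ins for the generated tracepoint code:

#include <stdio.h>

/* Stand-in for the __field() layout of the compaction isolate event. */
struct isolate_entry {
        unsigned long start_pfn;
        unsigned long end_pfn;
        unsigned long nr_scanned;
        unsigned long nr_taken;
};

/* Stand-in for TP_fast_assign(): copy the TP_ARGS into the entry. */
static struct isolate_entry record(unsigned long start_pfn, unsigned long end_pfn,
                                   unsigned long nr_scanned, unsigned long nr_taken)
{
        struct isolate_entry e = { start_pfn, end_pfn, nr_scanned, nr_taken };
        return e;
}

int main(void)
{
        /* The values are invented. */
        struct isolate_entry e = record(0x1000, 0x1200, 512, 32);

        /* Stand-in for TP_printk(): format the saved entry, not live state. */
        printf("range=(%#lx ~ %#lx) nr_scanned=%lu nr_taken=%lu\n",
               e.start_pfn, e.end_pfn, e.nr_scanned, e.nr_taken);
        return 0;
}
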
/linux/arch/powerpc/platforms/powernv/

memtrace.c
     98  unsigned long pfn, start_pfn;    [in memtrace_alloc_node(), local]
    109  start_pfn = page_to_pfn(page);    [in memtrace_alloc_node()]
    115  flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),    [in memtrace_alloc_node()]
    116  (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),    [in memtrace_alloc_node()]
    123  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)    [in memtrace_alloc_node()]
    126  arch_remove_linear_mapping(PFN_PHYS(start_pfn), size);    [in memtrace_alloc_node()]
    128  return PFN_PHYS(start_pfn);    [in memtrace_alloc_node()]
    202  const unsigned long start_pfn = PHYS_PFN(start);    [in memtrace_free(), local]
    210  for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)    [in memtrace_free()]
    213  free_contig_range(start_pfn, nr_pages);    [in memtrace_free()]
/linux/arch/x86/mm/

init.c
    328  unsigned long start_pfn, unsigned long end_pfn,    [in save_mr(), argument]
    331  if (start_pfn < end_pfn) {    [in save_mr()]
    334  mr[nr_range].start = start_pfn<<PAGE_SHIFT;    [in save_mr()]
    406  unsigned long start_pfn, end_pfn, limit_pfn;    [in split_mem_range(), local]
    413  pfn = start_pfn = PFN_DOWN(start);    [in split_mem_range()]
    430  if (start_pfn < end_pfn) {    [in split_mem_range()]
    431  nr_range = save_mr(mr, nr_range, start_pfn, end_pfn, 0);    [in split_mem_range()]
    436  start_pfn = round_up(pfn, PFN_DOWN(PMD_SIZE));    [in split_mem_range()]
    445  if (start_pfn < end_pfn) {    [in split_mem_range()]
    446  nr_range = save_mr(mr, nr_range, start_pfn, end_pfn,    [in split_mem_range()]
    [all …]
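split_mem_range() carves a physical range into sub-ranges by mapping size: any unaligned head is recorded as a 4 KiB range (save_mr(..., 0)), then start_pfn is rounded up to the next PMD boundary so the body can use 2 MiB pages. A sketch of the head-trimming arithmetic with x86's 4 KiB pages and 2 MiB PMDs; the addresses are invented:

#include <stdio.h>

#define PAGE_SHIFT     12
#define PFN_DOWN(x)    ((x) >> PAGE_SHIFT)
#define PMD_SIZE       (1UL << 21)
#define ROUND_UP(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long start = 0x00211000UL;     /* invented, not 2 MiB aligned */
        unsigned long end = 0x02000000UL;       /* invented, 2 MiB aligned */

        unsigned long start_pfn = PFN_DOWN(start);
        unsigned long end_pfn = PFN_DOWN(end);
        /* Mirrors init.c:436: next 2 MiB boundary in pfn units. */
        unsigned long head_end = ROUND_UP(start_pfn, PFN_DOWN(PMD_SIZE));

        if (start_pfn < head_end)       /* the 4 KiB head, save_mr(..., 0) */
                printf("4K head: pfns [%#lx, %#lx)\n", start_pfn, head_end);
        printf("2M body: pfns [%#lx, %#lx)\n", head_end, end_pfn);
        return 0;
}
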
/linux/include/linux/

memory_hotplug.h
    130  extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
    155  extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
    159  extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
    163  static inline int add_pages(int nid, unsigned long start_pfn,    [in add_pages(), argument]
    166  return __add_pages(nid, start_pfn, nr_pages, params);    [in add_pages()]
    169  int add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
    280  extern int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
    289  static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages,    [in offline_pages(), argument]
    315  extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
    320  unsigned long start_pfn,
    [all …]
page-isolation.h
     63  int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
     66  void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn);
     68  int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
/linux/arch/sh/mm/

numa.c
     25  unsigned long start_pfn, end_pfn;    [in setup_bootmem_node(), local]
     30  start_pfn = PFN_DOWN(start);    [in setup_bootmem_node()]
     38  __add_active_range(nid, start_pfn, end_pfn);    [in setup_bootmem_node()]
     48  NODE_DATA(nid)->node_start_pfn = start_pfn;    [in setup_bootmem_node()]
     49  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;    [in setup_bootmem_node()]
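The NODE_DATA() initialization here (record the node's first pfn and its spanned page count) recurs throughout this listing; arch_numa.c and loongson64/numa.c below do the same. A sketch with a reduced stand-in for struct pglist_data; the pfn range is invented:

#include <stdio.h>

/* Reduced stand-in for the fields of struct pglist_data touched here. */
struct pglist_data {
        unsigned long node_start_pfn;
        unsigned long node_spanned_pages;
};

static struct pglist_data node_data[4];         /* stand-in for NODE_DATA() */

static void setup_node_span(int nid, unsigned long start_pfn,
                            unsigned long end_pfn)
{
        node_data[nid].node_start_pfn = start_pfn;
        node_data[nid].node_spanned_pages = end_pfn - start_pfn;
}

int main(void)
{
        setup_node_span(0, 0x1000, 0x9000);     /* invented node range */
        printf("node 0: start=%#lx spans %lu pages\n",
               node_data[0].node_start_pfn, node_data[0].node_spanned_pages);
        return 0;
}
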
init.c
    204  unsigned long start_pfn, end_pfn;    [in allocate_pgdat(), local]
    206  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);    [in allocate_pgdat()]
    212  NODE_DATA(nid)->node_start_pfn = start_pfn;    [in allocate_pgdat()]
    213  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;    [in allocate_pgdat()]
    218  unsigned long start_pfn, end_pfn;    [in do_init_bootmem(), local]
    222  for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, NULL)    [in do_init_bootmem()]
    223  __add_active_range(0, start_pfn, end_pfn);    [in do_init_bootmem()]
    236  unsigned long start_pfn;    [in early_reserve_mem(), local]
    244  start_pfn = PFN_UP(__pa(_end));    [in early_reserve_mem()]
    252  memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);    [in early_reserve_mem()]
/linux/drivers/base/

memory.c
    226  unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);    [in memory_block_online(), local]
    237  start_pfn, nr_pages);    [in memory_block_online()]
    249  arg.altmap_start_pfn = start_pfn;    [in memory_block_online()]
    251  arg.start_pfn = start_pfn + nr_vmemmap_pages;    [in memory_block_online()]
    260  ret = mhp_init_memmap_on_memory(start_pfn, nr_vmemmap_pages,    [in memory_block_online()]
    266  ret = online_pages(start_pfn + nr_vmemmap_pages,    [in memory_block_online()]
    270  mhp_deinit_memmap_on_memory(start_pfn, nr_vmemmap_pages);    [in memory_block_online()]
    279  adjust_present_page_count(pfn_to_page(start_pfn), mem->group,    [in memory_block_online()]
    297  unsigned long start_pfn = section_nr_to_pfn(mem->start_section_nr);    [in memory_block_offline(), local]
    315  adjust_present_page_count(pfn_to_page(start_pfn), mem->group,    [in memory_block_offline()]
    [all …]
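With memmap-on-memory, the first nr_vmemmap_pages of a hot-added block back that block's own struct pages, so memory_block_online() initializes them via mhp_init_memmap_on_memory() and onlines only the remainder, starting at start_pfn + nr_vmemmap_pages (line 266). A sketch of the split; all sizes are invented:

#include <stdio.h>

int main(void)
{
        unsigned long start_pfn = 0x40000;      /* invented block start */
        unsigned long nr_pages = 0x8000;        /* invented block size */
        unsigned long nr_vmemmap_pages = 0x200; /* invented memmap carve-out */

        /* The head of the block backs its own memmap ... */
        printf("vmemmap: [%#lx, %#lx)\n",
               start_pfn, start_pfn + nr_vmemmap_pages);
        /* ... and only the remainder is handed to online_pages(). */
        printf("onlined: [%#lx, %#lx)\n",
               start_pfn + nr_vmemmap_pages, start_pfn + nr_pages);
        return 0;
}
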
arch_numa.c
    195  static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)    [in setup_node_data(), argument]
    197  if (start_pfn >= end_pfn)    [in setup_node_data()]
    203  NODE_DATA(nid)->node_start_pfn = start_pfn;    [in setup_node_data()]
    204  NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;    [in setup_node_data()]
    217  unsigned long start_pfn, end_pfn;    [in numa_register_nodes(), local]
    219  get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);    [in numa_register_nodes()]
    220  setup_node_data(nid, start_pfn, end_pfn);    [in numa_register_nodes()]
/linux/drivers/hv/

hv_balloon.c
    425  unsigned long start_pfn;    [member]
    438  unsigned long start_pfn;    [member]
    593  if (pfn >= gap->start_pfn && pfn < gap->end_pfn)    [in has_pfn_is_backed()]
    600  static unsigned long hv_page_offline_check(unsigned long start_pfn,    [in hv_page_offline_check(), argument]
    603  unsigned long pfn = start_pfn, count = 0;    [in hv_page_offline_check()]
    607  while (pfn < start_pfn + nr_pages) {    [in hv_page_offline_check()]
    614  while ((pfn >= has->start_pfn) &&    [in hv_page_offline_check()]
    616  (pfn < start_pfn + nr_pages)) {    [in hv_page_offline_check()]
    650  pfn_count = hv_page_offline_check(mem->start_pfn,    [in hv_memory_notifier()]
    697  unsigned long start_pfn, unsigned long size)    [in hv_bring_pgs_online(), argument]
    [all …]
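hv_page_offline_check() counts how many pfns of a window are actually backed, where a pfn is backed when it falls inside a hot-add region but inside none of the recorded gaps (line 593). A sketch of that test with a flat array standing in for the driver's gap list; the ranges are invented:

#include <stdbool.h>
#include <stdio.h>

struct gap { unsigned long start_pfn, end_pfn; };

static const struct gap gaps[] = {
        { 0x1100, 0x1180 },     /* invented unbacked hole */
};

/* Mirrors has_pfn_is_backed(): inside the region, outside every gap. */
static bool pfn_is_backed(unsigned long pfn)
{
        for (unsigned i = 0; i < sizeof(gaps) / sizeof(gaps[0]); i++)
                if (pfn >= gaps[i].start_pfn && pfn < gaps[i].end_pfn)
                        return false;
        return true;
}

int main(void)
{
        unsigned long start_pfn = 0x1000, nr_pages = 0x200, count = 0;

        /* Mirrors the counting loop in hv_page_offline_check(). */
        for (unsigned long pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++)
                if (pfn_is_backed(pfn))
                        count++;

        printf("%lu of %lu pfns backed\n", count, nr_pages);
        return 0;
}
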
/linux/arch/mips/loongson64/

numa.c
     85  unsigned long start_pfn, end_pfn;    [in node_mem_init(), local]
     91  get_pfn_range_for_nid(node, &start_pfn, &end_pfn);    [in node_mem_init()]
     93  node, start_pfn, end_pfn);    [in node_mem_init()]
     97  NODE_DATA(node)->node_start_pfn = start_pfn;    [in node_mem_init()]
     98  NODE_DATA(node)->node_spanned_pages = end_pfn - start_pfn;    [in node_mem_init()]
    120  memblock_reserve(0, PAGE_SIZE * start_pfn);    [in node_mem_init()]
/linux/arch/parisc/mm/

init.c
    133  if (pmem_ranges[j-1].start_pfn <    [in setup_bootmem()]
    134  pmem_ranges[j].start_pfn) {    [in setup_bootmem()]
    149  if (pmem_ranges[i].start_pfn -    [in setup_bootmem()]
    150  (pmem_ranges[i-1].start_pfn +    [in setup_bootmem()]
    155  pmem_ranges[i].start_pfn -    [in setup_bootmem()]
    156  (pmem_ranges[i-1].start_pfn +    [in setup_bootmem()]
    172  start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);    [in setup_bootmem()]
    226  end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;    [in setup_bootmem()]
    229  hole_pages = pmem_ranges[i].start_pfn - end_pfn;    [in setup_bootmem()]
    231  pmem_holes[npmem_holes].start_pfn = end_pfn;    [in setup_bootmem()]
    [all …]
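After sorting pmem_ranges[] by start_pfn, setup_bootmem() measures each hole as the distance from the previous range's end (start_pfn + pages) to the next range's start_pfn. A sketch of that accounting over an invented pair of ranges:

#include <stdio.h>

struct pmem_range { unsigned long start_pfn, pages; };

static const struct pmem_range pmem_ranges[] = {
        { 0x0000, 0x4000 },
        { 0x5000, 0x2000 },     /* leaves a 0x1000-pfn hole */
};

int main(void)
{
        /* Mirrors init.c:226-229: track the running end, sum the gaps. */
        unsigned long end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
        unsigned long hole_pages = 0;

        for (unsigned i = 1; i < sizeof(pmem_ranges) / sizeof(pmem_ranges[0]); i++) {
                hole_pages += pmem_ranges[i].start_pfn - end_pfn;
                end_pfn = pmem_ranges[i].start_pfn + pmem_ranges[i].pages;
        }

        printf("hole pages: %#lx\n", hole_pages);
        return 0;
}
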
/linux/arch/sparc/mm/

init_32.c
     64  unsigned long start_pfn = sp_banks[i].base_addr >> PAGE_SHIFT;    [in calc_highpages(), local]
     70  if (start_pfn < max_low_pfn)    [in calc_highpages()]
     71  start_pfn = max_low_pfn;    [in calc_highpages()]
     73  nr += end_pfn - start_pfn;    [in calc_highpages()]
    133  unsigned long start_pfn, bytes_avail, size;    [in bootmem_init(), local]
    171  start_pfn = (unsigned long)__pa(PAGE_ALIGN((unsigned long) &_end));    [in bootmem_init()]
    174  start_pfn >>= PAGE_SHIFT;    [in bootmem_init()]
    192  size = (start_pfn << PAGE_SHIFT) - phys_base;    [in bootmem_init()]
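calc_highpages() counts only the pages above max_low_pfn, skipping banks that end below the cutoff and clamping the start_pfn of a bank that straddles it. A sketch with invented banks and cutoff:

#include <stdio.h>

struct bank { unsigned long start_pfn, end_pfn; };

int main(void)
{
        const struct bank banks[] = {
                { 0x00000, 0x20000 },   /* straddles the cutoff */
                { 0x30000, 0x40000 },   /* entirely highmem */
        };
        unsigned long max_low_pfn = 0x18000;    /* invented lowmem cutoff */
        unsigned long nr = 0;

        for (unsigned i = 0; i < sizeof(banks) / sizeof(banks[0]); i++) {
                unsigned long start_pfn = banks[i].start_pfn;

                if (banks[i].end_pfn <= max_low_pfn)
                        continue;       /* no highmem in this bank */
                if (start_pfn < max_low_pfn)
                        start_pfn = max_low_pfn;        /* clamp, as at line 71 */
                nr += banks[i].end_pfn - start_pfn;
        }
        printf("highmem pages: %#lx\n", nr);
        return 0;
}
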
/linux/drivers/gpu/drm/imagination/

pvr_vm_mips.c
    155  s32 start_pfn;    [in pvr_vm_mips_map(), local]
    171  start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;    [in pvr_vm_mips_map()]
    181  for (pfn = start_pfn; pfn <= end_pfn; pfn++) {    [in pvr_vm_mips_map()]
    186  (pfn - start_pfn) <<    [in pvr_vm_mips_map()]
    204  while (--pfn >= start_pfn)    [in pvr_vm_mips_map()]
    227  const u32 start_pfn = (start & fw_dev->fw_heap_info.offset_mask) >>    [in pvr_vm_mips_unmap(), local]
    232  for (u32 pfn = start_pfn; pfn < end_pfn; pfn++)    [in pvr_vm_mips_unmap()]
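The MIPS firmware page-table index is derived by masking the device-virtual start address down to its offset inside the firmware heap, then shifting by the 4 KiB page order. A sketch of that derivation; the mask and address are invented:

#include <stdio.h>

#define ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K 12
#define OFFSET_MASK 0x000fffffUL        /* invented stand-in for fw_heap_info.offset_mask */

int main(void)
{
        unsigned long start = 0xc0042000UL;     /* invented mapping start */

        /* Mirrors pvr_vm_mips.c:171: heap offset -> page-table index. */
        unsigned long start_pfn =
                (start & OFFSET_MASK) >> ROGUE_MIPSFW_LOG2_PAGE_SIZE_4K;

        printf("start_pfn = %#lx\n", start_pfn);
        return 0;
}
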
/linux/arch/powerpc/mm/

mem.c
    128  int __ref add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,    [in add_pages(), argument]
    133  ret = __add_pages(nid, start_pfn, nr_pages, params);    [in add_pages()]
    138  update_end_of_memory_vars(start_pfn << PAGE_SHIFT,    [in add_pages()]
    147  unsigned long start_pfn = start >> PAGE_SHIFT;    [in arch_add_memory(), local]
    154  rc = add_pages(nid, start_pfn, nr_pages, params);    [in arch_add_memory()]
    162  unsigned long start_pfn = start >> PAGE_SHIFT;    [in arch_remove_memory(), local]
    165  __remove_pages(start_pfn, nr_pages, altmap);    [in arch_remove_memory()]
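The conversions running through this listing reduce to shifts by PAGE_SHIFT: an address rounds down to a pfn with PFN_DOWN(), rounds up with PFN_UP() (as in sh's early_reserve_mem() above), and a pfn converts back with PFN_PHYS(). A sketch of all three for 4 KiB pages; the address is invented:

#include <stdio.h>

#define PAGE_SHIFT  12
#define PAGE_SIZE   (1UL << PAGE_SHIFT)
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
#define PFN_UP(x)   (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)
#define PFN_PHYS(x) ((x) << PAGE_SHIFT)

int main(void)
{
        unsigned long start = 0x12345678UL;     /* invented physical address */

        printf("PFN_DOWN = %#lx\n", PFN_DOWN(start));   /* 0x12345 */
        printf("PFN_UP   = %#lx\n", PFN_UP(start));     /* 0x12346 */
        printf("PFN_PHYS(PFN_DOWN) = %#lx\n", PFN_PHYS(PFN_DOWN(start)));
        return 0;
}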