/linux/arch/x86/xen/

p2m.c
   127  static inline unsigned p2m_top_index(unsigned long pfn)  [in p2m_top_index(), argument]
   129  BUG_ON(pfn >= MAX_P2M_PFN);  [in p2m_top_index()]
   130  return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);  [in p2m_top_index()]
   133  static inline unsigned p2m_mid_index(unsigned long pfn)  [in p2m_mid_index(), argument]
   135  return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;  [in p2m_mid_index()]
   170  static void p2m_init_identity(unsigned long *p2m, unsigned long pfn)  [in p2m_init_identity(), argument]
   175  p2m[i] = IDENTITY_FRAME(pfn + i);  [in p2m_init_identity()]
   209  unsigned long pfn, mfn;  [in xen_build_mfn_list_list(), local]
   232  for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;  [in xen_build_mfn_list_list()]
   233  pfn += P2M_PER_PAGE) {  [in xen_build_mfn_list_list()]
   [all …]

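The two index helpers above split a pfn across the levels of Xen's P2M lookup tree. Below is a minimal standalone sketch of that arithmetic, assuming 512-entry levels (PAGE_SIZE / sizeof(unsigned long) on 64-bit x86) and an inferred leaf-level p2m_index() helper that the excerpt does not show:

/* Sketch of the three-level P2M index split. The constants and the
 * leaf helper are assumptions, not taken from the file. */
#include <stdio.h>

#define P2M_PER_PAGE      512UL   /* assumed: entries per p2m page */
#define P2M_MID_PER_PAGE  512UL   /* assumed: entries per mid page */

static unsigned p2m_top_index(unsigned long pfn)
{
        return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static unsigned p2m_mid_index(unsigned long pfn)
{
        return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

/* assumed leaf index: offset within one p2m page */
static unsigned p2m_index(unsigned long pfn)
{
        return pfn % P2M_PER_PAGE;
}

int main(void)
{
        unsigned long pfn = 0x12345678UL;

        /* 0x12345678 -> top 1165, mid 43, idx 120 with 512-entry levels */
        printf("pfn %#lx -> top %u, mid %u, idx %u\n",
               pfn, p2m_top_index(pfn), p2m_mid_index(pfn), p2m_index(pfn));
        return 0;
}
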
/linux/mm/

page_isolation.c
    38  unsigned long pfn;  [in has_unmovable_pages(), local]
    55  for (pfn = start_pfn; pfn < end_pfn; pfn++) {  [in has_unmovable_pages()]
    56  page = pfn_to_page(pfn);  [in has_unmovable_pages()]
   100  pfn += skip_pages - 1;  [in has_unmovable_pages()]
   112  pfn += (1 << buddy_order(page)) - 1;  [in has_unmovable_pages()]
   276  __first_valid_page(unsigned long pfn, unsigned long nr_pages)  [in __first_valid_page(), argument]
   283  page = pfn_to_online_page(pfn + i);  [in __first_valid_page()]
   319  unsigned long pfn;  [in isolate_single_pageblock(), local]
   371  for (pfn = start_pfn; pfn < boundary_pfn;) {  [in isolate_single_pageblock()]
   372  struct page *page = __first_valid_page(pfn, boundary_pfn - pfn);  [in isolate_single_pageblock()]
   [all …]

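The has_unmovable_pages() hits above use a scan-and-skip idiom: the loop advances one pfn at a time, but after classifying a multi-page block it bumps pfn by the block size minus one so the loop's own pfn++ lands on the next block. A self-contained sketch, with a made-up block_pages() standing in for the kernel's compound/buddy size checks:

/* Sketch of the "pfn += skip_pages - 1" scan pattern. Block sizes
 * here are invented for illustration. */
#include <stdio.h>

static unsigned long block_pages(unsigned long pfn)
{
        /* stand-in for compound/buddy size detection */
        return (pfn % 8 == 0) ? 4 : 1;
}

int main(void)
{
        unsigned long start_pfn = 0, end_pfn = 16;

        for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn++) {
                unsigned long skip_pages = block_pages(pfn);

                printf("visit pfn %lu (covers %lu)\n", pfn, skip_pages);
                pfn += skip_pages - 1;   /* the loop's pfn++ adds the last one */
        }
        return 0;
}
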
page_ext.c
   170  unsigned long pfn = page_to_pfn(page);  [in lookup_page_ext(), local]
   184  index = pfn - round_down(node_start_pfn(page_to_nid(page)),  [in lookup_page_ext()]
   250  unsigned long pfn = page_to_pfn(page);  [in lookup_page_ext(), local]
   251  struct mem_section *section = __pfn_to_section(pfn);  [in lookup_page_ext()]
   263  return get_entry(page_ext, pfn);  [in lookup_page_ext()]
   283  static int __meminit init_section_page_ext(unsigned long pfn, int nid)  [in init_section_page_ext(), argument]
   289  section = __pfn_to_section(pfn);  [in init_section_page_ext()]
   313  pfn &= PAGE_SECTION_MASK;  [in init_section_page_ext()]
   314  section->page_ext = (void *)base - page_ext_size * pfn;  [in init_section_page_ext()]
   337  static void __free_page_ext(unsigned long pfn)  [in __free_page_ext(), argument]
   [all …]

sparse.c
   179  static void subsection_mask_set(unsigned long *map, unsigned long pfn,  [in subsection_mask_set(), argument]
   182  int idx = subsection_map_index(pfn);  [in subsection_mask_set()]
   183  int end = subsection_map_index(pfn + nr_pages - 1);  [in subsection_mask_set()]
   188  void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)  [in subsection_map_init(), argument]
   190  int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);  [in subsection_map_init()]
   191  unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);  [in subsection_map_init()]
   198  - (pfn & ~PAGE_SECTION_MASK));  [in subsection_map_init()]
   200  subsection_mask_set(ms->usage->subsection_map, pfn, pfns);  [in subsection_map_init()]
   203  pfns, subsection_map_index(pfn),  [in subsection_map_init()]
   204  subsection_map_index(pfn + pfns - 1));  [in subsection_map_init()]
   [all …]

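subsection_mask_set() converts a pfn range into a run of subsection bits; the index formula appears in full in the mm.py excerpt further down. A sketch of that mapping, assuming the common x86-64 geometry of 32768-page sections split into 64 subsections:

/* Sketch of the subsection bookkeeping. Constants are assumed
 * x86-64 defaults (128 MiB sections of 4 KiB pages). */
#include <stdio.h>

#define PAGES_PER_SECTION       32768UL   /* assumed */
#define PAGE_SECTION_MASK       (~(PAGES_PER_SECTION - 1))
#define SUBSECTIONS_PER_SECTION 64UL      /* assumed */
#define PAGES_PER_SUBSECTION    (PAGES_PER_SECTION / SUBSECTIONS_PER_SECTION)

static unsigned long subsection_map_index(unsigned long pfn)
{
        return (pfn & ~PAGE_SECTION_MASK) / PAGES_PER_SUBSECTION;
}

int main(void)
{
        unsigned long pfn = 0x8200, nr_pages = 2048;
        unsigned long idx = subsection_map_index(pfn);
        unsigned long end = subsection_map_index(pfn + nr_pages - 1);

        /* bitmap_set(map, idx, end - idx + 1) would mark these bits */
        printf("pfn %#lx..%#lx -> subsection bits %lu..%lu\n",
               pfn, pfn + nr_pages - 1, idx, end);
        return 0;
}
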
memory-failure.c
    79  void num_poisoned_pages_inc(unsigned long pfn)  [in num_poisoned_pages_inc(), argument]
    82  memblk_nr_poison_inc(pfn);  [in num_poisoned_pages_inc()]
    85  void num_poisoned_pages_sub(unsigned long pfn, long i)  [in num_poisoned_pages_sub(), argument]
    88  if (pfn != -1UL)  [in num_poisoned_pages_sub()]
    89  memblk_nr_poison_sub(pfn, i);  [in num_poisoned_pages_sub()]
   350  static int kill_proc(struct to_kill *tk, unsigned long pfn, int flags)  [in kill_proc(), argument]
   357  pfn, t->comm, task_pid_nr(t));  [in kill_proc()]
   527  unsigned long pfn, int flags)  [in kill_procs(), argument]
   535  pfn, tk->tsk->comm, task_pid_nr(tk->tsk));  [in kill_procs()]
   546  else if (kill_proc(tk, pfn, flags) < 0)  [in kill_procs()]
   [all …]

memory_hotplug.c
   319  static int check_pfn_span(unsigned long pfn, unsigned long nr_pages)  [in check_pfn_span(), argument]
   336  if (!IS_ALIGNED(pfn | nr_pages, min_align))  [in check_pfn_span()]
   346  struct page *pfn_to_online_page(unsigned long pfn)  [in pfn_to_online_page(), argument]
   348  unsigned long nr = pfn_to_section_nr(pfn);  [in pfn_to_online_page()]
   363  if (IS_ENABLED(CONFIG_HAVE_ARCH_PFN_VALID) && !pfn_valid(pfn))  [in pfn_to_online_page()]
   366  if (!pfn_section_valid(ms, pfn))  [in pfn_to_online_page()]
   370  return pfn_to_page(pfn);  [in pfn_to_online_page()]
   378  pgmap = get_dev_pagemap(pfn, NULL);  [in pfn_to_online_page()]
   385  return pfn_to_page(pfn);  [in pfn_to_online_page()]
   389  int __add_pages(int nid, unsigned long pfn, unsigned long nr_pages,  [in __add_pages(), argument]
   [all …]

page_idle.c
    34  static struct folio *page_idle_get_folio(unsigned long pfn)  [in page_idle_get_folio(), argument]
    36  struct page *page = pfn_to_online_page(pfn);  [in page_idle_get_folio()]
   125  unsigned long pfn, end_pfn;  [in page_idle_bitmap_read(), local]
   131  pfn = pos * BITS_PER_BYTE;  [in page_idle_bitmap_read()]
   132  if (pfn >= max_pfn)  [in page_idle_bitmap_read()]
   135  end_pfn = pfn + count * BITS_PER_BYTE;  [in page_idle_bitmap_read()]
   139  for (; pfn < end_pfn; pfn++) {  [in page_idle_bitmap_read()]
   140  bit = pfn % BITMAP_CHUNK_BITS;  [in page_idle_bitmap_read()]
   143  folio = page_idle_get_folio(pfn);  [in page_idle_bitmap_read()]
   170  unsigned long pfn, end_pfn;  [in page_idle_bitmap_write(), local]
   [all …]

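page_idle_bitmap_read() maps a file position to a pfn range at one bit per page frame. A sketch of just that offset arithmetic; BITMAP_CHUNK_BITS = 64 (one u64 chunk per 64 frames) is an assumption here, not shown in the excerpt:

/* Sketch of the pos -> pfn -> (chunk, bit) mapping. */
#include <stdio.h>

#define BITS_PER_BYTE     8UL
#define BITMAP_CHUNK_BITS 64UL   /* assumed chunk width */

int main(void)
{
        unsigned long pos = 24, count = 16;          /* byte offset, length */
        unsigned long pfn = pos * BITS_PER_BYTE;     /* first frame covered */
        unsigned long end_pfn = pfn + count * BITS_PER_BYTE;
        unsigned long samples[] = { pfn, pfn + 1, end_pfn - 1 };

        for (int i = 0; i < 3; i++)
                printf("pfn %lu -> chunk %lu, bit %lu\n", samples[i],
                       samples[i] / BITMAP_CHUNK_BITS,
                       samples[i] % BITMAP_CHUNK_BITS);
        return 0;
}
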
mm_init.c
   581  void __meminit __init_single_page(struct page *page, unsigned long pfn,  [in __init_single_page(), argument]
   585  set_page_links(page, zone, nid, pfn);  [in __init_single_page()]
   595  set_page_address(page, __va(pfn << PAGE_SHIFT));  [in __init_single_page()]
   616  static int __meminit __early_pfn_to_nid(unsigned long pfn,  [in __early_pfn_to_nid(), argument]
   622  if (state->last_start <= pfn && pfn < state->last_end)  [in __early_pfn_to_nid()]
   625  nid = memblock_search_pfn_nid(pfn, &start_pfn, &end_pfn);  [in __early_pfn_to_nid()]
   635  int __meminit early_pfn_to_nid(unsigned long pfn)  [in early_pfn_to_nid(), argument]
   641  nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);  [in early_pfn_to_nid()]
   672  void __meminit __init_page_from_nid(unsigned long pfn, int nid)  [in __init_page_from_nid(), argument]
   682  if (zone_spans_pfn(zone, pfn))  [in __init_page_from_nid()]
   [all …]

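__early_pfn_to_nid() keeps a one-entry cache of the last [start, end) pfn range and its node id, refilling it on a miss. A sketch of the pattern, with a hypothetical two-node search_pfn_nid() standing in for memblock_search_pfn_nid():

/* Sketch of the one-entry pfn -> nid cache. The backing lookup and
 * its node layout are invented for illustration. */
#include <stdio.h>

struct pfnnid_cache {
        unsigned long last_start, last_end;
        int last_nid;
};

/* hypothetical two-node layout standing in for the memblock search */
static int search_pfn_nid(unsigned long pfn, unsigned long *s, unsigned long *e)
{
        if (pfn < 0x100000) { *s = 0; *e = 0x100000; return 0; }
        *s = 0x100000; *e = 0x200000; return 1;
}

static int early_pfn_to_nid(unsigned long pfn, struct pfnnid_cache *state)
{
        unsigned long start_pfn, end_pfn;
        int nid;

        if (state->last_start <= pfn && pfn < state->last_end)
                return state->last_nid;               /* cache hit */

        nid = search_pfn_nid(pfn, &start_pfn, &end_pfn);
        state->last_start = start_pfn;                /* refill the cache */
        state->last_end = end_pfn;
        state->last_nid = nid;
        return nid;
}

int main(void)
{
        struct pfnnid_cache cache = { 0, 0, -1 };

        printf("%d\n", early_pfn_to_nid(0x42, &cache));     /* miss: fills cache */
        printf("%d\n", early_pfn_to_nid(0x43, &cache));     /* hit */
        printf("%d\n", early_pfn_to_nid(0x150000, &cache)); /* miss: node 1 */
        return 0;
}
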
/linux/tools/testing/scatterlist/

main.c
    11  unsigned *pfn;  [struct member]
    28  #define pfn(...) (unsigned []){ __VA_ARGS__ }  [macro]
    42  printf(" %x", test->pfn[i]);  [in fail()]
    56  { -EINVAL, 1, pfn(0), NULL, PAGE_SIZE, 0, 1 },  [in main()]
    57  { 0, 1, pfn(0), NULL, PAGE_SIZE, PAGE_SIZE + 1, 1 },  [in main()]
    58  { 0, 1, pfn(0), NULL, PAGE_SIZE, sgmax, 1 },  [in main()]
    59  { 0, 1, pfn(0), NULL, 1, sgmax, 1 },  [in main()]
    60  { 0, 2, pfn(0, 1), NULL, 2 * PAGE_SIZE, sgmax, 1 },  [in main()]
    61  { 0, 2, pfn(1, 0), NULL, 2 * PAGE_SIZE, sgmax, 2 },  [in main()]
    62  { 0, 3, pfn(0, 1, 2), NULL, 3 * PAGE_SIZE, sgmax, 1 },  [in main()]
   [all …]

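The pfn(...) macro above is a compound-literal idiom: each table row carries an inline, anonymous array of page frame numbers of arbitrary length. A stripped-down sketch of the same idiom; the struct fields here are a hypothetical subset of the test harness's real row layout:

/* Table-driven tests with inline pfn lists via compound literals. */
#include <stdio.h>

#define pfn(...) (unsigned []){ __VA_ARGS__ }

struct test {
        int expected_ret;
        unsigned num_pages;
        unsigned *pfn;
};

int main(void)
{
        struct test tests[] = {
                { 0, 1, pfn(0) },
                { 0, 2, pfn(1, 0) },
                { 0, 3, pfn(0, 1, 2) },
        };

        for (unsigned i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
                printf("test %u:", i);
                for (unsigned j = 0; j < tests[i].num_pages; j++)
                        printf(" %u", tests[i].pfn[j]);
                printf("\n");
        }
        return 0;
}
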
/linux/arch/arm/xen/

p2m.c
    23  unsigned long pfn;  [struct member]
    44  if (new->pfn == entry->pfn)  [in xen_add_phys_to_mach_entry()]
    47  if (new->pfn < entry->pfn)  [in xen_add_phys_to_mach_entry()]
    59  __func__, &new->pfn, &new->mfn, &entry->pfn, &entry->mfn);  [in xen_add_phys_to_mach_entry()]
    64  unsigned long __pfn_to_mfn(unsigned long pfn)  [in __pfn_to_mfn(), argument]
    74  if (entry->pfn <= pfn &&  [in __pfn_to_mfn()]
    75  entry->pfn + entry->nr_pages > pfn) {  [in __pfn_to_mfn()]
    76  unsigned long mfn = entry->mfn + (pfn - entry->pfn);  [in __pfn_to_mfn()]
    80  if (pfn < entry->pfn)  [in __pfn_to_mfn()]
   150  bool __set_phys_to_machine_multi(unsigned long pfn,  [in __set_phys_to_machine_multi(), argument]
   [all …]

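__pfn_to_mfn() walks a red-black tree of { pfn, nr_pages, mfn } extents and translates by offset when the pfn falls inside one. A sketch of the same lookup logic over a plain hand-built binary tree standing in for the kernel's rbtree; INVALID_P2M_ENTRY's value is assumed:

/* Range lookup over pfn extents, translating to mfn by offset. */
#include <stdio.h>

#define INVALID_P2M_ENTRY (~0UL)   /* assumed sentinel */

struct xen_p2m_entry {
        unsigned long pfn, nr_pages, mfn;
        struct xen_p2m_entry *left, *right;
};

static unsigned long pfn_to_mfn(struct xen_p2m_entry *n, unsigned long pfn)
{
        while (n) {
                if (n->pfn <= pfn && pfn < n->pfn + n->nr_pages)
                        return n->mfn + (pfn - n->pfn); /* offset in extent */
                n = pfn < n->pfn ? n->left : n->right;  /* descend */
        }
        return INVALID_P2M_ENTRY;
}

int main(void)
{
        struct xen_p2m_entry hi   = { 0x900, 16, 0x5000, NULL, NULL };
        struct xen_p2m_entry root = { 0x100, 32, 0x2000, NULL, &hi };

        printf("%#lx\n", pfn_to_mfn(&root, 0x105));  /* 0x2005 */
        printf("%#lx\n", pfn_to_mfn(&root, 0x90f));  /* 0x500f */
        return 0;
}
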
/linux/include/trace/events/

ksm.h
   132  TP_PROTO(unsigned long pfn, void *rmap_item, void *mm, int err),
   134  TP_ARGS(pfn, rmap_item, mm, err),
   137  __field(unsigned long, pfn)
   144  __entry->pfn = pfn;
   151  __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
   167  TP_PROTO(void *ksm_page, unsigned long pfn, void *rmap_item, void *mm, int err),
   169  TP_ARGS(ksm_page, pfn, rmap_item, mm, err),
   173  __field(unsigned long, pfn)
   181  __entry->pfn = pfn;
   189  __entry->pfn, __entry->rmap_item, __entry->mm, __entry->err)
   [all …]

cma.h
    13  TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
    16  TP_ARGS(name, pfn, page, count),
    20  __field(unsigned long, pfn)
    27  __entry->pfn = pfn;
    34  __entry->pfn,
    65  TP_PROTO(const char *name, unsigned long pfn, const struct page *page,
    68  TP_ARGS(name, pfn, page, count, align, errorno),
    72  __field(unsigned long, pfn)
    81  __entry->pfn = pfn;
    90  __entry->pfn,
   [all …]

kmem.h
   143  __field( unsigned long, pfn )
   148  __entry->pfn = page_to_pfn(page);
   153  pfn_to_page(__entry->pfn),
   154  __entry->pfn,
   165  __field( unsigned long, pfn )
   169  __entry->pfn = page_to_pfn(page);
   173  pfn_to_page(__entry->pfn),
   174  __entry->pfn)
   185  __field( unsigned long, pfn )
   192  __entry->pfn = page ? page_to_pfn(page) : -1UL;
   [all …]

/linux/include/asm-generic/

memory_model.h
    18  #define __pfn_to_page(pfn) (mem_map + ((pfn) - ARCH_PFN_OFFSET))  [macro argument]
    26  static inline int pfn_valid(unsigned long pfn)  [in pfn_valid(), argument]
    30  return pfn >= pfn_offset && (pfn - pfn_offset) < max_mapnr;  [in pfn_valid()]
    35  #define for_each_valid_pfn(pfn, start_pfn, end_pfn) \  [macro argument]
    36  for ((pfn) = max_t(unsigned long, (start_pfn), ARCH_PFN_OFFSET); \
    37  (pfn) < min_t(unsigned long, (end_pfn), \
    39  (pfn)++)
    46  #define __pfn_to_page(pfn) (vmemmap + (pfn))  [macro argument]
    60  #define __pfn_to_page(pfn) \  [macro argument]
    61  ({ unsigned long __pfn = (pfn); \
   [all …]

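Under FLATMEM, pfn and struct page convert by plain pointer arithmetic against a single mem_map array, offset by ARCH_PFN_OFFSET. A runnable sketch of the excerpt's macros, using a toy 256-entry mem_map and an arbitrary nonzero offset:

/* Sketch of the FLATMEM pfn <-> struct page mapping. The offset and
 * array size are invented for illustration. */
#include <stdio.h>

#define ARCH_PFN_OFFSET 0x80UL   /* assumed; often 0 on flat layouts */

struct page { unsigned long flags; };

static struct page mem_map[256];

static struct page *pfn_to_page(unsigned long pfn)
{
        return mem_map + (pfn - ARCH_PFN_OFFSET);
}

static unsigned long page_to_pfn(struct page *page)
{
        return (unsigned long)(page - mem_map) + ARCH_PFN_OFFSET;
}

static int pfn_valid(unsigned long pfn)
{
        /* same shape as the excerpt's check against max_mapnr */
        return pfn >= ARCH_PFN_OFFSET && pfn - ARCH_PFN_OFFSET < 256;
}

int main(void)
{
        unsigned long pfn = 0x90;

        if (pfn_valid(pfn))
                printf("pfn %#lx -> page %p -> pfn %#lx\n",
                       pfn, (void *)pfn_to_page(pfn),
                       page_to_pfn(pfn_to_page(pfn)));
        return 0;
}
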
/linux/scripts/gdb/linux/

page_owner.py
    56  pfn = int(argv[1])
    57  self.read_page_owner_by_addr(self.p_ops.pfn_to_page(pfn))
    80  pfn = self.p_ops.page_to_pfn(page)
    81  section = self.p_ops.pfn_to_section(pfn)
    85  return self.get_entry(page_ext, pfn)
   100  pfn = self.p_ops.page_to_pfn(page)
   102  if pfn < self.min_pfn or pfn > self.max_pfn or (not self.p_ops.pfn_valid(pfn)):
   106  page = self.p_ops.pfn_to_page(pfn)
   130  gdb.write("PFN: %d, Flags: 0x%x\n" % (pfn, page['flags']))
   145  pfn = self.min_pfn
   [all …]

mm.py
   138  def pfn_to_section_nr(self, pfn):  [argument]
   139  return pfn >> self.PFN_SECTION_SHIFT
   144  def __pfn_to_section(self, pfn):  [argument]
   145  return self.__nr_to_section(self.pfn_to_section_nr(pfn))
   147  def pfn_to_section(self, pfn):  [argument]
   148  return self.__pfn_to_section(pfn)
   150  def subsection_map_index(self, pfn):  [argument]
   151  return (pfn & ~(self.PAGE_SECTION_MASK)) // self.PAGES_PER_SUBSECTION
   153  def pfn_section_valid(self, ms, pfn):  [argument]
   155  idx = self.subsection_map_index(pfn)
   [all …]

/linux/drivers/gpu/drm/i915/selftests/

scatterlist.c
    48  unsigned long pfn, n;  [in expect_pfn_sg(), local]
    50  pfn = pt->start;  [in expect_pfn_sg()]
    55  if (page_to_pfn(page) != pfn) {  [in expect_pfn_sg()]
    57  __func__, who, pfn, page_to_pfn(page));  [in expect_pfn_sg()]
    70  pfn += npages;  [in expect_pfn_sg()]
    72  if (pfn != pt->end) {  [in expect_pfn_sg()]
    74  __func__, who, pt->end, pfn);  [in expect_pfn_sg()]
    86  unsigned long pfn;  [in expect_pfn_sg_page_iter(), local]
    88  pfn = pt->start;  [in expect_pfn_sg_page_iter()]
    92  if (page != pfn_to_page(pfn)) {  [in expect_pfn_sg_page_iter()]
   [all …]

/linux/include/linux/

pageblock-flags.h
    78  #define pageblock_align(pfn) ALIGN((pfn), pageblock_nr_pages)  [macro argument]
    79  #define pageblock_aligned(pfn) IS_ALIGNED((pfn), pageblock_nr_pages)  [macro argument]
    80  #define pageblock_start_pfn(pfn) ALIGN_DOWN((pfn), pageblock_nr_pages)  [macro argument]
    81  #define pageblock_end_pfn(pfn) ALIGN((pfn) + 1, pageblock_nr_pages)  [macro argument]
    87  unsigned long pfn);
    88  bool get_pfnblock_bit(const struct page *page, unsigned long pfn,
    90  void set_pfnblock_bit(const struct page *page, unsigned long pfn,
    92  void clear_pfnblock_bit(const struct page *page, unsigned long pfn,

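The pageblock helpers are pure alignment arithmetic. A worked sketch assuming 512-page blocks (2 MiB of 4 KiB pages, the usual x86-64 value), with ALIGN/ALIGN_DOWN spelled out since kernel.h is unavailable here:

/* Sketch of the pageblock alignment macros; block size is assumed. */
#include <stdio.h>

#define pageblock_nr_pages   512UL   /* assumed */
#define ALIGN_DOWN(x, a)     ((x) & ~((a) - 1))
#define ALIGN(x, a)          ALIGN_DOWN((x) + (a) - 1, (a))

#define pageblock_start_pfn(pfn)  ALIGN_DOWN((pfn), pageblock_nr_pages)
#define pageblock_end_pfn(pfn)    ALIGN((pfn) + 1, pageblock_nr_pages)
#define pageblock_aligned(pfn)    (((pfn) & (pageblock_nr_pages - 1)) == 0)

int main(void)
{
        unsigned long pfn = 0x1234;

        /* 0x1234 lies in the block [0x1200, 0x1400) */
        printf("pfn %#lx: block %#lx..%#lx, aligned=%d\n", pfn,
               pageblock_start_pfn(pfn), pageblock_end_pfn(pfn),
               pageblock_aligned(pfn));
        return 0;
}
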
mmzone.h
    36  #define IS_MAX_ORDER_ALIGNED(pfn) IS_ALIGNED(pfn, MAX_ORDER_NR_PAGES)  [macro argument]
   102  # define is_migrate_cma_folio(folio, pfn) \  [macro argument]
   103  (get_pfnblock_migratetype(&folio->page, pfn) == MIGRATE_CMA)
   107  # define is_migrate_cma_folio(folio, pfn) false  [macro argument]
  1111  static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)  [in zone_spans_pfn(), argument]
  1113  return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);  [in zone_spans_pfn()]
  1817  #define pfn_to_nid(pfn) (0)  [macro argument]
  1841  static inline unsigned long pfn_to_section_nr(unsigned long pfn)  [in pfn_to_section_nr(), argument]
  1843  return pfn >> PFN_SECTION_SHIFT;  [in pfn_to_section_nr()]
  1850  #define SECTION_ALIGN_UP(pfn) (((pfn) + PAGES_PER_SECTION - 1) & PAGE_SECTION_MASK)  [macro argument]
   [all …]

/linux/arch/arm/mach-omap2/

io.c
    68  .pfn = __phys_to_pfn(L3_24XX_PHYS),
    74  .pfn = __phys_to_pfn(L4_24XX_PHYS),
    84  .pfn = __phys_to_pfn(DSP_MEM_2420_PHYS),
    90  .pfn = __phys_to_pfn(DSP_IPI_2420_PHYS),
    96  .pfn = __phys_to_pfn(DSP_MMU_2420_PHYS),
   108  .pfn = __phys_to_pfn(L4_WK_243X_PHYS),
   114  .pfn = __phys_to_pfn(OMAP243X_GPMC_PHYS),
   120  .pfn = __phys_to_pfn(OMAP243X_SDRC_PHYS),
   126  .pfn = __phys_to_pfn(OMAP243X_SMS_PHYS),
   138  .pfn = __phys_to_pfn(L3_34XX_PHYS),
   [all …]

/linux/arch/riscv/include/asm/

pgalloc.h
    21  unsigned long pfn = virt_to_pfn(pte);  [in pmd_populate_kernel(), local]
    23  set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));  [in pmd_populate_kernel()]
    29  unsigned long pfn = virt_to_pfn(page_address(pte));  [in pmd_populate(), local]
    31  set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));  [in pmd_populate()]
    37  unsigned long pfn = virt_to_pfn(pmd);  [in pud_populate(), local]
    39  set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));  [in pud_populate()]
    45  unsigned long pfn = virt_to_pfn(pud);  [in p4d_populate(), local]
    47  set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));  [in p4d_populate()]
    55  unsigned long pfn = virt_to_pfn(pud);  [in p4d_populate_safe(), local]
    58  __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));  [in p4d_populate_safe()]
   [all …]

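Every populate helper in this file builds the same non-leaf entry: the next-level table's pfn shifted into the PFN field, OR'd with the table-type bits. A sketch of that composition; _PAGE_PFN_SHIFT = 10 and _PAGE_TABLE = the PRESENT bit are assumed RISC-V values, so treat the constants as illustrative:

/* Sketch of the pmd/pud/p4d table-entry encoding from the excerpt. */
#include <stdio.h>

#define _PAGE_PFN_SHIFT 10      /* assumed riscv value */
#define _PAGE_TABLE     0x1UL   /* assumed: PRESENT, no R/W/X => pointer */

static unsigned long make_table_entry(unsigned long pfn)
{
        return (pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE;
}

int main(void)
{
        unsigned long pfn = 0x80212UL;   /* frame holding the next table */

        /* 0x80212 << 10 | 1 == 0x20084801 */
        printf("pmd/pud/p4d entry: %#lx\n", make_table_entry(pfn));
        return 0;
}
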
/linux/arch/x86/virt/svm/

sev.c
   672  static struct rmpentry_raw *get_raw_rmpentry(u64 pfn)  [in get_raw_rmpentry(), argument]
   680  paddr = pfn << PAGE_SHIFT;  [in get_raw_rmpentry()]
   701  static int get_rmpentry(u64 pfn, struct rmpentry *e)  [in get_rmpentry(), argument]
   711  : "a" (pfn << PAGE_SHIFT), "c" (e)  [in get_rmpentry()]
   717  e_raw = get_raw_rmpentry(pfn);  [in get_rmpentry()]
   737  static int __snp_lookup_rmpentry(u64 pfn, struct rmpentry *e, int *level)  [in __snp_lookup_rmpentry(), argument]
   745  ret = get_rmpentry(pfn, e);  [in __snp_lookup_rmpentry()]
   754  ret = get_rmpentry(pfn & PFN_PMD_MASK, &e_large);  [in __snp_lookup_rmpentry()]
   763  int snp_lookup_rmpentry(u64 pfn, bool *assigned, int *level)  [in snp_lookup_rmpentry(), argument]
   768  ret = __snp_lookup_rmpentry(pfn, &e, level);  [in snp_lookup_rmpentry()]
   [all …]

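The RMP lookup scales a pfn to a physical address with PAGE_SHIFT and, on the 2 MiB fallback path, masks the pfn down to its huge-page head. A sketch of both operations; the PFN_PMD_MASK definition (clear the low 9 bits, 512 pages per 2 MiB region) is an assumption, not taken from the file:

/* Sketch of the pfn -> paddr scaling and the 2 MiB head masking. */
#include <stdio.h>

#define PAGE_SHIFT   12
#define PMD_SHIFT    21
#define PFN_PMD_MASK (~((1UL << (PMD_SHIFT - PAGE_SHIFT)) - 1))  /* assumed */

int main(void)
{
        unsigned long pfn = 0x123456UL;

        printf("paddr       = %#lx\n", pfn << PAGE_SHIFT);   /* 0x123456000 */
        printf("2M head pfn = %#lx\n", pfn & PFN_PMD_MASK);  /* 0x123400 */
        return 0;
}
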
/linux/include/xen/arm/

page.h
    15  #define phys_to_machine_mapping_valid(pfn) (1)  [macro argument]
    43  unsigned long __pfn_to_mfn(unsigned long pfn);
    47  static inline unsigned long pfn_to_gfn(unsigned long pfn)  [in pfn_to_gfn(), argument]
    49  return pfn;  [in pfn_to_gfn()]
    58  static inline unsigned long pfn_to_bfn(unsigned long pfn)  [in pfn_to_bfn(), argument]
    63  mfn = __pfn_to_mfn(pfn);  [in pfn_to_bfn()]
    68  return pfn;  [in pfn_to_bfn()]
   103  bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
   104  bool __set_phys_to_machine_multi(unsigned long pfn, unsigned long mfn,
   107  static inline bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)  [in set_phys_to_machine(), argument]
   [all …]

/linux/arch/arm/mm/

flush.c
    38  static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)  [in flush_pfn_alias(), argument]
    43  set_top_pte(to, pfn_pte(pfn, PAGE_KERNEL));  [in flush_pfn_alias()]
    52  static void flush_icache_alias(unsigned long pfn, unsigned long vaddr, unsigned long len)  [in flush_icache_alias(), argument]
    58  set_top_pte(va, pfn_pte(pfn, PAGE_KERNEL));  [in flush_icache_alias()]
    98  void flush_cache_pages(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn, unsi…  [in flush_cache_pages(), argument]
   101  vivt_flush_cache_pages(vma, user_addr, pfn, nr);  [in flush_cache_pages()]
   106  flush_pfn_alias(pfn, user_addr);  [in flush_cache_pages()]
   115  #define flush_pfn_alias(pfn,vaddr) do { } while (0)  [macro argument]
   116  #define flush_icache_alias(pfn,vaddr,len) do { } while (0)  [macro argument]
   255  unsigned long start, offset, pfn;  [in __flush_dcache_aliases(), local]
   [all …]

fault-armv.c
    37  unsigned long pfn, pte_t *ptep)  [in do_adjust_pte(), argument]
    52  flush_cache_page(vma, address, pfn);  [in do_adjust_pte()]
    53  outer_flush_range((pfn << PAGE_SHIFT),  [in do_adjust_pte()]
    54  (pfn << PAGE_SHIFT) + PAGE_SIZE);  [in do_adjust_pte()]
    65  unsigned long pfn, bool need_lock)  [in adjust_pte(), argument]
   114  ret = do_adjust_pte(vma, address, pfn, pte);  [in adjust_pte()]
   125  unsigned long addr, pte_t *ptep, unsigned long pfn)  [in make_coherent(), argument]
   167  aliases += adjust_pte(mpnt, mpnt_addr, pfn, need_lock);  [in make_coherent()]
   171  do_adjust_pte(vma, addr, pfn, ptep);  [in make_coherent()]
   190  unsigned long pfn = pte_pfn(*ptep);  [in update_mmu_cache_range(), local]
   [all …]