Searched refs:pages_per_huge_page (Results 1 – 7 of 7) sorted by relevance

/linux/mm/
hugetlb.c
518 resv->pages_per_hpage = pages_per_huge_page(h); in record_hugetlb_cgroup_uncharge_info()
522 VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h)); in record_hugetlb_cgroup_uncharge_info()
1107 resv_map->pages_per_hpage = pages_per_huge_page(h); in resv_map_set_hugetlb_cgroup_uncharge_info()
1866 pages_per_huge_page(h), folio); in free_huge_folio()
1868 pages_per_huge_page(h), folio); in free_huge_folio()
1869 lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h)); in free_huge_folio()
3029 idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3034 ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg); in alloc_hugetlb_folio()
3065 hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio); in alloc_hugetlb_folio()
3070 hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page( in alloc_hugetlb_folio()
[all...]
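
The hugetlb.c hits charge, commit, and uncharge cgroup accounting in base-page units, and the reservation map caches that unit (resv->pages_per_hpage) so later uncharges match the original charge. A minimal user-space sketch of that bookkeeping pattern, with illustrative names and an assumed 2 MiB huge page (512 base pages); it is not the kernel code itself:

#include <assert.h>
#include <stdio.h>

/* Stand-in for the reservation map: only the cached unit matters here. */
struct resv_map_sketch {
	unsigned long pages_per_hpage;  /* cached pages_per_huge_page(h) */
};

static unsigned long charged;  /* stand-in for a cgroup page counter */

static void charge(unsigned long nr_pages)   { charged += nr_pages; }
static void uncharge(unsigned long nr_pages) { charged -= nr_pages; }

int main(void)
{
	unsigned long pages_per_huge_page = 512;  /* assumed 2 MiB / 4 KiB */
	struct resv_map_sketch resv = { 0 };

	/* Record the unit once, as record_hugetlb_cgroup_uncharge_info() does... */
	resv.pages_per_hpage = pages_per_huge_page;

	/* ...charge in base-page units, as alloc_hugetlb_folio() does... */
	charge(pages_per_huge_page);

	/* ...and uncharge with the cached unit; the VM_BUG_ON in the second
	 * hit asserts the cached value never drifts from the hstate's. */
	assert(resv.pages_per_hpage == pages_per_huge_page);
	uncharge(resv.pages_per_hpage);

	printf("net charge: %lu base pages\n", charged);  /* 0 */
	return 0;
}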
hugetlb_vmemmap.h
38 return pages_per_huge_page(h) * sizeof(struct page); in hugetlb_vmemmap_size()
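
The hugetlb_vmemmap.h hit computes how many bytes of struct page metadata back one huge page. A small sketch of that arithmetic, assuming a 4 KiB base page, a 64-byte struct page, and the common x86-64 orders of 9 (2 MiB) and 18 (1 GiB); the helper signatures here take an order rather than an hstate and are illustrative only:

#include <stdio.h>

#define STRUCT_PAGE_SIZE 64u  /* assumed sizeof(struct page) == 64 */

/* Base pages per huge page: 1 << order. */
static unsigned int pages_per_huge_page(unsigned int order)
{
	return 1u << order;
}

/* Mirrors the hugetlb_vmemmap_size() expression in the hit above. */
static unsigned long hugetlb_vmemmap_size(unsigned int order)
{
	return (unsigned long)pages_per_huge_page(order) * STRUCT_PAGE_SIZE;
}

int main(void)
{
	unsigned int orders[] = { 9, 18 };  /* 2 MiB and 1 GiB with 4 KiB base pages */

	for (int i = 0; i < 2; i++) {
		unsigned int o = orders[i];
		printf("order %2u: %u base pages, %lu KiB of vmemmap\n",
		       o, pages_per_huge_page(o),
		       hugetlb_vmemmap_size(o) / 1024);
	}
	return 0;
}

For a 2 MiB huge page this prints 512 base pages and 32 KiB of vmemmap, which is the overhead the vmemmap optimization in hugetlb_vmemmap.c works to reclaim.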
hugetlb_vmemmap.c
672 epfn = spfn + pages_per_huge_page(h); in __hugetlb_vmemmap_optimize_folios()
813 nr_pages = pages_per_huge_page(m->hstate); in hugetlb_vmemmap_init_early()
862 nr_pages = pages_per_huge_page(h); in hugetlb_vmemmap_init_late()
hugetlb_cgroup.c
126 pages_per_huge_page(&hstates[idx])); in hugetlb_cgroup_init()
562 pages_per_huge_page(&hstates[idx])); in hugetlb_cgroup_read_u64_max()
609 nr_pages = round_down(nr_pages, pages_per_huge_page(&hstates[idx])); in hugetlb_cgroup_write()
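
The hugetlb_cgroup.c write path rounds a requested limit down to a whole number of huge pages. A minimal sketch of that step, assuming the kernel's round_down() reduces to power-of-two masking (pages_per_huge_page() is always 1 << order, so the mask form applies); names are illustrative:

#include <stdio.h>

/* pages_per_huge_page() is 1 << order, i.e. always a power of two. */
static unsigned long pages_per_hpage(unsigned int order)
{
	return 1ul << order;
}

/* Power-of-two round down, modelling the kernel's round_down() macro. */
static unsigned long round_down_pow2(unsigned long x, unsigned long align)
{
	return x & ~(align - 1);
}

int main(void)
{
	unsigned long limit_pages = 1000;          /* limit written by the user   */
	unsigned long hpage = pages_per_hpage(9);  /* 512 base pages per 2 MiB page */

	/* Mirrors: nr_pages = round_down(nr_pages, pages_per_huge_page(...)); */
	unsigned long rounded = round_down_pow2(limit_pages, hpage);

	printf("%lu pages rounds down to %lu (= %lu huge pages)\n",
	       limit_pages, rounded, rounded / hpage);
	return 0;
}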
page_vma_mapped.c
215 if (!check_pte(pvmw, pages_per_huge_page(hstate))) in page_vma_mapped_walk()
/linux/include/linux/
hugetlb.h
796 static inline unsigned int pages_per_huge_page(const struct hstate *h) in pages_per_huge_page() function
1192 static inline unsigned int pages_per_huge_page(struct hstate *h)
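
The hugetlb.h hits are the definition and its !CONFIG_HUGETLB_PAGE stub. The helper simply converts the hstate's huge page order into a count of base pages; a self-contained sketch of that relationship, assuming the real body reads h->order and the stub returns 1 (neither body is reproduced verbatim here), with a stripped-down stand-in for struct hstate:

#include <stdio.h>

/* Minimal stand-in for struct hstate: only the field the helper needs. */
struct hstate_sketch {
	unsigned int order;  /* huge page size = base page size << order */
};

/* Sketch of pages_per_huge_page(): base pages covered by one huge page. */
static unsigned int pages_per_huge_page(const struct hstate_sketch *h)
{
	return 1u << h->order;
}

int main(void)
{
	struct hstate_sketch h2m = { .order = 9 };   /* 2 MiB with 4 KiB base pages */
	struct hstate_sketch h1g = { .order = 18 };  /* 1 GiB with 4 KiB base pages */

	printf("2 MiB hstate: %u base pages\n", pages_per_huge_page(&h2m));  /* 512    */
	printf("1 GiB hstate: %u base pages\n", pages_per_huge_page(&h1g));  /* 262144 */
	return 0;
}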
/linux/fs/hugetlbfs/
inode.c
404 start = index * pages_per_huge_page(h); in hugetlb_unmap_file_folio()
405 end = (index + 1) * pages_per_huge_page(h); in hugetlb_unmap_file_folio()
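
The inode.c hit converts a huge-page-sized file index into the half-open range of base-page indices it covers. A short sketch of that conversion, assuming a 2 MiB huge page (512 base pages) and a hypothetical index of 3:

#include <stdio.h>

int main(void)
{
	unsigned long pages_per_hpage = 512;  /* assumed 2 MiB / 4 KiB */
	unsigned long index = 3;              /* fourth huge page in the file */

	/* Mirrors: start = index * pages_per_huge_page(h);
	 *          end   = (index + 1) * pages_per_huge_page(h); */
	unsigned long start = index * pages_per_hpage;
	unsigned long end = (index + 1) * pages_per_hpage;

	/* Half-open [start, end) range of base-page indices covered. */
	printf("huge page index %lu covers base pages [%lu, %lu)\n",
	       index, start, end);  /* [1536, 2048) */
	return 0;
}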