// SPDX-License-Identifier: GPL-2.0
/*
 * HugeTLB Vmemmap Optimization (HVO)
 *
 * Copyright (c) 2020, ByteDance. All rights reserved.
 *
 *     Author: Muchun Song <songmuchun@bytedance.com>
 */
#ifndef _LINUX_HUGETLB_VMEMMAP_H
#define _LINUX_HUGETLB_VMEMMAP_H
#include <linux/hugetlb.h>
#include <linux/io.h>
#include <linux/memblock.h>

/*
 * Reserve one vmemmap page; all vmemmap addresses are mapped to it. See
 * Documentation/mm/vmemmap_dedup.rst.
 */
#define HUGETLB_VMEMMAP_RESERVE_SIZE	PAGE_SIZE
#define HUGETLB_VMEMMAP_RESERVE_PAGES	(HUGETLB_VMEMMAP_RESERVE_SIZE / sizeof(struct page))
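
/*
 * Worked example (illustrative only; assumes typical x86-64 values of a
 * 4 KiB PAGE_SIZE and a 64-byte struct page): the single reserved vmemmap
 * page holds 4096 / 64 = 64 struct pages, so HUGETLB_VMEMMAP_RESERVE_PAGES
 * evaluates to 64.
 */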

#ifdef CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP
int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio);
long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios);
void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio);
void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list);
void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h, struct list_head *folio_list);
#ifdef CONFIG_SPARSEMEM_VMEMMAP_PREINIT
void hugetlb_vmemmap_init_early(int nid);
void hugetlb_vmemmap_init_late(int nid);
#endif

static inline unsigned int hugetlb_vmemmap_size(const struct hstate *h)
{
	return pages_per_huge_page(h) * sizeof(struct page);
}
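
/*
 * Continuing the illustrative example above: a 2 MiB HugeTLB page spans
 * 512 base pages, so its vmemmap is 512 * 64 = 32 KiB, i.e. eight 4 KiB
 * vmemmap pages.
 */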

/*
 * Return the amount of vmemmap, in bytes, associated with a HugeTLB page
 * that can be optimized and freed to the buddy allocator.
 */
static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	int size = hugetlb_vmemmap_size(h) - HUGETLB_VMEMMAP_RESERVE_SIZE;

	/*
	 * If struct page is not a power-of-2 size, struct pages can straddle
	 * vmemmap page boundaries and the vmemmap cannot be optimized.
	 */
	if (!is_power_of_2(sizeof(struct page)))
		return 0;
	return size > 0 ? size : 0;
}
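
/*
 * With the illustrative x86-64 numbers above, the optimizable size of a
 * 2 MiB HugeTLB page is 32 KiB - 4 KiB = 28 KiB: seven of the eight
 * vmemmap pages can be freed back to the buddy allocator.
 */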
#else
static inline int hugetlb_vmemmap_restore_folio(const struct hstate *h, struct folio *folio)
{
	return 0;
}

/*
 * Without HVO there is nothing to restore: every folio already has a fully
 * populated vmemmap, so all folios are moved to the "non-HVO" list.
 */
static inline long hugetlb_vmemmap_restore_folios(const struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	list_splice_init(folio_list, non_hvo_folios);
	return 0;
}

static inline void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)
{
}

static inline void hugetlb_vmemmap_optimize_folios(struct hstate *h, struct list_head *folio_list)
{
}

static inline void hugetlb_vmemmap_optimize_bootmem_folios(struct hstate *h,
						struct list_head *folio_list)
{
}

static inline void hugetlb_vmemmap_init_early(int nid)
{
}

static inline void hugetlb_vmemmap_init_late(int nid)
{
}

static inline unsigned int hugetlb_vmemmap_optimizable_size(const struct hstate *h)
{
	return 0;
}
#endif /* CONFIG_HUGETLB_PAGE_OPTIMIZE_VMEMMAP */

static inline bool hugetlb_vmemmap_optimizable(const struct hstate *h)
{
	return hugetlb_vmemmap_optimizable_size(h) != 0;
}
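
/*
 * Minimal usage sketch (hypothetical caller, not part of this header):
 *
 *	if (hugetlb_vmemmap_optimizable(h))
 *		pr_info("HVO can free %u bytes of vmemmap per %lu KiB page\n",
 *			hugetlb_vmemmap_optimizable_size(h),
 *			huge_page_size(h) / 1024);
 */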
#endif /* _LINUX_HUGETLB_VMEMMAP_H */