xref: /linux/include/linux/page_ext.h (revision 1e0ea4dff0f46a3575b6882941dc7331c232d72c)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef __LINUX_PAGE_EXT_H
3 #define __LINUX_PAGE_EXT_H
4 
5 #include <linux/types.h>
6 #include <linux/mmzone.h>
7 #include <linux/stacktrace.h>
8 
9 struct pglist_data;
10 
11 #ifdef CONFIG_PAGE_EXTENSION
/**
 * struct page_ext_operations - per page_ext client operations
 * @offset: Offset to the client's data within page_ext. Offset is returned to
 *          the client by page_ext_init.
 * @size: The size of the client data within page_ext.
 * @need: Function that returns true if client requires page_ext.
 * @init: (optional) Called to initialize client once page_exts are allocated.
 * @need_shared_flags: True when the client stores bits in the shared
 *                     page_ext->flags field (see enum page_ext_flags).
 *
 * Each Page Extension client must define page_ext_operations in
 * page_ext_ops array.
 */
struct page_ext_operations {
	size_t offset;
	size_t size;
	bool (*need)(void);
	void (*init)(void);
	bool need_shared_flags;
};
32 
/*
 * The page_ext_flags users must set need_shared_flags to true.
 */
enum page_ext_flags {
	/* Flag bits living in the shared page_ext->flags word. */
	PAGE_EXT_OWNER,
	PAGE_EXT_OWNER_ALLOCATED,
#if defined(CONFIG_PAGE_IDLE_FLAG) && !defined(CONFIG_64BIT)
	/*
	 * Idle/young tracking bits are only carried here when CONFIG_64BIT
	 * is unset — presumably they fit into struct page's own flags on
	 * 64-bit; confirm against the page_idle implementation.
	 */
	PAGE_EXT_YOUNG,
	PAGE_EXT_IDLE,
#endif
};
44 
/*
 * Page Extension can be considered as an extended mem_map.
 * A page_ext page is associated with every page descriptor. The
 * page_ext helps us add more information about the page.
 * All page_ext are allocated at boot or memory hotplug event,
 * then the page_ext for pfn always exists.
 */
struct page_ext {
	unsigned long flags;	/* shared flag bits, see enum page_ext_flags */
};
55 
56 extern bool early_page_ext;
57 extern unsigned long page_ext_size;
58 extern void pgdat_page_ext_init(struct pglist_data *pgdat);
59 
/*
 * Report whether the early_page_ext flag was set — presumably via a boot
 * parameter handled in mm/page_ext.c; confirm there.
 */
static inline bool early_page_ext_enabled(void)
{
	return early_page_ext;
}
64 
65 #ifdef CONFIG_SPARSEMEM
/*
 * With CONFIG_SPARSEMEM the flatmem init hooks are no-ops; page_ext is
 * allocated per memory section (see page_ext_iter_next_fast_possible())
 * via the out-of-line page_ext_init() instead.
 */
static inline void page_ext_init_flatmem(void)
{
}
extern void page_ext_init(void);
static inline void page_ext_init_flatmem_late(void)
{
}
73 
static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	/*
	 * page_ext is allocated per memory section. Once we cross a
	 * memory section, we have to fetch the new pointer.
	 *
	 * Non-zero (true) means @next_pfn is still inside the current
	 * section, so plain pointer arithmetic (page_ext_next()) is valid.
	 */
	return next_pfn % PAGES_PER_SECTION;
}
82 #else
/* !CONFIG_SPARSEMEM: flatmem hooks are real, sparsemem init is a no-op. */
extern void page_ext_init_flatmem(void);
extern void page_ext_init_flatmem_late(void);
static inline void page_ext_init(void)
{
}

static inline bool page_ext_iter_next_fast_possible(unsigned long next_pfn)
{
	/* No section boundaries to cross, so the fast path always works. */
	return true;
}
93 #endif
94 
95 extern struct page_ext *page_ext_get(const struct page *page);
96 extern struct page_ext *page_ext_from_phys(phys_addr_t phys);
97 extern void page_ext_put(struct page_ext *page_ext);
98 extern struct page_ext *page_ext_lookup(unsigned long pfn);
99 
page_ext_data(struct page_ext * page_ext,struct page_ext_operations * ops)100 static inline void *page_ext_data(struct page_ext *page_ext,
101 				  struct page_ext_operations *ops)
102 {
103 	return (void *)(page_ext) + ops->offset;
104 }
105 
page_ext_next(struct page_ext * curr)106 static inline struct page_ext *page_ext_next(struct page_ext *curr)
107 {
108 	void *next = curr;
109 	next += page_ext_size;
110 	return next;
111 }
112 
/* Cursor state for walking the page_ext records of consecutive pfns. */
struct page_ext_iter {
	unsigned long index;		/* pages advanced past start_pfn */
	unsigned long start_pfn;	/* pfn the walk started at */
	struct page_ext *page_ext;	/* current record; NULL if lookup failed */
};
118 
119 /**
120  * page_ext_iter_begin() - Prepare for iterating through page extensions.
121  * @iter: page extension iterator.
122  * @pfn: PFN of the page we're interested in.
123  *
124  * Must be called with RCU read lock taken.
125  *
126  * Return: NULL if no page_ext exists for this page.
127  */
page_ext_iter_begin(struct page_ext_iter * iter,unsigned long pfn)128 static inline struct page_ext *page_ext_iter_begin(struct page_ext_iter *iter,
129 						unsigned long pfn)
130 {
131 	iter->index = 0;
132 	iter->start_pfn = pfn;
133 	iter->page_ext = page_ext_lookup(pfn);
134 
135 	return iter->page_ext;
136 }
137 
138 /**
139  * page_ext_iter_next() - Get next page extension
140  * @iter: page extension iterator.
141  *
142  * Must be called with RCU read lock taken.
143  *
144  * Return: NULL if no next page_ext exists.
145  */
page_ext_iter_next(struct page_ext_iter * iter)146 static inline struct page_ext *page_ext_iter_next(struct page_ext_iter *iter)
147 {
148 	unsigned long pfn;
149 
150 	if (WARN_ON_ONCE(!iter->page_ext))
151 		return NULL;
152 
153 	iter->index++;
154 	pfn = iter->start_pfn + iter->index;
155 
156 	if (page_ext_iter_next_fast_possible(pfn))
157 		iter->page_ext = page_ext_next(iter->page_ext);
158 	else
159 		iter->page_ext = page_ext_lookup(pfn);
160 
161 	return iter->page_ext;
162 }
163 
/**
 * page_ext_iter_get() - Get current page extension
 * @iter: page extension iterator.
 *
 * Return: NULL if no page_ext exists for this iterator.
 */
static inline struct page_ext *page_ext_iter_get(const struct page_ext_iter *iter)
{
	/* Pure accessor: returns the cached pointer, performs no lookup. */
	return iter->page_ext;
}
174 
/**
 * for_each_page_ext(): iterate through page_ext objects.
 * @__page: the page we're interested in
 * @__pgcount: how many pages to iterate through
 * @__page_ext: struct page_ext pointer where the current page_ext
 *              object is returned
 * @__iter: struct page_ext_iter object (defined in the stack)
 *
 * IMPORTANT: must be called with RCU read lock taken.
 *
 * Note: @__pgcount is re-evaluated in the loop condition on every
 * iteration, so pass a side-effect-free expression.
 */
#define for_each_page_ext(__page, __pgcount, __page_ext, __iter) \
	for (__page_ext = page_ext_iter_begin(&__iter, page_to_pfn(__page));\
		__page_ext && __iter.index < __pgcount;          \
		__page_ext = page_ext_iter_next(&__iter))
189 
190 #else /* !CONFIG_PAGE_EXTENSION */
191 struct page_ext;
192 
/*
 * !CONFIG_PAGE_EXTENSION stubs: with the feature compiled out, every
 * init hook is a no-op and every lookup reports "no page_ext".
 */
static inline bool early_page_ext_enabled(void)
{
	return false;
}

static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}

static inline void page_ext_init(void)
{
}

static inline void page_ext_init_flatmem_late(void)
{
}

static inline void page_ext_init_flatmem(void)
{
}

static inline struct page_ext *page_ext_get(const struct page *page)
{
	return NULL;
}

static inline struct page_ext *page_ext_from_phys(phys_addr_t phys)
{
	return NULL;
}

static inline void page_ext_put(struct page_ext *page_ext)
{
}
227 #endif /* CONFIG_PAGE_EXTENSION */
228 #endif /* __LINUX_PAGE_EXT_H */
229