/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */
#include <linux/kobject.h>

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);

vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, unsigned long pfn,
			      bool write);
vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, unsigned long pfn,
			      bool write);
vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
				bool write);
vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
				bool write);

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_UNSUPPORTED,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;
extern struct kobj_attribute thpsize_shmem_enabled_attr;

/*
 * Mask of all large folio orders supported for anonymous THP; all orders up to
 * and including PMD_ORDER, except order-0 (which is not "huge") and order-1
 * (which is a limitation of the THP implementation).
 */
#define THP_ORDERS_ALL_ANON	((BIT(PMD_ORDER + 1) - 1) & ~(BIT(0) | BIT(1)))
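
/*
 * Worked example (illustrative, assuming 4K base pages with PMD_ORDER == 9):
 * BIT(PMD_ORDER + 1) - 1 sets bits 0..9, and masking out bits 0 and 1
 * leaves orders 2..9, i.e. THP_ORDERS_ALL_ANON == 0x3fc: everything from
 * a 16K (order-2) folio up to a 2M (order-9) PMD-sized folio.
 */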

/*
 * Mask of all large folio orders supported for file THP. Folios in a DAX
 * file are never split, and the MAX_PAGECACHE_ORDER limit does not apply
 * to them. The same applies to PFNMAPs, where there is neither a struct
 * page nor a pagecache.
 */
#define THP_ORDERS_ALL_SPECIAL \
	(BIT(PMD_ORDER) | BIT(PUD_ORDER))
#define THP_ORDERS_ALL_FILE_DEFAULT \
	((BIT(MAX_PAGECACHE_ORDER + 1) - 1) & ~BIT(0))
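
/*
 * Worked example (illustrative, assuming x86-64 with 4K base pages):
 * PMD_ORDER == 9 and PUD_ORDER == 18, so THP_ORDERS_ALL_SPECIAL ==
 * BIT(9) | BIT(18). Assuming MAX_PAGECACHE_ORDER == 9 on such a config,
 * the default file mask covers orders 1..9; only order-0 is masked out,
 * as it is not "huge".
 */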

/*
 * Mask of all large folio orders supported for THP.
 */
#define THP_ORDERS_ALL \
	(THP_ORDERS_ALL_ANON | THP_ORDERS_ALL_SPECIAL | THP_ORDERS_ALL_FILE_DEFAULT)

#define TVA_SMAPS		(1 << 0)	/* Will be used for procfs */
#define TVA_IN_PF		(1 << 1)	/* Page fault handler */
#define TVA_ENFORCE_SYSFS	(1 << 2)	/* Obey sysfs configuration */

#define thp_vma_allowable_order(vma, vm_flags, tva_flags, order) \
	(!!thp_vma_allowable_orders(vma, vm_flags, tva_flags, BIT(order)))
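
/*
 * Example usage (a sketch, not taken from a specific caller): a fault
 * path might ask whether a PMD-sized THP is permitted before trying to
 * allocate one:
 *
 *	if (thp_vma_allowable_order(vma, vma->vm_flags,
 *				    TVA_IN_PF | TVA_ENFORCE_SYSFS, PMD_ORDER))
 *		... try to install a PMD-mapped THP, else fall back ...
 */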

#define split_folio(f) split_folio_to_list(f, NULL)

#ifdef CONFIG_PGTABLE_HAS_HUGE_LEAVES
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PUD_SHIFT PUD_SHIFT
#else
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#endif

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)

#define HPAGE_PUD_ORDER (HPAGE_PUD_SHIFT-PAGE_SHIFT)
#define HPAGE_PUD_NR (1<<HPAGE_PUD_ORDER)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
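
/*
 * Illustrative values, assuming x86-64 with 4K base pages: PMD_SHIFT == 21
 * and PUD_SHIFT == 30, so HPAGE_PMD_SIZE == 2M (order 9, 512 base pages)
 * and HPAGE_PUD_SIZE == 1G (order 18, 262144 base pages).
 */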

enum mthp_stat_item {
	MTHP_STAT_ANON_FAULT_ALLOC,
	MTHP_STAT_ANON_FAULT_FALLBACK,
	MTHP_STAT_ANON_FAULT_FALLBACK_CHARGE,
	MTHP_STAT_ZSWPOUT,
	MTHP_STAT_SWPIN,
	MTHP_STAT_SWPIN_FALLBACK,
	MTHP_STAT_SWPIN_FALLBACK_CHARGE,
	MTHP_STAT_SWPOUT,
	MTHP_STAT_SWPOUT_FALLBACK,
	MTHP_STAT_SHMEM_ALLOC,
	MTHP_STAT_SHMEM_FALLBACK,
	MTHP_STAT_SHMEM_FALLBACK_CHARGE,
	MTHP_STAT_SPLIT,
	MTHP_STAT_SPLIT_FAILED,
	MTHP_STAT_SPLIT_DEFERRED,
	MTHP_STAT_NR_ANON,
	MTHP_STAT_NR_ANON_PARTIALLY_MAPPED,
	__MTHP_STAT_COUNT
};

#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && defined(CONFIG_SYSFS)
struct mthp_stat {
	unsigned long stats[ilog2(MAX_PTRS_PER_PTE) + 1][__MTHP_STAT_COUNT];
};

DECLARE_PER_CPU(struct mthp_stat, mthp_stats);

static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
	if (order <= 0 || order > PMD_ORDER)
		return;

	this_cpu_add(mthp_stats.stats[order][item], delta);
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
	mod_mthp_stat(order, item, 1);
}
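
/*
 * Example usage (sketch): a caller that has just allocated an anonymous
 * folio during a fault would typically bump the per-order counter with:
 *
 *	count_mthp_stat(folio_order(folio), MTHP_STAT_ANON_FAULT_ALLOC);
 *
 * Orders outside (0, PMD_ORDER] are silently ignored by mod_mthp_stat().
 */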

#else
static inline void mod_mthp_stat(int order, enum mthp_stat_item item, int delta)
{
}

static inline void count_mthp_stat(int order, enum mthp_stat_item item)
{
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

extern unsigned long transparent_hugepage_flags;
extern unsigned long huge_anon_orders_always;
extern unsigned long huge_anon_orders_madvise;
extern unsigned long huge_anon_orders_inherit;

static inline bool hugepage_global_enabled(void)
{
	return transparent_hugepage_flags &
			((1<<TRANSPARENT_HUGEPAGE_FLAG) |
			(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG));
}

static inline bool hugepage_global_always(void)
{
	return transparent_hugepage_flags &
			(1<<TRANSPARENT_HUGEPAGE_FLAG);
}

static inline int highest_order(unsigned long orders)
{
	return fls_long(orders) - 1;
}

static inline int next_order(unsigned long *orders, int prev)
{
	*orders &= ~BIT(prev);
	return highest_order(*orders);
}
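
/*
 * Together these implement a highest-to-lowest walk over an order
 * bitfield. A minimal sketch of the idiom (see thp_vma_suitable_orders()
 * below for a real user):
 *
 *	order = highest_order(orders);
 *	while (orders) {
 *		... try 'order' ...
 *		order = next_order(&orders, order);
 *	}
 */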

/*
 * Do the following checks:
 * - For file vma, check if the linear page offset of vma is
 *   order-aligned within the file. The hugepage is
 *   guaranteed to be order-aligned within the file, but we must
 *   check that the order-aligned addresses in the VMA map to
 *   order-aligned offsets within the file, else the hugepage will
 *   not be mappable.
 * - For all vmas, check if the haddr is in an aligned hugepage
 *   area.
 */
static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	unsigned long hpage_size = PAGE_SIZE << order;
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				hpage_size >> PAGE_SHIFT))
			return false;
	}

	haddr = ALIGN_DOWN(addr, hpage_size);

	if (haddr < vma->vm_start || haddr + hpage_size > vma->vm_end)
		return false;
	return true;
}
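
/*
 * Worked example of the pgoff check (illustrative): for a file VMA with
 * vm_start at page 0x205 and vm_pgoff 0x5, (vm_start >> PAGE_SHIFT) -
 * vm_pgoff == 0x200, which is aligned to 512 pages, so an order-9 folio
 * can be mapped; had vm_pgoff been 0x4, the result would be 0x201 and
 * the order-9 check would fail.
 */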

/*
 * Filter the bitfield of input orders to the ones suitable for use in the vma.
 * See thp_vma_suitable_order().
 * All orders that pass the checks are returned as a bitfield.
 */
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	int order;

	/*
	 * Iterate over orders, highest to lowest, removing orders that don't
	 * meet alignment requirements from the set. Exit loop at first order
	 * that meets requirements, since all lower orders must also meet
	 * requirements.
	 */

	order = highest_order(orders);

	while (orders) {
		if (thp_vma_suitable_order(vma, addr, order))
			break;
		order = next_order(&orders, order);
	}

	return orders;
}

unsigned long __thp_vma_allowable_orders(struct vm_area_struct *vma,
					 vm_flags_t vm_flags,
					 unsigned long tva_flags,
					 unsigned long orders);

/**
 * thp_vma_allowable_orders - determine hugepage orders that are allowed for vma
 * @vma: the vm area to check
 * @vm_flags: use these vm_flags instead of vma->vm_flags
 * @tva_flags: Which TVA flags to honour
 * @orders: bitfield of all orders to consider
 *
 * Calculates the intersection of the requested hugepage orders and the allowed
 * hugepage orders for the provided vma. Permitted orders are encoded as a set
 * bit at the corresponding bit position (bit-2 corresponds to order-2, bit-3
 * corresponds to order-3, etc). Order-0 is never considered a hugepage order.
 *
 * Return: bitfield of orders allowed for hugepage in the vma. 0 if no hugepage
 * orders are allowed.
 */
static inline
unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
				       vm_flags_t vm_flags,
				       unsigned long tva_flags,
				       unsigned long orders)
{
	/* Optimization to check if required orders are enabled early. */
	if ((tva_flags & TVA_ENFORCE_SYSFS) && vma_is_anonymous(vma)) {
		unsigned long mask = READ_ONCE(huge_anon_orders_always);

		if (vm_flags & VM_HUGEPAGE)
			mask |= READ_ONCE(huge_anon_orders_madvise);
		if (hugepage_global_always() ||
		    ((vm_flags & VM_HUGEPAGE) && hugepage_global_enabled()))
			mask |= READ_ONCE(huge_anon_orders_inherit);

		orders &= mask;
		if (!orders)
			return 0;
	}

	return __thp_vma_allowable_orders(vma, vm_flags, tva_flags, orders);
}
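
/*
 * Example usage (sketch, mirroring how a fault path would combine the
 * two filters): first intersect with the sysfs policy, then with the
 * per-VMA alignment constraints:
 *
 *	orders = thp_vma_allowable_orders(vma, vma->vm_flags,
 *					  TVA_IN_PF | TVA_ENFORCE_SYSFS,
 *					  THP_ORDERS_ALL_ANON);
 *	orders = thp_vma_suitable_orders(vma, vmf->address, orders);
 */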

struct thpsize {
	struct kobject kobj;
	struct list_head node;
	int order;
};

#define to_thpsize(kobj) container_of(kobj, struct thpsize, kobj)

#define transparent_hugepage_use_zero_page()				\
	(transparent_hugepage_flags &					\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

static inline bool vma_thp_disabled(struct vm_area_struct *vma,
		vm_flags_t vm_flags)
{
	/*
	 * Explicitly disabled through madvise or prctl, or some
	 * architectures may disable THP for some mappings, for
	 * example, s390 kvm.
	 */
	return (vm_flags & VM_NOHUGEPAGE) ||
	       test_bit(MMF_DISABLE_THP, &vma->vm_mm->flags);
}

static inline bool thp_disabled_by_hw(void)
{
	/* If the hardware/firmware marked hugepage support disabled. */
	return transparent_hugepage_flags & (1 << TRANSPARENT_HUGEPAGE_UNSUPPORTED);
}

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);
unsigned long thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags,
		vm_flags_t vm_flags);

bool can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins);
int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order);
int min_order_for_split(struct folio *folio);
int split_folio_to_list(struct folio *folio, struct list_head *list);
bool uniform_split_supported(struct folio *folio, unsigned int new_order,
		bool warns);
bool non_uniform_split_supported(struct folio *folio, unsigned int new_order,
		bool warns);
int folio_split(struct folio *folio, unsigned int new_order, struct page *page,
		struct list_head *list);
/*
 * try_folio_split - try to split a @folio at @page using a non-uniform split.
 * @folio: folio to be split
 * @page: split to order-0 at the given page
 * @list: store the after-split folios
 *
 * Try to split a @folio at @page using a non-uniform split to order-0; if
 * a non-uniform split is not supported, fall back to a uniform split.
 *
 * Return: 0 if the split succeeded, otherwise an error code.
 */
static inline int try_folio_split(struct folio *folio, struct page *page,
		struct list_head *list)
{
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	if (!non_uniform_split_supported(folio, 0, false))
		return split_huge_page_to_list_to_order(&folio->page, list,
				ret);
	return folio_split(folio, ret, page, list);
}
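
/*
 * Example usage (a sketch under the usual splitting assumptions: the
 * folio is locked and carries no pins beyond the caller's):
 *
 *	LIST_HEAD(after_split);
 *
 *	if (!try_folio_split(folio, page, &after_split))
 *		... 'after_split' now holds the resulting folios ...
 */
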
static inline int split_huge_page(struct page *page)
{
	struct folio *folio = page_folio(page);
	int ret = min_order_for_split(folio);

	if (ret < 0)
		return ret;

	/*
	 * split_huge_page() locks the page before splitting and
	 * expects the same page that has been split to be locked when
	 * returned. split_folio(page_folio(page)) cannot be used here
	 * because it converts the page to folio and passes the head
	 * page to be split.
	 */
	return split_huge_page_to_list_to_order(page, NULL, ret);
}
void deferred_split_folio(struct folio *folio, bool partially_mapped);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze);

#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
					 false);			\
	} while (0)
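
/*
 * Example usage (sketch): callers that must demote a huge PMD before
 * operating on individual PTEs, e.g. for a partial-range unmap, can
 * simply do:
 *
 *	split_huge_pmd(vma, pmd, addr);
 *
 * The macro re-checks the PMD itself, so it is safe to call even when
 * the PMD turns out not to be huge.
 */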

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
int change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pud_t *pudp, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
#else
static inline int
change_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma,
		pud_t *pudp, unsigned long addr, pgprot_t newprot,
		unsigned long cp_flags) { return 0; }
#endif

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud))				\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, vm_flags_t *vm_flags,
		     int advice);
int madvise_collapse(struct vm_area_struct *vma, unsigned long start,
		     unsigned long end, bool *lock_dropped);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, struct vm_area_struct *next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
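
/*
 * Example usage (sketch): the canonical pattern takes the lock, operates
 * on the huge PMD only when a non-NULL ptl was returned, then unlocks:
 *
 *	ptl = pmd_trans_huge_lock(pmd, vma);
 *	if (ptl) {
 *		... *pmd is a huge (or swap) PMD and is stable here ...
 *		spin_unlock(ptl);
 *	}
 */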

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct folio *huge_zero_folio;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return READ_ONCE(huge_zero_folio) == folio;
}

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	return READ_ONCE(huge_zero_pfn) == (pfn & ~(HPAGE_PMD_NR - 1));
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && is_huge_zero_pfn(pmd_pfn(pmd));
}
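
/*
 * Worked example (illustrative, assuming HPAGE_PMD_NR == 512): the mask
 * ~(HPAGE_PMD_NR - 1) clears the low 9 bits of @pfn, so every one of the
 * 512 base-page PFNs inside the huge zero folio rounds down to its head
 * PFN and compares equal to huge_zero_pfn.
 */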

struct folio *mm_get_huge_zero_folio(struct mm_struct *mm);
void mm_put_huge_zero_folio(struct mm_struct *mm);

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

void split_huge_pmd_locked(struct vm_area_struct *vma, unsigned long address,
			   pmd_t *pmd, bool freeze);
bool unmap_huge_pmd_locked(struct vm_area_struct *vma, unsigned long addr,
			   pmd_t *pmdp, struct folio *folio);

#else /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
		unsigned long addr, int order)
{
	return false;
}

static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
		unsigned long addr, unsigned long orders)
{
	return 0;
}

static inline unsigned long thp_vma_allowable_orders(struct vm_area_struct *vma,
					vm_flags_t vm_flags,
					unsigned long tva_flags,
					unsigned long orders)
{
	return 0;
}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline unsigned long
thp_get_unmapped_area_vmflags(struct file *filp, unsigned long addr,
			      unsigned long len, unsigned long pgoff,
			      unsigned long flags, vm_flags_t vm_flags)
{
	return 0;
}

static inline bool
can_split_folio(struct folio *folio, int caller_pins, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
		unsigned int new_order)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}

static inline int split_folio_to_list(struct folio *folio, struct list_head *list)
{
	return 0;
}

static inline int try_folio_split(struct folio *folio, struct page *page,
		struct list_head *list)
{
	return 0;
}

static inline void deferred_split_folio(struct folio *folio, bool partially_mapped) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze) {}
static inline void split_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long address, pmd_t *pmd,
					 bool freeze) {}

static inline bool unmap_huge_pmd_locked(struct vm_area_struct *vma,
					 unsigned long addr, pmd_t *pmdp,
					 struct folio *folio)
{
	return false;
}

#define split_huge_pud(__vma, __pmd, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   vm_flags_t *vm_flags, int advice)
{
	return -EINVAL;
}

static inline int madvise_collapse(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end, bool *lock_dropped)
{
	return -EINVAL;
}

static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 struct vm_area_struct *next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_folio(const struct folio *folio)
{
	return false;
}

static inline bool is_huge_zero_pfn(unsigned long pfn)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline void mm_put_huge_zero_folio(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}

static inline int highest_order(unsigned long orders)
{
	return 0;
}

static inline int next_order(unsigned long *orders, int prev)
{
	return 0;
}

static inline void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
				    unsigned long address)
{
}

static inline int change_huge_pud(struct mmu_gather *tlb,
				  struct vm_area_struct *vma, pud_t *pudp,
				  unsigned long addr, pgprot_t newprot,
				  unsigned long cp_flags)
{
	return 0;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list_to_order(struct folio *folio,
		struct list_head *list, int new_order)
{
	return split_huge_page_to_list_to_order(&folio->page, list, new_order);
}

static inline int split_folio_to_order(struct folio *folio, int new_order)
{
	return split_folio_to_list_to_order(folio, NULL, new_order);
}

#endif /* _LINUX_HUGE_MM_H */