xref: /linux/mm/hugetlb.c (revision fc825e513cd494cfcbeb47acf5738fe64f3a9051)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpumask.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/minmax.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_choices.h>
27 #include <linux/string_helpers.h>
28 #include <linux/swap.h>
29 #include <linux/leafops.h>
30 #include <linux/jhash.h>
31 #include <linux/numa.h>
32 #include <linux/llist.h>
33 #include <linux/cma.h>
34 #include <linux/migrate.h>
35 #include <linux/nospec.h>
36 #include <linux/delayacct.h>
37 #include <linux/memory.h>
38 #include <linux/mm_inline.h>
39 #include <linux/padata.h>
40 #include <linux/pgalloc.h>
41 
42 #include <asm/page.h>
43 #include <asm/tlb.h>
44 #include <asm/setup.h>
45 
46 #include <linux/io.h>
47 #include <linux/node.h>
48 #include <linux/page_owner.h>
49 #include "internal.h"
50 #include "hugetlb_vmemmap.h"
51 #include "hugetlb_cma.h"
52 #include "hugetlb_internal.h"
53 #include <linux/page-isolation.h>
54 
55 int hugetlb_max_hstate __read_mostly;
56 unsigned int default_hstate_idx;
57 struct hstate hstates[HUGE_MAX_HSTATE];
58 
59 __initdata nodemask_t hugetlb_bootmem_nodes;
60 __initdata struct list_head huge_boot_pages[MAX_NUMNODES];
61 static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;
62 
63 /*
64  * Due to ordering constraints across the init code for various
65  * architectures, hugetlb hstate cmdline parameters can't simply
66  * be early_param. early_param might call the setup function
67  * before valid hugetlb page sizes are determined, leading to
68  * incorrect rejection of valid hugepagesz= options.
69  *
70  * So, record the parameters early and consume them whenever the
71  * init code is ready for them, by calling hugetlb_parse_params().
72  */
73 
74 /* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
75 #define HUGE_MAX_CMDLINE_ARGS	(2 * HUGE_MAX_HSTATE + 1)
76 struct hugetlb_cmdline {
77 	char *val;
78 	int (*setup)(char *val);
79 };
80 
81 /* for command line parsing */
82 static struct hstate * __initdata parsed_hstate;
83 static unsigned long __initdata default_hstate_max_huge_pages;
84 static bool __initdata parsed_valid_hugepagesz = true;
85 static bool __initdata parsed_default_hugepagesz;
86 static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
87 static unsigned long hugepage_allocation_threads __initdata;
88 
89 static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
90 static int hstate_cmdline_index __initdata;
91 static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
92 static int hugetlb_param_index __initdata;
93 static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
94 static __init void hugetlb_parse_params(void);
95 
96 #define hugetlb_early_param(str, func) \
97 static __init int func##args(char *s) \
98 { \
99 	return hugetlb_add_param(s, func); \
100 } \
101 early_param(str, func##args)
102 
103 /*
104  * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
105  * free_huge_pages, and surplus_huge_pages.
106  */
107 __cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);
108 
109 /*
110  * Serializes faults on the same logical page.  This is used to
111  * prevent spurious OOMs when the hugepage pool is fully utilized.
112  */
113 static int num_fault_mutexes __ro_after_init;
114 struct mutex *hugetlb_fault_mutex_table __ro_after_init;
115 
116 /* Forward declaration */
117 static int hugetlb_acct_memory(struct hstate *h, long delta);
118 static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
119 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
120 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
121 		unsigned long start, unsigned long end, bool take_locks);
122 static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
123 
subpool_is_free(struct hugepage_subpool * spool)124 static inline bool subpool_is_free(struct hugepage_subpool *spool)
125 {
126 	if (spool->count)
127 		return false;
128 	if (spool->max_hpages != -1)
129 		return spool->used_hpages == 0;
130 	if (spool->min_hpages != -1)
131 		return spool->rsv_hpages == spool->min_hpages;
132 
133 	return true;
134 }
135 
unlock_or_release_subpool(struct hugepage_subpool * spool,unsigned long irq_flags)136 static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
137 						unsigned long irq_flags)
138 {
139 	spin_unlock_irqrestore(&spool->lock, irq_flags);
140 
141 	/* If no pages are used, and no other handles to the subpool
142 	 * remain, give up any reservations based on minimum size and
143 	 * free the subpool */
144 	if (subpool_is_free(spool)) {
145 		if (spool->min_hpages != -1)
146 			hugetlb_acct_memory(spool->hstate,
147 						-spool->min_hpages);
148 		kfree(spool);
149 	}
150 }
151 
hugepage_new_subpool(struct hstate * h,long max_hpages,long min_hpages)152 struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
153 						long min_hpages)
154 {
155 	struct hugepage_subpool *spool;
156 
157 	spool = kzalloc_obj(*spool);
158 	if (!spool)
159 		return NULL;
160 
161 	spin_lock_init(&spool->lock);
162 	spool->count = 1;
163 	spool->max_hpages = max_hpages;
164 	spool->hstate = h;
165 	spool->min_hpages = min_hpages;
166 
167 	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
168 		kfree(spool);
169 		return NULL;
170 	}
171 	spool->rsv_hpages = min_hpages;
172 
173 	return spool;
174 }
175 
hugepage_put_subpool(struct hugepage_subpool * spool)176 void hugepage_put_subpool(struct hugepage_subpool *spool)
177 {
178 	unsigned long flags;
179 
180 	spin_lock_irqsave(&spool->lock, flags);
181 	BUG_ON(!spool->count);
182 	spool->count--;
183 	unlock_or_release_subpool(spool, flags);
184 }
185 
186 /*
187  * Subpool accounting for allocating and reserving pages.
188  * Return -ENOMEM if there are not enough resources to satisfy the
189  * request.  Otherwise, return the number of pages by which the
190  * global pools must be adjusted (upward).  The returned value may
191  * only be different than the passed value (delta) in the case where
192  * a subpool minimum size must be maintained.
193  */
hugepage_subpool_get_pages(struct hugepage_subpool * spool,long delta)194 static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
195 				      long delta)
196 {
197 	long ret = delta;
198 
199 	if (!spool)
200 		return ret;
201 
202 	spin_lock_irq(&spool->lock);
203 
204 	if (spool->max_hpages != -1) {		/* maximum size accounting */
205 		if ((spool->used_hpages + delta) <= spool->max_hpages)
206 			spool->used_hpages += delta;
207 		else {
208 			ret = -ENOMEM;
209 			goto unlock_ret;
210 		}
211 	}
212 
213 	/* minimum size accounting */
214 	if (spool->min_hpages != -1 && spool->rsv_hpages) {
215 		if (delta > spool->rsv_hpages) {
216 			/*
217 			 * Asking for more reserves than those already taken on
218 			 * behalf of subpool.  Return difference.
219 			 */
220 			ret = delta - spool->rsv_hpages;
221 			spool->rsv_hpages = 0;
222 		} else {
223 			ret = 0;	/* reserves already accounted for */
224 			spool->rsv_hpages -= delta;
225 		}
226 	}
227 
228 unlock_ret:
229 	spin_unlock_irq(&spool->lock);
230 	return ret;
231 }
232 
233 /*
234  * Subpool accounting for freeing and unreserving pages.
235  * Return the number of global page reservations that must be dropped.
236  * The return value may only be different than the passed value (delta)
237  * in the case where a subpool minimum size must be maintained.
238  */
hugepage_subpool_put_pages(struct hugepage_subpool * spool,long delta)239 static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
240 				       long delta)
241 {
242 	long ret = delta;
243 	unsigned long flags;
244 
245 	if (!spool)
246 		return delta;
247 
248 	spin_lock_irqsave(&spool->lock, flags);
249 
250 	if (spool->max_hpages != -1)		/* maximum size accounting */
251 		spool->used_hpages -= delta;
252 
253 	 /* minimum size accounting */
254 	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
255 		if (spool->rsv_hpages + delta <= spool->min_hpages)
256 			ret = 0;
257 		else
258 			ret = spool->rsv_hpages + delta - spool->min_hpages;
259 
260 		spool->rsv_hpages += delta;
261 		if (spool->rsv_hpages > spool->min_hpages)
262 			spool->rsv_hpages = spool->min_hpages;
263 	}
264 
265 	/*
266 	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
267 	 * quota reference, free it now.
268 	 */
269 	unlock_or_release_subpool(spool, flags);
270 
271 	return ret;
272 }
273 
subpool_vma(struct vm_area_struct * vma)274 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
275 {
276 	return subpool_inode(file_inode(vma->vm_file));
277 }
278 
279 /*
280  * hugetlb vma_lock helper routines
281  */
hugetlb_vma_lock_read(struct vm_area_struct * vma)282 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
283 {
284 	if (__vma_shareable_lock(vma)) {
285 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
286 
287 		down_read(&vma_lock->rw_sema);
288 	} else if (__vma_private_lock(vma)) {
289 		struct resv_map *resv_map = vma_resv_map(vma);
290 
291 		down_read(&resv_map->rw_sema);
292 	}
293 }
294 
hugetlb_vma_unlock_read(struct vm_area_struct * vma)295 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
296 {
297 	if (__vma_shareable_lock(vma)) {
298 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
299 
300 		up_read(&vma_lock->rw_sema);
301 	} else if (__vma_private_lock(vma)) {
302 		struct resv_map *resv_map = vma_resv_map(vma);
303 
304 		up_read(&resv_map->rw_sema);
305 	}
306 }
307 
hugetlb_vma_lock_write(struct vm_area_struct * vma)308 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
309 {
310 	if (__vma_shareable_lock(vma)) {
311 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
312 
313 		down_write(&vma_lock->rw_sema);
314 	} else if (__vma_private_lock(vma)) {
315 		struct resv_map *resv_map = vma_resv_map(vma);
316 
317 		down_write(&resv_map->rw_sema);
318 	}
319 }
320 
hugetlb_vma_unlock_write(struct vm_area_struct * vma)321 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
322 {
323 	if (__vma_shareable_lock(vma)) {
324 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
325 
326 		up_write(&vma_lock->rw_sema);
327 	} else if (__vma_private_lock(vma)) {
328 		struct resv_map *resv_map = vma_resv_map(vma);
329 
330 		up_write(&resv_map->rw_sema);
331 	}
332 }
333 
hugetlb_vma_trylock_write(struct vm_area_struct * vma)334 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
335 {
336 
337 	if (__vma_shareable_lock(vma)) {
338 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
339 
340 		return down_write_trylock(&vma_lock->rw_sema);
341 	} else if (__vma_private_lock(vma)) {
342 		struct resv_map *resv_map = vma_resv_map(vma);
343 
344 		return down_write_trylock(&resv_map->rw_sema);
345 	}
346 
347 	return 1;
348 }
349 
hugetlb_vma_assert_locked(struct vm_area_struct * vma)350 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
351 {
352 	if (__vma_shareable_lock(vma)) {
353 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
354 
355 		lockdep_assert_held(&vma_lock->rw_sema);
356 	} else if (__vma_private_lock(vma)) {
357 		struct resv_map *resv_map = vma_resv_map(vma);
358 
359 		lockdep_assert_held(&resv_map->rw_sema);
360 	}
361 }
362 
hugetlb_vma_lock_release(struct kref * kref)363 void hugetlb_vma_lock_release(struct kref *kref)
364 {
365 	struct hugetlb_vma_lock *vma_lock = container_of(kref,
366 			struct hugetlb_vma_lock, refs);
367 
368 	kfree(vma_lock);
369 }
370 
__hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock * vma_lock)371 static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
372 {
373 	struct vm_area_struct *vma = vma_lock->vma;
374 
375 	/*
376 	 * vma_lock structure may or not be released as a result of put,
377 	 * it certainly will no longer be attached to vma so clear pointer.
378 	 * Semaphore synchronizes access to vma_lock->vma field.
379 	 */
380 	vma_lock->vma = NULL;
381 	vma->vm_private_data = NULL;
382 	up_write(&vma_lock->rw_sema);
383 	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
384 }
385 
__hugetlb_vma_unlock_write_free(struct vm_area_struct * vma)386 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
387 {
388 	if (__vma_shareable_lock(vma)) {
389 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
390 
391 		__hugetlb_vma_unlock_write_put(vma_lock);
392 	} else if (__vma_private_lock(vma)) {
393 		struct resv_map *resv_map = vma_resv_map(vma);
394 
395 		/* no free for anon vmas, but still need to unlock */
396 		up_write(&resv_map->rw_sema);
397 	}
398 }
399 
hugetlb_vma_lock_free(struct vm_area_struct * vma)400 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
401 {
402 	/*
403 	 * Only present in sharable vmas.
404 	 */
405 	if (!vma || !__vma_shareable_lock(vma))
406 		return;
407 
408 	if (vma->vm_private_data) {
409 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
410 
411 		down_write(&vma_lock->rw_sema);
412 		__hugetlb_vma_unlock_write_put(vma_lock);
413 	}
414 }
415 
416 /*
417  * vma specific semaphore used for pmd sharing and fault/truncation
418  * synchronization
419  */
hugetlb_vma_lock_alloc(struct vm_area_struct * vma)420 int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
421 {
422 	struct hugetlb_vma_lock *vma_lock;
423 
424 	/* Only establish in (flags) sharable vmas */
425 	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
426 		return 0;
427 
428 	/* Should never get here with non-NULL vm_private_data */
429 	if (vma->vm_private_data)
430 		return -EINVAL;
431 
432 	vma_lock = kmalloc_obj(*vma_lock);
433 	if (!vma_lock) {
434 		/*
435 		 * If we can not allocate structure, then vma can not
436 		 * participate in pmd sharing.  This is only a possible
437 		 * performance enhancement and memory saving issue.
438 		 * However, the lock is also used to synchronize page
439 		 * faults with truncation.  If the lock is not present,
440 		 * unlikely races could leave pages in a file past i_size
441 		 * until the file is removed.  Warn in the unlikely case of
442 		 * allocation failure.
443 		 */
444 		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
445 		return -EINVAL;
446 	}
447 
448 	kref_init(&vma_lock->refs);
449 	init_rwsem(&vma_lock->rw_sema);
450 	vma_lock->vma = vma;
451 	vma->vm_private_data = vma_lock;
452 
453 	return 0;
454 }
455 
456 /* Helper that removes a struct file_region from the resv_map cache and returns
457  * it for use.
458  */
459 static struct file_region *
get_file_region_entry_from_cache(struct resv_map * resv,long from,long to)460 get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
461 {
462 	struct file_region *nrg;
463 
464 	VM_BUG_ON(resv->region_cache_count <= 0);
465 
466 	resv->region_cache_count--;
467 	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
468 	list_del(&nrg->link);
469 
470 	nrg->from = from;
471 	nrg->to = to;
472 
473 	return nrg;
474 }
475 
copy_hugetlb_cgroup_uncharge_info(struct file_region * nrg,struct file_region * rg)476 static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
477 					      struct file_region *rg)
478 {
479 #ifdef CONFIG_CGROUP_HUGETLB
480 	nrg->reservation_counter = rg->reservation_counter;
481 	nrg->css = rg->css;
482 	if (rg->css)
483 		css_get(rg->css);
484 #endif
485 }
486 
487 /* Helper that records hugetlb_cgroup uncharge info. */
record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup * h_cg,struct hstate * h,struct resv_map * resv,struct file_region * nrg)488 static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
489 						struct hstate *h,
490 						struct resv_map *resv,
491 						struct file_region *nrg)
492 {
493 #ifdef CONFIG_CGROUP_HUGETLB
494 	if (h_cg) {
495 		nrg->reservation_counter =
496 			&h_cg->rsvd_hugepage[hstate_index(h)];
497 		nrg->css = &h_cg->css;
498 		/*
499 		 * The caller will hold exactly one h_cg->css reference for the
500 		 * whole contiguous reservation region. But this area might be
501 		 * scattered when there are already some file_regions reside in
502 		 * it. As a result, many file_regions may share only one css
503 		 * reference. In order to ensure that one file_region must hold
504 		 * exactly one h_cg->css reference, we should do css_get for
505 		 * each file_region and leave the reference held by caller
506 		 * untouched.
507 		 */
508 		css_get(&h_cg->css);
509 		if (!resv->pages_per_hpage)
510 			resv->pages_per_hpage = pages_per_huge_page(h);
511 		/* pages_per_hpage should be the same for all entries in
512 		 * a resv_map.
513 		 */
514 		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
515 	} else {
516 		nrg->reservation_counter = NULL;
517 		nrg->css = NULL;
518 	}
519 #endif
520 }
521 
put_uncharge_info(struct file_region * rg)522 static void put_uncharge_info(struct file_region *rg)
523 {
524 #ifdef CONFIG_CGROUP_HUGETLB
525 	if (rg->css)
526 		css_put(rg->css);
527 #endif
528 }
529 
has_same_uncharge_info(struct file_region * rg,struct file_region * org)530 static bool has_same_uncharge_info(struct file_region *rg,
531 				   struct file_region *org)
532 {
533 #ifdef CONFIG_CGROUP_HUGETLB
534 	return rg->reservation_counter == org->reservation_counter &&
535 	       rg->css == org->css;
536 
537 #else
538 	return true;
539 #endif
540 }
541 
coalesce_file_region(struct resv_map * resv,struct file_region * rg)542 static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
543 {
544 	struct file_region *nrg, *prg;
545 
546 	prg = list_prev_entry(rg, link);
547 	if (&prg->link != &resv->regions && prg->to == rg->from &&
548 	    has_same_uncharge_info(prg, rg)) {
549 		prg->to = rg->to;
550 
551 		list_del(&rg->link);
552 		put_uncharge_info(rg);
553 		kfree(rg);
554 
555 		rg = prg;
556 	}
557 
558 	nrg = list_next_entry(rg, link);
559 	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
560 	    has_same_uncharge_info(nrg, rg)) {
561 		nrg->from = rg->from;
562 
563 		list_del(&rg->link);
564 		put_uncharge_info(rg);
565 		kfree(rg);
566 	}
567 }
568 
569 static inline long
hugetlb_resv_map_add(struct resv_map * map,struct list_head * rg,long from,long to,struct hstate * h,struct hugetlb_cgroup * cg,long * regions_needed)570 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
571 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
572 		     long *regions_needed)
573 {
574 	struct file_region *nrg;
575 
576 	if (!regions_needed) {
577 		nrg = get_file_region_entry_from_cache(map, from, to);
578 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
579 		list_add(&nrg->link, rg);
580 		coalesce_file_region(map, nrg);
581 	} else {
582 		*regions_needed += 1;
583 	}
584 
585 	return to - from;
586 }
587 
588 /*
589  * Must be called with resv->lock held.
590  *
591  * Calling this with regions_needed != NULL will count the number of pages
592  * to be added but will not modify the linked list. And regions_needed will
593  * indicate the number of file_regions needed in the cache to carry out to add
594  * the regions for this range.
595  */
add_reservation_in_range(struct resv_map * resv,long f,long t,struct hugetlb_cgroup * h_cg,struct hstate * h,long * regions_needed)596 static long add_reservation_in_range(struct resv_map *resv, long f, long t,
597 				     struct hugetlb_cgroup *h_cg,
598 				     struct hstate *h, long *regions_needed)
599 {
600 	long add = 0;
601 	struct list_head *head = &resv->regions;
602 	long last_accounted_offset = f;
603 	struct file_region *iter, *trg = NULL;
604 	struct list_head *rg = NULL;
605 
606 	if (regions_needed)
607 		*regions_needed = 0;
608 
609 	/* In this loop, we essentially handle an entry for the range
610 	 * [last_accounted_offset, iter->from), at every iteration, with some
611 	 * bounds checking.
612 	 */
613 	list_for_each_entry_safe(iter, trg, head, link) {
614 		/* Skip irrelevant regions that start before our range. */
615 		if (iter->from < f) {
616 			/* If this region ends after the last accounted offset,
617 			 * then we need to update last_accounted_offset.
618 			 */
619 			if (iter->to > last_accounted_offset)
620 				last_accounted_offset = iter->to;
621 			continue;
622 		}
623 
624 		/* When we find a region that starts beyond our range, we've
625 		 * finished.
626 		 */
627 		if (iter->from >= t) {
628 			rg = iter->link.prev;
629 			break;
630 		}
631 
632 		/* Add an entry for last_accounted_offset -> iter->from, and
633 		 * update last_accounted_offset.
634 		 */
635 		if (iter->from > last_accounted_offset)
636 			add += hugetlb_resv_map_add(resv, iter->link.prev,
637 						    last_accounted_offset,
638 						    iter->from, h, h_cg,
639 						    regions_needed);
640 
641 		last_accounted_offset = iter->to;
642 	}
643 
644 	/* Handle the case where our range extends beyond
645 	 * last_accounted_offset.
646 	 */
647 	if (!rg)
648 		rg = head->prev;
649 	if (last_accounted_offset < t)
650 		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
651 					    t, h, h_cg, regions_needed);
652 
653 	return add;
654 }
655 
656 /* Must be called with resv->lock acquired. Will drop lock to allocate entries.
657  */
allocate_file_region_entries(struct resv_map * resv,int regions_needed)658 static int allocate_file_region_entries(struct resv_map *resv,
659 					int regions_needed)
660 	__must_hold(&resv->lock)
661 {
662 	LIST_HEAD(allocated_regions);
663 	int to_allocate = 0, i = 0;
664 	struct file_region *trg = NULL, *rg = NULL;
665 
666 	VM_BUG_ON(regions_needed < 0);
667 
668 	/*
669 	 * Check for sufficient descriptors in the cache to accommodate
670 	 * the number of in progress add operations plus regions_needed.
671 	 *
672 	 * This is a while loop because when we drop the lock, some other call
673 	 * to region_add or region_del may have consumed some region_entries,
674 	 * so we keep looping here until we finally have enough entries for
675 	 * (adds_in_progress + regions_needed).
676 	 */
677 	while (resv->region_cache_count <
678 	       (resv->adds_in_progress + regions_needed)) {
679 		to_allocate = resv->adds_in_progress + regions_needed -
680 			      resv->region_cache_count;
681 
682 		/* At this point, we should have enough entries in the cache
683 		 * for all the existing adds_in_progress. We should only be
684 		 * needing to allocate for regions_needed.
685 		 */
686 		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);
687 
688 		spin_unlock(&resv->lock);
689 		for (i = 0; i < to_allocate; i++) {
690 			trg = kmalloc_obj(*trg);
691 			if (!trg)
692 				goto out_of_memory;
693 			list_add(&trg->link, &allocated_regions);
694 		}
695 
696 		spin_lock(&resv->lock);
697 
698 		list_splice(&allocated_regions, &resv->region_cache);
699 		resv->region_cache_count += to_allocate;
700 	}
701 
702 	return 0;
703 
704 out_of_memory:
705 	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
706 		list_del(&rg->link);
707 		kfree(rg);
708 	}
709 	return -ENOMEM;
710 }
711 
712 /*
713  * Add the huge page range represented by [f, t) to the reserve
714  * map.  Regions will be taken from the cache to fill in this range.
715  * Sufficient regions should exist in the cache due to the previous
716  * call to region_chg with the same range, but in some cases the cache will not
717  * have sufficient entries due to races with other code doing region_add or
718  * region_del.  The extra needed entries will be allocated.
719  *
720  * regions_needed is the out value provided by a previous call to region_chg.
721  *
722  * Return the number of new huge pages added to the map.  This number is greater
723  * than or equal to zero.  If file_region entries needed to be allocated for
724  * this operation and we were not able to allocate, it returns -ENOMEM.
725  * region_add of regions of length 1 never allocate file_regions and cannot
726  * fail; region_chg will always allocate at least 1 entry and a region_add for
727  * 1 page will only require at most 1 entry.
728  */
region_add(struct resv_map * resv,long f,long t,long in_regions_needed,struct hstate * h,struct hugetlb_cgroup * h_cg)729 static long region_add(struct resv_map *resv, long f, long t,
730 		       long in_regions_needed, struct hstate *h,
731 		       struct hugetlb_cgroup *h_cg)
732 {
733 	long add = 0, actual_regions_needed = 0;
734 
735 	spin_lock(&resv->lock);
736 retry:
737 
738 	/* Count how many regions are actually needed to execute this add. */
739 	add_reservation_in_range(resv, f, t, NULL, NULL,
740 				 &actual_regions_needed);
741 
742 	/*
743 	 * Check for sufficient descriptors in the cache to accommodate
744 	 * this add operation. Note that actual_regions_needed may be greater
745 	 * than in_regions_needed, as the resv_map may have been modified since
746 	 * the region_chg call. In this case, we need to make sure that we
747 	 * allocate extra entries, such that we have enough for all the
748 	 * existing adds_in_progress, plus the excess needed for this
749 	 * operation.
750 	 */
751 	if (actual_regions_needed > in_regions_needed &&
752 	    resv->region_cache_count <
753 		    resv->adds_in_progress +
754 			    (actual_regions_needed - in_regions_needed)) {
755 		/* region_add operation of range 1 should never need to
756 		 * allocate file_region entries.
757 		 */
758 		VM_BUG_ON(t - f <= 1);
759 
760 		if (allocate_file_region_entries(
761 			    resv, actual_regions_needed - in_regions_needed)) {
762 			return -ENOMEM;
763 		}
764 
765 		goto retry;
766 	}
767 
768 	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);
769 
770 	resv->adds_in_progress -= in_regions_needed;
771 
772 	spin_unlock(&resv->lock);
773 	return add;
774 }
775 
776 /*
777  * Examine the existing reserve map and determine how many
778  * huge pages in the specified range [f, t) are NOT currently
779  * represented.  This routine is called before a subsequent
780  * call to region_add that will actually modify the reserve
781  * map to add the specified range [f, t).  region_chg does
782  * not change the number of huge pages represented by the
783  * map.  A number of new file_region structures is added to the cache as a
784  * placeholder, for the subsequent region_add call to use. At least 1
785  * file_region structure is added.
786  *
787  * out_regions_needed is the number of regions added to the
788  * resv->adds_in_progress.  This value needs to be provided to a follow up call
789  * to region_add or region_abort for proper accounting.
790  *
791  * Returns the number of huge pages that need to be added to the existing
792  * reservation map for the range [f, t).  This number is greater or equal to
793  * zero.  -ENOMEM is returned if a new file_region structure or cache entry
794  * is needed and can not be allocated.
795  */
region_chg(struct resv_map * resv,long f,long t,long * out_regions_needed)796 static long region_chg(struct resv_map *resv, long f, long t,
797 		       long *out_regions_needed)
798 {
799 	long chg = 0;
800 
801 	spin_lock(&resv->lock);
802 
803 	/* Count how many hugepages in this range are NOT represented. */
804 	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
805 				       out_regions_needed);
806 
807 	if (*out_regions_needed == 0)
808 		*out_regions_needed = 1;
809 
810 	if (allocate_file_region_entries(resv, *out_regions_needed))
811 		return -ENOMEM;
812 
813 	resv->adds_in_progress += *out_regions_needed;
814 
815 	spin_unlock(&resv->lock);
816 	return chg;
817 }
818 
819 /*
820  * Abort the in progress add operation.  The adds_in_progress field
821  * of the resv_map keeps track of the operations in progress between
822  * calls to region_chg and region_add.  Operations are sometimes
823  * aborted after the call to region_chg.  In such cases, region_abort
824  * is called to decrement the adds_in_progress counter. regions_needed
825  * is the value returned by the region_chg call, it is used to decrement
826  * the adds_in_progress counter.
827  *
828  * NOTE: The range arguments [f, t) are not needed or used in this
829  * routine.  They are kept to make reading the calling code easier as
830  * arguments will match the associated region_chg call.
831  */
region_abort(struct resv_map * resv,long f,long t,long regions_needed)832 static void region_abort(struct resv_map *resv, long f, long t,
833 			 long regions_needed)
834 {
835 	spin_lock(&resv->lock);
836 	VM_BUG_ON(!resv->region_cache_count);
837 	resv->adds_in_progress -= regions_needed;
838 	spin_unlock(&resv->lock);
839 }
840 
841 /*
842  * Delete the specified range [f, t) from the reserve map.  If the
843  * t parameter is LONG_MAX, this indicates that ALL regions after f
844  * should be deleted.  Locate the regions which intersect [f, t)
845  * and either trim, delete or split the existing regions.
846  *
847  * Returns the number of huge pages deleted from the reserve map.
848  * In the normal case, the return value is zero or more.  In the
849  * case where a region must be split, a new region descriptor must
850  * be allocated.  If the allocation fails, -ENOMEM will be returned.
851  * NOTE: If the parameter t == LONG_MAX, then we will never split
852  * a region and possibly return -ENOMEM.  Callers specifying
853  * t == LONG_MAX do not need to check for -ENOMEM error.
854  */
region_del(struct resv_map * resv,long f,long t)855 static long region_del(struct resv_map *resv, long f, long t)
856 {
857 	struct list_head *head = &resv->regions;
858 	struct file_region *rg, *trg;
859 	struct file_region *nrg = NULL;
860 	long del = 0;
861 
862 retry:
863 	spin_lock(&resv->lock);
864 	list_for_each_entry_safe(rg, trg, head, link) {
865 		/*
866 		 * Skip regions before the range to be deleted.  file_region
867 		 * ranges are normally of the form [from, to).  However, there
868 		 * may be a "placeholder" entry in the map which is of the form
869 		 * (from, to) with from == to.  Check for placeholder entries
870 		 * at the beginning of the range to be deleted.
871 		 */
872 		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
873 			continue;
874 
875 		if (rg->from >= t)
876 			break;
877 
878 		if (f > rg->from && t < rg->to) { /* Must split region */
879 			/*
880 			 * Check for an entry in the cache before dropping
881 			 * lock and attempting allocation.
882 			 */
883 			if (!nrg &&
884 			    resv->region_cache_count > resv->adds_in_progress) {
885 				nrg = list_first_entry(&resv->region_cache,
886 							struct file_region,
887 							link);
888 				list_del(&nrg->link);
889 				resv->region_cache_count--;
890 			}
891 
892 			if (!nrg) {
893 				spin_unlock(&resv->lock);
894 				nrg = kmalloc_obj(*nrg);
895 				if (!nrg)
896 					return -ENOMEM;
897 				goto retry;
898 			}
899 
900 			del += t - f;
901 			hugetlb_cgroup_uncharge_file_region(
902 				resv, rg, t - f, false);
903 
904 			/* New entry for end of split region */
905 			nrg->from = t;
906 			nrg->to = rg->to;
907 
908 			copy_hugetlb_cgroup_uncharge_info(nrg, rg);
909 
910 			INIT_LIST_HEAD(&nrg->link);
911 
912 			/* Original entry is trimmed */
913 			rg->to = f;
914 
915 			list_add(&nrg->link, &rg->link);
916 			nrg = NULL;
917 			break;
918 		}
919 
920 		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
921 			del += rg->to - rg->from;
922 			hugetlb_cgroup_uncharge_file_region(resv, rg,
923 							    rg->to - rg->from, true);
924 			list_del(&rg->link);
925 			kfree(rg);
926 			continue;
927 		}
928 
929 		if (f <= rg->from) {	/* Trim beginning of region */
930 			hugetlb_cgroup_uncharge_file_region(resv, rg,
931 							    t - rg->from, false);
932 
933 			del += t - rg->from;
934 			rg->from = t;
935 		} else {		/* Trim end of region */
936 			hugetlb_cgroup_uncharge_file_region(resv, rg,
937 							    rg->to - f, false);
938 
939 			del += rg->to - f;
940 			rg->to = f;
941 		}
942 	}
943 
944 	spin_unlock(&resv->lock);
945 	kfree(nrg);
946 	return del;
947 }
948 
949 /*
950  * A rare out of memory error was encountered which prevented removal of
951  * the reserve map region for a page.  The huge page itself was free'ed
952  * and removed from the page cache.  This routine will adjust the subpool
953  * usage count, and the global reserve count if needed.  By incrementing
954  * these counts, the reserve map entry which could not be deleted will
955  * appear as a "reserved" entry instead of simply dangling with incorrect
956  * counts.
957  */
hugetlb_fix_reserve_counts(struct inode * inode)958 void hugetlb_fix_reserve_counts(struct inode *inode)
959 {
960 	struct hugepage_subpool *spool = subpool_inode(inode);
961 	long rsv_adjust;
962 	bool reserved = false;
963 
964 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
965 	if (rsv_adjust > 0) {
966 		struct hstate *h = hstate_inode(inode);
967 
968 		if (!hugetlb_acct_memory(h, 1))
969 			reserved = true;
970 	} else if (!rsv_adjust) {
971 		reserved = true;
972 	}
973 
974 	if (!reserved)
975 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
976 }
977 
978 /*
979  * Count and return the number of huge pages in the reserve map
980  * that intersect with the range [f, t).
981  */
region_count(struct resv_map * resv,long f,long t)982 static long region_count(struct resv_map *resv, long f, long t)
983 {
984 	struct list_head *head = &resv->regions;
985 	struct file_region *rg;
986 	long chg = 0;
987 
988 	spin_lock(&resv->lock);
989 	/* Locate each segment we overlap with, and count that overlap. */
990 	list_for_each_entry(rg, head, link) {
991 		long seg_from;
992 		long seg_to;
993 
994 		if (rg->to <= f)
995 			continue;
996 		if (rg->from >= t)
997 			break;
998 
999 		seg_from = max(rg->from, f);
1000 		seg_to = min(rg->to, t);
1001 
1002 		chg += seg_to - seg_from;
1003 	}
1004 	spin_unlock(&resv->lock);
1005 
1006 	return chg;
1007 }
1008 
1009 /*
1010  * Convert the address within this vma to the page offset within
1011  * the mapping, huge page units here.
1012  */
vma_hugecache_offset(struct hstate * h,struct vm_area_struct * vma,unsigned long address)1013 static pgoff_t vma_hugecache_offset(struct hstate *h,
1014 			struct vm_area_struct *vma, unsigned long address)
1015 {
1016 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
1017 			(vma->vm_pgoff >> huge_page_order(h));
1018 }
1019 
1020 /**
1021  * vma_kernel_pagesize - Page size granularity for this VMA.
1022  * @vma: The user mapping.
1023  *
1024  * Folios in this VMA will be aligned to, and at least the size of the
1025  * number of bytes returned by this function.
1026  *
1027  * Return: The default size of the folios allocated when backing a VMA.
1028  */
vma_kernel_pagesize(struct vm_area_struct * vma)1029 unsigned long vma_kernel_pagesize(struct vm_area_struct *vma)
1030 {
1031 	if (vma->vm_ops && vma->vm_ops->pagesize)
1032 		return vma->vm_ops->pagesize(vma);
1033 	return PAGE_SIZE;
1034 }
1035 EXPORT_SYMBOL_GPL(vma_kernel_pagesize);
1036 
1037 /*
1038  * Return the page size being used by the MMU to back a VMA. In the majority
1039  * of cases, the page size used by the kernel matches the MMU size. On
1040  * architectures where it differs, an architecture-specific 'strong'
1041  * version of this symbol is required.
1042  */
vma_mmu_pagesize(struct vm_area_struct * vma)1043 __weak unsigned long vma_mmu_pagesize(struct vm_area_struct *vma)
1044 {
1045 	return vma_kernel_pagesize(vma);
1046 }
1047 
1048 /*
1049  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
1050  * bits of the reservation map pointer, which are always clear due to
1051  * alignment.
1052  */
1053 #define HPAGE_RESV_OWNER    (1UL << 0)
1054 #define HPAGE_RESV_UNMAPPED (1UL << 1)
1055 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1056 
1057 /*
1058  * These helpers are used to track how many pages are reserved for
1059  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1060  * is guaranteed to have their future faults succeed.
1061  *
1062  * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1063  * the reserve counters are updated with the hugetlb_lock held. It is safe
1064  * to reset the VMA at fork() time as it is not in use yet and there is no
1065  * chance of the global counters getting corrupted as a result of the values.
1066  *
1067  * The private mapping reservation is represented in a subtly different
1068  * manner to a shared mapping.  A shared mapping has a region map associated
1069  * with the underlying file, this region map represents the backing file
1070  * pages which have ever had a reservation assigned which this persists even
1071  * after the page is instantiated.  A private mapping has a region map
1072  * associated with the original mmap which is attached to all VMAs which
1073  * reference it, this region map represents those offsets which have consumed
1074  * reservation ie. where pages have been instantiated.
1075  */
get_vma_private_data(struct vm_area_struct * vma)1076 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1077 {
1078 	return (unsigned long)vma->vm_private_data;
1079 }
1080 
set_vma_private_data(struct vm_area_struct * vma,unsigned long value)1081 static void set_vma_private_data(struct vm_area_struct *vma,
1082 							unsigned long value)
1083 {
1084 	vma->vm_private_data = (void *)value;
1085 }
1086 
1087 static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map * resv_map,struct hugetlb_cgroup * h_cg,struct hstate * h)1088 resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
1089 					  struct hugetlb_cgroup *h_cg,
1090 					  struct hstate *h)
1091 {
1092 #ifdef CONFIG_CGROUP_HUGETLB
1093 	if (!h_cg || !h) {
1094 		resv_map->reservation_counter = NULL;
1095 		resv_map->pages_per_hpage = 0;
1096 		resv_map->css = NULL;
1097 	} else {
1098 		resv_map->reservation_counter =
1099 			&h_cg->rsvd_hugepage[hstate_index(h)];
1100 		resv_map->pages_per_hpage = pages_per_huge_page(h);
1101 		resv_map->css = &h_cg->css;
1102 	}
1103 #endif
1104 }
1105 
resv_map_alloc(void)1106 struct resv_map *resv_map_alloc(void)
1107 {
1108 	struct resv_map *resv_map = kmalloc_obj(*resv_map);
1109 	struct file_region *rg = kmalloc_obj(*rg);
1110 
1111 	if (!resv_map || !rg) {
1112 		kfree(resv_map);
1113 		kfree(rg);
1114 		return NULL;
1115 	}
1116 
1117 	kref_init(&resv_map->refs);
1118 	spin_lock_init(&resv_map->lock);
1119 	INIT_LIST_HEAD(&resv_map->regions);
1120 	init_rwsem(&resv_map->rw_sema);
1121 
1122 	resv_map->adds_in_progress = 0;
1123 	/*
1124 	 * Initialize these to 0. On shared mappings, 0's here indicate these
1125 	 * fields don't do cgroup accounting. On private mappings, these will be
1126 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
1127 	 * reservations are to be un-charged from here.
1128 	 */
1129 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1130 
1131 	INIT_LIST_HEAD(&resv_map->region_cache);
1132 	list_add(&rg->link, &resv_map->region_cache);
1133 	resv_map->region_cache_count = 1;
1134 
1135 	return resv_map;
1136 }
1137 
resv_map_release(struct kref * ref)1138 void resv_map_release(struct kref *ref)
1139 {
1140 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1141 	struct list_head *head = &resv_map->region_cache;
1142 	struct file_region *rg, *trg;
1143 
1144 	/* Clear out any active regions before we release the map. */
1145 	region_del(resv_map, 0, LONG_MAX);
1146 
1147 	/* ... and any entries left in the cache */
1148 	list_for_each_entry_safe(rg, trg, head, link) {
1149 		list_del(&rg->link);
1150 		kfree(rg);
1151 	}
1152 
1153 	VM_BUG_ON(resv_map->adds_in_progress);
1154 
1155 	kfree(resv_map);
1156 }
1157 
inode_resv_map(struct inode * inode)1158 static inline struct resv_map *inode_resv_map(struct inode *inode)
1159 {
1160 	return HUGETLBFS_I(inode)->resv_map;
1161 }
1162 
vma_resv_map(struct vm_area_struct * vma)1163 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1164 {
1165 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1166 	if (vma->vm_flags & VM_MAYSHARE) {
1167 		struct address_space *mapping = vma->vm_file->f_mapping;
1168 		struct inode *inode = mapping->host;
1169 
1170 		return inode_resv_map(inode);
1171 
1172 	} else {
1173 		return (struct resv_map *)(get_vma_private_data(vma) &
1174 							~HPAGE_RESV_MASK);
1175 	}
1176 }
1177 
set_vma_resv_flags(struct vm_area_struct * vma,unsigned long flags)1178 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
1179 {
1180 	VM_WARN_ON_ONCE_VMA(!is_vm_hugetlb_page(vma), vma);
1181 	VM_WARN_ON_ONCE_VMA(vma->vm_flags & VM_MAYSHARE, vma);
1182 
1183 	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
1184 }
1185 
set_vma_desc_resv_map(struct vm_area_desc * desc,struct resv_map * map)1186 static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map)
1187 {
1188 	VM_WARN_ON_ONCE(!is_vma_hugetlb_flags(&desc->vma_flags));
1189 	VM_WARN_ON_ONCE(vma_desc_test_flags(desc, VMA_MAYSHARE_BIT));
1190 
1191 	desc->private_data = map;
1192 }
1193 
set_vma_desc_resv_flags(struct vm_area_desc * desc,unsigned long flags)1194 static void set_vma_desc_resv_flags(struct vm_area_desc *desc, unsigned long flags)
1195 {
1196 	VM_WARN_ON_ONCE(!is_vma_hugetlb_flags(&desc->vma_flags));
1197 	VM_WARN_ON_ONCE(vma_desc_test_flags(desc, VMA_MAYSHARE_BIT));
1198 
1199 	desc->private_data = (void *)((unsigned long)desc->private_data | flags);
1200 }
1201 
is_vma_resv_set(struct vm_area_struct * vma,unsigned long flag)1202 static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
1203 {
1204 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1205 
1206 	return (get_vma_private_data(vma) & flag) != 0;
1207 }
1208 
is_vma_desc_resv_set(struct vm_area_desc * desc,unsigned long flag)1209 static bool is_vma_desc_resv_set(struct vm_area_desc *desc, unsigned long flag)
1210 {
1211 	VM_WARN_ON_ONCE(!is_vma_hugetlb_flags(&desc->vma_flags));
1212 
1213 	return ((unsigned long)desc->private_data) & flag;
1214 }
1215 
__vma_private_lock(struct vm_area_struct * vma)1216 bool __vma_private_lock(struct vm_area_struct *vma)
1217 {
1218 	return !(vma->vm_flags & VM_MAYSHARE) &&
1219 		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
1220 		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
1221 }
1222 
hugetlb_dup_vma_private(struct vm_area_struct * vma)1223 void hugetlb_dup_vma_private(struct vm_area_struct *vma)
1224 {
1225 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1226 	/*
1227 	 * Clear vm_private_data
1228 	 * - For shared mappings this is a per-vma semaphore that may be
1229 	 *   allocated in a subsequent call to hugetlb_vm_op_open.
1230 	 *   Before clearing, make sure pointer is not associated with vma
1231 	 *   as this will leak the structure.  This is the case when called
1232 	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
1233 	 *   been called to allocate a new structure.
1234 	 * - For MAP_PRIVATE mappings, this is the reserve map which does
1235 	 *   not apply to children.  Faults generated by the children are
1236 	 *   not guaranteed to succeed, even if read-only.
1237 	 */
1238 	if (vma->vm_flags & VM_MAYSHARE) {
1239 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
1240 
1241 		if (vma_lock && vma_lock->vma != vma)
1242 			vma->vm_private_data = NULL;
1243 	} else {
1244 		vma->vm_private_data = NULL;
1245 	}
1246 }
1247 
1248 /*
1249  * Reset and decrement one ref on hugepage private reservation.
1250  * Called with mm->mmap_lock writer semaphore held.
1251  * This function should be only used by mremap and operate on
1252  * same sized vma. It should never come here with last ref on the
1253  * reservation.
1254  */
clear_vma_resv_huge_pages(struct vm_area_struct * vma)1255 void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
1256 {
1257 	/*
1258 	 * Clear the old hugetlb private page reservation.
1259 	 * It has already been transferred to new_vma.
1260 	 *
1261 	 * During a mremap() operation of a hugetlb vma we call move_vma()
1262 	 * which copies vma into new_vma and unmaps vma. After the copy
1263 	 * operation both new_vma and vma share a reference to the resv_map
1264 	 * struct, and at that point vma is about to be unmapped. We don't
1265 	 * want to return the reservation to the pool at unmap of vma because
1266 	 * the reservation still lives on in new_vma, so simply decrement the
1267 	 * ref here and remove the resv_map reference from this vma.
1268 	 */
1269 	struct resv_map *reservations = vma_resv_map(vma);
1270 
1271 	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
1272 		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
1273 		kref_put(&reservations->refs, resv_map_release);
1274 	}
1275 
1276 	hugetlb_dup_vma_private(vma);
1277 }
1278 
enqueue_hugetlb_folio(struct hstate * h,struct folio * folio)1279 static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
1280 {
1281 	int nid = folio_nid(folio);
1282 
1283 	lockdep_assert_held(&hugetlb_lock);
1284 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1285 
1286 	list_move(&folio->lru, &h->hugepage_freelists[nid]);
1287 	h->free_huge_pages++;
1288 	h->free_huge_pages_node[nid]++;
1289 	folio_set_hugetlb_freed(folio);
1290 }
1291 
dequeue_hugetlb_folio_node_exact(struct hstate * h,int nid)1292 static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
1293 								int nid)
1294 {
1295 	struct folio *folio;
1296 	bool pin = !!(current->flags & PF_MEMALLOC_PIN);
1297 
1298 	lockdep_assert_held(&hugetlb_lock);
1299 	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
1300 		if (pin && !folio_is_longterm_pinnable(folio))
1301 			continue;
1302 
1303 		if (folio_test_hwpoison(folio))
1304 			continue;
1305 
1306 		if (is_migrate_isolate_page(&folio->page))
1307 			continue;
1308 
1309 		list_move(&folio->lru, &h->hugepage_activelist);
1310 		folio_ref_unfreeze(folio, 1);
1311 		folio_clear_hugetlb_freed(folio);
1312 		h->free_huge_pages--;
1313 		h->free_huge_pages_node[nid]--;
1314 		return folio;
1315 	}
1316 
1317 	return NULL;
1318 }
1319 
dequeue_hugetlb_folio_nodemask(struct hstate * h,gfp_t gfp_mask,int nid,nodemask_t * nmask)1320 static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
1321 							int nid, nodemask_t *nmask)
1322 {
1323 	unsigned int cpuset_mems_cookie;
1324 	struct zonelist *zonelist;
1325 	struct zone *zone;
1326 	struct zoneref *z;
1327 	int node = NUMA_NO_NODE;
1328 
1329 	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectifiy. */
1330 	if (nid == NUMA_NO_NODE)
1331 		nid = numa_node_id();
1332 
1333 	zonelist = node_zonelist(nid, gfp_mask);
1334 
1335 retry_cpuset:
1336 	cpuset_mems_cookie = read_mems_allowed_begin();
1337 	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
1338 		struct folio *folio;
1339 
1340 		if (!cpuset_zone_allowed(zone, gfp_mask))
1341 			continue;
1342 		/*
1343 		 * no need to ask again on the same node. Pool is node rather than
1344 		 * zone aware
1345 		 */
1346 		if (zone_to_nid(zone) == node)
1347 			continue;
1348 		node = zone_to_nid(zone);
1349 
1350 		folio = dequeue_hugetlb_folio_node_exact(h, node);
1351 		if (folio)
1352 			return folio;
1353 	}
1354 	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
1355 		goto retry_cpuset;
1356 
1357 	return NULL;
1358 }
1359 
available_huge_pages(struct hstate * h)1360 static unsigned long available_huge_pages(struct hstate *h)
1361 {
1362 	return h->free_huge_pages - h->resv_huge_pages;
1363 }
1364 
dequeue_hugetlb_folio_vma(struct hstate * h,struct vm_area_struct * vma,unsigned long address,long gbl_chg)1365 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1366 				struct vm_area_struct *vma,
1367 				unsigned long address, long gbl_chg)
1368 {
1369 	struct folio *folio = NULL;
1370 	struct mempolicy *mpol;
1371 	gfp_t gfp_mask;
1372 	nodemask_t *nodemask;
1373 	int nid;
1374 
1375 	/*
1376 	 * gbl_chg==1 means the allocation requires a new page that was not
1377 	 * reserved before.  Making sure there's at least one free page.
1378 	 */
1379 	if (gbl_chg && !available_huge_pages(h))
1380 		goto err;
1381 
1382 	gfp_mask = htlb_alloc_mask(h);
1383 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1384 
1385 	if (mpol_is_preferred_many(mpol)) {
1386 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1387 							nid, nodemask);
1388 
1389 		/* Fallback to all nodes if page==NULL */
1390 		nodemask = NULL;
1391 	}
1392 
1393 	if (!folio)
1394 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1395 							nid, nodemask);
1396 
1397 	mpol_cond_put(mpol);
1398 	return folio;
1399 
1400 err:
1401 	return NULL;
1402 }
1403 
1404 #if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && defined(CONFIG_CONTIG_ALLOC)
alloc_gigantic_frozen_folio(int order,gfp_t gfp_mask,int nid,nodemask_t * nodemask)1405 static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask,
1406 		int nid, nodemask_t *nodemask)
1407 {
1408 	struct folio *folio;
1409 
1410 	folio = hugetlb_cma_alloc_frozen_folio(order, gfp_mask, nid, nodemask);
1411 	if (folio)
1412 		return folio;
1413 
1414 	if (hugetlb_cma_exclusive_alloc())
1415 		return NULL;
1416 
1417 	folio = (struct folio *)alloc_contig_frozen_pages(1 << order, gfp_mask,
1418 							  nid, nodemask);
1419 	return folio;
1420 }
1421 #else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE || !CONFIG_CONTIG_ALLOC */
alloc_gigantic_frozen_folio(int order,gfp_t gfp_mask,int nid,nodemask_t * nodemask)1422 static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask, int nid,
1423 					  nodemask_t *nodemask)
1424 {
1425 	return NULL;
1426 }
1427 #endif
1428 
1429 /*
1430  * Remove hugetlb folio from lists.
1431  * If vmemmap exists for the folio, clear the hugetlb flag so that the
1432  * folio appears as just a compound page.  Otherwise, wait until after
1433  * allocating vmemmap to clear the flag.
1434  *
1435  * Must be called with hugetlb lock held.
1436  */
remove_hugetlb_folio(struct hstate * h,struct folio * folio,bool adjust_surplus)1437 void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
1438 			  bool adjust_surplus)
1439 {
1440 	int nid = folio_nid(folio);
1441 
1442 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
1443 	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);
1444 
1445 	lockdep_assert_held(&hugetlb_lock);
1446 	if (hstate_is_gigantic_no_runtime(h))
1447 		return;
1448 
1449 	list_del(&folio->lru);
1450 
1451 	if (folio_test_hugetlb_freed(folio)) {
1452 		folio_clear_hugetlb_freed(folio);
1453 		h->free_huge_pages--;
1454 		h->free_huge_pages_node[nid]--;
1455 	}
1456 	if (adjust_surplus) {
1457 		h->surplus_huge_pages--;
1458 		h->surplus_huge_pages_node[nid]--;
1459 	}
1460 
1461 	/*
1462 	 * We can only clear the hugetlb flag after allocating vmemmap
1463 	 * pages.  Otherwise, someone (memory error handling) may try to write
1464 	 * to tail struct pages.
1465 	 */
1466 	if (!folio_test_hugetlb_vmemmap_optimized(folio))
1467 		__folio_clear_hugetlb(folio);
1468 
1469 	h->nr_huge_pages--;
1470 	h->nr_huge_pages_node[nid]--;
1471 }
1472 
add_hugetlb_folio(struct hstate * h,struct folio * folio,bool adjust_surplus)1473 void add_hugetlb_folio(struct hstate *h, struct folio *folio,
1474 		       bool adjust_surplus)
1475 {
1476 	int nid = folio_nid(folio);
1477 
1478 	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);
1479 
1480 	lockdep_assert_held(&hugetlb_lock);
1481 
1482 	INIT_LIST_HEAD(&folio->lru);
1483 	h->nr_huge_pages++;
1484 	h->nr_huge_pages_node[nid]++;
1485 
1486 	if (adjust_surplus) {
1487 		h->surplus_huge_pages++;
1488 		h->surplus_huge_pages_node[nid]++;
1489 	}
1490 
1491 	__folio_set_hugetlb(folio);
1492 	folio_change_private(folio, NULL);
1493 	/*
1494 	 * We have to set hugetlb_vmemmap_optimized again as above
1495 	 * folio_change_private(folio, NULL) cleared it.
1496 	 */
1497 	folio_set_hugetlb_vmemmap_optimized(folio);
1498 
1499 	arch_clear_hugetlb_flags(folio);
1500 	enqueue_hugetlb_folio(h, folio);
1501 }
1502 
__update_and_free_hugetlb_folio(struct hstate * h,struct folio * folio)1503 static void __update_and_free_hugetlb_folio(struct hstate *h,
1504 						struct folio *folio)
1505 {
1506 	bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);
1507 
1508 	if (hstate_is_gigantic_no_runtime(h))
1509 		return;
1510 
1511 	/*
1512 	 * If we don't know which subpages are hwpoisoned, we can't free
1513 	 * the hugepage, so it's leaked intentionally.
1514 	 */
1515 	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
1516 		return;
1517 
1518 	/*
1519 	 * If folio is not vmemmap optimized (!clear_flag), then the folio
1520 	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
1521 	 * can only be passed hugetlb pages and will BUG otherwise.
1522 	 */
1523 	if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
1524 		spin_lock_irq(&hugetlb_lock);
1525 		/*
1526 		 * If we cannot allocate vmemmap pages, just refuse to free the
1527 		 * page and put the page back on the hugetlb free list and treat
1528 		 * as a surplus page.
1529 		 */
1530 		add_hugetlb_folio(h, folio, true);
1531 		spin_unlock_irq(&hugetlb_lock);
1532 		return;
1533 	}
1534 
1535 	/*
1536 	 * If vmemmap pages were allocated above, then we need to clear the
1537 	 * hugetlb flag under the hugetlb lock.
1538 	 */
1539 	if (folio_test_hugetlb(folio)) {
1540 		spin_lock_irq(&hugetlb_lock);
1541 		__folio_clear_hugetlb(folio);
1542 		spin_unlock_irq(&hugetlb_lock);
1543 	}
1544 
1545 	/*
1546 	 * Move PageHWPoison flag from head page to the raw error pages,
1547 	 * which makes any healthy subpages reusable.
1548 	 */
1549 	if (unlikely(folio_test_hwpoison(folio)))
1550 		folio_clear_hugetlb_hwpoison(folio);
1551 
1552 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1553 	if (folio_test_hugetlb_cma(folio))
1554 		hugetlb_cma_free_frozen_folio(folio);
1555 	else
1556 		free_frozen_pages(&folio->page, folio_order(folio));
1557 }
1558 
1559 /*
1560  * As update_and_free_hugetlb_folio() can be called under any context, so we cannot
1561  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1562  * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
1563  * the vmemmap pages.
1564  *
1565  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1566  * freed and frees them one-by-one. As the page->mapping pointer is going
1567  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1568  * structure of a lockless linked list of huge pages to be freed.
1569  */
1570 static LLIST_HEAD(hpage_freelist);
1571 
free_hpage_workfn(struct work_struct * work)1572 static void free_hpage_workfn(struct work_struct *work)
1573 {
1574 	struct llist_node *node;
1575 
1576 	node = llist_del_all(&hpage_freelist);
1577 
1578 	while (node) {
1579 		struct folio *folio;
1580 		struct hstate *h;
1581 
1582 		folio = container_of((struct address_space **)node,
1583 				     struct folio, mapping);
1584 		node = node->next;
1585 		folio->mapping = NULL;
1586 		/*
1587 		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
1588 		 * folio_hstate() is going to trigger because a previous call to
1589 		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
1590 		 * not use folio_hstate() directly.
1591 		 */
1592 		h = size_to_hstate(folio_size(folio));
1593 
1594 		__update_and_free_hugetlb_folio(h, folio);
1595 
1596 		cond_resched();
1597 	}
1598 }
1599 static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1600 
flush_free_hpage_work(struct hstate * h)1601 static inline void flush_free_hpage_work(struct hstate *h)
1602 {
1603 	if (hugetlb_vmemmap_optimizable(h))
1604 		flush_work(&free_hpage_work);
1605 }
1606 
update_and_free_hugetlb_folio(struct hstate * h,struct folio * folio,bool atomic)1607 static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
1608 				 bool atomic)
1609 {
1610 	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
1611 		__update_and_free_hugetlb_folio(h, folio);
1612 		return;
1613 	}
1614 
1615 	/*
1616 	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
1617 	 *
1618 	 * Only call schedule_work() if hpage_freelist is previously
1619 	 * empty. Otherwise, schedule_work() had been called but the workfn
1620 	 * hasn't retrieved the list yet.
1621 	 */
1622 	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
1623 		schedule_work(&free_hpage_work);
1624 }
1625 
bulk_vmemmap_restore_error(struct hstate * h,struct list_head * folio_list,struct list_head * non_hvo_folios)1626 static void bulk_vmemmap_restore_error(struct hstate *h,
1627 					struct list_head *folio_list,
1628 					struct list_head *non_hvo_folios)
1629 {
1630 	struct folio *folio, *t_folio;
1631 
1632 	if (!list_empty(non_hvo_folios)) {
1633 		/*
1634 		 * Free any restored hugetlb pages so that restore of the
1635 		 * entire list can be retried.
1636 		 * The idea is that in the common case of ENOMEM errors freeing
1637 		 * hugetlb pages with vmemmap we will free up memory so that we
1638 		 * can allocate vmemmap for more hugetlb pages.
1639 		 */
1640 		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
1641 			list_del(&folio->lru);
1642 			spin_lock_irq(&hugetlb_lock);
1643 			__folio_clear_hugetlb(folio);
1644 			spin_unlock_irq(&hugetlb_lock);
1645 			update_and_free_hugetlb_folio(h, folio, false);
1646 			cond_resched();
1647 		}
1648 	} else {
1649 		/*
1650 		 * In the case where there are no folios which can be
1651 		 * immediately freed, we loop through the list trying to restore
1652 		 * vmemmap individually in the hope that someone elsewhere may
1653 		 * have done something to cause success (such as freeing some
1654 		 * memory).  If unable to restore a hugetlb page, the hugetlb
1655 		 * page is made a surplus page and removed from the list.
1656 		 * If are able to restore vmemmap and free one hugetlb page, we
1657 		 * quit processing the list to retry the bulk operation.
1658 		 */
1659 		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
1660 			if (hugetlb_vmemmap_restore_folio(h, folio)) {
1661 				list_del(&folio->lru);
1662 				spin_lock_irq(&hugetlb_lock);
1663 				add_hugetlb_folio(h, folio, true);
1664 				spin_unlock_irq(&hugetlb_lock);
1665 			} else {
1666 				list_del(&folio->lru);
1667 				spin_lock_irq(&hugetlb_lock);
1668 				__folio_clear_hugetlb(folio);
1669 				spin_unlock_irq(&hugetlb_lock);
1670 				update_and_free_hugetlb_folio(h, folio, false);
1671 				cond_resched();
1672 				break;
1673 			}
1674 	}
1675 }
1676 
update_and_free_pages_bulk(struct hstate * h,struct list_head * folio_list)1677 static void update_and_free_pages_bulk(struct hstate *h,
1678 						struct list_head *folio_list)
1679 {
1680 	long ret;
1681 	struct folio *folio, *t_folio;
1682 	LIST_HEAD(non_hvo_folios);
1683 
1684 	/*
1685 	 * First allocate required vmemmmap (if necessary) for all folios.
1686 	 * Carefully handle errors and free up any available hugetlb pages
1687 	 * in an effort to make forward progress.
1688 	 */
1689 retry:
1690 	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
1691 	if (ret < 0) {
1692 		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
1693 		goto retry;
1694 	}
1695 
1696 	/*
1697 	 * At this point, list should be empty, ret should be >= 0 and there
1698 	 * should only be pages on the non_hvo_folios list.
1699 	 * Do note that the non_hvo_folios list could be empty.
1700 	 * Without HVO enabled, ret will be 0 and there is no need to call
1701 	 * __folio_clear_hugetlb as this was done previously.
1702 	 */
1703 	VM_WARN_ON(!list_empty(folio_list));
1704 	VM_WARN_ON(ret < 0);
1705 	if (!list_empty(&non_hvo_folios) && ret) {
1706 		spin_lock_irq(&hugetlb_lock);
1707 		list_for_each_entry(folio, &non_hvo_folios, lru)
1708 			__folio_clear_hugetlb(folio);
1709 		spin_unlock_irq(&hugetlb_lock);
1710 	}
1711 
1712 	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
1713 		update_and_free_hugetlb_folio(h, folio, false);
1714 		cond_resched();
1715 	}
1716 }
1717 
size_to_hstate(unsigned long size)1718 struct hstate *size_to_hstate(unsigned long size)
1719 {
1720 	struct hstate *h;
1721 
1722 	for_each_hstate(h) {
1723 		if (huge_page_size(h) == size)
1724 			return h;
1725 	}
1726 	return NULL;
1727 }
1728 
free_huge_folio(struct folio * folio)1729 void free_huge_folio(struct folio *folio)
1730 {
1731 	/*
1732 	 * Can't pass hstate in here because it is called from the
1733 	 * generic mm code.
1734 	 */
1735 	struct hstate *h = folio_hstate(folio);
1736 	int nid = folio_nid(folio);
1737 	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
1738 	bool restore_reserve;
1739 	unsigned long flags;
1740 
1741 	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
1742 	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);
1743 
1744 	hugetlb_set_folio_subpool(folio, NULL);
1745 	if (folio_test_anon(folio))
1746 		__ClearPageAnonExclusive(&folio->page);
1747 	folio->mapping = NULL;
1748 	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
1749 	folio_clear_hugetlb_restore_reserve(folio);
1750 
1751 	/*
1752 	 * If HPageRestoreReserve was set on page, page allocation consumed a
1753 	 * reservation.  If the page was associated with a subpool, there
1754 	 * would have been a page reserved in the subpool before allocation
1755 	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
1756 	 * reservation, do not call hugepage_subpool_put_pages() as this will
1757 	 * remove the reserved page from the subpool.
1758 	 */
1759 	if (!restore_reserve) {
1760 		/*
1761 		 * A return code of zero implies that the subpool will be
1762 		 * under its minimum size if the reservation is not restored
1763 		 * after page is free.  Therefore, force restore_reserve
1764 		 * operation.
1765 		 */
1766 		if (hugepage_subpool_put_pages(spool, 1) == 0)
1767 			restore_reserve = true;
1768 	}
1769 
1770 	spin_lock_irqsave(&hugetlb_lock, flags);
1771 	folio_clear_hugetlb_migratable(folio);
1772 	hugetlb_cgroup_uncharge_folio(hstate_index(h),
1773 				     pages_per_huge_page(h), folio);
1774 	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
1775 					  pages_per_huge_page(h), folio);
1776 	lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
1777 	mem_cgroup_uncharge(folio);
1778 	if (restore_reserve)
1779 		h->resv_huge_pages++;
1780 
1781 	if (folio_test_hugetlb_temporary(folio)) {
1782 		remove_hugetlb_folio(h, folio, false);
1783 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1784 		update_and_free_hugetlb_folio(h, folio, true);
1785 	} else if (h->surplus_huge_pages_node[nid]) {
1786 		/* remove the page from active list */
1787 		remove_hugetlb_folio(h, folio, true);
1788 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1789 		update_and_free_hugetlb_folio(h, folio, true);
1790 	} else {
1791 		arch_clear_hugetlb_flags(folio);
1792 		enqueue_hugetlb_folio(h, folio);
1793 		spin_unlock_irqrestore(&hugetlb_lock, flags);
1794 	}
1795 }
1796 
1797 /*
1798  * Must be called with the hugetlb lock held
1799  */
account_new_hugetlb_folio(struct hstate * h,struct folio * folio)1800 static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
1801 {
1802 	lockdep_assert_held(&hugetlb_lock);
1803 	h->nr_huge_pages++;
1804 	h->nr_huge_pages_node[folio_nid(folio)]++;
1805 }
1806 
init_new_hugetlb_folio(struct folio * folio)1807 void init_new_hugetlb_folio(struct folio *folio)
1808 {
1809 	__folio_set_hugetlb(folio);
1810 	INIT_LIST_HEAD(&folio->lru);
1811 	hugetlb_set_folio_subpool(folio, NULL);
1812 	set_hugetlb_cgroup(folio, NULL);
1813 	set_hugetlb_cgroup_rsvd(folio, NULL);
1814 }
1815 
1816 /*
1817  * Find and lock address space (mapping) in write mode.
1818  *
1819  * Upon entry, the folio is locked which means that folio_mapping() is
1820  * stable.  Due to locking order, we can only trylock_write.  If we can
1821  * not get the lock, simply return NULL to caller.
1822  */
hugetlb_folio_mapping_lock_write(struct folio * folio)1823 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1824 {
1825 	struct address_space *mapping = folio_mapping(folio);
1826 
1827 	if (!mapping)
1828 		return mapping;
1829 
1830 	if (i_mmap_trylock_write(mapping))
1831 		return mapping;
1832 
1833 	return NULL;
1834 }
1835 
alloc_buddy_frozen_folio(int order,gfp_t gfp_mask,int nid,nodemask_t * nmask,nodemask_t * node_alloc_noretry)1836 static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
1837 		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
1838 {
1839 	struct folio *folio;
1840 	bool alloc_try_hard = true;
1841 
1842 	/*
1843 	 * By default we always try hard to allocate the folio with
1844 	 * __GFP_RETRY_MAYFAIL flag.  However, if we are allocating folios in
1845 	 * a loop (to adjust global huge page counts) and previous allocation
1846 	 * failed, do not continue to try hard on the same node.  Use the
1847 	 * node_alloc_noretry bitmap to manage this state information.
1848 	 */
1849 	if (node_alloc_noretry && node_isset(nid, *node_alloc_noretry))
1850 		alloc_try_hard = false;
1851 	if (alloc_try_hard)
1852 		gfp_mask |= __GFP_RETRY_MAYFAIL;
1853 
1854 	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);
1855 
1856 	/*
1857 	 * If we did not specify __GFP_RETRY_MAYFAIL, but still got a
1858 	 * folio this indicates an overall state change.  Clear bit so
1859 	 * that we resume normal 'try hard' allocations.
1860 	 */
1861 	if (node_alloc_noretry && folio && !alloc_try_hard)
1862 		node_clear(nid, *node_alloc_noretry);
1863 
1864 	/*
1865 	 * If we tried hard to get a folio but failed, set bit so that
1866 	 * subsequent attempts will not try as hard until there is an
1867 	 * overall state change.
1868 	 */
1869 	if (node_alloc_noretry && !folio && alloc_try_hard)
1870 		node_set(nid, *node_alloc_noretry);
1871 
1872 	if (!folio) {
1873 		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
1874 		return NULL;
1875 	}
1876 
1877 	__count_vm_event(HTLB_BUDDY_PGALLOC);
1878 	return folio;
1879 }
1880 
only_alloc_fresh_hugetlb_folio(struct hstate * h,gfp_t gfp_mask,int nid,nodemask_t * nmask,nodemask_t * node_alloc_noretry)1881 static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
1882 		gfp_t gfp_mask, int nid, nodemask_t *nmask,
1883 		nodemask_t *node_alloc_noretry)
1884 {
1885 	struct folio *folio;
1886 	int order = huge_page_order(h);
1887 
1888 	if (nid == NUMA_NO_NODE)
1889 		nid = numa_mem_id();
1890 
1891 	if (order_is_gigantic(order))
1892 		folio = alloc_gigantic_frozen_folio(order, gfp_mask, nid, nmask);
1893 	else
1894 		folio = alloc_buddy_frozen_folio(order, gfp_mask, nid, nmask,
1895 						 node_alloc_noretry);
1896 	if (folio)
1897 		init_new_hugetlb_folio(folio);
1898 	return folio;
1899 }
1900 
1901 /*
1902  * Common helper to allocate a fresh hugetlb folio. All specific allocators
1903  * should use this function to get new hugetlb folio
1904  *
1905  * Note that returned folio is 'frozen':  ref count of head page and all tail
1906  * pages is zero, and the accounting must be done in the caller.
1907  */
alloc_fresh_hugetlb_folio(struct hstate * h,gfp_t gfp_mask,int nid,nodemask_t * nmask)1908 static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
1909 		gfp_t gfp_mask, int nid, nodemask_t *nmask)
1910 {
1911 	struct folio *folio;
1912 
1913 	folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask, NULL);
1914 	if (folio)
1915 		hugetlb_vmemmap_optimize_folio(h, folio);
1916 	return folio;
1917 }
1918 
prep_and_add_allocated_folios(struct hstate * h,struct list_head * folio_list)1919 void prep_and_add_allocated_folios(struct hstate *h,
1920 				   struct list_head *folio_list)
1921 {
1922 	unsigned long flags;
1923 	struct folio *folio, *tmp_f;
1924 
1925 	/* Send list for bulk vmemmap optimization processing */
1926 	hugetlb_vmemmap_optimize_folios(h, folio_list);
1927 
1928 	/* Add all new pool pages to free lists in one lock cycle */
1929 	spin_lock_irqsave(&hugetlb_lock, flags);
1930 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
1931 		account_new_hugetlb_folio(h, folio);
1932 		enqueue_hugetlb_folio(h, folio);
1933 	}
1934 	spin_unlock_irqrestore(&hugetlb_lock, flags);
1935 }
1936 
1937 /*
1938  * Allocates a fresh hugetlb page in a node interleaved manner.  The page
1939  * will later be added to the appropriate hugetlb pool.
1940  */
alloc_pool_huge_folio(struct hstate * h,nodemask_t * nodes_allowed,nodemask_t * node_alloc_noretry,int * next_node)1941 static struct folio *alloc_pool_huge_folio(struct hstate *h,
1942 					nodemask_t *nodes_allowed,
1943 					nodemask_t *node_alloc_noretry,
1944 					int *next_node)
1945 {
1946 	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
1947 	int nr_nodes, node;
1948 
1949 	for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
1950 		struct folio *folio;
1951 
1952 		folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
1953 					nodes_allowed, node_alloc_noretry);
1954 		if (folio)
1955 			return folio;
1956 	}
1957 
1958 	return NULL;
1959 }
1960 
1961 /*
1962  * Remove huge page from pool from next node to free.  Attempt to keep
1963  * persistent huge pages more or less balanced over allowed nodes.
1964  * This routine only 'removes' the hugetlb page.  The caller must make
1965  * an additional call to free the page to low level allocators.
1966  * Called with hugetlb_lock locked.
1967  */
remove_pool_hugetlb_folio(struct hstate * h,nodemask_t * nodes_allowed,bool acct_surplus)1968 static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
1969 		nodemask_t *nodes_allowed, bool acct_surplus)
1970 {
1971 	int nr_nodes, node;
1972 	struct folio *folio = NULL;
1973 
1974 	lockdep_assert_held(&hugetlb_lock);
1975 	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
1976 		/*
1977 		 * If we're returning unused surplus pages, only examine
1978 		 * nodes with surplus pages.
1979 		 */
1980 		if ((!acct_surplus || h->surplus_huge_pages_node[node]) &&
1981 		    !list_empty(&h->hugepage_freelists[node])) {
1982 			folio = list_entry(h->hugepage_freelists[node].next,
1983 					  struct folio, lru);
1984 			remove_hugetlb_folio(h, folio, acct_surplus);
1985 			break;
1986 		}
1987 	}
1988 
1989 	return folio;
1990 }
1991 
1992 /*
1993  * Dissolve a given free hugetlb folio into free buddy pages. This function
1994  * does nothing for in-use hugetlb folios and non-hugetlb folios.
1995  * This function returns values like below:
1996  *
1997  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
1998  *           when the system is under memory pressure and the feature of
1999  *           freeing unused vmemmap pages associated with each hugetlb page
2000  *           is enabled.
2001  *  -EBUSY:  failed to dissolved free hugepages or the hugepage is in-use
2002  *           (allocated or reserved.)
2003  *       0:  successfully dissolved free hugepages or the page is not a
2004  *           hugepage (considered as already dissolved)
2005  */
dissolve_free_hugetlb_folio(struct folio * folio)2006 int dissolve_free_hugetlb_folio(struct folio *folio)
2007 {
2008 	int rc = -EBUSY;
2009 
2010 retry:
2011 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2012 	if (!folio_test_hugetlb(folio))
2013 		return 0;
2014 
2015 	spin_lock_irq(&hugetlb_lock);
2016 	if (!folio_test_hugetlb(folio)) {
2017 		rc = 0;
2018 		goto out;
2019 	}
2020 
2021 	if (!folio_ref_count(folio)) {
2022 		struct hstate *h = folio_hstate(folio);
2023 		bool adjust_surplus = false;
2024 
2025 		if (!available_huge_pages(h))
2026 			goto out;
2027 
2028 		/*
2029 		 * We should make sure that the page is already on the free list
2030 		 * when it is dissolved.
2031 		 */
2032 		if (unlikely(!folio_test_hugetlb_freed(folio))) {
2033 			spin_unlock_irq(&hugetlb_lock);
2034 			cond_resched();
2035 
2036 			/*
2037 			 * Theoretically, we should return -EBUSY when we
2038 			 * encounter this race. In fact, we have a chance
2039 			 * to successfully dissolve the page if we do a
2040 			 * retry. Because the race window is quite small.
2041 			 * If we seize this opportunity, it is an optimization
2042 			 * for increasing the success rate of dissolving page.
2043 			 */
2044 			goto retry;
2045 		}
2046 
2047 		if (h->surplus_huge_pages_node[folio_nid(folio)])
2048 			adjust_surplus = true;
2049 		remove_hugetlb_folio(h, folio, adjust_surplus);
2050 		h->max_huge_pages--;
2051 		spin_unlock_irq(&hugetlb_lock);
2052 
2053 		/*
2054 		 * Normally update_and_free_hugtlb_folio will allocate required vmemmmap
2055 		 * before freeing the page.  update_and_free_hugtlb_folio will fail to
2056 		 * free the page if it can not allocate required vmemmap.  We
2057 		 * need to adjust max_huge_pages if the page is not freed.
2058 		 * Attempt to allocate vmemmmap here so that we can take
2059 		 * appropriate action on failure.
2060 		 *
2061 		 * The folio_test_hugetlb check here is because
2062 		 * remove_hugetlb_folio will clear hugetlb folio flag for
2063 		 * non-vmemmap optimized hugetlb folios.
2064 		 */
2065 		if (folio_test_hugetlb(folio)) {
2066 			rc = hugetlb_vmemmap_restore_folio(h, folio);
2067 			if (rc) {
2068 				spin_lock_irq(&hugetlb_lock);
2069 				add_hugetlb_folio(h, folio, adjust_surplus);
2070 				h->max_huge_pages++;
2071 				goto out;
2072 			}
2073 		} else {
2074 			rc = 0;
2075 		}
2076 
2077 		update_and_free_hugetlb_folio(h, folio, false);
2078 		return rc;
2079 	}
2080 out:
2081 	spin_unlock_irq(&hugetlb_lock);
2082 	return rc;
2083 }
2084 
2085 /*
2086  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2087  * make specified memory blocks removable from the system.
2088  * Note that this will dissolve a free gigantic hugepage completely, if any
2089  * part of it lies within the given range.
2090  * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2091  * free hugetlb folios that were dissolved before that error are lost.
2092  */
dissolve_free_hugetlb_folios(unsigned long start_pfn,unsigned long end_pfn)2093 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2094 {
2095 	unsigned long pfn;
2096 	struct folio *folio;
2097 	int rc = 0;
2098 	unsigned int order;
2099 	struct hstate *h;
2100 
2101 	if (!hugepages_supported())
2102 		return rc;
2103 
2104 	order = huge_page_order(&default_hstate);
2105 	for_each_hstate(h)
2106 		order = min(order, huge_page_order(h));
2107 
2108 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2109 		folio = pfn_folio(pfn);
2110 		rc = dissolve_free_hugetlb_folio(folio);
2111 		if (rc)
2112 			break;
2113 	}
2114 
2115 	return rc;
2116 }
2117 
2118 /*
2119  * Allocates a fresh surplus page from the page allocator.
2120  */
alloc_surplus_hugetlb_folio(struct hstate * h,gfp_t gfp_mask,int nid,nodemask_t * nmask)2121 static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
2122 				gfp_t gfp_mask,	int nid, nodemask_t *nmask)
2123 {
2124 	struct folio *folio = NULL;
2125 
2126 	if (hstate_is_gigantic_no_runtime(h))
2127 		return NULL;
2128 
2129 	spin_lock_irq(&hugetlb_lock);
2130 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
2131 		goto out_unlock;
2132 	spin_unlock_irq(&hugetlb_lock);
2133 
2134 	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2135 	if (!folio)
2136 		return NULL;
2137 
2138 	spin_lock_irq(&hugetlb_lock);
2139 	/*
2140 	 * nr_huge_pages needs to be adjusted within the same lock cycle
2141 	 * as surplus_pages, otherwise it might confuse
2142 	 * persistent_huge_pages() momentarily.
2143 	 */
2144 	account_new_hugetlb_folio(h, folio);
2145 
2146 	/*
2147 	 * We could have raced with the pool size change.
2148 	 * Double check that and simply deallocate the new page
2149 	 * if we would end up overcommiting the surpluses. Abuse
2150 	 * temporary page to workaround the nasty free_huge_folio
2151 	 * codeflow
2152 	 */
2153 	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
2154 		folio_set_hugetlb_temporary(folio);
2155 		spin_unlock_irq(&hugetlb_lock);
2156 		free_huge_folio(folio);
2157 		return NULL;
2158 	}
2159 
2160 	h->surplus_huge_pages++;
2161 	h->surplus_huge_pages_node[folio_nid(folio)]++;
2162 
2163 out_unlock:
2164 	spin_unlock_irq(&hugetlb_lock);
2165 
2166 	return folio;
2167 }
2168 
alloc_migrate_hugetlb_folio(struct hstate * h,gfp_t gfp_mask,int nid,nodemask_t * nmask)2169 static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
2170 				     int nid, nodemask_t *nmask)
2171 {
2172 	struct folio *folio;
2173 
2174 	if (hstate_is_gigantic(h))
2175 		return NULL;
2176 
2177 	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
2178 	if (!folio)
2179 		return NULL;
2180 
2181 	spin_lock_irq(&hugetlb_lock);
2182 	account_new_hugetlb_folio(h, folio);
2183 	spin_unlock_irq(&hugetlb_lock);
2184 
2185 	/* fresh huge pages are frozen */
2186 	folio_ref_unfreeze(folio, 1);
2187 	/*
2188 	 * We do not account these pages as surplus because they are only
2189 	 * temporary and will be released properly on the last reference
2190 	 */
2191 	folio_set_hugetlb_temporary(folio);
2192 
2193 	return folio;
2194 }
2195 
2196 /*
2197  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2198  */
2199 static
alloc_buddy_hugetlb_folio_with_mpol(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2200 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2201 		struct vm_area_struct *vma, unsigned long addr)
2202 {
2203 	struct folio *folio = NULL;
2204 	struct mempolicy *mpol;
2205 	gfp_t gfp_mask = htlb_alloc_mask(h);
2206 	int nid;
2207 	nodemask_t *nodemask;
2208 
2209 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2210 	if (mpol_is_preferred_many(mpol)) {
2211 		gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2212 
2213 		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2214 
2215 		/* Fallback to all nodes if page==NULL */
2216 		nodemask = NULL;
2217 	}
2218 
2219 	if (!folio)
2220 		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2221 	mpol_cond_put(mpol);
2222 	return folio;
2223 }
2224 
alloc_hugetlb_folio_reserve(struct hstate * h,int preferred_nid,nodemask_t * nmask,gfp_t gfp_mask)2225 struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
2226 		nodemask_t *nmask, gfp_t gfp_mask)
2227 {
2228 	struct folio *folio;
2229 
2230 	spin_lock_irq(&hugetlb_lock);
2231 	if (!h->resv_huge_pages) {
2232 		spin_unlock_irq(&hugetlb_lock);
2233 		return NULL;
2234 	}
2235 
2236 	folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask, preferred_nid,
2237 					       nmask);
2238 	if (folio)
2239 		h->resv_huge_pages--;
2240 
2241 	spin_unlock_irq(&hugetlb_lock);
2242 	return folio;
2243 }
2244 
2245 /* folio migration callback function */
alloc_hugetlb_folio_nodemask(struct hstate * h,int preferred_nid,nodemask_t * nmask,gfp_t gfp_mask,bool allow_alloc_fallback)2246 struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
2247 		nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
2248 {
2249 	spin_lock_irq(&hugetlb_lock);
2250 	if (available_huge_pages(h)) {
2251 		struct folio *folio;
2252 
2253 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
2254 						preferred_nid, nmask);
2255 		if (folio) {
2256 			spin_unlock_irq(&hugetlb_lock);
2257 			return folio;
2258 		}
2259 	}
2260 	spin_unlock_irq(&hugetlb_lock);
2261 
2262 	/* We cannot fallback to other nodes, as we could break the per-node pool. */
2263 	if (!allow_alloc_fallback)
2264 		gfp_mask |= __GFP_THISNODE;
2265 
2266 	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
2267 }
2268 
policy_mbind_nodemask(gfp_t gfp)2269 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
2270 {
2271 #ifdef CONFIG_NUMA
2272 	struct mempolicy *mpol = get_task_policy(current);
2273 
2274 	/*
2275 	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
2276 	 * (from policy_nodemask) specifically for hugetlb case
2277 	 */
2278 	if (mpol->mode == MPOL_BIND &&
2279 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
2280 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
2281 		return &mpol->nodes;
2282 #endif
2283 	return NULL;
2284 }
2285 
2286 /*
2287  * Increase the hugetlb pool such that it can accommodate a reservation
2288  * of size 'delta'.
2289  */
gather_surplus_pages(struct hstate * h,long delta)2290 static int gather_surplus_pages(struct hstate *h, long delta)
2291 	__must_hold(&hugetlb_lock)
2292 {
2293 	LIST_HEAD(surplus_list);
2294 	struct folio *folio, *tmp;
2295 	int ret;
2296 	long i;
2297 	long needed, allocated;
2298 	bool alloc_ok = true;
2299 	nodemask_t *mbind_nodemask, alloc_nodemask;
2300 
2301 	mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
2302 	if (mbind_nodemask)
2303 		nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
2304 	else
2305 		alloc_nodemask = cpuset_current_mems_allowed;
2306 
2307 	lockdep_assert_held(&hugetlb_lock);
2308 	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
2309 	if (needed <= 0) {
2310 		h->resv_huge_pages += delta;
2311 		return 0;
2312 	}
2313 
2314 	allocated = 0;
2315 
2316 	ret = -ENOMEM;
2317 retry:
2318 	spin_unlock_irq(&hugetlb_lock);
2319 	for (i = 0; i < needed; i++) {
2320 		folio = NULL;
2321 
2322 		/*
2323 		 * It is okay to use NUMA_NO_NODE because we use numa_mem_id()
2324 		 * down the road to pick the current node if that is the case.
2325 		 */
2326 		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
2327 						    NUMA_NO_NODE, &alloc_nodemask);
2328 		if (!folio) {
2329 			alloc_ok = false;
2330 			break;
2331 		}
2332 		list_add(&folio->lru, &surplus_list);
2333 		cond_resched();
2334 	}
2335 	allocated += i;
2336 
2337 	/*
2338 	 * After retaking hugetlb_lock, we need to recalculate 'needed'
2339 	 * because either resv_huge_pages or free_huge_pages may have changed.
2340 	 */
2341 	spin_lock_irq(&hugetlb_lock);
2342 	needed = (h->resv_huge_pages + delta) -
2343 			(h->free_huge_pages + allocated);
2344 	if (needed > 0) {
2345 		if (alloc_ok)
2346 			goto retry;
2347 		/*
2348 		 * We were not able to allocate enough pages to
2349 		 * satisfy the entire reservation so we free what
2350 		 * we've allocated so far.
2351 		 */
2352 		goto free;
2353 	}
2354 	/*
2355 	 * The surplus_list now contains _at_least_ the number of extra pages
2356 	 * needed to accommodate the reservation.  Add the appropriate number
2357 	 * of pages to the hugetlb pool and free the extras back to the buddy
2358 	 * allocator.  Commit the entire reservation here to prevent another
2359 	 * process from stealing the pages as they are added to the pool but
2360 	 * before they are reserved.
2361 	 */
2362 	needed += allocated;
2363 	h->resv_huge_pages += delta;
2364 	ret = 0;
2365 
2366 	/* Free the needed pages to the hugetlb pool */
2367 	list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
2368 		if ((--needed) < 0)
2369 			break;
2370 		/* Add the page to the hugetlb allocator */
2371 		enqueue_hugetlb_folio(h, folio);
2372 	}
2373 free:
2374 	spin_unlock_irq(&hugetlb_lock);
2375 
2376 	/*
2377 	 * Free unnecessary surplus pages to the buddy allocator.
2378 	 * Pages have no ref count, call free_huge_folio directly.
2379 	 */
2380 	list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
2381 		free_huge_folio(folio);
2382 	spin_lock_irq(&hugetlb_lock);
2383 
2384 	return ret;
2385 }
2386 
2387 /*
2388  * This routine has two main purposes:
2389  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2390  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2391  *    to the associated reservation map.
2392  * 2) Free any unused surplus pages that may have been allocated to satisfy
2393  *    the reservation.  As many as unused_resv_pages may be freed.
2394  */
return_unused_surplus_pages(struct hstate * h,unsigned long unused_resv_pages)2395 static void return_unused_surplus_pages(struct hstate *h,
2396 					unsigned long unused_resv_pages)
2397 {
2398 	unsigned long nr_pages;
2399 	LIST_HEAD(page_list);
2400 
2401 	lockdep_assert_held(&hugetlb_lock);
2402 	/* Uncommit the reservation */
2403 	h->resv_huge_pages -= unused_resv_pages;
2404 
2405 	if (hstate_is_gigantic_no_runtime(h))
2406 		goto out;
2407 
2408 	/*
2409 	 * Part (or even all) of the reservation could have been backed
2410 	 * by pre-allocated pages. Only free surplus pages.
2411 	 */
2412 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2413 
2414 	/*
2415 	 * We want to release as many surplus pages as possible, spread
2416 	 * evenly across all nodes with memory. Iterate across these nodes
2417 	 * until we can no longer free unreserved surplus pages. This occurs
2418 	 * when the nodes with surplus pages have no free pages.
2419 	 * remove_pool_hugetlb_folio() will balance the freed pages across the
2420 	 * on-line nodes with memory and will handle the hstate accounting.
2421 	 */
2422 	while (nr_pages--) {
2423 		struct folio *folio;
2424 
2425 		folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2426 		if (!folio)
2427 			goto out;
2428 
2429 		list_add(&folio->lru, &page_list);
2430 	}
2431 
2432 out:
2433 	spin_unlock_irq(&hugetlb_lock);
2434 	update_and_free_pages_bulk(h, &page_list);
2435 	spin_lock_irq(&hugetlb_lock);
2436 }
2437 
2438 
2439 /*
2440  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2441  * are used by the huge page allocation routines to manage reservations.
2442  *
2443  * vma_needs_reservation is called to determine if the huge page at addr
2444  * within the vma has an associated reservation.  If a reservation is
2445  * needed, the value 1 is returned.  The caller is then responsible for
2446  * managing the global reservation and subpool usage counts.  After
2447  * the huge page has been allocated, vma_commit_reservation is called
2448  * to add the page to the reservation map.  If the page allocation fails,
2449  * the reservation must be ended instead of committed.  vma_end_reservation
2450  * is called in such cases.
2451  *
2452  * In the normal case, vma_commit_reservation returns the same value
2453  * as the preceding vma_needs_reservation call.  The only time this
2454  * is not the case is if a reserve map was changed between calls.  It
2455  * is the responsibility of the caller to notice the difference and
2456  * take appropriate action.
2457  *
2458  * vma_add_reservation is used in error paths where a reservation must
2459  * be restored when a newly allocated huge page must be freed.  It is
2460  * to be called after calling vma_needs_reservation to determine if a
2461  * reservation exists.
2462  *
2463  * vma_del_reservation is used in error paths where an entry in the reserve
2464  * map was created during huge page allocation and must be removed.  It is to
2465  * be called after calling vma_needs_reservation to determine if a reservation
2466  * exists.
2467  */
2468 enum vma_resv_mode {
2469 	VMA_NEEDS_RESV,
2470 	VMA_COMMIT_RESV,
2471 	VMA_END_RESV,
2472 	VMA_ADD_RESV,
2473 	VMA_DEL_RESV,
2474 };
__vma_reservation_common(struct hstate * h,struct vm_area_struct * vma,unsigned long addr,enum vma_resv_mode mode)2475 static long __vma_reservation_common(struct hstate *h,
2476 				struct vm_area_struct *vma, unsigned long addr,
2477 				enum vma_resv_mode mode)
2478 {
2479 	struct resv_map *resv;
2480 	pgoff_t idx;
2481 	long ret;
2482 	long dummy_out_regions_needed;
2483 
2484 	resv = vma_resv_map(vma);
2485 	if (!resv)
2486 		return 1;
2487 
2488 	idx = vma_hugecache_offset(h, vma, addr);
2489 	switch (mode) {
2490 	case VMA_NEEDS_RESV:
2491 		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
2492 		/* We assume that vma_reservation_* routines always operate on
2493 		 * 1 page, and that adding to resv map a 1 page entry can only
2494 		 * ever require 1 region.
2495 		 */
2496 		VM_BUG_ON(dummy_out_regions_needed != 1);
2497 		break;
2498 	case VMA_COMMIT_RESV:
2499 		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2500 		/* region_add calls of range 1 should never fail. */
2501 		VM_BUG_ON(ret < 0);
2502 		break;
2503 	case VMA_END_RESV:
2504 		region_abort(resv, idx, idx + 1, 1);
2505 		ret = 0;
2506 		break;
2507 	case VMA_ADD_RESV:
2508 		if (vma->vm_flags & VM_MAYSHARE) {
2509 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2510 			/* region_add calls of range 1 should never fail. */
2511 			VM_BUG_ON(ret < 0);
2512 		} else {
2513 			region_abort(resv, idx, idx + 1, 1);
2514 			ret = region_del(resv, idx, idx + 1);
2515 		}
2516 		break;
2517 	case VMA_DEL_RESV:
2518 		if (vma->vm_flags & VM_MAYSHARE) {
2519 			region_abort(resv, idx, idx + 1, 1);
2520 			ret = region_del(resv, idx, idx + 1);
2521 		} else {
2522 			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
2523 			/* region_add calls of range 1 should never fail. */
2524 			VM_BUG_ON(ret < 0);
2525 		}
2526 		break;
2527 	default:
2528 		BUG();
2529 	}
2530 
2531 	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
2532 		return ret;
2533 	/*
2534 	 * We know private mapping must have HPAGE_RESV_OWNER set.
2535 	 *
2536 	 * In most cases, reserves always exist for private mappings.
2537 	 * However, a file associated with mapping could have been
2538 	 * hole punched or truncated after reserves were consumed.
2539 	 * As subsequent fault on such a range will not use reserves.
2540 	 * Subtle - The reserve map for private mappings has the
2541 	 * opposite meaning than that of shared mappings.  If NO
2542 	 * entry is in the reserve map, it means a reservation exists.
2543 	 * If an entry exists in the reserve map, it means the
2544 	 * reservation has already been consumed.  As a result, the
2545 	 * return value of this routine is the opposite of the
2546 	 * value returned from reserve map manipulation routines above.
2547 	 */
2548 	if (ret > 0)
2549 		return 0;
2550 	if (ret == 0)
2551 		return 1;
2552 	return ret;
2553 }
2554 
vma_needs_reservation(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2555 static long vma_needs_reservation(struct hstate *h,
2556 			struct vm_area_struct *vma, unsigned long addr)
2557 {
2558 	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
2559 }
2560 
vma_commit_reservation(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2561 static long vma_commit_reservation(struct hstate *h,
2562 			struct vm_area_struct *vma, unsigned long addr)
2563 {
2564 	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
2565 }
2566 
vma_end_reservation(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2567 static void vma_end_reservation(struct hstate *h,
2568 			struct vm_area_struct *vma, unsigned long addr)
2569 {
2570 	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
2571 }
2572 
vma_add_reservation(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2573 static long vma_add_reservation(struct hstate *h,
2574 			struct vm_area_struct *vma, unsigned long addr)
2575 {
2576 	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
2577 }
2578 
vma_del_reservation(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2579 static long vma_del_reservation(struct hstate *h,
2580 			struct vm_area_struct *vma, unsigned long addr)
2581 {
2582 	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
2583 }
2584 
2585 /*
2586  * This routine is called to restore reservation information on error paths.
2587  * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2588  * and the hugetlb mutex should remain held when calling this routine.
2589  *
2590  * It handles two specific cases:
2591  * 1) A reservation was in place and the folio consumed the reservation.
2592  *    hugetlb_restore_reserve is set in the folio.
2593  * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2594  *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
2595  *
2596  * In case 1, free_huge_folio later in the error path will increment the
2597  * global reserve count.  But, free_huge_folio does not have enough context
2598  * to adjust the reservation map.  This case deals primarily with private
2599  * mappings.  Adjust the reserve map here to be consistent with global
2600  * reserve count adjustments to be made by free_huge_folio.  Make sure the
2601  * reserve map indicates there is a reservation present.
2602  *
2603  * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2604  */
restore_reserve_on_error(struct hstate * h,struct vm_area_struct * vma,unsigned long address,struct folio * folio)2605 void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
2606 			unsigned long address, struct folio *folio)
2607 {
2608 	long rc = vma_needs_reservation(h, vma, address);
2609 
2610 	if (folio_test_hugetlb_restore_reserve(folio)) {
2611 		if (unlikely(rc < 0))
2612 			/*
2613 			 * Rare out of memory condition in reserve map
2614 			 * manipulation.  Clear hugetlb_restore_reserve so
2615 			 * that global reserve count will not be incremented
2616 			 * by free_huge_folio.  This will make it appear
2617 			 * as though the reservation for this folio was
2618 			 * consumed.  This may prevent the task from
2619 			 * faulting in the folio at a later time.  This
2620 			 * is better than inconsistent global huge page
2621 			 * accounting of reserve counts.
2622 			 */
2623 			folio_clear_hugetlb_restore_reserve(folio);
2624 		else if (rc)
2625 			(void)vma_add_reservation(h, vma, address);
2626 		else
2627 			vma_end_reservation(h, vma, address);
2628 	} else {
2629 		if (!rc) {
2630 			/*
2631 			 * This indicates there is an entry in the reserve map
2632 			 * not added by alloc_hugetlb_folio.  We know it was added
2633 			 * before the alloc_hugetlb_folio call, otherwise
2634 			 * hugetlb_restore_reserve would be set on the folio.
2635 			 * Remove the entry so that a subsequent allocation
2636 			 * does not consume a reservation.
2637 			 */
2638 			rc = vma_del_reservation(h, vma, address);
2639 			if (rc < 0)
2640 				/*
2641 				 * VERY rare out of memory condition.  Since
2642 				 * we can not delete the entry, set
2643 				 * hugetlb_restore_reserve so that the reserve
2644 				 * count will be incremented when the folio
2645 				 * is freed.  This reserve will be consumed
2646 				 * on a subsequent allocation.
2647 				 */
2648 				folio_set_hugetlb_restore_reserve(folio);
2649 		} else if (rc < 0) {
2650 			/*
2651 			 * Rare out of memory condition from
2652 			 * vma_needs_reservation call.  Memory allocation is
2653 			 * only attempted if a new entry is needed.  Therefore,
2654 			 * this implies there is not an entry in the
2655 			 * reserve map.
2656 			 *
2657 			 * For shared mappings, no entry in the map indicates
2658 			 * no reservation.  We are done.
2659 			 */
2660 			if (!(vma->vm_flags & VM_MAYSHARE))
2661 				/*
2662 				 * For private mappings, no entry indicates
2663 				 * a reservation is present.  Since we can
2664 				 * not add an entry, set hugetlb_restore_reserve
2665 				 * on the folio so reserve count will be
2666 				 * incremented when freed.  This reserve will
2667 				 * be consumed on a subsequent allocation.
2668 				 */
2669 				folio_set_hugetlb_restore_reserve(folio);
2670 		} else {
2671 			/*
2672 			 * No reservation present, do nothing
2673 			 */
2674 			vma_end_reservation(h, vma, address);
2675 		}
2676 	}
2677 }
2678 
2679 /*
2680  * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
2681  * the old one
2682  * @old_folio: Old folio to dissolve
2683  * @list: List to isolate the page in case we need to
2684  * Returns 0 on success, otherwise negated error.
2685  */
alloc_and_dissolve_hugetlb_folio(struct folio * old_folio,struct list_head * list)2686 static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
2687 			struct list_head *list)
2688 {
2689 	gfp_t gfp_mask;
2690 	struct hstate *h;
2691 	int nid = folio_nid(old_folio);
2692 	struct folio *new_folio = NULL;
2693 	int ret = 0;
2694 
2695 retry:
2696 	/*
2697 	 * The old_folio might have been dissolved from under our feet, so make sure
2698 	 * to carefully check the state under the lock.
2699 	 */
2700 	spin_lock_irq(&hugetlb_lock);
2701 	if (!folio_test_hugetlb(old_folio)) {
2702 		/*
2703 		 * Freed from under us. Drop new_folio too.
2704 		 */
2705 		goto free_new;
2706 	} else if (folio_ref_count(old_folio)) {
2707 		bool isolated;
2708 
2709 		/*
2710 		 * Someone has grabbed the folio, try to isolate it here.
2711 		 * Fail with -EBUSY if not possible.
2712 		 */
2713 		spin_unlock_irq(&hugetlb_lock);
2714 		isolated = folio_isolate_hugetlb(old_folio, list);
2715 		ret = isolated ? 0 : -EBUSY;
2716 		spin_lock_irq(&hugetlb_lock);
2717 		goto free_new;
2718 	} else if (!folio_test_hugetlb_freed(old_folio)) {
2719 		/*
2720 		 * Folio's refcount is 0 but it has not been enqueued in the
2721 		 * freelist yet. Race window is small, so we can succeed here if
2722 		 * we retry.
2723 		 */
2724 		spin_unlock_irq(&hugetlb_lock);
2725 		cond_resched();
2726 		goto retry;
2727 	} else {
2728 		h = folio_hstate(old_folio);
2729 		if (!new_folio) {
2730 			spin_unlock_irq(&hugetlb_lock);
2731 			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
2732 			new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
2733 							      nid, NULL);
2734 			if (!new_folio)
2735 				return -ENOMEM;
2736 			goto retry;
2737 		}
2738 
2739 		/*
2740 		 * Ok, old_folio is still a genuine free hugepage. Remove it from
2741 		 * the freelist and decrease the counters. These will be
2742 		 * incremented again when calling account_new_hugetlb_folio()
2743 		 * and enqueue_hugetlb_folio() for new_folio. The counters will
2744 		 * remain stable since this happens under the lock.
2745 		 */
2746 		remove_hugetlb_folio(h, old_folio, false);
2747 
2748 		/*
2749 		 * Ref count on new_folio is already zero as it was dropped
2750 		 * earlier.  It can be directly added to the pool free list.
2751 		 */
2752 		account_new_hugetlb_folio(h, new_folio);
2753 		enqueue_hugetlb_folio(h, new_folio);
2754 
2755 		/*
2756 		 * Folio has been replaced, we can safely free the old one.
2757 		 */
2758 		spin_unlock_irq(&hugetlb_lock);
2759 		update_and_free_hugetlb_folio(h, old_folio, false);
2760 	}
2761 
2762 	return ret;
2763 
2764 free_new:
2765 	spin_unlock_irq(&hugetlb_lock);
2766 	if (new_folio)
2767 		update_and_free_hugetlb_folio(h, new_folio, false);
2768 
2769 	return ret;
2770 }
2771 
isolate_or_dissolve_huge_folio(struct folio * folio,struct list_head * list)2772 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
2773 {
2774 	int ret = -EBUSY;
2775 
2776 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2777 	if (!folio_test_hugetlb(folio))
2778 		return 0;
2779 
2780 	/*
2781 	 * Fence off gigantic pages as there is a cyclic dependency between
2782 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2783 	 * of bailing out right away without further retrying.
2784 	 */
2785 	if (order_is_gigantic(folio_order(folio)))
2786 		return -ENOMEM;
2787 
2788 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
2789 		ret = 0;
2790 	else if (!folio_ref_count(folio))
2791 		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
2792 
2793 	return ret;
2794 }
2795 
2796 /*
2797  *  replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
2798  *  range with new folios.
2799  *  @start_pfn: start pfn of the given pfn range
2800  *  @end_pfn: end pfn of the given pfn range
2801  *  Returns 0 on success, otherwise negated error.
2802  */
replace_free_hugepage_folios(unsigned long start_pfn,unsigned long end_pfn)2803 int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
2804 {
2805 	unsigned long nr = 0;
2806 	struct page *page;
2807 	struct hstate *h;
2808 	LIST_HEAD(list);
2809 	int ret = 0;
2810 
2811 	/* Avoid pfn iterations if no free non-gigantic huge pages */
2812 	for_each_hstate(h) {
2813 		if (hstate_is_gigantic(h))
2814 			continue;
2815 
2816 		nr += h->free_huge_pages;
2817 		if (nr)
2818 			break;
2819 	}
2820 
2821 	if (!nr)
2822 		return 0;
2823 
2824 	while (start_pfn < end_pfn) {
2825 		page = pfn_to_page(start_pfn);
2826 		nr = 1;
2827 
2828 		if (PageHuge(page) || PageCompound(page)) {
2829 			struct folio *folio = page_folio(page);
2830 
2831 			nr = folio_nr_pages(folio) - folio_page_idx(folio, page);
2832 
2833 			/*
2834 			 * Don't disrupt normal path by vainly holding
2835 			 * hugetlb_lock
2836 			 */
2837 			if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
2838 				if (order_is_gigantic(folio_order(folio))) {
2839 					ret = -ENOMEM;
2840 					break;
2841 				}
2842 
2843 				ret = alloc_and_dissolve_hugetlb_folio(folio, &list);
2844 				if (ret)
2845 					break;
2846 
2847 				putback_movable_pages(&list);
2848 			}
2849 		} else if (PageBuddy(page)) {
2850 			/*
2851 			 * Buddy order check without zone lock is unsafe and
2852 			 * the order is maybe invalid, but race should be
2853 			 * small, and the worst thing is skipping free hugetlb.
2854 			 */
2855 			const unsigned int order = buddy_order_unsafe(page);
2856 
2857 			if (order <= MAX_PAGE_ORDER)
2858 				nr = 1UL << order;
2859 		}
2860 		start_pfn += nr;
2861 	}
2862 
2863 	return ret;
2864 }
2865 
wait_for_freed_hugetlb_folios(void)2866 void wait_for_freed_hugetlb_folios(void)
2867 {
2868 	if (llist_empty(&hpage_freelist))
2869 		return;
2870 
2871 	flush_work(&free_hpage_work);
2872 }
2873 
2874 typedef enum {
2875 	/*
2876 	 * For either 0/1: we checked the per-vma resv map, and one resv
2877 	 * count either can be reused (0), or an extra needed (1).
2878 	 */
2879 	MAP_CHG_REUSE = 0,
2880 	MAP_CHG_NEEDED = 1,
2881 	/*
2882 	 * Cannot use per-vma resv count can be used, hence a new resv
2883 	 * count is enforced.
2884 	 *
2885 	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
2886 	 * that currently vma_needs_reservation() has an unwanted side
2887 	 * effect to either use end() or commit() to complete the
2888 	 * transaction. Hence it needs to differentiate from NEEDED.
2889 	 */
2890 	MAP_CHG_ENFORCED = 2,
2891 } map_chg_state;
2892 
2893 /*
2894  * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
2895  * faults of hugetlb private mappings on top of a non-page-cache folio (in
2896  * which case even if there's a private vma resv map it won't cover such
2897  * allocation).  New call sites should (probably) never set it to true!!
2898  * When it's set, the allocation will bypass all vma level reservations.
2899  */
alloc_hugetlb_folio(struct vm_area_struct * vma,unsigned long addr,bool cow_from_owner)2900 struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
2901 				    unsigned long addr, bool cow_from_owner)
2902 {
2903 	struct hugepage_subpool *spool = subpool_vma(vma);
2904 	struct hstate *h = hstate_vma(vma);
2905 	struct folio *folio;
2906 	long retval, gbl_chg, gbl_reserve;
2907 	map_chg_state map_chg;
2908 	int ret, idx;
2909 	struct hugetlb_cgroup *h_cg = NULL;
2910 	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;
2911 
2912 	idx = hstate_index(h);
2913 
2914 	/* Whether we need a separate per-vma reservation? */
2915 	if (cow_from_owner) {
2916 		/*
2917 		 * Special case!  Since it's a CoW on top of a reserved
2918 		 * page, the private resv map doesn't count.  So it cannot
2919 		 * consume the per-vma resv map even if it's reserved.
2920 		 */
2921 		map_chg = MAP_CHG_ENFORCED;
2922 	} else {
2923 		/*
2924 		 * Examine the region/reserve map to determine if the process
2925 		 * has a reservation for the page to be allocated.  A return
2926 		 * code of zero indicates a reservation exists (no change).
2927 		 */
2928 		retval = vma_needs_reservation(h, vma, addr);
2929 		if (retval < 0)
2930 			return ERR_PTR(-ENOMEM);
2931 		map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
2932 	}
2933 
2934 	/*
2935 	 * Whether we need a separate global reservation?
2936 	 *
2937 	 * Processes that did not create the mapping will have no
2938 	 * reserves as indicated by the region/reserve map. Check
2939 	 * that the allocation will not exceed the subpool limit.
2940 	 * Or if it can get one from the pool reservation directly.
2941 	 */
2942 	if (map_chg) {
2943 		gbl_chg = hugepage_subpool_get_pages(spool, 1);
2944 		if (gbl_chg < 0)
2945 			goto out_end_reservation;
2946 	} else {
2947 		/*
2948 		 * If we have the vma reservation ready, no need for extra
2949 		 * global reservation.
2950 		 */
2951 		gbl_chg = 0;
2952 	}
2953 
2954 	/*
2955 	 * If this allocation is not consuming a per-vma reservation,
2956 	 * charge the hugetlb cgroup now.
2957 	 */
2958 	if (map_chg) {
2959 		ret = hugetlb_cgroup_charge_cgroup_rsvd(
2960 			idx, pages_per_huge_page(h), &h_cg);
2961 		if (ret)
2962 			goto out_subpool_put;
2963 	}
2964 
2965 	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
2966 	if (ret)
2967 		goto out_uncharge_cgroup_reservation;
2968 
2969 	spin_lock_irq(&hugetlb_lock);
2970 	/*
2971 	 * glb_chg is passed to indicate whether or not a page must be taken
2972 	 * from the global free pool (global change).  gbl_chg == 0 indicates
2973 	 * a reservation exists for the allocation.
2974 	 */
2975 	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
2976 	if (!folio) {
2977 		spin_unlock_irq(&hugetlb_lock);
2978 		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
2979 		if (!folio)
2980 			goto out_uncharge_cgroup;
2981 		spin_lock_irq(&hugetlb_lock);
2982 		list_add(&folio->lru, &h->hugepage_activelist);
2983 		folio_ref_unfreeze(folio, 1);
2984 		/* Fall through */
2985 	}
2986 
2987 	/*
2988 	 * Either dequeued or buddy-allocated folio needs to add special
2989 	 * mark to the folio when it consumes a global reservation.
2990 	 */
2991 	if (!gbl_chg) {
2992 		folio_set_hugetlb_restore_reserve(folio);
2993 		h->resv_huge_pages--;
2994 	}
2995 
2996 	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
2997 	/* If allocation is not consuming a reservation, also store the
2998 	 * hugetlb_cgroup pointer on the page.
2999 	 */
3000 	if (map_chg) {
3001 		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
3002 						  h_cg, folio);
3003 	}
3004 
3005 	spin_unlock_irq(&hugetlb_lock);
3006 
3007 	hugetlb_set_folio_subpool(folio, spool);
3008 
3009 	if (map_chg != MAP_CHG_ENFORCED) {
3010 		/* commit() is only needed if the map_chg is not enforced */
3011 		retval = vma_commit_reservation(h, vma, addr);
3012 		/*
3013 		 * Check for possible race conditions. When it happens..
3014 		 * The page was added to the reservation map between
3015 		 * vma_needs_reservation and vma_commit_reservation.
3016 		 * This indicates a race with hugetlb_reserve_pages.
3017 		 * Adjust for the subpool count incremented above AND
3018 		 * in hugetlb_reserve_pages for the same page.	Also,
3019 		 * the reservation count added in hugetlb_reserve_pages
3020 		 * no longer applies.
3021 		 */
3022 		if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
3023 			long rsv_adjust;
3024 
3025 			rsv_adjust = hugepage_subpool_put_pages(spool, 1);
3026 			hugetlb_acct_memory(h, -rsv_adjust);
3027 			spin_lock_irq(&hugetlb_lock);
3028 			hugetlb_cgroup_uncharge_folio_rsvd(
3029 			    hstate_index(h), pages_per_huge_page(h), folio);
3030 			spin_unlock_irq(&hugetlb_lock);
3031 		}
3032 	}
3033 
3034 	ret = mem_cgroup_charge_hugetlb(folio, gfp);
3035 	/*
3036 	 * Unconditionally increment NR_HUGETLB here. If it turns out that
3037 	 * mem_cgroup_charge_hugetlb failed, then immediately free the page and
3038 	 * decrement NR_HUGETLB.
3039 	 */
3040 	lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));
3041 
3042 	if (ret == -ENOMEM) {
3043 		free_huge_folio(folio);
3044 		return ERR_PTR(-ENOMEM);
3045 	}
3046 
3047 	return folio;
3048 
3049 out_uncharge_cgroup:
3050 	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
3051 out_uncharge_cgroup_reservation:
3052 	if (map_chg)
3053 		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
3054 						    h_cg);
3055 out_subpool_put:
3056 	/*
3057 	 * put page to subpool iff the quota of subpool's rsv_hpages is used
3058 	 * during hugepage_subpool_get_pages.
3059 	 */
3060 	if (map_chg && !gbl_chg) {
3061 		gbl_reserve = hugepage_subpool_put_pages(spool, 1);
3062 		hugetlb_acct_memory(h, -gbl_reserve);
3063 	}
3064 
3065 
3066 out_end_reservation:
3067 	if (map_chg != MAP_CHG_ENFORCED)
3068 		vma_end_reservation(h, vma, addr);
3069 	return ERR_PTR(-ENOSPC);
3070 }
3071 
alloc_bootmem(struct hstate * h,int nid,bool node_exact)3072 static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
3073 {
3074 	struct huge_bootmem_page *m;
3075 	int listnode = nid;
3076 
3077 	if (hugetlb_early_cma(h))
3078 		m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
3079 	else {
3080 		if (node_exact)
3081 			m = memblock_alloc_exact_nid_raw(huge_page_size(h),
3082 				huge_page_size(h), 0,
3083 				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3084 		else {
3085 			m = memblock_alloc_try_nid_raw(huge_page_size(h),
3086 				huge_page_size(h), 0,
3087 				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
3088 			/*
3089 			 * For pre-HVO to work correctly, pages need to be on
3090 			 * the list for the node they were actually allocated
3091 			 * from. That node may be different in the case of
3092 			 * fallback by memblock_alloc_try_nid_raw. So,
3093 			 * extract the actual node first.
3094 			 */
3095 			if (m)
3096 				listnode = early_pfn_to_nid(PHYS_PFN(__pa(m)));
3097 		}
3098 
3099 		if (m) {
3100 			m->flags = 0;
3101 			m->cma = NULL;
3102 		}
3103 	}
3104 
3105 	if (m) {
3106 		/*
3107 		 * Use the beginning of the huge page to store the
3108 		 * huge_bootmem_page struct (until gather_bootmem
3109 		 * puts them into the mem_map).
3110 		 *
3111 		 * Put them into a private list first because mem_map
3112 		 * is not up yet.
3113 		 */
3114 		INIT_LIST_HEAD(&m->list);
3115 		list_add(&m->list, &huge_boot_pages[listnode]);
3116 		m->hstate = h;
3117 	}
3118 
3119 	return m;
3120 }
3121 
3122 int alloc_bootmem_huge_page(struct hstate *h, int nid)
3123 	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
__alloc_bootmem_huge_page(struct hstate * h,int nid)3124 int __alloc_bootmem_huge_page(struct hstate *h, int nid)
3125 {
3126 	struct huge_bootmem_page *m = NULL; /* initialize for clang */
3127 	int nr_nodes, node = nid;
3128 
3129 	/* do node specific alloc */
3130 	if (nid != NUMA_NO_NODE) {
3131 		m = alloc_bootmem(h, node, true);
3132 		if (!m)
3133 			return 0;
3134 		goto found;
3135 	}
3136 
3137 	/* allocate from next node when distributing huge pages */
3138 	for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
3139 				    &hugetlb_bootmem_nodes) {
3140 		m = alloc_bootmem(h, node, false);
3141 		if (!m)
3142 			return 0;
3143 		goto found;
3144 	}
3145 
3146 found:
3147 
3148 	/*
3149 	 * Only initialize the head struct page in memmap_init_reserved_pages,
3150 	 * rest of the struct pages will be initialized by the HugeTLB
3151 	 * subsystem itself.
3152 	 * The head struct page is used to get folio information by the HugeTLB
3153 	 * subsystem like zone id and node id.
3154 	 */
3155 	memblock_reserved_mark_noinit(__pa((void *)m + PAGE_SIZE),
3156 		huge_page_size(h) - PAGE_SIZE);
3157 
3158 	return 1;
3159 }
3160 
3161 /* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
hugetlb_folio_init_tail_vmemmap(struct folio * folio,unsigned long start_page_number,unsigned long end_page_number)3162 static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
3163 					unsigned long start_page_number,
3164 					unsigned long end_page_number)
3165 {
3166 	enum zone_type zone = folio_zonenum(folio);
3167 	int nid = folio_nid(folio);
3168 	struct page *page = folio_page(folio, start_page_number);
3169 	unsigned long head_pfn = folio_pfn(folio);
3170 	unsigned long pfn, end_pfn = head_pfn + end_page_number;
3171 
3172 	/*
3173 	 * As we marked all tail pages with memblock_reserved_mark_noinit(),
3174 	 * we must initialize them ourselves here.
3175 	 */
3176 	for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
3177 		__init_single_page(page, pfn, zone, nid);
3178 		prep_compound_tail((struct page *)folio, pfn - head_pfn);
3179 		set_page_count(page, 0);
3180 	}
3181 }
3182 
hugetlb_folio_init_vmemmap(struct folio * folio,struct hstate * h,unsigned long nr_pages)3183 static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
3184 					      struct hstate *h,
3185 					      unsigned long nr_pages)
3186 {
3187 	int ret;
3188 
3189 	/*
3190 	 * This is an open-coded prep_compound_page() whereby we avoid
3191 	 * walking pages twice by initializing/preparing+freezing them in the
3192 	 * same go.
3193 	 */
3194 	__folio_clear_reserved(folio);
3195 	__folio_set_head(folio);
3196 	ret = folio_ref_freeze(folio, 1);
3197 	VM_BUG_ON(!ret);
3198 	hugetlb_folio_init_tail_vmemmap(folio, 1, nr_pages);
3199 	prep_compound_head(&folio->page, huge_page_order(h));
3200 }
3201 
hugetlb_bootmem_page_prehvo(struct huge_bootmem_page * m)3202 static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
3203 {
3204 	return m->flags & HUGE_BOOTMEM_HVO;
3205 }
3206 
hugetlb_bootmem_page_earlycma(struct huge_bootmem_page * m)3207 static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
3208 {
3209 	return m->flags & HUGE_BOOTMEM_CMA;
3210 }
3211 
3212 /*
3213  * memblock-allocated pageblocks might not have the migrate type set
3214  * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
3215  * here, or MIGRATE_CMA if this was a page allocated through an early CMA
3216  * reservation.
3217  *
3218  * In case of vmemmap optimized folios, the tail vmemmap pages are mapped
3219  * read-only, but that's ok - for sparse vmemmap this does not write to
3220  * the page structure.
3221  */
hugetlb_bootmem_init_migratetype(struct folio * folio,struct hstate * h)3222 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
3223 							  struct hstate *h)
3224 {
3225 	unsigned long nr_pages = pages_per_huge_page(h), i;
3226 
3227 	WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
3228 
3229 	for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
3230 		if (folio_test_hugetlb_cma(folio))
3231 			init_cma_pageblock(folio_page(folio, i));
3232 		else
3233 			init_pageblock_migratetype(folio_page(folio, i),
3234 					  MIGRATE_MOVABLE, false);
3235 	}
3236 }
3237 
prep_and_add_bootmem_folios(struct hstate * h,struct list_head * folio_list)3238 static void __init prep_and_add_bootmem_folios(struct hstate *h,
3239 					struct list_head *folio_list)
3240 {
3241 	unsigned long flags;
3242 	struct folio *folio, *tmp_f;
3243 
3244 	/* Send list for bulk vmemmap optimization processing */
3245 	hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);
3246 
3247 	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
3248 		if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
3249 			/*
3250 			 * If HVO fails, initialize all tail struct pages
3251 			 * We do not worry about potential long lock hold
3252 			 * time as this is early in boot and there should
3253 			 * be no contention.
3254 			 */
3255 			hugetlb_folio_init_tail_vmemmap(folio,
3256 					HUGETLB_VMEMMAP_RESERVE_PAGES,
3257 					pages_per_huge_page(h));
3258 		}
3259 		hugetlb_bootmem_init_migratetype(folio, h);
3260 		/* Subdivide locks to achieve better parallel performance */
3261 		spin_lock_irqsave(&hugetlb_lock, flags);
3262 		account_new_hugetlb_folio(h, folio);
3263 		enqueue_hugetlb_folio(h, folio);
3264 		spin_unlock_irqrestore(&hugetlb_lock, flags);
3265 	}
3266 }
3267 
hugetlb_bootmem_page_zones_valid(int nid,struct huge_bootmem_page * m)3268 bool __init hugetlb_bootmem_page_zones_valid(int nid,
3269 					     struct huge_bootmem_page *m)
3270 {
3271 	unsigned long start_pfn;
3272 	bool valid;
3273 
3274 	if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
3275 		/*
3276 		 * Already validated, skip check.
3277 		 */
3278 		return true;
3279 	}
3280 
3281 	if (hugetlb_bootmem_page_earlycma(m)) {
3282 		valid = cma_validate_zones(m->cma);
3283 		goto out;
3284 	}
3285 
3286 	start_pfn = virt_to_phys(m) >> PAGE_SHIFT;
3287 
3288 	valid = !pfn_range_intersects_zones(nid, start_pfn,
3289 			pages_per_huge_page(m->hstate));
3290 out:
3291 	if (!valid)
3292 		hstate_boot_nrinvalid[hstate_index(m->hstate)]++;
3293 
3294 	return valid;
3295 }
3296 
3297 /*
3298  * Free a bootmem page that was found to be invalid (intersecting with
3299  * multiple zones).
3300  *
3301  * Since it intersects with multiple zones, we can't just do a free
3302  * operation on all pages at once, but instead have to walk all
3303  * pages, freeing them one by one.
3304  */
hugetlb_bootmem_free_invalid_page(int nid,struct page * page,struct hstate * h)3305 static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
3306 					     struct hstate *h)
3307 {
3308 	unsigned long npages = pages_per_huge_page(h);
3309 	unsigned long pfn;
3310 
3311 	while (npages--) {
3312 		pfn = page_to_pfn(page);
3313 		__init_page_from_nid(pfn, nid);
3314 		free_reserved_page(page);
3315 		page++;
3316 	}
3317 }
3318 
3319 /*
3320  * Put bootmem huge pages into the standard lists after mem_map is up.
3321  * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
3322  */
gather_bootmem_prealloc_node(unsigned long nid)3323 static void __init gather_bootmem_prealloc_node(unsigned long nid)
3324 {
3325 	LIST_HEAD(folio_list);
3326 	struct huge_bootmem_page *m, *tm;
3327 	struct hstate *h = NULL, *prev_h = NULL;
3328 
3329 	list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
3330 		struct page *page = virt_to_page(m);
3331 		struct folio *folio = (void *)page;
3332 
3333 		h = m->hstate;
3334 		if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
3335 			/*
3336 			 * Can't use this page. Initialize the
3337 			 * page structures if that hasn't already
3338 			 * been done, and give them to the page
3339 			 * allocator.
3340 			 */
3341 			hugetlb_bootmem_free_invalid_page(nid, page, h);
3342 			continue;
3343 		}
3344 
3345 		/*
3346 		 * It is possible to have multiple huge page sizes (hstates)
3347 		 * in this list.  If so, process each size separately.
3348 		 */
3349 		if (h != prev_h && prev_h != NULL)
3350 			prep_and_add_bootmem_folios(prev_h, &folio_list);
3351 		prev_h = h;
3352 
3353 		VM_BUG_ON(!hstate_is_gigantic(h));
3354 		WARN_ON(folio_ref_count(folio) != 1);
3355 
3356 		hugetlb_folio_init_vmemmap(folio, h,
3357 					   HUGETLB_VMEMMAP_RESERVE_PAGES);
3358 		init_new_hugetlb_folio(folio);
3359 
3360 		if (hugetlb_bootmem_page_prehvo(m))
3361 			/*
3362 			 * If pre-HVO was done, just set the
3363 			 * flag, the HVO code will then skip
3364 			 * this folio.
3365 			 */
3366 			folio_set_hugetlb_vmemmap_optimized(folio);
3367 
3368 		if (hugetlb_bootmem_page_earlycma(m))
3369 			folio_set_hugetlb_cma(folio);
3370 
3371 		list_add(&folio->lru, &folio_list);
3372 
3373 		/*
3374 		 * We need to restore the 'stolen' pages to totalram_pages
3375 		 * in order to fix confusing memory reports from free(1) and
3376 		 * other side-effects, like CommitLimit going negative.
3377 		 *
3378 		 * For CMA pages, this is done in init_cma_pageblock
3379 		 * (via hugetlb_bootmem_init_migratetype), so skip it here.
3380 		 */
3381 		if (!folio_test_hugetlb_cma(folio))
3382 			adjust_managed_page_count(page, pages_per_huge_page(h));
3383 		cond_resched();
3384 	}
3385 
3386 	prep_and_add_bootmem_folios(h, &folio_list);
3387 }
3388 
gather_bootmem_prealloc_parallel(unsigned long start,unsigned long end,void * arg)3389 static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3390 						    unsigned long end, void *arg)
3391 {
3392 	int nid;
3393 
3394 	for (nid = start; nid < end; nid++)
3395 		gather_bootmem_prealloc_node(nid);
3396 }
3397 
gather_bootmem_prealloc(void)3398 static void __init gather_bootmem_prealloc(void)
3399 {
3400 	struct padata_mt_job job = {
3401 		.thread_fn	= gather_bootmem_prealloc_parallel,
3402 		.fn_arg		= NULL,
3403 		.start		= 0,
3404 		.size		= nr_node_ids,
3405 		.align		= 1,
3406 		.min_chunk	= 1,
3407 		.max_threads	= num_node_state(N_MEMORY),
3408 		.numa_aware	= true,
3409 	};
3410 
3411 	padata_do_multithreaded(&job);
3412 }
3413 
hugetlb_hstate_alloc_pages_onenode(struct hstate * h,int nid)3414 static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
3415 {
3416 	unsigned long i;
3417 	char buf[32];
3418 	LIST_HEAD(folio_list);
3419 
3420 	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
3421 		if (hstate_is_gigantic(h)) {
3422 			if (!alloc_bootmem_huge_page(h, nid))
3423 				break;
3424 		} else {
3425 			struct folio *folio;
3426 			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
3427 
3428 			folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3429 					&node_states[N_MEMORY], NULL);
3430 			if (!folio && !list_empty(&folio_list) &&
3431 			    hugetlb_vmemmap_optimizable_size(h)) {
3432 				prep_and_add_allocated_folios(h, &folio_list);
3433 				INIT_LIST_HEAD(&folio_list);
3434 				folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
3435 						&node_states[N_MEMORY], NULL);
3436 			}
3437 			if (!folio)
3438 				break;
3439 			list_add(&folio->lru, &folio_list);
3440 		}
3441 		cond_resched();
3442 	}
3443 
3444 	if (!list_empty(&folio_list))
3445 		prep_and_add_allocated_folios(h, &folio_list);
3446 
3447 	if (i == h->max_huge_pages_node[nid])
3448 		return;
3449 
3450 	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3451 	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
3452 		h->max_huge_pages_node[nid], buf, nid, i);
3453 	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
3454 	h->max_huge_pages_node[nid] = i;
3455 }
3456 
hugetlb_hstate_alloc_pages_specific_nodes(struct hstate * h)3457 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3458 {
3459 	int i;
3460 	bool node_specific_alloc = false;
3461 
3462 	for_each_online_node(i) {
3463 		if (h->max_huge_pages_node[i] > 0) {
3464 			hugetlb_hstate_alloc_pages_onenode(h, i);
3465 			node_specific_alloc = true;
3466 		}
3467 	}
3468 
3469 	return node_specific_alloc;
3470 }
3471 
hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,struct hstate * h)3472 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3473 {
3474 	if (allocated < h->max_huge_pages) {
3475 		char buf[32];
3476 
3477 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3478 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3479 			h->max_huge_pages, buf, allocated);
3480 		h->max_huge_pages = allocated;
3481 	}
3482 }
3483 
hugetlb_pages_alloc_boot_node(unsigned long start,unsigned long end,void * arg)3484 static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
3485 {
3486 	struct hstate *h = (struct hstate *)arg;
3487 	int i, num = end - start;
3488 	nodemask_t node_alloc_noretry;
3489 	LIST_HEAD(folio_list);
3490 	int next_node = first_online_node;
3491 
3492 	/* Bit mask controlling how hard we retry per-node allocations.*/
3493 	nodes_clear(node_alloc_noretry);
3494 
3495 	for (i = 0; i < num; ++i) {
3496 		struct folio *folio;
3497 
3498 		if (hugetlb_vmemmap_optimizable_size(h) &&
3499 		    (si_mem_available() == 0) && !list_empty(&folio_list)) {
3500 			prep_and_add_allocated_folios(h, &folio_list);
3501 			INIT_LIST_HEAD(&folio_list);
3502 		}
3503 		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
3504 						&node_alloc_noretry, &next_node);
3505 		if (!folio)
3506 			break;
3507 
3508 		list_move(&folio->lru, &folio_list);
3509 		cond_resched();
3510 	}
3511 
3512 	prep_and_add_allocated_folios(h, &folio_list);
3513 }
3514 
hugetlb_gigantic_pages_alloc_boot(struct hstate * h)3515 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3516 {
3517 	unsigned long i;
3518 
3519 	for (i = 0; i < h->max_huge_pages; ++i) {
3520 		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3521 			break;
3522 		cond_resched();
3523 	}
3524 
3525 	return i;
3526 }
3527 
hugetlb_pages_alloc_boot(struct hstate * h)3528 static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
3529 {
3530 	struct padata_mt_job job = {
3531 		.fn_arg		= h,
3532 		.align		= 1,
3533 		.numa_aware	= true
3534 	};
3535 
3536 	unsigned long jiffies_start;
3537 	unsigned long jiffies_end;
3538 	unsigned long remaining;
3539 
3540 	job.thread_fn	= hugetlb_pages_alloc_boot_node;
3541 
3542 	/*
3543 	 * job.max_threads is 25% of the available cpu threads by default.
3544 	 *
3545 	 * On large servers with terabytes of memory, huge page allocation
3546 	 * can consume a considerably amount of time.
3547 	 *
3548 	 * Tests below show how long it takes to allocate 1 TiB of memory with 2MiB huge pages.
3549 	 * 2MiB huge pages. Using more threads can significantly improve allocation time.
3550 	 *
3551 	 * +-----------------------+-------+-------+-------+-------+-------+
3552 	 * | threads               |   8   |   16  |   32  |   64  |   128 |
3553 	 * +-----------------------+-------+-------+-------+-------+-------+
3554 	 * | skylake      144 cpus |   44s |   22s |   16s |   19s |   20s |
3555 	 * | cascade lake 192 cpus |   39s |   20s |   11s |   10s |    9s |
3556 	 * +-----------------------+-------+-------+-------+-------+-------+
3557 	 */
3558 	if (hugepage_allocation_threads == 0) {
3559 		hugepage_allocation_threads = num_online_cpus() / 4;
3560 		hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
3561 	}
3562 
3563 	job.max_threads	= hugepage_allocation_threads;
3564 
3565 	jiffies_start = jiffies;
3566 	do {
3567 		remaining = h->max_huge_pages - h->nr_huge_pages;
3568 
3569 		job.start     = h->nr_huge_pages;
3570 		job.size      = remaining;
3571 		job.min_chunk = remaining / hugepage_allocation_threads;
3572 		padata_do_multithreaded(&job);
3573 
3574 		if (h->nr_huge_pages == h->max_huge_pages)
3575 			break;
3576 
3577 		/*
3578 		 * Retry only if the vmemmap optimization might have been able to free
3579 		 * some memory back to the system.
3580 		 */
3581 		if (!hugetlb_vmemmap_optimizable(h))
3582 			break;
3583 
3584 		/* Continue if progress was made in last iteration */
3585 	} while (remaining != (h->max_huge_pages - h->nr_huge_pages));
3586 
3587 	jiffies_end = jiffies;
3588 
3589 	pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
3590 		jiffies_to_msecs(jiffies_end - jiffies_start),
3591 		hugepage_allocation_threads);
3592 
3593 	return h->nr_huge_pages;
3594 }
3595 
3596 /*
3597  * NOTE: this routine is called in different contexts for gigantic and
3598  * non-gigantic pages.
3599  * - For gigantic pages, this is called early in the boot process and
3600  *   pages are allocated from memblock allocated or something similar.
3601  *   Gigantic pages are actually added to pools later with the routine
3602  *   gather_bootmem_prealloc.
3603  * - For non-gigantic pages, this is called later in the boot process after
3604  *   all of mm is up and functional.  Pages are allocated from buddy and
3605  *   then added to hugetlb pools.
3606  */
hugetlb_hstate_alloc_pages(struct hstate * h)3607 static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
3608 {
3609 	unsigned long allocated;
3610 
3611 	/*
3612 	 * Skip gigantic hugepages allocation if early CMA
3613 	 * reservations are not available.
3614 	 */
3615 	if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
3616 	    !hugetlb_early_cma(h)) {
3617 		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
3618 		return;
3619 	}
3620 
3621 	if (!h->max_huge_pages)
3622 		return;
3623 
3624 	/* do node specific alloc */
3625 	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
3626 		return;
3627 
3628 	/* below will do all node balanced alloc */
3629 	if (hstate_is_gigantic(h))
3630 		allocated = hugetlb_gigantic_pages_alloc_boot(h);
3631 	else
3632 		allocated = hugetlb_pages_alloc_boot(h);
3633 
3634 	hugetlb_hstate_alloc_pages_errcheck(allocated, h);
3635 }
3636 
hugetlb_init_hstates(void)3637 static void __init hugetlb_init_hstates(void)
3638 {
3639 	struct hstate *h, *h2;
3640 
3641 	for_each_hstate(h) {
3642 		/*
3643 		 * Always reset to first_memory_node here, even if
3644 		 * next_nid_to_alloc was set before - we can't
3645 		 * reference hugetlb_bootmem_nodes after init, and
3646 		 * first_memory_node is right for all further allocations.
3647 		 */
3648 		h->next_nid_to_alloc = first_memory_node;
3649 		h->next_nid_to_free = first_memory_node;
3650 
3651 		/* oversize hugepages were init'ed in early boot */
3652 		if (!hstate_is_gigantic(h))
3653 			hugetlb_hstate_alloc_pages(h);
3654 
3655 		/*
3656 		 * Set demote order for each hstate.  Note that
3657 		 * h->demote_order is initially 0.
3658 		 * - We can not demote gigantic pages if runtime freeing
3659 		 *   is not supported, so skip this.
3660 		 * - If CMA allocation is possible, we can not demote
3661 		 *   HUGETLB_PAGE_ORDER or smaller size pages.
3662 		 */
3663 		if (hstate_is_gigantic_no_runtime(h))
3664 			continue;
3665 		if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
3666 			continue;
3667 		for_each_hstate(h2) {
3668 			if (h2 == h)
3669 				continue;
3670 			if (h2->order < h->order &&
3671 			    h2->order > h->demote_order)
3672 				h->demote_order = h2->order;
3673 		}
3674 	}
3675 }
3676 
report_hugepages(void)3677 static void __init report_hugepages(void)
3678 {
3679 	struct hstate *h;
3680 	unsigned long nrinvalid;
3681 
3682 	for_each_hstate(h) {
3683 		char buf[32];
3684 
3685 		nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
3686 		h->max_huge_pages -= nrinvalid;
3687 
3688 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3689 		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
3690 			buf, h->nr_huge_pages);
3691 		if (nrinvalid)
3692 			pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
3693 					buf, nrinvalid, str_plural(nrinvalid));
3694 		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
3695 			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
3696 	}
3697 }
3698 
3699 #ifdef CONFIG_HIGHMEM
try_to_free_low(struct hstate * h,unsigned long count,nodemask_t * nodes_allowed)3700 static void try_to_free_low(struct hstate *h, unsigned long count,
3701 						nodemask_t *nodes_allowed)
3702 {
3703 	int i;
3704 	LIST_HEAD(page_list);
3705 
3706 	lockdep_assert_held(&hugetlb_lock);
3707 	if (hstate_is_gigantic(h))
3708 		return;
3709 
3710 	/*
3711 	 * Collect pages to be freed on a list, and free after dropping lock
3712 	 */
3713 	for_each_node_mask(i, *nodes_allowed) {
3714 		struct folio *folio, *next;
3715 		struct list_head *freel = &h->hugepage_freelists[i];
3716 		list_for_each_entry_safe(folio, next, freel, lru) {
3717 			if (count >= h->nr_huge_pages)
3718 				goto out;
3719 			if (folio_test_highmem(folio))
3720 				continue;
3721 			remove_hugetlb_folio(h, folio, false);
3722 			list_add(&folio->lru, &page_list);
3723 		}
3724 	}
3725 
3726 out:
3727 	spin_unlock_irq(&hugetlb_lock);
3728 	update_and_free_pages_bulk(h, &page_list);
3729 	spin_lock_irq(&hugetlb_lock);
3730 }
3731 #else
try_to_free_low(struct hstate * h,unsigned long count,nodemask_t * nodes_allowed)3732 static inline void try_to_free_low(struct hstate *h, unsigned long count,
3733 						nodemask_t *nodes_allowed)
3734 {
3735 }
3736 #endif
3737 
3738 /*
3739  * Increment or decrement surplus_huge_pages.  Keep node-specific counters
3740  * balanced by operating on them in a round-robin fashion.
3741  * Returns 1 if an adjustment was made.
3742  */
adjust_pool_surplus(struct hstate * h,nodemask_t * nodes_allowed,int delta)3743 static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
3744 				int delta)
3745 {
3746 	int nr_nodes, node;
3747 
3748 	lockdep_assert_held(&hugetlb_lock);
3749 	VM_BUG_ON(delta != -1 && delta != 1);
3750 
3751 	if (delta < 0) {
3752 		for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
3753 			if (h->surplus_huge_pages_node[node])
3754 				goto found;
3755 		}
3756 	} else {
3757 		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
3758 			if (h->surplus_huge_pages_node[node] <
3759 					h->nr_huge_pages_node[node])
3760 				goto found;
3761 		}
3762 	}
3763 	return 0;
3764 
3765 found:
3766 	h->surplus_huge_pages += delta;
3767 	h->surplus_huge_pages_node[node] += delta;
3768 	return 1;
3769 }
3770 
3771 #define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
set_max_huge_pages(struct hstate * h,unsigned long count,int nid,nodemask_t * nodes_allowed)3772 static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
3773 			      nodemask_t *nodes_allowed)
3774 {
3775 	unsigned long persistent_free_count;
3776 	unsigned long min_count;
3777 	unsigned long allocated;
3778 	struct folio *folio;
3779 	LIST_HEAD(page_list);
3780 	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);
3781 
3782 	/*
3783 	 * Bit mask controlling how hard we retry per-node allocations.
3784 	 * If we can not allocate the bit mask, do not attempt to allocate
3785 	 * the requested huge pages.
3786 	 */
3787 	if (node_alloc_noretry)
3788 		nodes_clear(*node_alloc_noretry);
3789 	else
3790 		return -ENOMEM;
3791 
3792 	/*
3793 	 * resize_lock mutex prevents concurrent adjustments to number of
3794 	 * pages in hstate via the proc/sysfs interfaces.
3795 	 */
3796 	mutex_lock(&h->resize_lock);
3797 	flush_free_hpage_work(h);
3798 	spin_lock_irq(&hugetlb_lock);
3799 
3800 	/*
3801 	 * Check for a node specific request.
3802 	 * Changing node specific huge page count may require a corresponding
3803 	 * change to the global count.  In any case, the passed node mask
3804 	 * (nodes_allowed) will restrict alloc/free to the specified node.
3805 	 */
3806 	if (nid != NUMA_NO_NODE) {
3807 		unsigned long old_count = count;
3808 
3809 		count += persistent_huge_pages(h) -
3810 			 (h->nr_huge_pages_node[nid] -
3811 			  h->surplus_huge_pages_node[nid]);
3812 		/*
3813 		 * User may have specified a large count value which caused the
3814 		 * above calculation to overflow.  In this case, they wanted
3815 		 * to allocate as many huge pages as possible.  Set count to
3816 		 * largest possible value to align with their intention.
3817 		 */
3818 		if (count < old_count)
3819 			count = ULONG_MAX;
3820 	}
3821 
3822 	/*
3823 	 * Gigantic pages runtime allocation depend on the capability for large
3824 	 * page range allocation.
3825 	 * If the system does not provide this feature, return an error when
3826 	 * the user tries to allocate gigantic pages but let the user free the
3827 	 * boottime allocated gigantic pages.
3828 	 */
3829 	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
3830 		if (count > persistent_huge_pages(h)) {
3831 			spin_unlock_irq(&hugetlb_lock);
3832 			mutex_unlock(&h->resize_lock);
3833 			NODEMASK_FREE(node_alloc_noretry);
3834 			return -EINVAL;
3835 		}
3836 		/* Fall through to decrease pool */
3837 	}
3838 
3839 	/*
3840 	 * Increase the pool size
3841 	 * First take pages out of surplus state.  Then make up the
3842 	 * remaining difference by allocating fresh huge pages.
3843 	 *
3844 	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
3845 	 * to convert a surplus huge page to a normal huge page. That is
3846 	 * not critical, though, it just means the overall size of the
3847 	 * pool might be one hugepage larger than it needs to be, but
3848 	 * within all the constraints specified by the sysctls.
3849 	 */
3850 	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
3851 		if (!adjust_pool_surplus(h, nodes_allowed, -1))
3852 			break;
3853 	}
3854 
3855 	allocated = 0;
3856 	while (count > (persistent_huge_pages(h) + allocated)) {
3857 		/*
3858 		 * If this allocation races such that we no longer need the
3859 		 * page, free_huge_folio will handle it by freeing the page
3860 		 * and reducing the surplus.
3861 		 */
3862 		spin_unlock_irq(&hugetlb_lock);
3863 
3864 		/* yield cpu to avoid soft lockup */
3865 		cond_resched();
3866 
3867 		folio = alloc_pool_huge_folio(h, nodes_allowed,
3868 						node_alloc_noretry,
3869 						&h->next_nid_to_alloc);
3870 		if (!folio) {
3871 			prep_and_add_allocated_folios(h, &page_list);
3872 			spin_lock_irq(&hugetlb_lock);
3873 			goto out;
3874 		}
3875 
3876 		list_add(&folio->lru, &page_list);
3877 		allocated++;
3878 
3879 		/* Bail for signals. Probably ctrl-c from user */
3880 		if (signal_pending(current)) {
3881 			prep_and_add_allocated_folios(h, &page_list);
3882 			spin_lock_irq(&hugetlb_lock);
3883 			goto out;
3884 		}
3885 
3886 		spin_lock_irq(&hugetlb_lock);
3887 	}
3888 
3889 	/* Add allocated pages to the pool */
3890 	if (!list_empty(&page_list)) {
3891 		spin_unlock_irq(&hugetlb_lock);
3892 		prep_and_add_allocated_folios(h, &page_list);
3893 		spin_lock_irq(&hugetlb_lock);
3894 	}
3895 
3896 	/*
3897 	 * Decrease the pool size
3898 	 * First return free pages to the buddy allocator (being careful
3899 	 * to keep enough around to satisfy reservations).  Then place
3900 	 * pages into surplus state as needed so the pool will shrink
3901 	 * to the desired size as pages become free.
3902 	 *
3903 	 * By placing pages into the surplus state independent of the
3904 	 * overcommit value, we are allowing the surplus pool size to
3905 	 * exceed overcommit. There are few sane options here. Since
3906 	 * alloc_surplus_hugetlb_folio() is checking the global counter,
3907 	 * though, we'll note that we're not allowed to exceed surplus
3908 	 * and won't grow the pool anywhere else. Not until one of the
3909 	 * sysctls are changed, or the surplus pages go out of use.
3910 	 *
3911 	 * min_count is the expected number of persistent pages, we
3912 	 * shouldn't calculate min_count by using
3913 	 * resv_huge_pages + persistent_huge_pages() - free_huge_pages,
3914 	 * because there may exist free surplus huge pages, and this will
3915 	 * lead to subtracting twice. Free surplus huge pages come from HVO
3916 	 * failing to restore vmemmap, see comments in the callers of
3917 	 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate
3918 	 * persistent free count first.
3919 	 */
3920 	persistent_free_count = h->free_huge_pages;
3921 	if (h->free_huge_pages > persistent_huge_pages(h)) {
3922 		if (h->free_huge_pages > h->surplus_huge_pages)
3923 			persistent_free_count -= h->surplus_huge_pages;
3924 		else
3925 			persistent_free_count = 0;
3926 	}
3927 	min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
3928 	min_count = max(count, min_count);
3929 	try_to_free_low(h, min_count, nodes_allowed);
3930 
3931 	/*
3932 	 * Collect pages to be removed on list without dropping lock
3933 	 */
3934 	while (min_count < persistent_huge_pages(h)) {
3935 		folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
3936 		if (!folio)
3937 			break;
3938 
3939 		list_add(&folio->lru, &page_list);
3940 	}
3941 	/* free the pages after dropping lock */
3942 	spin_unlock_irq(&hugetlb_lock);
3943 	update_and_free_pages_bulk(h, &page_list);
3944 	flush_free_hpage_work(h);
3945 	spin_lock_irq(&hugetlb_lock);
3946 
3947 	while (count < persistent_huge_pages(h)) {
3948 		if (!adjust_pool_surplus(h, nodes_allowed, 1))
3949 			break;
3950 	}
3951 out:
3952 	h->max_huge_pages = persistent_huge_pages(h);
3953 	spin_unlock_irq(&hugetlb_lock);
3954 	mutex_unlock(&h->resize_lock);
3955 
3956 	NODEMASK_FREE(node_alloc_noretry);
3957 
3958 	return 0;
3959 }
3960 
demote_free_hugetlb_folios(struct hstate * src,struct hstate * dst,struct list_head * src_list)3961 static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
3962 				       struct list_head *src_list)
3963 {
3964 	long rc;
3965 	struct folio *folio, *next;
3966 	LIST_HEAD(dst_list);
3967 	LIST_HEAD(ret_list);
3968 
3969 	rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
3970 	list_splice_init(&ret_list, src_list);
3971 
3972 	/*
3973 	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
3974 	 * Without the mutex, pages added to target hstate could be marked
3975 	 * as surplus.
3976 	 *
3977 	 * Note that we already hold src->resize_lock.  To prevent deadlock,
3978 	 * use the convention of always taking larger size hstate mutex first.
3979 	 */
3980 	mutex_lock(&dst->resize_lock);
3981 
3982 	list_for_each_entry_safe(folio, next, src_list, lru) {
3983 		int i;
3984 		bool cma;
3985 
3986 		if (folio_test_hugetlb_vmemmap_optimized(folio))
3987 			continue;
3988 
3989 		cma = folio_test_hugetlb_cma(folio);
3990 
3991 		list_del(&folio->lru);
3992 
3993 		split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
3994 		pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));
3995 
3996 		for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
3997 			struct page *page = folio_page(folio, i);
3998 			/* Careful: see __split_huge_page_tail() */
3999 			struct folio *new_folio = (struct folio *)page;
4000 
4001 			clear_compound_head(page);
4002 			prep_compound_page(page, dst->order);
4003 
4004 			new_folio->mapping = NULL;
4005 			init_new_hugetlb_folio(new_folio);
4006 			/* Copy the CMA flag so that it is freed correctly */
4007 			if (cma)
4008 				folio_set_hugetlb_cma(new_folio);
4009 			list_add(&new_folio->lru, &dst_list);
4010 		}
4011 	}
4012 
4013 	prep_and_add_allocated_folios(dst, &dst_list);
4014 
4015 	mutex_unlock(&dst->resize_lock);
4016 
4017 	return rc;
4018 }
4019 
demote_pool_huge_page(struct hstate * src,nodemask_t * nodes_allowed,unsigned long nr_to_demote)4020 long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
4021 			   unsigned long nr_to_demote)
4022 	__must_hold(&hugetlb_lock)
4023 {
4024 	int nr_nodes, node;
4025 	struct hstate *dst;
4026 	long rc = 0;
4027 	long nr_demoted = 0;
4028 
4029 	lockdep_assert_held(&hugetlb_lock);
4030 
4031 	/* We should never get here if no demote order */
4032 	if (!src->demote_order) {
4033 		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
4034 		return -EINVAL;		/* internal error */
4035 	}
4036 	dst = size_to_hstate(PAGE_SIZE << src->demote_order);
4037 
4038 	for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
4039 		LIST_HEAD(list);
4040 		struct folio *folio, *next;
4041 
4042 		list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
4043 			if (folio_test_hwpoison(folio))
4044 				continue;
4045 
4046 			remove_hugetlb_folio(src, folio, false);
4047 			list_add(&folio->lru, &list);
4048 
4049 			if (++nr_demoted == nr_to_demote)
4050 				break;
4051 		}
4052 
4053 		spin_unlock_irq(&hugetlb_lock);
4054 
4055 		rc = demote_free_hugetlb_folios(src, dst, &list);
4056 
4057 		spin_lock_irq(&hugetlb_lock);
4058 
4059 		list_for_each_entry_safe(folio, next, &list, lru) {
4060 			list_del(&folio->lru);
4061 			add_hugetlb_folio(src, folio, false);
4062 
4063 			nr_demoted--;
4064 		}
4065 
4066 		if (rc < 0 || nr_demoted == nr_to_demote)
4067 			break;
4068 	}
4069 
4070 	/*
4071 	 * Not absolutely necessary, but for consistency update max_huge_pages
4072 	 * based on pool changes for the demoted page.
4073 	 */
4074 	src->max_huge_pages -= nr_demoted;
4075 	dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));
4076 
4077 	if (rc < 0)
4078 		return rc;
4079 
4080 	if (nr_demoted)
4081 		return nr_demoted;
4082 	/*
4083 	 * Only way to get here is if all pages on free lists are poisoned.
4084 	 * Return -EBUSY so that caller will not retry.
4085 	 */
4086 	return -EBUSY;
4087 }
4088 
__nr_hugepages_store_common(bool obey_mempolicy,struct hstate * h,int nid,unsigned long count,size_t len)4089 ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4090 					   struct hstate *h, int nid,
4091 					   unsigned long count, size_t len)
4092 {
4093 	int err;
4094 	nodemask_t nodes_allowed, *n_mask;
4095 
4096 	if (hstate_is_gigantic_no_runtime(h))
4097 		return -EINVAL;
4098 
4099 	if (nid == NUMA_NO_NODE) {
4100 		/*
4101 		 * global hstate attribute
4102 		 */
4103 		if (!(obey_mempolicy &&
4104 				init_nodemask_of_mempolicy(&nodes_allowed)))
4105 			n_mask = &node_states[N_MEMORY];
4106 		else
4107 			n_mask = &nodes_allowed;
4108 	} else {
4109 		/*
4110 		 * Node specific request.  count adjustment happens in
4111 		 * set_max_huge_pages() after acquiring hugetlb_lock.
4112 		 */
4113 		init_nodemask_of_node(&nodes_allowed, nid);
4114 		n_mask = &nodes_allowed;
4115 	}
4116 
4117 	err = set_max_huge_pages(h, count, nid, n_mask);
4118 
4119 	return err ? err : len;
4120 }
4121 
hugetlb_init(void)4122 static int __init hugetlb_init(void)
4123 {
4124 	int i;
4125 
4126 	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
4127 			__NR_HPAGEFLAGS);
4128 	BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER);
4129 
4130 	if (!hugepages_supported()) {
4131 		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
4132 			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
4133 		return 0;
4134 	}
4135 
4136 	/*
4137 	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
4138 	 * architectures depend on setup being done here.
4139 	 */
4140 	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
4141 	if (!parsed_default_hugepagesz) {
4142 		/*
4143 		 * If we did not parse a default huge page size, set
4144 		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
4145 		 * number of huge pages for this default size was implicitly
4146 		 * specified, set that here as well.
4147 		 * Note that the implicit setting will overwrite an explicit
4148 		 * setting.  A warning will be printed in this case.
4149 		 */
4150 		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
4151 		if (default_hstate_max_huge_pages) {
4152 			if (default_hstate.max_huge_pages) {
4153 				char buf[32];
4154 
4155 				string_get_size(huge_page_size(&default_hstate),
4156 					1, STRING_UNITS_2, buf, 32);
4157 				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
4158 					default_hstate.max_huge_pages, buf);
4159 				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
4160 					default_hstate_max_huge_pages);
4161 			}
4162 			default_hstate.max_huge_pages =
4163 				default_hstate_max_huge_pages;
4164 
4165 			for_each_online_node(i)
4166 				default_hstate.max_huge_pages_node[i] =
4167 					default_hugepages_in_node[i];
4168 		}
4169 	}
4170 
4171 	hugetlb_init_hstates();
4172 	gather_bootmem_prealloc();
4173 	report_hugepages();
4174 
4175 	hugetlb_sysfs_init();
4176 	hugetlb_cgroup_file_init();
4177 	hugetlb_sysctl_init();
4178 
4179 #ifdef CONFIG_SMP
4180 	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
4181 #else
4182 	num_fault_mutexes = 1;
4183 #endif
4184 	hugetlb_fault_mutex_table =
4185 		kmalloc_objs(struct mutex, num_fault_mutexes);
4186 	BUG_ON(!hugetlb_fault_mutex_table);
4187 
4188 	for (i = 0; i < num_fault_mutexes; i++)
4189 		mutex_init(&hugetlb_fault_mutex_table[i]);
4190 	return 0;
4191 }
4192 subsys_initcall(hugetlb_init);
4193 
4194 /* Overwritten by architectures with more huge page sizes */
__init(weak)4195 bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
4196 {
4197 	return size == HPAGE_SIZE;
4198 }
4199 
hugetlb_add_hstate(unsigned int order)4200 void __init hugetlb_add_hstate(unsigned int order)
4201 {
4202 	struct hstate *h;
4203 	unsigned long i;
4204 
4205 	if (size_to_hstate(PAGE_SIZE << order)) {
4206 		return;
4207 	}
4208 	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
4209 	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
4210 	WARN_ON(order > MAX_FOLIO_ORDER);
4211 	h = &hstates[hugetlb_max_hstate++];
4212 	__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
4213 	h->order = order;
4214 	h->mask = ~(huge_page_size(h) - 1);
4215 	for (i = 0; i < MAX_NUMNODES; ++i)
4216 		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
4217 	INIT_LIST_HEAD(&h->hugepage_activelist);
4218 	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
4219 					huge_page_size(h)/SZ_1K);
4220 
4221 	parsed_hstate = h;
4222 }
4223 
hugetlb_node_alloc_supported(void)4224 bool __init __weak hugetlb_node_alloc_supported(void)
4225 {
4226 	return true;
4227 }
4228 
hugepages_clear_pages_in_node(void)4229 static void __init hugepages_clear_pages_in_node(void)
4230 {
4231 	if (!hugetlb_max_hstate) {
4232 		default_hstate_max_huge_pages = 0;
4233 		memset(default_hugepages_in_node, 0,
4234 			sizeof(default_hugepages_in_node));
4235 	} else {
4236 		parsed_hstate->max_huge_pages = 0;
4237 		memset(parsed_hstate->max_huge_pages_node, 0,
4238 			sizeof(parsed_hstate->max_huge_pages_node));
4239 	}
4240 }
4241 
hugetlb_add_param(char * s,int (* setup)(char *))4242 static __init int hugetlb_add_param(char *s, int (*setup)(char *))
4243 {
4244 	size_t len;
4245 	char *p;
4246 
4247 	if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
4248 		return -EINVAL;
4249 
4250 	len = strlen(s) + 1;
4251 	if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
4252 		return -EINVAL;
4253 
4254 	p = &hstate_cmdline_buf[hstate_cmdline_index];
4255 	memcpy(p, s, len);
4256 	hstate_cmdline_index += len;
4257 
4258 	hugetlb_params[hugetlb_param_index].val = p;
4259 	hugetlb_params[hugetlb_param_index].setup = setup;
4260 
4261 	hugetlb_param_index++;
4262 
4263 	return 0;
4264 }
4265 
hugetlb_parse_params(void)4266 static __init void hugetlb_parse_params(void)
4267 {
4268 	int i;
4269 	struct hugetlb_cmdline *hcp;
4270 
4271 	for (i = 0; i < hugetlb_param_index; i++) {
4272 		hcp = &hugetlb_params[i];
4273 
4274 		hcp->setup(hcp->val);
4275 	}
4276 
4277 	hugetlb_cma_validate_params();
4278 }
4279 
4280 /*
4281  * hugepages command line processing
4282  * hugepages normally follows a valid hugepagsz or default_hugepagsz
4283  * specification.  If not, ignore the hugepages value.  hugepages can also
4284  * be the first huge page command line  option in which case it implicitly
4285  * specifies the number of huge pages for the default size.
4286  */
hugepages_setup(char * s)4287 static int __init hugepages_setup(char *s)
4288 {
4289 	unsigned long *mhp;
4290 	static unsigned long *last_mhp;
4291 	int node = NUMA_NO_NODE;
4292 	int count;
4293 	unsigned long tmp;
4294 	char *p = s;
4295 
4296 	if (!hugepages_supported()) {
4297 		pr_warn("HugeTLB: hugepages unsupported, ignoring hugepages=%s cmdline\n", s);
4298 		return 0;
4299 	}
4300 
4301 	if (!parsed_valid_hugepagesz) {
4302 		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
4303 		parsed_valid_hugepagesz = true;
4304 		return -EINVAL;
4305 	}
4306 
4307 	/*
4308 	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
4309 	 * yet, so this hugepages= parameter goes to the "default hstate".
4310 	 * Otherwise, it goes with the previously parsed hugepagesz or
4311 	 * default_hugepagesz.
4312 	 */
4313 	else if (!hugetlb_max_hstate)
4314 		mhp = &default_hstate_max_huge_pages;
4315 	else
4316 		mhp = &parsed_hstate->max_huge_pages;
4317 
4318 	if (mhp == last_mhp) {
4319 		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
4320 		return 1;
4321 	}
4322 
4323 	while (*p) {
4324 		count = 0;
4325 		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4326 			goto invalid;
4327 		/* Parameter is node format */
4328 		if (p[count] == ':') {
4329 			if (!hugetlb_node_alloc_supported()) {
4330 				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
4331 				return 1;
4332 			}
4333 			if (tmp >= MAX_NUMNODES || !node_online(tmp))
4334 				goto invalid;
4335 			node = array_index_nospec(tmp, MAX_NUMNODES);
4336 			p += count + 1;
4337 			/* Parse hugepages */
4338 			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
4339 				goto invalid;
4340 			if (!hugetlb_max_hstate)
4341 				default_hugepages_in_node[node] = tmp;
4342 			else
4343 				parsed_hstate->max_huge_pages_node[node] = tmp;
4344 			*mhp += tmp;
4345 			/* Go to parse next node*/
4346 			if (p[count] == ',')
4347 				p += count + 1;
4348 			else
4349 				break;
4350 		} else {
4351 			if (p != s)
4352 				goto invalid;
4353 			*mhp = tmp;
4354 			break;
4355 		}
4356 	}
4357 
4358 	last_mhp = mhp;
4359 
4360 	return 0;
4361 
4362 invalid:
4363 	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
4364 	hugepages_clear_pages_in_node();
4365 	return -EINVAL;
4366 }
4367 hugetlb_early_param("hugepages", hugepages_setup);
4368 
4369 /*
4370  * hugepagesz command line processing
4371  * A specific huge page size can only be specified once with hugepagesz.
4372  * hugepagesz is followed by hugepages on the command line.  The global
4373  * variable 'parsed_valid_hugepagesz' is used to determine if prior
4374  * hugepagesz argument was valid.
4375  */
hugepagesz_setup(char * s)4376 static int __init hugepagesz_setup(char *s)
4377 {
4378 	unsigned long size;
4379 	struct hstate *h;
4380 
4381 	if (!hugepages_supported()) {
4382 		pr_warn("HugeTLB: hugepages unsupported, ignoring hugepagesz=%s cmdline\n", s);
4383 		return 0;
4384 	}
4385 
4386 	parsed_valid_hugepagesz = false;
4387 	size = (unsigned long)memparse(s, NULL);
4388 
4389 	if (!arch_hugetlb_valid_size(size)) {
4390 		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
4391 		return -EINVAL;
4392 	}
4393 
4394 	h = size_to_hstate(size);
4395 	if (h) {
4396 		/*
4397 		 * hstate for this size already exists.  This is normally
4398 		 * an error, but is allowed if the existing hstate is the
4399 		 * default hstate.  More specifically, it is only allowed if
4400 		 * the number of huge pages for the default hstate was not
4401 		 * previously specified.
4402 		 */
4403 		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
4404 		    default_hstate.max_huge_pages) {
4405 			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
4406 			return -EINVAL;
4407 		}
4408 
4409 		/*
4410 		 * No need to call hugetlb_add_hstate() as hstate already
4411 		 * exists.  But, do set parsed_hstate so that a following
4412 		 * hugepages= parameter will be applied to this hstate.
4413 		 */
4414 		parsed_hstate = h;
4415 		parsed_valid_hugepagesz = true;
4416 		return 0;
4417 	}
4418 
4419 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4420 	parsed_valid_hugepagesz = true;
4421 	return 0;
4422 }
4423 hugetlb_early_param("hugepagesz", hugepagesz_setup);
4424 
4425 /*
4426  * default_hugepagesz command line input
4427  * Only one instance of default_hugepagesz allowed on command line.
4428  */
default_hugepagesz_setup(char * s)4429 static int __init default_hugepagesz_setup(char *s)
4430 {
4431 	unsigned long size;
4432 	int i;
4433 
4434 	if (!hugepages_supported()) {
4435 		pr_warn("HugeTLB: hugepages unsupported, ignoring default_hugepagesz=%s cmdline\n",
4436 			s);
4437 		return 0;
4438 	}
4439 
4440 	parsed_valid_hugepagesz = false;
4441 	if (parsed_default_hugepagesz) {
4442 		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
4443 		return -EINVAL;
4444 	}
4445 
4446 	size = (unsigned long)memparse(s, NULL);
4447 
4448 	if (!arch_hugetlb_valid_size(size)) {
4449 		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
4450 		return -EINVAL;
4451 	}
4452 
4453 	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
4454 	parsed_valid_hugepagesz = true;
4455 	parsed_default_hugepagesz = true;
4456 	default_hstate_idx = hstate_index(size_to_hstate(size));
4457 
4458 	/*
4459 	 * The number of default huge pages (for this size) could have been
4460 	 * specified as the first hugetlb parameter: hugepages=X.  If so,
4461 	 * then default_hstate_max_huge_pages is set.  If the default huge
4462 	 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
4463 	 * allocated here from bootmem allocator.
4464 	 */
4465 	if (default_hstate_max_huge_pages) {
4466 		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
4467 		/*
4468 		 * Since this is an early parameter, we can't check
4469 		 * NUMA node state yet, so loop through MAX_NUMNODES.
4470 		 */
4471 		for (i = 0; i < MAX_NUMNODES; i++) {
4472 			if (default_hugepages_in_node[i] != 0)
4473 				default_hstate.max_huge_pages_node[i] =
4474 					default_hugepages_in_node[i];
4475 		}
4476 		default_hstate_max_huge_pages = 0;
4477 	}
4478 
4479 	return 0;
4480 }
4481 hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
4482 
hugetlb_bootmem_set_nodes(void)4483 void __init hugetlb_bootmem_set_nodes(void)
4484 {
4485 	int i, nid;
4486 	unsigned long start_pfn, end_pfn;
4487 
4488 	if (!nodes_empty(hugetlb_bootmem_nodes))
4489 		return;
4490 
4491 	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
4492 		if (end_pfn > start_pfn)
4493 			node_set(nid, hugetlb_bootmem_nodes);
4494 	}
4495 }
4496 
hugetlb_bootmem_alloc(void)4497 void __init hugetlb_bootmem_alloc(void)
4498 {
4499 	struct hstate *h;
4500 	int i;
4501 
4502 	hugetlb_bootmem_set_nodes();
4503 
4504 	for (i = 0; i < MAX_NUMNODES; i++)
4505 		INIT_LIST_HEAD(&huge_boot_pages[i]);
4506 
4507 	hugetlb_parse_params();
4508 
4509 	for_each_hstate(h) {
4510 		h->next_nid_to_alloc = first_online_node;
4511 
4512 		if (hstate_is_gigantic(h))
4513 			hugetlb_hstate_alloc_pages(h);
4514 	}
4515 }
4516 
4517 /*
4518  * hugepage_alloc_threads command line parsing.
4519  *
4520  * When set, use this specific number of threads for the boot
4521  * allocation of hugepages.
4522  */
hugepage_alloc_threads_setup(char * s)4523 static int __init hugepage_alloc_threads_setup(char *s)
4524 {
4525 	unsigned long allocation_threads;
4526 
4527 	if (kstrtoul(s, 0, &allocation_threads) != 0)
4528 		return 1;
4529 
4530 	if (allocation_threads == 0)
4531 		return 1;
4532 
4533 	hugepage_allocation_threads = allocation_threads;
4534 
4535 	return 1;
4536 }
4537 __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
4538 
allowed_mems_nr(struct hstate * h)4539 static unsigned int allowed_mems_nr(struct hstate *h)
4540 {
4541 	int node;
4542 	unsigned int nr = 0;
4543 	nodemask_t *mbind_nodemask;
4544 	unsigned int *array = h->free_huge_pages_node;
4545 	gfp_t gfp_mask = htlb_alloc_mask(h);
4546 
4547 	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
4548 	for_each_node_mask(node, cpuset_current_mems_allowed) {
4549 		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
4550 			nr += array[node];
4551 	}
4552 
4553 	return nr;
4554 }
4555 
hugetlb_report_meminfo(struct seq_file * m)4556 void hugetlb_report_meminfo(struct seq_file *m)
4557 {
4558 	struct hstate *h;
4559 	unsigned long total = 0;
4560 
4561 	if (!hugepages_supported())
4562 		return;
4563 
4564 	for_each_hstate(h) {
4565 		unsigned long count = h->nr_huge_pages;
4566 
4567 		total += huge_page_size(h) * count;
4568 
4569 		if (h == &default_hstate)
4570 			seq_printf(m,
4571 				   "HugePages_Total:   %5lu\n"
4572 				   "HugePages_Free:    %5lu\n"
4573 				   "HugePages_Rsvd:    %5lu\n"
4574 				   "HugePages_Surp:    %5lu\n"
4575 				   "Hugepagesize:   %8lu kB\n",
4576 				   count,
4577 				   h->free_huge_pages,
4578 				   h->resv_huge_pages,
4579 				   h->surplus_huge_pages,
4580 				   huge_page_size(h) / SZ_1K);
4581 	}
4582 
4583 	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
4584 }
4585 
hugetlb_report_node_meminfo(char * buf,int len,int nid)4586 int hugetlb_report_node_meminfo(char *buf, int len, int nid)
4587 {
4588 	struct hstate *h = &default_hstate;
4589 
4590 	if (!hugepages_supported())
4591 		return 0;
4592 
4593 	return sysfs_emit_at(buf, len,
4594 			     "Node %d HugePages_Total: %5u\n"
4595 			     "Node %d HugePages_Free:  %5u\n"
4596 			     "Node %d HugePages_Surp:  %5u\n",
4597 			     nid, h->nr_huge_pages_node[nid],
4598 			     nid, h->free_huge_pages_node[nid],
4599 			     nid, h->surplus_huge_pages_node[nid]);
4600 }
4601 
hugetlb_show_meminfo_node(int nid)4602 void hugetlb_show_meminfo_node(int nid)
4603 {
4604 	struct hstate *h;
4605 
4606 	if (!hugepages_supported())
4607 		return;
4608 
4609 	for_each_hstate(h)
4610 		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
4611 			nid,
4612 			h->nr_huge_pages_node[nid],
4613 			h->free_huge_pages_node[nid],
4614 			h->surplus_huge_pages_node[nid],
4615 			huge_page_size(h) / SZ_1K);
4616 }
4617 
hugetlb_report_usage(struct seq_file * m,struct mm_struct * mm)4618 void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
4619 {
4620 	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
4621 		   K(atomic_long_read(&mm->hugetlb_usage)));
4622 }
4623 
4624 /* Return the number pages of memory we physically have, in PAGE_SIZE units. */
hugetlb_total_pages(void)4625 unsigned long hugetlb_total_pages(void)
4626 {
4627 	struct hstate *h;
4628 	unsigned long nr_total_pages = 0;
4629 
4630 	for_each_hstate(h)
4631 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4632 	return nr_total_pages;
4633 }
4634 
hugetlb_acct_memory(struct hstate * h,long delta)4635 static int hugetlb_acct_memory(struct hstate *h, long delta)
4636 {
4637 	int ret = -ENOMEM;
4638 
4639 	if (!delta)
4640 		return 0;
4641 
4642 	spin_lock_irq(&hugetlb_lock);
4643 	/*
4644 	 * When cpuset is configured, it breaks the strict hugetlb page
4645 	 * reservation as the accounting is done on a global variable. Such
4646 	 * reservation is completely rubbish in the presence of cpuset because
4647 	 * the reservation is not checked against page availability for the
4648 	 * current cpuset. Application can still potentially OOM'ed by kernel
4649 	 * with lack of free htlb page in cpuset that the task is in.
4650 	 * Attempt to enforce strict accounting with cpuset is almost
4651 	 * impossible (or too ugly) because cpuset is too fluid that
4652 	 * task or memory node can be dynamically moved between cpusets.
4653 	 *
4654 	 * The change of semantics for shared hugetlb mapping with cpuset is
4655 	 * undesirable. However, in order to preserve some of the semantics,
4656 	 * we fall back to check against current free page availability as
4657 	 * a best attempt and hopefully to minimize the impact of changing
4658 	 * semantics that cpuset has.
4659 	 *
4660 	 * Apart from cpuset, we also have memory policy mechanism that
4661 	 * also determines from which node the kernel will allocate memory
4662 	 * in a NUMA system. So similar to cpuset, we also should consider
4663 	 * the memory policy of the current task. Similar to the description
4664 	 * above.
4665 	 */
4666 	if (delta > 0) {
4667 		if (gather_surplus_pages(h, delta) < 0)
4668 			goto out;
4669 
4670 		if (delta > allowed_mems_nr(h)) {
4671 			return_unused_surplus_pages(h, delta);
4672 			goto out;
4673 		}
4674 	}
4675 
4676 	ret = 0;
4677 	if (delta < 0)
4678 		return_unused_surplus_pages(h, (unsigned long) -delta);
4679 
4680 out:
4681 	spin_unlock_irq(&hugetlb_lock);
4682 	return ret;
4683 }
4684 
hugetlb_vm_op_open(struct vm_area_struct * vma)4685 static void hugetlb_vm_op_open(struct vm_area_struct *vma)
4686 {
4687 	struct resv_map *resv = vma_resv_map(vma);
4688 
4689 	/*
4690 	 * HPAGE_RESV_OWNER indicates a private mapping.
4691 	 * This new VMA should share its siblings reservation map if present.
4692 	 * The VMA will only ever have a valid reservation map pointer where
4693 	 * it is being copied for another still existing VMA.  As that VMA
4694 	 * has a reference to the reservation map it cannot disappear until
4695 	 * after this open call completes.  It is therefore safe to take a
4696 	 * new reference here without additional locking.
4697 	 */
4698 	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
4699 		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
4700 		kref_get(&resv->refs);
4701 	}
4702 
4703 	/*
4704 	 * vma_lock structure for sharable mappings is vma specific.
4705 	 * Clear old pointer (if copied via vm_area_dup) and allocate
4706 	 * new structure.  Before clearing, make sure vma_lock is not
4707 	 * for this vma.
4708 	 */
4709 	if (vma->vm_flags & VM_MAYSHARE) {
4710 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
4711 
4712 		if (vma_lock) {
4713 			if (vma_lock->vma != vma) {
4714 				vma->vm_private_data = NULL;
4715 				hugetlb_vma_lock_alloc(vma);
4716 			} else {
4717 				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
4718 			}
4719 		} else {
4720 			hugetlb_vma_lock_alloc(vma);
4721 		}
4722 	}
4723 }
4724 
hugetlb_vm_op_close(struct vm_area_struct * vma)4725 static void hugetlb_vm_op_close(struct vm_area_struct *vma)
4726 {
4727 	struct hstate *h = hstate_vma(vma);
4728 	struct resv_map *resv;
4729 	struct hugepage_subpool *spool = subpool_vma(vma);
4730 	unsigned long reserve, start, end;
4731 	long gbl_reserve;
4732 
4733 	hugetlb_vma_lock_free(vma);
4734 
4735 	resv = vma_resv_map(vma);
4736 	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
4737 		return;
4738 
4739 	start = vma_hugecache_offset(h, vma, vma->vm_start);
4740 	end = vma_hugecache_offset(h, vma, vma->vm_end);
4741 
4742 	reserve = (end - start) - region_count(resv, start, end);
4743 	hugetlb_cgroup_uncharge_counter(resv, start, end);
4744 	if (reserve) {
4745 		/*
4746 		 * Decrement reserve counts.  The global reserve count may be
4747 		 * adjusted if the subpool has a minimum size.
4748 		 */
4749 		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
4750 		hugetlb_acct_memory(h, -gbl_reserve);
4751 	}
4752 
4753 	kref_put(&resv->refs, resv_map_release);
4754 }
4755 
hugetlb_vm_op_split(struct vm_area_struct * vma,unsigned long addr)4756 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4757 {
4758 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4759 		return -EINVAL;
4760 	return 0;
4761 }
4762 
hugetlb_split(struct vm_area_struct * vma,unsigned long addr)4763 void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
4764 {
4765 	/*
4766 	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
4767 	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
4768 	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
4769 	 * This function is called in the middle of a VMA split operation, with
4770 	 * MM, VMA and rmap all write-locked to prevent concurrent page table
4771 	 * walks (except hardware and gup_fast()).
4772 	 */
4773 	vma_assert_write_locked(vma);
4774 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
4775 
4776 	if (addr & ~PUD_MASK) {
4777 		unsigned long floor = addr & PUD_MASK;
4778 		unsigned long ceil = floor + PUD_SIZE;
4779 
4780 		if (floor >= vma->vm_start && ceil <= vma->vm_end) {
4781 			/*
4782 			 * Locking:
4783 			 * Use take_locks=false here.
4784 			 * The file rmap lock is already held.
4785 			 * The hugetlb VMA lock can't be taken when we already
4786 			 * hold the file rmap lock, and we don't need it because
4787 			 * its purpose is to synchronize against concurrent page
4788 			 * table walks, which are not possible thanks to the
4789 			 * locks held by our caller.
4790 			 */
4791 			hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
4792 		}
4793 	}
4794 }
4795 
hugetlb_vm_op_pagesize(struct vm_area_struct * vma)4796 static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
4797 {
4798 	return huge_page_size(hstate_vma(vma));
4799 }
4800 
4801 /*
4802  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4803  * handle_mm_fault() to try to instantiate regular-sized pages in the
4804  * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG is we get
4805  * this far.
4806  */
hugetlb_vm_op_fault(struct vm_fault * vmf)4807 static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
4808 {
4809 	BUG();
4810 	return 0;
4811 }
4812 
4813 /*
4814  * When a new function is introduced to vm_operations_struct and added
4815  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4816  * This is because under System V memory model, mappings created via
4817  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4818  * their original vm_ops are overwritten with shm_vm_ops.
4819  */
4820 const struct vm_operations_struct hugetlb_vm_ops = {
4821 	.fault = hugetlb_vm_op_fault,
4822 	.open = hugetlb_vm_op_open,
4823 	.close = hugetlb_vm_op_close,
4824 	.may_split = hugetlb_vm_op_split,
4825 	.pagesize = hugetlb_vm_op_pagesize,
4826 };
4827 
make_huge_pte(struct vm_area_struct * vma,struct folio * folio,bool try_mkwrite)4828 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
4829 		bool try_mkwrite)
4830 {
4831 	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
4832 	unsigned int shift = huge_page_shift(hstate_vma(vma));
4833 
4834 	if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
4835 		entry = pte_mkwrite_novma(pte_mkdirty(entry));
4836 	} else {
4837 		entry = pte_wrprotect(entry);
4838 	}
4839 	entry = pte_mkyoung(entry);
4840 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4841 
4842 	return entry;
4843 }
4844 
set_huge_ptep_writable(struct vm_area_struct * vma,unsigned long address,pte_t * ptep)4845 static void set_huge_ptep_writable(struct vm_area_struct *vma,
4846 				   unsigned long address, pte_t *ptep)
4847 {
4848 	pte_t entry;
4849 
4850 	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
4851 	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
4852 		update_mmu_cache(vma, address, ptep);
4853 }
4854 
set_huge_ptep_maybe_writable(struct vm_area_struct * vma,unsigned long address,pte_t * ptep)4855 static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
4856 					 unsigned long address, pte_t *ptep)
4857 {
4858 	if (vma->vm_flags & VM_WRITE)
4859 		set_huge_ptep_writable(vma, address, ptep);
4860 }
4861 
4862 static void
hugetlb_install_folio(struct vm_area_struct * vma,pte_t * ptep,unsigned long addr,struct folio * new_folio,pte_t old,unsigned long sz)4863 hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
4864 		      struct folio *new_folio, pte_t old, unsigned long sz)
4865 {
4866 	pte_t newpte = make_huge_pte(vma, new_folio, true);
4867 
4868 	__folio_mark_uptodate(new_folio);
4869 	hugetlb_add_new_anon_rmap(new_folio, vma, addr);
4870 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
4871 		newpte = huge_pte_mkuffd_wp(newpte);
4872 	set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
4873 	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
4874 	folio_set_hugetlb_migratable(new_folio);
4875 }
4876 
copy_hugetlb_page_range(struct mm_struct * dst,struct mm_struct * src,struct vm_area_struct * dst_vma,struct vm_area_struct * src_vma)4877 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
4878 			    struct vm_area_struct *dst_vma,
4879 			    struct vm_area_struct *src_vma)
4880 {
4881 	pte_t *src_pte, *dst_pte, entry;
4882 	struct folio *pte_folio;
4883 	unsigned long addr;
4884 	bool cow = is_cow_mapping(src_vma->vm_flags);
4885 	struct hstate *h = hstate_vma(src_vma);
4886 	unsigned long sz = huge_page_size(h);
4887 	unsigned long npages = pages_per_huge_page(h);
4888 	struct mmu_notifier_range range;
4889 	unsigned long last_addr_mask;
4890 	softleaf_t softleaf;
4891 	int ret = 0;
4892 
4893 	if (cow) {
4894 		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
4895 					src_vma->vm_start,
4896 					src_vma->vm_end);
4897 		mmu_notifier_invalidate_range_start(&range);
4898 		vma_assert_write_locked(src_vma);
4899 		raw_write_seqcount_begin(&src->write_protect_seq);
4900 	} else {
4901 		/*
4902 		 * For shared mappings the vma lock must be held before
4903 		 * calling hugetlb_walk() in the src vma. Otherwise, the
4904 		 * returned ptep could go away if part of a shared pmd and
4905 		 * another thread calls huge_pmd_unshare.
4906 		 */
4907 		hugetlb_vma_lock_read(src_vma);
4908 	}
4909 
4910 	last_addr_mask = hugetlb_mask_last_page(h);
4911 	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
4912 		spinlock_t *src_ptl, *dst_ptl;
4913 		src_pte = hugetlb_walk(src_vma, addr, sz);
4914 		if (!src_pte) {
4915 			addr |= last_addr_mask;
4916 			continue;
4917 		}
4918 		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
4919 		if (!dst_pte) {
4920 			ret = -ENOMEM;
4921 			break;
4922 		}
4923 
4924 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
4925 		/* If the pagetables are shared, there is nothing to do */
4926 		if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
4927 			addr |= last_addr_mask;
4928 			continue;
4929 		}
4930 #endif
4931 
4932 		dst_ptl = huge_pte_lock(h, dst, dst_pte);
4933 		src_ptl = huge_pte_lockptr(h, src, src_pte);
4934 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
4935 		entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4936 again:
4937 		if (huge_pte_none(entry)) {
4938 			/* Skip if src entry none. */
4939 			goto next;
4940 		}
4941 
4942 		softleaf = softleaf_from_pte(entry);
4943 		if (unlikely(softleaf_is_hwpoison(softleaf))) {
4944 			if (!userfaultfd_wp(dst_vma))
4945 				entry = huge_pte_clear_uffd_wp(entry);
4946 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4947 		} else if (unlikely(softleaf_is_migration(softleaf))) {
4948 			bool uffd_wp = pte_swp_uffd_wp(entry);
4949 
4950 			if (!softleaf_is_migration_read(softleaf) && cow) {
4951 				/*
4952 				 * COW mappings require pages in both
4953 				 * parent and child to be set to read.
4954 				 */
4955 				softleaf = make_readable_migration_entry(
4956 							swp_offset(softleaf));
4957 				entry = swp_entry_to_pte(softleaf);
4958 				if (userfaultfd_wp(src_vma) && uffd_wp)
4959 					entry = pte_swp_mkuffd_wp(entry);
4960 				set_huge_pte_at(src, addr, src_pte, entry, sz);
4961 			}
4962 			if (!userfaultfd_wp(dst_vma))
4963 				entry = huge_pte_clear_uffd_wp(entry);
4964 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
4965 		} else if (unlikely(pte_is_marker(entry))) {
4966 			const pte_marker marker = copy_pte_marker(softleaf, dst_vma);
4967 
4968 			if (marker)
4969 				set_huge_pte_at(dst, addr, dst_pte,
4970 						make_pte_marker(marker), sz);
4971 		} else {
4972 			entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
4973 			pte_folio = page_folio(pte_page(entry));
4974 			folio_get(pte_folio);
4975 
4976 			/*
4977 			 * Failing to duplicate the anon rmap is a rare case
4978 			 * where we see pinned hugetlb pages while they're
4979 			 * prone to COW. We need to do the COW earlier during
4980 			 * fork.
4981 			 *
4982 			 * When pre-allocating the page or copying data, we
4983 			 * need to be without the pgtable locks since we could
4984 			 * sleep during the process.
4985 			 */
4986 			if (!folio_test_anon(pte_folio)) {
4987 				hugetlb_add_file_rmap(pte_folio);
4988 			} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
4989 				pte_t src_pte_old = entry;
4990 				struct folio *new_folio;
4991 
4992 				spin_unlock(src_ptl);
4993 				spin_unlock(dst_ptl);
4994 				/* Do not use reserve as it's private owned */
4995 				new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
4996 				if (IS_ERR(new_folio)) {
4997 					folio_put(pte_folio);
4998 					ret = PTR_ERR(new_folio);
4999 					break;
5000 				}
5001 				ret = copy_user_large_folio(new_folio, pte_folio,
5002 							    addr, dst_vma);
5003 				folio_put(pte_folio);
5004 				if (ret) {
5005 					folio_put(new_folio);
5006 					break;
5007 				}
5008 
5009 				/* Install the new hugetlb folio if src pte stable */
5010 				dst_ptl = huge_pte_lock(h, dst, dst_pte);
5011 				src_ptl = huge_pte_lockptr(h, src, src_pte);
5012 				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5013 				entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
5014 				if (!pte_same(src_pte_old, entry)) {
5015 					restore_reserve_on_error(h, dst_vma, addr,
5016 								new_folio);
5017 					folio_put(new_folio);
5018 					/* huge_ptep of dst_pte won't change as in child */
5019 					goto again;
5020 				}
5021 				hugetlb_install_folio(dst_vma, dst_pte, addr,
5022 						      new_folio, src_pte_old, sz);
5023 				goto next;
5024 			}
5025 
5026 			if (cow) {
5027 				/*
5028 				 * No need to notify as we are downgrading page
5029 				 * table protection not changing it to point
5030 				 * to a new page.
5031 				 *
5032 				 * See Documentation/mm/mmu_notifier.rst
5033 				 */
5034 				huge_ptep_set_wrprotect(src, addr, src_pte);
5035 				entry = huge_pte_wrprotect(entry);
5036 			}
5037 
5038 			if (!userfaultfd_wp(dst_vma))
5039 				entry = huge_pte_clear_uffd_wp(entry);
5040 
5041 			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
5042 			hugetlb_count_add(npages, dst);
5043 		}
5044 
5045 next:
5046 		spin_unlock(src_ptl);
5047 		spin_unlock(dst_ptl);
5048 	}
5049 
5050 	if (cow) {
5051 		raw_write_seqcount_end(&src->write_protect_seq);
5052 		mmu_notifier_invalidate_range_end(&range);
5053 	} else {
5054 		hugetlb_vma_unlock_read(src_vma);
5055 	}
5056 
5057 	return ret;
5058 }
5059 
move_huge_pte(struct vm_area_struct * vma,unsigned long old_addr,unsigned long new_addr,pte_t * src_pte,pte_t * dst_pte,unsigned long sz)5060 static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
5061 			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
5062 			  unsigned long sz)
5063 {
5064 	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
5065 	struct hstate *h = hstate_vma(vma);
5066 	struct mm_struct *mm = vma->vm_mm;
5067 	spinlock_t *src_ptl, *dst_ptl;
5068 	pte_t pte;
5069 
5070 	dst_ptl = huge_pte_lock(h, mm, dst_pte);
5071 	src_ptl = huge_pte_lockptr(h, mm, src_pte);
5072 
5073 	/*
5074 	 * We don't have to worry about the ordering of src and dst ptlocks
5075 	 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
5076 	 */
5077 	if (src_ptl != dst_ptl)
5078 		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
5079 
5080 	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);
5081 
5082 	if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte)) {
5083 		huge_pte_clear(mm, new_addr, dst_pte, sz);
5084 	} else {
5085 		if (need_clear_uffd_wp) {
5086 			if (pte_present(pte))
5087 				pte = huge_pte_clear_uffd_wp(pte);
5088 			else
5089 				pte = pte_swp_clear_uffd_wp(pte);
5090 		}
5091 		set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
5092 	}
5093 
5094 	if (src_ptl != dst_ptl)
5095 		spin_unlock(src_ptl);
5096 	spin_unlock(dst_ptl);
5097 }
5098 
move_hugetlb_page_tables(struct vm_area_struct * vma,struct vm_area_struct * new_vma,unsigned long old_addr,unsigned long new_addr,unsigned long len)5099 int move_hugetlb_page_tables(struct vm_area_struct *vma,
5100 			     struct vm_area_struct *new_vma,
5101 			     unsigned long old_addr, unsigned long new_addr,
5102 			     unsigned long len)
5103 {
5104 	struct hstate *h = hstate_vma(vma);
5105 	struct address_space *mapping = vma->vm_file->f_mapping;
5106 	unsigned long sz = huge_page_size(h);
5107 	struct mm_struct *mm = vma->vm_mm;
5108 	unsigned long old_end = old_addr + len;
5109 	unsigned long last_addr_mask;
5110 	pte_t *src_pte, *dst_pte;
5111 	struct mmu_notifier_range range;
5112 	struct mmu_gather tlb;
5113 
5114 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
5115 				old_end);
5116 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5117 	/*
5118 	 * In case of shared PMDs, we should cover the maximum possible
5119 	 * range.
5120 	 */
5121 	flush_cache_range(vma, range.start, range.end);
5122 	tlb_gather_mmu_vma(&tlb, vma);
5123 
5124 	mmu_notifier_invalidate_range_start(&range);
5125 	last_addr_mask = hugetlb_mask_last_page(h);
5126 	/* Prevent race with file truncation */
5127 	hugetlb_vma_lock_write(vma);
5128 	i_mmap_lock_write(mapping);
5129 	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
5130 		src_pte = hugetlb_walk(vma, old_addr, sz);
5131 		if (!src_pte) {
5132 			old_addr |= last_addr_mask;
5133 			new_addr |= last_addr_mask;
5134 			continue;
5135 		}
5136 		if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
5137 			continue;
5138 
5139 		if (huge_pmd_unshare(&tlb, vma, old_addr, src_pte)) {
5140 			old_addr |= last_addr_mask;
5141 			new_addr |= last_addr_mask;
5142 			continue;
5143 		}
5144 
5145 		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
5146 		if (!dst_pte)
5147 			break;
5148 
5149 		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
5150 		tlb_remove_huge_tlb_entry(h, &tlb, src_pte, old_addr);
5151 	}
5152 
5153 	tlb_flush_mmu_tlbonly(&tlb);
5154 	huge_pmd_unshare_flush(&tlb, vma);
5155 
5156 	mmu_notifier_invalidate_range_end(&range);
5157 	i_mmap_unlock_write(mapping);
5158 	hugetlb_vma_unlock_write(vma);
5159 	tlb_finish_mmu(&tlb);
5160 
5161 	return len + old_addr - old_end;
5162 }
5163 
__unmap_hugepage_range(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long start,unsigned long end,struct folio * folio,zap_flags_t zap_flags)5164 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
5165 			    unsigned long start, unsigned long end,
5166 			    struct folio *folio, zap_flags_t zap_flags)
5167 {
5168 	struct mm_struct *mm = vma->vm_mm;
5169 	const bool folio_provided = !!folio;
5170 	unsigned long address;
5171 	pte_t *ptep;
5172 	pte_t pte;
5173 	spinlock_t *ptl;
5174 	struct hstate *h = hstate_vma(vma);
5175 	unsigned long sz = huge_page_size(h);
5176 	bool adjust_reservation;
5177 	unsigned long last_addr_mask;
5178 
5179 	WARN_ON(!is_vm_hugetlb_page(vma));
5180 	BUG_ON(start & ~huge_page_mask(h));
5181 	BUG_ON(end & ~huge_page_mask(h));
5182 
5183 	/*
5184 	 * This is a hugetlb vma, all the pte entries should point
5185 	 * to huge page.
5186 	 */
5187 	tlb_change_page_size(tlb, sz);
5188 	tlb_start_vma(tlb, vma);
5189 
5190 	last_addr_mask = hugetlb_mask_last_page(h);
5191 	address = start;
5192 	for (; address < end; address += sz) {
5193 		ptep = hugetlb_walk(vma, address, sz);
5194 		if (!ptep) {
5195 			address |= last_addr_mask;
5196 			continue;
5197 		}
5198 
5199 		ptl = huge_pte_lock(h, mm, ptep);
5200 		if (huge_pmd_unshare(tlb, vma, address, ptep)) {
5201 			spin_unlock(ptl);
5202 			address |= last_addr_mask;
5203 			continue;
5204 		}
5205 
5206 		pte = huge_ptep_get(mm, address, ptep);
5207 		if (huge_pte_none(pte)) {
5208 			spin_unlock(ptl);
5209 			continue;
5210 		}
5211 
5212 		/*
5213 		 * Migrating hugepage or HWPoisoned hugepage is already
5214 		 * unmapped and its refcount is dropped, so just clear pte here.
5215 		 */
5216 		if (unlikely(!pte_present(pte))) {
5217 			/*
5218 			 * If the pte was wr-protected by uffd-wp in any of the
5219 			 * swap forms, meanwhile the caller does not want to
5220 			 * drop the uffd-wp bit in this zap, then replace the
5221 			 * pte with a marker.
5222 			 */
5223 			if (pte_swp_uffd_wp_any(pte) &&
5224 			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5225 				set_huge_pte_at(mm, address, ptep,
5226 						make_pte_marker(PTE_MARKER_UFFD_WP),
5227 						sz);
5228 			else
5229 				huge_pte_clear(mm, address, ptep, sz);
5230 			spin_unlock(ptl);
5231 			continue;
5232 		}
5233 
5234 		/*
5235 		 * If a folio is supplied, it is because a specific
5236 		 * folio is being unmapped, not a range. Ensure the folio we
5237 		 * are about to unmap is the actual folio of interest.
5238 		 */
5239 		if (folio_provided) {
5240 			if (folio != page_folio(pte_page(pte))) {
5241 				spin_unlock(ptl);
5242 				continue;
5243 			}
5244 			/*
5245 			 * Mark the VMA as having unmapped its page so that
5246 			 * future faults in this VMA will fail rather than
5247 			 * looking like data was lost
5248 			 */
5249 			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
5250 		} else {
5251 			folio = page_folio(pte_page(pte));
5252 		}
5253 
5254 		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
5255 		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
5256 		if (huge_pte_dirty(pte))
5257 			folio_mark_dirty(folio);
5258 		/* Leave a uffd-wp pte marker if needed */
5259 		if (huge_pte_uffd_wp(pte) &&
5260 		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
5261 			set_huge_pte_at(mm, address, ptep,
5262 					make_pte_marker(PTE_MARKER_UFFD_WP),
5263 					sz);
5264 		hugetlb_count_sub(pages_per_huge_page(h), mm);
5265 		hugetlb_remove_rmap(folio);
5266 		spin_unlock(ptl);
5267 
5268 		/*
5269 		 * Restore the reservation for anonymous page, otherwise the
5270 		 * backing page could be stolen by someone.
5271 		 * If there we are freeing a surplus, do not set the restore
5272 		 * reservation bit.
5273 		 */
5274 		adjust_reservation = false;
5275 
5276 		spin_lock_irq(&hugetlb_lock);
5277 		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
5278 		    folio_test_anon(folio)) {
5279 			folio_set_hugetlb_restore_reserve(folio);
5280 			/* Reservation to be adjusted after the spin lock */
5281 			adjust_reservation = true;
5282 		}
5283 		spin_unlock_irq(&hugetlb_lock);
5284 
5285 		/*
5286 		 * Adjust the reservation for the region that will have the
5287 		 * reserve restored. Keep in mind that vma_needs_reservation() changes
5288 		 * resv->adds_in_progress if it succeeds. If this is not done,
5289 		 * do_exit() will not see it, and will keep the reservation
5290 		 * forever.
5291 		 */
5292 		if (adjust_reservation) {
5293 			int rc = vma_needs_reservation(h, vma, address);
5294 
5295 			if (rc < 0)
5296 				/* Pressumably allocate_file_region_entries failed
5297 				 * to allocate a file_region struct. Clear
5298 				 * hugetlb_restore_reserve so that global reserve
5299 				 * count will not be incremented by free_huge_folio.
5300 				 * Act as if we consumed the reservation.
5301 				 */
5302 				folio_clear_hugetlb_restore_reserve(folio);
5303 			else if (rc)
5304 				vma_add_reservation(h, vma, address);
5305 		}
5306 
5307 		tlb_remove_page_size(tlb, folio_page(folio, 0),
5308 				     folio_size(folio));
5309 		/*
5310 		 * If we were instructed to unmap a specific folio, we're done.
5311 		 */
5312 		if (folio_provided)
5313 			break;
5314 	}
5315 	tlb_end_vma(tlb, vma);
5316 
5317 	huge_pmd_unshare_flush(tlb, vma);
5318 }
5319 
__hugetlb_zap_begin(struct vm_area_struct * vma,unsigned long * start,unsigned long * end)5320 void __hugetlb_zap_begin(struct vm_area_struct *vma,
5321 			 unsigned long *start, unsigned long *end)
5322 {
5323 	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
5324 		return;
5325 
5326 	adjust_range_if_pmd_sharing_possible(vma, start, end);
5327 	hugetlb_vma_lock_write(vma);
5328 	if (vma->vm_file)
5329 		i_mmap_lock_write(vma->vm_file->f_mapping);
5330 }
5331 
__hugetlb_zap_end(struct vm_area_struct * vma,struct zap_details * details)5332 void __hugetlb_zap_end(struct vm_area_struct *vma,
5333 		       struct zap_details *details)
5334 {
5335 	zap_flags_t zap_flags = details ? details->zap_flags : 0;
5336 
5337 	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
5338 		return;
5339 
5340 	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
5341 		/*
5342 		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
5343 		 * When the vma_lock is freed, this makes the vma ineligible
5344 		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
5345 		 * pmd sharing.  This is important as page tables for this
5346 		 * unmapped range will be asynchrously deleted.  If the page
5347 		 * tables are shared, there will be issues when accessed by
5348 		 * someone else.
5349 		 */
5350 		__hugetlb_vma_unlock_write_free(vma);
5351 	} else {
5352 		hugetlb_vma_unlock_write(vma);
5353 	}
5354 
5355 	if (vma->vm_file)
5356 		i_mmap_unlock_write(vma->vm_file->f_mapping);
5357 }
5358 
unmap_hugepage_range(struct vm_area_struct * vma,unsigned long start,unsigned long end,struct folio * folio,zap_flags_t zap_flags)5359 void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
5360 			  unsigned long end, struct folio *folio,
5361 			  zap_flags_t zap_flags)
5362 {
5363 	struct mmu_notifier_range range;
5364 	struct mmu_gather tlb;
5365 
5366 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
5367 				start, end);
5368 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
5369 	mmu_notifier_invalidate_range_start(&range);
5370 	tlb_gather_mmu(&tlb, vma->vm_mm);
5371 
5372 	__unmap_hugepage_range(&tlb, vma, start, end,
5373 			       folio, zap_flags);
5374 
5375 	mmu_notifier_invalidate_range_end(&range);
5376 	tlb_finish_mmu(&tlb);
5377 }
5378 
5379 /*
5380  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5381  * mapping it owns the reserve page for. The intention is to unmap the page
5382  * from other VMAs and let the children be SIGKILLed if they are faulting the
5383  * same region.
5384  */
unmap_ref_private(struct mm_struct * mm,struct vm_area_struct * vma,struct folio * folio,unsigned long address)5385 static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
5386 			      struct folio *folio, unsigned long address)
5387 {
5388 	struct hstate *h = hstate_vma(vma);
5389 	struct vm_area_struct *iter_vma;
5390 	struct address_space *mapping;
5391 	pgoff_t pgoff;
5392 
5393 	/*
5394 	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
5395 	 * from page cache lookup which is in HPAGE_SIZE units.
5396 	 */
5397 	address = address & huge_page_mask(h);
5398 	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
5399 			vma->vm_pgoff;
5400 	mapping = vma->vm_file->f_mapping;
5401 
5402 	/*
5403 	 * Take the mapping lock for the duration of the table walk. As
5404 	 * this mapping should be shared between all the VMAs,
5405 	 * __unmap_hugepage_range() is called as the lock is already held
5406 	 */
5407 	i_mmap_lock_write(mapping);
5408 	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
5409 		/* Do not unmap the current VMA */
5410 		if (iter_vma == vma)
5411 			continue;
5412 
5413 		/*
5414 		 * Shared VMAs have their own reserves and do not affect
5415 		 * MAP_PRIVATE accounting but it is possible that a shared
5416 		 * VMA is using the same page so check and skip such VMAs.
5417 		 */
5418 		if (iter_vma->vm_flags & VM_MAYSHARE)
5419 			continue;
5420 
5421 		/*
5422 		 * Unmap the page from other VMAs without their own reserves.
5423 		 * They get marked to be SIGKILLed if they fault in these
5424 		 * areas. This is because a future no-page fault on this VMA
5425 		 * could insert a zeroed page instead of the data existing
5426 		 * from the time of fork. This would look like data corruption
5427 		 */
5428 		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
5429 			unmap_hugepage_range(iter_vma, address,
5430 					     address + huge_page_size(h),
5431 					     folio, 0);
5432 	}
5433 	i_mmap_unlock_write(mapping);
5434 }
5435 
5436 /*
5437  * hugetlb_wp() should be called with page lock of the original hugepage held.
5438  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5439  * cannot race with other handlers or page migration.
5440  * Keep the pte_same checks anyway to make transition from the mutex easier.
5441  */
hugetlb_wp(struct vm_fault * vmf)5442 static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
5443 {
5444 	struct vm_area_struct *vma = vmf->vma;
5445 	struct mm_struct *mm = vma->vm_mm;
5446 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5447 	pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
5448 	struct hstate *h = hstate_vma(vma);
5449 	struct folio *old_folio;
5450 	struct folio *new_folio;
5451 	bool cow_from_owner = 0;
5452 	vm_fault_t ret = 0;
5453 	struct mmu_notifier_range range;
5454 
5455 	/*
5456 	 * Never handle CoW for uffd-wp protected pages.  It should be only
5457 	 * handled when the uffd-wp protection is removed.
5458 	 *
5459 	 * Note that only the CoW optimization path (in hugetlb_no_page())
5460 	 * can trigger this, because hugetlb_fault() will always resolve
5461 	 * uffd-wp bit first.
5462 	 */
5463 	if (!unshare && huge_pte_uffd_wp(pte))
5464 		return 0;
5465 
5466 	/* Let's take out MAP_SHARED mappings first. */
5467 	if (vma->vm_flags & VM_MAYSHARE) {
5468 		set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5469 		return 0;
5470 	}
5471 
5472 	old_folio = page_folio(pte_page(pte));
5473 
5474 	delayacct_wpcopy_start();
5475 
5476 retry_avoidcopy:
5477 	/*
5478 	 * If no-one else is actually using this page, we're the exclusive
5479 	 * owner and can reuse this page.
5480 	 *
5481 	 * Note that we don't rely on the (safer) folio refcount here, because
5482 	 * copying the hugetlb folio when there are unexpected (temporary)
5483 	 * folio references could harm simple fork()+exit() users when
5484 	 * we run out of free hugetlb folios: we would have to kill processes
5485 	 * in scenarios that used to work. As a side effect, there can still
5486 	 * be leaks between processes, for example, with FOLL_GET users.
5487 	 */
5488 	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5489 		if (!PageAnonExclusive(&old_folio->page)) {
5490 			folio_move_anon_rmap(old_folio, vma);
5491 			SetPageAnonExclusive(&old_folio->page);
5492 		}
5493 		if (likely(!unshare))
5494 			set_huge_ptep_maybe_writable(vma, vmf->address,
5495 						     vmf->pte);
5496 
5497 		delayacct_wpcopy_end();
5498 		return 0;
5499 	}
5500 	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5501 		       PageAnonExclusive(&old_folio->page), &old_folio->page);
5502 
5503 	/*
5504 	 * If the process that created a MAP_PRIVATE mapping is about to perform
5505 	 * a COW due to a shared page count, attempt to satisfy the allocation
5506 	 * without using the existing reserves.
5507 	 * In order to determine where this is a COW on a MAP_PRIVATE mapping it
5508 	 * is enough to check whether the old_folio is anonymous. This means that
5509 	 * the reserve for this address was consumed. If reserves were used, a
5510 	 * partial faulted mapping at the fime of fork() could consume its reserves
5511 	 * on COW instead of the full address range.
5512 	 */
5513 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5514 	    folio_test_anon(old_folio))
5515 		cow_from_owner = true;
5516 
5517 	folio_get(old_folio);
5518 
5519 	/*
5520 	 * Drop page table lock as buddy allocator may be called. It will
5521 	 * be acquired again before returning to the caller, as expected.
5522 	 */
5523 	spin_unlock(vmf->ptl);
5524 	new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
5525 
5526 	if (IS_ERR(new_folio)) {
5527 		/*
5528 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
5529 		 * it is due to references held by a child and an insufficient
5530 		 * huge page pool. To guarantee the original mappers
5531 		 * reliability, unmap the page from child processes. The child
5532 		 * may get SIGKILLed if it later faults.
5533 		 */
5534 		if (cow_from_owner) {
5535 			struct address_space *mapping = vma->vm_file->f_mapping;
5536 			pgoff_t idx;
5537 			u32 hash;
5538 
5539 			folio_put(old_folio);
5540 			/*
5541 			 * Drop hugetlb_fault_mutex and vma_lock before
5542 			 * unmapping.  unmapping needs to hold vma_lock
5543 			 * in write mode.  Dropping vma_lock in read mode
5544 			 * here is OK as COW mappings do not interact with
5545 			 * PMD sharing.
5546 			 *
5547 			 * Reacquire both after unmap operation.
5548 			 */
5549 			idx = vma_hugecache_offset(h, vma, vmf->address);
5550 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5551 			hugetlb_vma_unlock_read(vma);
5552 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5553 
5554 			unmap_ref_private(mm, vma, old_folio, vmf->address);
5555 
5556 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5557 			hugetlb_vma_lock_read(vma);
5558 			spin_lock(vmf->ptl);
5559 			vmf->pte = hugetlb_walk(vma, vmf->address,
5560 					huge_page_size(h));
5561 			if (likely(vmf->pte &&
5562 				   pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
5563 				goto retry_avoidcopy;
5564 			/*
5565 			 * race occurs while re-acquiring page table
5566 			 * lock, and our job is done.
5567 			 */
5568 			delayacct_wpcopy_end();
5569 			return 0;
5570 		}
5571 
5572 		ret = vmf_error(PTR_ERR(new_folio));
5573 		goto out_release_old;
5574 	}
5575 
5576 	/*
5577 	 * When the original hugepage is shared one, it does not have
5578 	 * anon_vma prepared.
5579 	 */
5580 	ret = __vmf_anon_prepare(vmf);
5581 	if (unlikely(ret))
5582 		goto out_release_all;
5583 
5584 	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
5585 		ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
5586 		goto out_release_all;
5587 	}
5588 	__folio_mark_uptodate(new_folio);
5589 
5590 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
5591 				vmf->address + huge_page_size(h));
5592 	mmu_notifier_invalidate_range_start(&range);
5593 
5594 	/*
5595 	 * Retake the page table lock to check for racing updates
5596 	 * before the page tables are altered
5597 	 */
5598 	spin_lock(vmf->ptl);
5599 	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
5600 	if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
5601 		pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
5602 
5603 		/* Break COW or unshare */
5604 		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
5605 		hugetlb_remove_rmap(old_folio);
5606 		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
5607 		if (huge_pte_uffd_wp(pte))
5608 			newpte = huge_pte_mkuffd_wp(newpte);
5609 		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
5610 				huge_page_size(h));
5611 		folio_set_hugetlb_migratable(new_folio);
5612 		/* Make the old page be freed below */
5613 		new_folio = old_folio;
5614 	}
5615 	spin_unlock(vmf->ptl);
5616 	mmu_notifier_invalidate_range_end(&range);
5617 out_release_all:
5618 	/*
5619 	 * No restore in case of successful pagetable update (Break COW or
5620 	 * unshare)
5621 	 */
5622 	if (new_folio != old_folio)
5623 		restore_reserve_on_error(h, vma, vmf->address, new_folio);
5624 	folio_put(new_folio);
5625 out_release_old:
5626 	folio_put(old_folio);
5627 
5628 	spin_lock(vmf->ptl); /* Caller expects lock to be held */
5629 
5630 	delayacct_wpcopy_end();
5631 	return ret;
5632 }
5633 
5634 /*
5635  * Return whether there is a pagecache page to back given address within VMA.
5636  */
hugetlbfs_pagecache_present(struct hstate * h,struct vm_area_struct * vma,unsigned long address)5637 bool hugetlbfs_pagecache_present(struct hstate *h,
5638 				 struct vm_area_struct *vma, unsigned long address)
5639 {
5640 	struct address_space *mapping = vma->vm_file->f_mapping;
5641 	pgoff_t idx = linear_page_index(vma, address);
5642 	struct folio *folio;
5643 
5644 	folio = filemap_get_folio(mapping, idx);
5645 	if (IS_ERR(folio))
5646 		return false;
5647 	folio_put(folio);
5648 	return true;
5649 }
5650 
hugetlb_add_to_page_cache(struct folio * folio,struct address_space * mapping,pgoff_t idx)5651 int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
5652 			   pgoff_t idx)
5653 {
5654 	struct inode *inode = mapping->host;
5655 	struct hstate *h = hstate_inode(inode);
5656 	int err;
5657 
5658 	idx <<= huge_page_order(h);
5659 	__folio_set_locked(folio);
5660 	err = __filemap_add_folio(mapping, folio, idx, GFP_KERNEL, NULL);
5661 
5662 	if (unlikely(err)) {
5663 		__folio_clear_locked(folio);
5664 		return err;
5665 	}
5666 	folio_clear_hugetlb_restore_reserve(folio);
5667 
5668 	/*
5669 	 * mark folio dirty so that it will not be removed from cache/file
5670 	 * by non-hugetlbfs specific code paths.
5671 	 */
5672 	folio_mark_dirty(folio);
5673 
5674 	spin_lock(&inode->i_lock);
5675 	inode->i_blocks += blocks_per_huge_page(h);
5676 	spin_unlock(&inode->i_lock);
5677 	return 0;
5678 }
5679 
hugetlb_handle_userfault(struct vm_fault * vmf,struct address_space * mapping,unsigned long reason)5680 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
5681 						  struct address_space *mapping,
5682 						  unsigned long reason)
5683 {
5684 	u32 hash;
5685 
5686 	/*
5687 	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
5688 	 * userfault. Also mmap_lock could be dropped due to handling
5689 	 * userfault, any vma operation should be careful from here.
5690 	 */
5691 	hugetlb_vma_unlock_read(vmf->vma);
5692 	hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5693 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5694 	return handle_userfault(vmf, reason);
5695 }
5696 
5697 /*
5698  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
5699  * false if pte changed or is changing.
5700  */
hugetlb_pte_stable(struct hstate * h,struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t old_pte)5701 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
5702 			       pte_t *ptep, pte_t old_pte)
5703 {
5704 	spinlock_t *ptl;
5705 	bool same;
5706 
5707 	ptl = huge_pte_lock(h, mm, ptep);
5708 	same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
5709 	spin_unlock(ptl);
5710 
5711 	return same;
5712 }
5713 
hugetlb_no_page(struct address_space * mapping,struct vm_fault * vmf)5714 static vm_fault_t hugetlb_no_page(struct address_space *mapping,
5715 			struct vm_fault *vmf)
5716 {
5717 	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5718 	bool new_folio, new_anon_folio = false;
5719 	struct vm_area_struct *vma = vmf->vma;
5720 	struct mm_struct *mm = vma->vm_mm;
5721 	struct hstate *h = hstate_vma(vma);
5722 	vm_fault_t ret = VM_FAULT_SIGBUS;
5723 	bool folio_locked = true;
5724 	struct folio *folio;
5725 	unsigned long size;
5726 	pte_t new_pte;
5727 
5728 	/*
5729 	 * Currently, we are forced to kill the process in the event the
5730 	 * original mapper has unmapped pages from the child due to a failed
5731 	 * COW/unsharing. Warn that such a situation has occurred as it may not
5732 	 * be obvious.
5733 	 */
5734 	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
5735 		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
5736 			   current->pid);
5737 		goto out;
5738 	}
5739 
5740 	/*
5741 	 * Use page lock to guard against racing truncation
5742 	 * before we get page_table_lock.
5743 	 */
5744 	new_folio = false;
5745 	folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
5746 	if (IS_ERR(folio)) {
5747 		size = i_size_read(mapping->host) >> huge_page_shift(h);
5748 		if (vmf->pgoff >= size)
5749 			goto out;
5750 		/* Check for page in userfault range */
5751 		if (userfaultfd_missing(vma)) {
5752 			/*
5753 			 * Since hugetlb_no_page() was examining pte
5754 			 * without pgtable lock, we need to re-test under
5755 			 * lock because the pte may not be stable and could
5756 			 * have changed from under us.  Try to detect
5757 			 * either changed or during-changing ptes and retry
5758 			 * properly when needed.
5759 			 *
5760 			 * Note that userfaultfd is actually fine with
5761 			 * false positives (e.g. caused by pte changed),
5762 			 * but not wrong logical events (e.g. caused by
5763 			 * reading a pte during changing).  The latter can
5764 			 * confuse the userspace, so the strictness is very
5765 			 * much preferred.  E.g., MISSING event should
5766 			 * never happen on the page after UFFDIO_COPY has
5767 			 * correctly installed the page and returned.
5768 			 */
5769 			if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5770 				ret = 0;
5771 				goto out;
5772 			}
5773 
5774 			return hugetlb_handle_userfault(vmf, mapping,
5775 							VM_UFFD_MISSING);
5776 		}
5777 
5778 		if (!(vma->vm_flags & VM_MAYSHARE)) {
5779 			ret = __vmf_anon_prepare(vmf);
5780 			if (unlikely(ret))
5781 				goto out;
5782 		}
5783 
5784 		folio = alloc_hugetlb_folio(vma, vmf->address, false);
5785 		if (IS_ERR(folio)) {
5786 			/*
5787 			 * Returning error will result in faulting task being
5788 			 * sent SIGBUS.  The hugetlb fault mutex prevents two
5789 			 * tasks from racing to fault in the same page which
5790 			 * could result in false unable to allocate errors.
5791 			 * Page migration does not take the fault mutex, but
5792 			 * does a clear then write of pte's under page table
5793 			 * lock.  Page fault code could race with migration,
5794 			 * notice the clear pte and try to allocate a page
5795 			 * here.  Before returning error, get ptl and make
5796 			 * sure there really is no pte entry.
5797 			 */
5798 			if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
5799 				ret = vmf_error(PTR_ERR(folio));
5800 			else
5801 				ret = 0;
5802 			goto out;
5803 		}
5804 		folio_zero_user(folio, vmf->real_address);
5805 		__folio_mark_uptodate(folio);
5806 		new_folio = true;
5807 
5808 		if (vma->vm_flags & VM_MAYSHARE) {
5809 			int err = hugetlb_add_to_page_cache(folio, mapping,
5810 							vmf->pgoff);
5811 			if (err) {
5812 				/*
5813 				 * err can't be -EEXIST which implies someone
5814 				 * else consumed the reservation since hugetlb
5815 				 * fault mutex is held when add a hugetlb page
5816 				 * to the page cache. So it's safe to call
5817 				 * restore_reserve_on_error() here.
5818 				 */
5819 				restore_reserve_on_error(h, vma, vmf->address,
5820 							folio);
5821 				folio_put(folio);
5822 				ret = VM_FAULT_SIGBUS;
5823 				goto out;
5824 			}
5825 		} else {
5826 			new_anon_folio = true;
5827 			folio_lock(folio);
5828 		}
5829 	} else {
5830 		/*
5831 		 * If memory error occurs between mmap() and fault, some process
5832 		 * don't have hwpoisoned swap entry for errored virtual address.
5833 		 * So we need to block hugepage fault by PG_hwpoison bit check.
5834 		 */
5835 		if (unlikely(folio_test_hwpoison(folio))) {
5836 			ret = VM_FAULT_HWPOISON_LARGE |
5837 				VM_FAULT_SET_HINDEX(hstate_index(h));
5838 			goto backout_unlocked;
5839 		}
5840 
5841 		/* Check for page in userfault range. */
5842 		if (userfaultfd_minor(vma)) {
5843 			folio_unlock(folio);
5844 			folio_put(folio);
5845 			/* See comment in userfaultfd_missing() block above */
5846 			if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
5847 				ret = 0;
5848 				goto out;
5849 			}
5850 			return hugetlb_handle_userfault(vmf, mapping,
5851 							VM_UFFD_MINOR);
5852 		}
5853 	}
5854 
5855 	/*
5856 	 * If we are going to COW a private mapping later, we examine the
5857 	 * pending reservations for this page now. This will ensure that
5858 	 * any allocations necessary to record that reservation occur outside
5859 	 * the spinlock.
5860 	 */
5861 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5862 		if (vma_needs_reservation(h, vma, vmf->address) < 0) {
5863 			ret = VM_FAULT_OOM;
5864 			goto backout_unlocked;
5865 		}
5866 		/* Just decrements count, does not deallocate */
5867 		vma_end_reservation(h, vma, vmf->address);
5868 	}
5869 
5870 	vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
5871 	ret = 0;
5872 	/* If pte changed from under us, retry */
5873 	if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
5874 		goto backout;
5875 
5876 	if (new_anon_folio)
5877 		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
5878 	else
5879 		hugetlb_add_file_rmap(folio);
5880 	new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
5881 	/*
5882 	 * If this pte was previously wr-protected, keep it wr-protected even
5883 	 * if populated.
5884 	 */
5885 	if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte)))
5886 		new_pte = huge_pte_mkuffd_wp(new_pte);
5887 	set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));
5888 
5889 	hugetlb_count_add(pages_per_huge_page(h), mm);
5890 	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
5891 		/*
5892 		 * No need to keep file folios locked. See comment in
5893 		 * hugetlb_fault().
5894 		 */
5895 		if (!new_anon_folio) {
5896 			folio_locked = false;
5897 			folio_unlock(folio);
5898 		}
5899 		/* Optimization, do the COW without a second fault */
5900 		ret = hugetlb_wp(vmf);
5901 	}
5902 
5903 	spin_unlock(vmf->ptl);
5904 
5905 	/*
5906 	 * Only set hugetlb_migratable in newly allocated pages.  Existing pages
5907 	 * found in the pagecache may not have hugetlb_migratable if they have
5908 	 * been isolated for migration.
5909 	 */
5910 	if (new_folio)
5911 		folio_set_hugetlb_migratable(folio);
5912 
5913 	if (folio_locked)
5914 		folio_unlock(folio);
5915 out:
5916 	hugetlb_vma_unlock_read(vma);
5917 
5918 	/*
5919 	 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
5920 	 * the only way ret can be set to VM_FAULT_RETRY.
5921 	 */
5922 	if (unlikely(ret & VM_FAULT_RETRY))
5923 		vma_end_read(vma);
5924 
5925 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5926 	return ret;
5927 
5928 backout:
5929 	spin_unlock(vmf->ptl);
5930 backout_unlocked:
5931 	/* We only need to restore reservations for private mappings */
5932 	if (new_anon_folio)
5933 		restore_reserve_on_error(h, vma, vmf->address, folio);
5934 
5935 	folio_unlock(folio);
5936 	folio_put(folio);
5937 	goto out;
5938 }
5939 
5940 #ifdef CONFIG_SMP
hugetlb_fault_mutex_hash(struct address_space * mapping,pgoff_t idx)5941 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5942 {
5943 	unsigned long key[2];
5944 	u32 hash;
5945 
5946 	key[0] = (unsigned long) mapping;
5947 	key[1] = idx;
5948 
5949 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5950 
5951 	return hash & (num_fault_mutexes - 1);
5952 }
5953 #else
5954 /*
5955  * For uniprocessor systems we always use a single mutex, so just
5956  * return 0 and avoid the hashing overhead.
5957  */
hugetlb_fault_mutex_hash(struct address_space * mapping,pgoff_t idx)5958 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5959 {
5960 	return 0;
5961 }
5962 #endif
5963 
hugetlb_fault(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long address,unsigned int flags)5964 vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
5965 			unsigned long address, unsigned int flags)
5966 {
5967 	vm_fault_t ret;
5968 	u32 hash;
5969 	struct folio *folio = NULL;
5970 	struct hstate *h = hstate_vma(vma);
5971 	struct address_space *mapping;
5972 	bool need_wait_lock = false;
5973 	struct vm_fault vmf = {
5974 		.vma = vma,
5975 		.address = address & huge_page_mask(h),
5976 		.real_address = address,
5977 		.flags = flags,
5978 		.pgoff = vma_hugecache_offset(h, vma,
5979 				address & huge_page_mask(h)),
5980 		/* TODO: Track hugetlb faults using vm_fault */
5981 
5982 		/*
5983 		 * Some fields may not be initialized, be careful as it may
5984 		 * be hard to debug if called functions make assumptions
5985 		 */
5986 	};
5987 
5988 	/*
5989 	 * Serialize hugepage allocation and instantiation, so that we don't
5990 	 * get spurious allocation failures if two CPUs race to instantiate
5991 	 * the same page in the page cache.
5992 	 */
5993 	mapping = vma->vm_file->f_mapping;
5994 	hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
5995 	mutex_lock(&hugetlb_fault_mutex_table[hash]);
5996 
5997 	/*
5998 	 * Acquire vma lock before calling huge_pte_alloc and hold
5999 	 * until finished with vmf.pte.  This prevents huge_pmd_unshare from
6000 	 * being called elsewhere and making the vmf.pte no longer valid.
6001 	 */
6002 	hugetlb_vma_lock_read(vma);
6003 	vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
6004 	if (!vmf.pte) {
6005 		hugetlb_vma_unlock_read(vma);
6006 		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6007 		return VM_FAULT_OOM;
6008 	}
6009 
6010 	vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
6011 	if (huge_pte_none(vmf.orig_pte))
6012 		/*
6013 		 * hugetlb_no_page will drop vma lock and hugetlb fault
6014 		 * mutex internally, which make us return immediately.
6015 		 */
6016 		return hugetlb_no_page(mapping, &vmf);
6017 
6018 	if (pte_is_marker(vmf.orig_pte)) {
6019 		const pte_marker marker =
6020 			softleaf_to_marker(softleaf_from_pte(vmf.orig_pte));
6021 
6022 		if (marker & PTE_MARKER_POISONED) {
6023 			ret = VM_FAULT_HWPOISON_LARGE |
6024 				VM_FAULT_SET_HINDEX(hstate_index(h));
6025 			goto out_mutex;
6026 		} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
6027 			/* This isn't supported in hugetlb. */
6028 			ret = VM_FAULT_SIGSEGV;
6029 			goto out_mutex;
6030 		}
6031 
6032 		return hugetlb_no_page(mapping, &vmf);
6033 	}
6034 
6035 	ret = 0;
6036 
6037 	/* Not present, either a migration or a hwpoisoned entry */
6038 	if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
6039 		const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);
6040 
6041 		if (softleaf_is_migration(softleaf)) {
6042 			/*
6043 			 * Release the hugetlb fault lock now, but retain
6044 			 * the vma lock, because it is needed to guard the
6045 			 * huge_pte_lockptr() later in
6046 			 * migration_entry_wait_huge(). The vma lock will
6047 			 * be released there.
6048 			 */
6049 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6050 			migration_entry_wait_huge(vma, vmf.address, vmf.pte);
6051 			return 0;
6052 		}
6053 		if (softleaf_is_hwpoison(softleaf)) {
6054 			ret = VM_FAULT_HWPOISON_LARGE |
6055 			    VM_FAULT_SET_HINDEX(hstate_index(h));
6056 		}
6057 
6058 		goto out_mutex;
6059 	}
6060 
6061 	/*
6062 	 * If we are going to COW/unshare the mapping later, we examine the
6063 	 * pending reservations for this page now. This will ensure that any
6064 	 * allocations necessary to record that reservation occur outside the
6065 	 * spinlock.
6066 	 */
6067 	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
6068 	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
6069 		if (vma_needs_reservation(h, vma, vmf.address) < 0) {
6070 			ret = VM_FAULT_OOM;
6071 			goto out_mutex;
6072 		}
6073 		/* Just decrements count, does not deallocate */
6074 		vma_end_reservation(h, vma, vmf.address);
6075 	}
6076 
6077 	vmf.ptl = huge_pte_lock(h, mm, vmf.pte);
6078 
6079 	/* Check for a racing update before calling hugetlb_wp() */
6080 	if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
6081 		goto out_ptl;
6082 
6083 	/* Handle userfault-wp first, before trying to lock more pages */
6084 	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
6085 	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
6086 		if (!userfaultfd_wp_async(vma)) {
6087 			spin_unlock(vmf.ptl);
6088 			hugetlb_vma_unlock_read(vma);
6089 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6090 			return handle_userfault(&vmf, VM_UFFD_WP);
6091 		}
6092 
6093 		vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
6094 		set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
6095 				huge_page_size(hstate_vma(vma)));
6096 		/* Fallthrough to CoW */
6097 	}
6098 
6099 	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
6100 		if (!huge_pte_write(vmf.orig_pte)) {
6101 			/*
6102 			 * Anonymous folios need to be lock since hugetlb_wp()
6103 			 * checks whether we can re-use the folio exclusively
6104 			 * for us in case we are the only user of it.
6105 			 */
6106 			folio = page_folio(pte_page(vmf.orig_pte));
6107 			if (folio_test_anon(folio) && !folio_trylock(folio)) {
6108 				need_wait_lock = true;
6109 				goto out_ptl;
6110 			}
6111 			folio_get(folio);
6112 			ret = hugetlb_wp(&vmf);
6113 			if (folio_test_anon(folio))
6114 				folio_unlock(folio);
6115 			folio_put(folio);
6116 			goto out_ptl;
6117 		} else if (likely(flags & FAULT_FLAG_WRITE)) {
6118 			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
6119 		}
6120 	}
6121 	vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
6122 	if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
6123 						flags & FAULT_FLAG_WRITE))
6124 		update_mmu_cache(vma, vmf.address, vmf.pte);
6125 out_ptl:
6126 	spin_unlock(vmf.ptl);
6127 out_mutex:
6128 	hugetlb_vma_unlock_read(vma);
6129 
6130 	/*
6131 	 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
6132 	 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
6133 	 */
6134 	if (unlikely(ret & VM_FAULT_RETRY))
6135 		vma_end_read(vma);
6136 
6137 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
6138 	/*
6139 	 * hugetlb_wp drops all the locks, but the folio lock, before trying to
6140 	 * unmap the folio from other processes. During that window, if another
6141 	 * process mapping that folio faults in, it will take the mutex and then
6142 	 * it will wait on folio_lock, causing an ABBA deadlock.
6143 	 * Use trylock instead and bail out if we fail.
6144 	 *
6145 	 * Ideally, we should hold a refcount on the folio we wait for, but we do
6146 	 * not want to use the folio after it becomes unlocked, but rather just
6147 	 * wait for it to become unlocked, so hopefully next fault successes on
6148 	 * the trylock.
6149 	 */
6150 	if (need_wait_lock)
6151 		folio_wait_locked(folio);
6152 	return ret;
6153 }
6154 
6155 #ifdef CONFIG_USERFAULTFD
6156 /*
6157  * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6158  */
alloc_hugetlb_folio_vma(struct hstate * h,struct vm_area_struct * vma,unsigned long address)6159 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6160 		struct vm_area_struct *vma, unsigned long address)
6161 {
6162 	struct mempolicy *mpol;
6163 	nodemask_t *nodemask;
6164 	struct folio *folio;
6165 	gfp_t gfp_mask;
6166 	int node;
6167 
6168 	gfp_mask = htlb_alloc_mask(h);
6169 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6170 	/*
6171 	 * This is used to allocate a temporary hugetlb to hold the copied
6172 	 * content, which will then be copied again to the final hugetlb
6173 	 * consuming a reservation. Set the alloc_fallback to false to indicate
6174 	 * that breaking the per-node hugetlb pool is not allowed in this case.
6175 	 */
6176 	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6177 	mpol_cond_put(mpol);
6178 
6179 	return folio;
6180 }
6181 
6182 /*
6183  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6184  * with modifications for hugetlb pages.
6185  */
hugetlb_mfill_atomic_pte(pte_t * dst_pte,struct vm_area_struct * dst_vma,unsigned long dst_addr,unsigned long src_addr,uffd_flags_t flags,struct folio ** foliop)6186 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6187 			     struct vm_area_struct *dst_vma,
6188 			     unsigned long dst_addr,
6189 			     unsigned long src_addr,
6190 			     uffd_flags_t flags,
6191 			     struct folio **foliop)
6192 {
6193 	struct mm_struct *dst_mm = dst_vma->vm_mm;
6194 	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6195 	bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6196 	struct hstate *h = hstate_vma(dst_vma);
6197 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
6198 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6199 	unsigned long size = huge_page_size(h);
6200 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
6201 	pte_t _dst_pte;
6202 	spinlock_t *ptl;
6203 	int ret = -ENOMEM;
6204 	struct folio *folio;
6205 	bool folio_in_pagecache = false;
6206 	pte_t dst_ptep;
6207 
6208 	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6209 		ptl = huge_pte_lock(h, dst_mm, dst_pte);
6210 
6211 		/* Don't overwrite any existing PTEs (even markers) */
6212 		if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6213 			spin_unlock(ptl);
6214 			return -EEXIST;
6215 		}
6216 
6217 		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6218 		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6219 
6220 		/* No need to invalidate - it was non-present before */
6221 		update_mmu_cache(dst_vma, dst_addr, dst_pte);
6222 
6223 		spin_unlock(ptl);
6224 		return 0;
6225 	}
6226 
6227 	if (is_continue) {
6228 		ret = -EFAULT;
6229 		folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6230 		if (IS_ERR(folio))
6231 			goto out;
6232 		folio_in_pagecache = true;
6233 	} else if (!*foliop) {
6234 		/* If a folio already exists, then it's UFFDIO_COPY for
6235 		 * a non-missing case. Return -EEXIST.
6236 		 */
6237 		if (vm_shared &&
6238 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6239 			ret = -EEXIST;
6240 			goto out;
6241 		}
6242 
6243 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6244 		if (IS_ERR(folio)) {
6245 			pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
6246 			if (actual_pte) {
6247 				ret = -EEXIST;
6248 				goto out;
6249 			}
6250 			ret = -ENOMEM;
6251 			goto out;
6252 		}
6253 
6254 		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6255 					   false);
6256 
6257 		/* fallback to copy_from_user outside mmap_lock */
6258 		if (unlikely(ret)) {
6259 			ret = -ENOENT;
6260 			/* Free the allocated folio which may have
6261 			 * consumed a reservation.
6262 			 */
6263 			restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6264 			folio_put(folio);
6265 
6266 			/* Allocate a temporary folio to hold the copied
6267 			 * contents.
6268 			 */
6269 			folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6270 			if (!folio) {
6271 				ret = -ENOMEM;
6272 				goto out;
6273 			}
6274 			*foliop = folio;
6275 			/* Set the outparam foliop and return to the caller to
6276 			 * copy the contents outside the lock. Don't free the
6277 			 * folio.
6278 			 */
6279 			goto out;
6280 		}
6281 	} else {
6282 		if (vm_shared &&
6283 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6284 			folio_put(*foliop);
6285 			ret = -EEXIST;
6286 			*foliop = NULL;
6287 			goto out;
6288 		}
6289 
6290 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6291 		if (IS_ERR(folio)) {
6292 			folio_put(*foliop);
6293 			ret = -ENOMEM;
6294 			*foliop = NULL;
6295 			goto out;
6296 		}
6297 		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6298 		folio_put(*foliop);
6299 		*foliop = NULL;
6300 		if (ret) {
6301 			folio_put(folio);
6302 			goto out;
6303 		}
6304 	}
6305 
6306 	/*
6307 	 * If we just allocated a new page, we need a memory barrier to ensure
6308 	 * that preceding stores to the page become visible before the
6309 	 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6310 	 * is what we need.
6311 	 *
6312 	 * In the case where we have not allocated a new page (is_continue),
6313 	 * the page must already be uptodate. UFFDIO_CONTINUE already includes
6314 	 * an earlier smp_wmb() to ensure that prior stores will be visible
6315 	 * before the set_pte_at() write.
6316 	 */
6317 	if (!is_continue)
6318 		__folio_mark_uptodate(folio);
6319 	else
6320 		WARN_ON_ONCE(!folio_test_uptodate(folio));
6321 
6322 	/* Add shared, newly allocated pages to the page cache. */
6323 	if (vm_shared && !is_continue) {
6324 		ret = -EFAULT;
6325 		if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
6326 			goto out_release_nounlock;
6327 
6328 		/*
6329 		 * Serialization between remove_inode_hugepages() and
6330 		 * hugetlb_add_to_page_cache() below happens through the
6331 		 * hugetlb_fault_mutex_table that here must be hold by
6332 		 * the caller.
6333 		 */
6334 		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6335 		if (ret)
6336 			goto out_release_nounlock;
6337 		folio_in_pagecache = true;
6338 	}
6339 
6340 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
6341 
6342 	ret = -EIO;
6343 	if (folio_test_hwpoison(folio))
6344 		goto out_release_unlock;
6345 
6346 	ret = -EEXIST;
6347 
6348 	dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
6349 	/*
6350 	 * See comment about UFFD marker overwriting in
6351 	 * mfill_atomic_install_pte().
6352 	 */
6353 	if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
6354 		goto out_release_unlock;
6355 
6356 	if (folio_in_pagecache)
6357 		hugetlb_add_file_rmap(folio);
6358 	else
6359 		hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
6360 
6361 	/*
6362 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6363 	 * with wp flag set, don't set pte write bit.
6364 	 */
6365 	_dst_pte = make_huge_pte(dst_vma, folio,
6366 				 !wp_enabled && !(is_continue && !vm_shared));
6367 	/*
6368 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
6369 	 * extremely important for hugetlbfs for now since swapping is not
6370 	 * supported, but we should still be clear in that this page cannot be
6371 	 * thrown away at will, even if write bit not set.
6372 	 */
6373 	_dst_pte = huge_pte_mkdirty(_dst_pte);
6374 	_dst_pte = pte_mkyoung(_dst_pte);
6375 
6376 	if (wp_enabled)
6377 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6378 
6379 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6380 
6381 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6382 
6383 	/* No need to invalidate - it was non-present before */
6384 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
6385 
6386 	spin_unlock(ptl);
6387 	if (!is_continue)
6388 		folio_set_hugetlb_migratable(folio);
6389 	if (vm_shared || is_continue)
6390 		folio_unlock(folio);
6391 	ret = 0;
6392 out:
6393 	return ret;
6394 out_release_unlock:
6395 	spin_unlock(ptl);
6396 	if (vm_shared || is_continue)
6397 		folio_unlock(folio);
6398 out_release_nounlock:
6399 	if (!folio_in_pagecache)
6400 		restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6401 	folio_put(folio);
6402 	goto out;
6403 }
6404 #endif /* CONFIG_USERFAULTFD */
6405 
hugetlb_change_protection(struct vm_area_struct * vma,unsigned long address,unsigned long end,pgprot_t newprot,unsigned long cp_flags)6406 long hugetlb_change_protection(struct vm_area_struct *vma,
6407 		unsigned long address, unsigned long end,
6408 		pgprot_t newprot, unsigned long cp_flags)
6409 {
6410 	struct mm_struct *mm = vma->vm_mm;
6411 	unsigned long start = address;
6412 	pte_t *ptep;
6413 	pte_t pte;
6414 	struct hstate *h = hstate_vma(vma);
6415 	long pages = 0, psize = huge_page_size(h);
6416 	struct mmu_notifier_range range;
6417 	unsigned long last_addr_mask;
6418 	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
6419 	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
6420 	struct mmu_gather tlb;
6421 
6422 	/*
6423 	 * In the case of shared PMDs, the area to flush could be beyond
6424 	 * start/end.  Set range.start/range.end to cover the maximum possible
6425 	 * range if PMD sharing is possible.
6426 	 */
6427 	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
6428 				0, mm, start, end);
6429 	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
6430 
6431 	BUG_ON(address >= end);
6432 	flush_cache_range(vma, range.start, range.end);
6433 	tlb_gather_mmu_vma(&tlb, vma);
6434 
6435 	mmu_notifier_invalidate_range_start(&range);
6436 	hugetlb_vma_lock_write(vma);
6437 	i_mmap_lock_write(vma->vm_file->f_mapping);
6438 	last_addr_mask = hugetlb_mask_last_page(h);
6439 	for (; address < end; address += psize) {
6440 		softleaf_t entry;
6441 		spinlock_t *ptl;
6442 
6443 		ptep = hugetlb_walk(vma, address, psize);
6444 		if (!ptep) {
6445 			if (!uffd_wp) {
6446 				address |= last_addr_mask;
6447 				continue;
6448 			}
6449 			/*
6450 			 * Userfaultfd wr-protect requires pgtable
6451 			 * pre-allocations to install pte markers.
6452 			 */
6453 			ptep = huge_pte_alloc(mm, vma, address, psize);
6454 			if (!ptep) {
6455 				pages = -ENOMEM;
6456 				break;
6457 			}
6458 		}
6459 		ptl = huge_pte_lock(h, mm, ptep);
6460 		if (huge_pmd_unshare(&tlb, vma, address, ptep)) {
6461 			/*
6462 			 * When uffd-wp is enabled on the vma, unshare
6463 			 * shouldn't happen at all.  Warn about it if it
6464 			 * happened due to some reason.
6465 			 */
6466 			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
6467 			pages++;
6468 			spin_unlock(ptl);
6469 			address |= last_addr_mask;
6470 			continue;
6471 		}
6472 		pte = huge_ptep_get(mm, address, ptep);
6473 		if (huge_pte_none(pte)) {
6474 			if (unlikely(uffd_wp))
6475 				/* Safe to modify directly (none->non-present). */
6476 				set_huge_pte_at(mm, address, ptep,
6477 						make_pte_marker(PTE_MARKER_UFFD_WP),
6478 						psize);
6479 			goto next;
6480 		}
6481 
6482 		entry = softleaf_from_pte(pte);
6483 		if (unlikely(softleaf_is_hwpoison(entry))) {
6484 			/* Nothing to do. */
6485 		} else if (unlikely(softleaf_is_migration(entry))) {
6486 			struct folio *folio = softleaf_to_folio(entry);
6487 			pte_t newpte = pte;
6488 
6489 			if (softleaf_is_migration_write(entry)) {
6490 				if (folio_test_anon(folio))
6491 					entry = make_readable_exclusive_migration_entry(
6492 								swp_offset(entry));
6493 				else
6494 					entry = make_readable_migration_entry(
6495 								swp_offset(entry));
6496 				newpte = swp_entry_to_pte(entry);
6497 				pages++;
6498 			}
6499 
6500 			if (uffd_wp)
6501 				newpte = pte_swp_mkuffd_wp(newpte);
6502 			else if (uffd_wp_resolve)
6503 				newpte = pte_swp_clear_uffd_wp(newpte);
6504 			if (!pte_same(pte, newpte))
6505 				set_huge_pte_at(mm, address, ptep, newpte, psize);
6506 		} else if (unlikely(pte_is_marker(pte))) {
6507 			/*
6508 			 * Do nothing on a poison marker; page is
6509 			 * corrupted, permissions do not apply. Here
6510 			 * pte_marker_uffd_wp()==true implies !poison
6511 			 * because they're mutual exclusive.
6512 			 */
6513 			if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
6514 				/* Safe to modify directly (non-present->none). */
6515 				huge_pte_clear(mm, address, ptep, psize);
6516 		} else {
6517 			pte_t old_pte;
6518 			unsigned int shift = huge_page_shift(hstate_vma(vma));
6519 
6520 			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
6521 			pte = huge_pte_modify(old_pte, newprot);
6522 			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
6523 			if (uffd_wp)
6524 				pte = huge_pte_mkuffd_wp(pte);
6525 			else if (uffd_wp_resolve)
6526 				pte = huge_pte_clear_uffd_wp(pte);
6527 			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
6528 			pages++;
6529 			tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
6530 		}
6531 
6532 next:
6533 		spin_unlock(ptl);
6534 		cond_resched();
6535 	}
6536 
6537 	tlb_flush_mmu_tlbonly(&tlb);
6538 	huge_pmd_unshare_flush(&tlb, vma);
6539 	/*
6540 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
6541 	 * downgrading page table protection not changing it to point to a new
6542 	 * page.
6543 	 *
6544 	 * See Documentation/mm/mmu_notifier.rst
6545 	 */
6546 	i_mmap_unlock_write(vma->vm_file->f_mapping);
6547 	hugetlb_vma_unlock_write(vma);
6548 	mmu_notifier_invalidate_range_end(&range);
6549 	tlb_finish_mmu(&tlb);
6550 
6551 	return pages > 0 ? (pages << h->order) : pages;
6552 }
6553 
6554 /*
6555  * Update the reservation map for the range [from, to].
6556  *
6557  * Returns the number of entries that would be added to the reservation map
6558  * associated with the range [from, to].  This number is greater or equal to
6559  * zero. -EINVAL or -ENOMEM is returned in case of any errors.
6560  */
6561 
hugetlb_reserve_pages(struct inode * inode,long from,long to,struct vm_area_desc * desc,vma_flags_t vma_flags)6562 long hugetlb_reserve_pages(struct inode *inode,
6563 		long from, long to,
6564 		struct vm_area_desc *desc,
6565 		vma_flags_t vma_flags)
6566 {
6567 	long chg = -1, add = -1, spool_resv, gbl_resv;
6568 	struct hstate *h = hstate_inode(inode);
6569 	struct hugepage_subpool *spool = subpool_inode(inode);
6570 	struct resv_map *resv_map;
6571 	struct hugetlb_cgroup *h_cg = NULL;
6572 	long gbl_reserve, regions_needed = 0;
6573 	int err;
6574 
6575 	/* This should never happen */
6576 	if (from > to) {
6577 		VM_WARN(1, "%s called with a negative range\n", __func__);
6578 		return -EINVAL;
6579 	}
6580 
6581 	/*
6582 	 * Only apply hugepage reservation if asked. At fault time, an
6583 	 * attempt will be made for VM_NORESERVE to allocate a page
6584 	 * without using reserves
6585 	 */
6586 	if (vma_flags_test(&vma_flags, VMA_NORESERVE_BIT))
6587 		return 0;
6588 
6589 	/*
6590 	 * Shared mappings base their reservation on the number of pages that
6591 	 * are already allocated on behalf of the file. Private mappings need
6592 	 * to reserve the full area even if read-only as mprotect() may be
6593 	 * called to make the mapping read-write. Assume !desc is a shm mapping
6594 	 */
6595 	if (!desc || vma_desc_test_flags(desc, VMA_MAYSHARE_BIT)) {
6596 		/*
6597 		 * resv_map can not be NULL as hugetlb_reserve_pages is only
6598 		 * called for inodes for which resv_maps were created (see
6599 		 * hugetlbfs_get_inode).
6600 		 */
6601 		resv_map = inode_resv_map(inode);
6602 
6603 		chg = region_chg(resv_map, from, to, &regions_needed);
6604 	} else {
6605 		/* Private mapping. */
6606 		resv_map = resv_map_alloc();
6607 		if (!resv_map) {
6608 			err = -ENOMEM;
6609 			goto out_err;
6610 		}
6611 
6612 		chg = to - from;
6613 
6614 		set_vma_desc_resv_map(desc, resv_map);
6615 		set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
6616 	}
6617 
6618 	if (chg < 0) {
6619 		/* region_chg() above can return -ENOMEM */
6620 		err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
6621 		goto out_err;
6622 	}
6623 
6624 	err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
6625 				chg * pages_per_huge_page(h), &h_cg);
6626 	if (err < 0)
6627 		goto out_err;
6628 
6629 	if (desc && !vma_desc_test_flags(desc, VMA_MAYSHARE_BIT) && h_cg) {
6630 		/* For private mappings, the hugetlb_cgroup uncharge info hangs
6631 		 * of the resv_map.
6632 		 */
6633 		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
6634 	}
6635 
6636 	/*
6637 	 * There must be enough pages in the subpool for the mapping. If
6638 	 * the subpool has a minimum size, there may be some global
6639 	 * reservations already in place (gbl_reserve).
6640 	 */
6641 	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
6642 	if (gbl_reserve < 0) {
6643 		err = gbl_reserve;
6644 		goto out_uncharge_cgroup;
6645 	}
6646 
6647 	/*
6648 	 * Check enough hugepages are available for the reservation.
6649 	 * Hand the pages back to the subpool if there are not
6650 	 */
6651 	err = hugetlb_acct_memory(h, gbl_reserve);
6652 	if (err < 0)
6653 		goto out_put_pages;
6654 
6655 	/*
6656 	 * Account for the reservations made. Shared mappings record regions
6657 	 * that have reservations as they are shared by multiple VMAs.
6658 	 * When the last VMA disappears, the region map says how much
6659 	 * the reservation was and the page cache tells how much of
6660 	 * the reservation was consumed. Private mappings are per-VMA and
6661 	 * only the consumed reservations are tracked. When the VMA
6662 	 * disappears, the original reservation is the VMA size and the
6663 	 * consumed reservations are stored in the map. Hence, nothing
6664 	 * else has to be done for private mappings here
6665 	 */
6666 	if (!desc || vma_desc_test_flags(desc, VMA_MAYSHARE_BIT)) {
6667 		add = region_add(resv_map, from, to, regions_needed, h, h_cg);
6668 
6669 		if (unlikely(add < 0)) {
6670 			hugetlb_acct_memory(h, -gbl_reserve);
6671 			err = add;
6672 			goto out_put_pages;
6673 		} else if (unlikely(chg > add)) {
6674 			/*
6675 			 * pages in this range were added to the reserve
6676 			 * map between region_chg and region_add.  This
6677 			 * indicates a race with alloc_hugetlb_folio.  Adjust
6678 			 * the subpool and reserve counts modified above
6679 			 * based on the difference.
6680 			 */
6681 			long rsv_adjust;
6682 
6683 			/*
6684 			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
6685 			 * reference to h_cg->css. See comment below for detail.
6686 			 */
6687 			hugetlb_cgroup_uncharge_cgroup_rsvd(
6688 				hstate_index(h),
6689 				(chg - add) * pages_per_huge_page(h), h_cg);
6690 
6691 			rsv_adjust = hugepage_subpool_put_pages(spool,
6692 								chg - add);
6693 			hugetlb_acct_memory(h, -rsv_adjust);
6694 		} else if (h_cg) {
6695 			/*
6696 			 * The file_regions will hold their own reference to
6697 			 * h_cg->css. So we should release the reference held
6698 			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
6699 			 * done.
6700 			 */
6701 			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
6702 		}
6703 	}
6704 	return chg;
6705 
6706 out_put_pages:
6707 	spool_resv = chg - gbl_reserve;
6708 	if (spool_resv) {
6709 		/* put sub pool's reservation back, chg - gbl_reserve */
6710 		gbl_resv = hugepage_subpool_put_pages(spool, spool_resv);
6711 		/*
6712 		 * subpool's reserved pages can not be put back due to race,
6713 		 * return to hstate.
6714 		 */
6715 		hugetlb_acct_memory(h, -gbl_resv);
6716 	}
6717 	/* Restore used_hpages for pages that failed global reservation */
6718 	if (gbl_reserve && spool) {
6719 		unsigned long flags;
6720 
6721 		spin_lock_irqsave(&spool->lock, flags);
6722 		if (spool->max_hpages != -1)
6723 			spool->used_hpages -= gbl_reserve;
6724 		unlock_or_release_subpool(spool, flags);
6725 	}
6726 out_uncharge_cgroup:
6727 	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
6728 					    chg * pages_per_huge_page(h), h_cg);
6729 out_err:
6730 	if (!desc || vma_desc_test_flags(desc, VMA_MAYSHARE_BIT))
6731 		/* Only call region_abort if the region_chg succeeded but the
6732 		 * region_add failed or didn't run.
6733 		 */
6734 		if (chg >= 0 && add < 0)
6735 			region_abort(resv_map, from, to, regions_needed);
6736 	if (desc && is_vma_desc_resv_set(desc, HPAGE_RESV_OWNER)) {
6737 		kref_put(&resv_map->refs, resv_map_release);
6738 		set_vma_desc_resv_map(desc, NULL);
6739 	}
6740 	return err;
6741 }
6742 
hugetlb_unreserve_pages(struct inode * inode,long start,long end,long freed)6743 long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
6744 								long freed)
6745 {
6746 	struct hstate *h = hstate_inode(inode);
6747 	struct resv_map *resv_map = inode_resv_map(inode);
6748 	long chg = 0;
6749 	struct hugepage_subpool *spool = subpool_inode(inode);
6750 	long gbl_reserve;
6751 
6752 	/*
6753 	 * Since this routine can be called in the evict inode path for all
6754 	 * hugetlbfs inodes, resv_map could be NULL.
6755 	 */
6756 	if (resv_map) {
6757 		chg = region_del(resv_map, start, end);
6758 		/*
6759 		 * region_del() can fail in the rare case where a region
6760 		 * must be split and another region descriptor can not be
6761 		 * allocated.  If end == LONG_MAX, it will not fail.
6762 		 */
6763 		if (chg < 0)
6764 			return chg;
6765 	}
6766 
6767 	spin_lock(&inode->i_lock);
6768 	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
6769 	spin_unlock(&inode->i_lock);
6770 
6771 	/*
6772 	 * If the subpool has a minimum size, the number of global
6773 	 * reservations to be released may be adjusted.
6774 	 *
6775 	 * Note that !resv_map implies freed == 0. So (chg - freed)
6776 	 * won't go negative.
6777 	 */
6778 	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
6779 	hugetlb_acct_memory(h, -gbl_reserve);
6780 
6781 	return 0;
6782 }
6783 
6784 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
page_table_shareable(struct vm_area_struct * svma,struct vm_area_struct * vma,unsigned long addr,pgoff_t idx)6785 static unsigned long page_table_shareable(struct vm_area_struct *svma,
6786 				struct vm_area_struct *vma,
6787 				unsigned long addr, pgoff_t idx)
6788 {
6789 	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
6790 				svma->vm_start;
6791 	unsigned long sbase = saddr & PUD_MASK;
6792 	unsigned long s_end = sbase + PUD_SIZE;
6793 
6794 	/* Allow segments to share if only one is marked locked */
6795 	vm_flags_t vm_flags = vma->vm_flags & ~VM_LOCKED_MASK;
6796 	vm_flags_t svm_flags = svma->vm_flags & ~VM_LOCKED_MASK;
6797 
6798 	/*
6799 	 * match the virtual addresses, permission and the alignment of the
6800 	 * page table page.
6801 	 *
6802 	 * Also, vma_lock (vm_private_data) is required for sharing.
6803 	 */
6804 	if (pmd_index(addr) != pmd_index(saddr) ||
6805 	    vm_flags != svm_flags ||
6806 	    !range_in_vma(svma, sbase, s_end) ||
6807 	    !svma->vm_private_data)
6808 		return 0;
6809 
6810 	return saddr;
6811 }
6812 
want_pmd_share(struct vm_area_struct * vma,unsigned long addr)6813 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6814 {
6815 	unsigned long start = addr & PUD_MASK;
6816 	unsigned long end = start + PUD_SIZE;
6817 
6818 #ifdef CONFIG_USERFAULTFD
6819 	if (uffd_disable_huge_pmd_share(vma))
6820 		return false;
6821 #endif
6822 	/*
6823 	 * check on proper vm_flags and page table alignment
6824 	 */
6825 	if (!(vma->vm_flags & VM_MAYSHARE))
6826 		return false;
6827 	if (!vma->vm_private_data)	/* vma lock required for sharing */
6828 		return false;
6829 	if (!range_in_vma(vma, start, end))
6830 		return false;
6831 	return true;
6832 }
6833 
6834 /*
6835  * Determine if start,end range within vma could be mapped by shared pmd.
6836  * If yes, adjust start and end to cover range associated with possible
6837  * shared pmd mappings.
6838  */
adjust_range_if_pmd_sharing_possible(struct vm_area_struct * vma,unsigned long * start,unsigned long * end)6839 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6840 				unsigned long *start, unsigned long *end)
6841 {
6842 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6843 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6844 
6845 	/*
6846 	 * vma needs to span at least one aligned PUD size, and the range
6847 	 * must be at least partially within in.
6848 	 */
6849 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6850 		(*end <= v_start) || (*start >= v_end))
6851 		return;
6852 
6853 	/* Extend the range to be PUD aligned for a worst case scenario */
6854 	if (*start > v_start)
6855 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6856 
6857 	if (*end < v_end)
6858 		*end = ALIGN(*end, PUD_SIZE);
6859 }
6860 
6861 /*
6862  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
6863  * and returns the corresponding pte. While this is not necessary for the
6864  * !shared pmd case because we can allocate the pmd later as well, it makes the
6865  * code much cleaner. pmd allocation is essential for the shared case because
6866  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6867  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
6868  * bad pmd for sharing.
6869  */
huge_pmd_share(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long addr,pud_t * pud)6870 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6871 		      unsigned long addr, pud_t *pud)
6872 {
6873 	struct address_space *mapping = vma->vm_file->f_mapping;
6874 	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
6875 			vma->vm_pgoff;
6876 	struct vm_area_struct *svma;
6877 	unsigned long saddr;
6878 	pte_t *spte = NULL;
6879 	pte_t *pte;
6880 
6881 	i_mmap_lock_read(mapping);
6882 	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
6883 		if (svma == vma)
6884 			continue;
6885 
6886 		saddr = page_table_shareable(svma, vma, addr, idx);
6887 		if (saddr) {
6888 			spte = hugetlb_walk(svma, saddr,
6889 					    vma_mmu_pagesize(svma));
6890 			if (spte) {
6891 				ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
6892 				break;
6893 			}
6894 		}
6895 	}
6896 
6897 	if (!spte)
6898 		goto out;
6899 
6900 	spin_lock(&mm->page_table_lock);
6901 	if (pud_none(*pud)) {
6902 		pud_populate(mm, pud,
6903 				(pmd_t *)((unsigned long)spte & PAGE_MASK));
6904 		mm_inc_nr_pmds(mm);
6905 	} else {
6906 		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
6907 	}
6908 	spin_unlock(&mm->page_table_lock);
6909 out:
6910 	pte = (pte_t *)pmd_alloc(mm, pud, addr);
6911 	i_mmap_unlock_read(mapping);
6912 	return pte;
6913 }
6914 
6915 /**
6916  * huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
6917  * @tlb: the current mmu_gather.
6918  * @vma: the vma covering the pmd table.
6919  * @addr: the address we are trying to unshare.
6920  * @ptep: pointer into the (pmd) page table.
6921  *
6922  * Called with the page table lock held, the i_mmap_rwsem held in write mode
6923  * and the hugetlb vma lock held in write mode.
6924  *
6925  * Note: The caller must call huge_pmd_unshare_flush() before dropping the
6926  * i_mmap_rwsem.
6927  *
6928  * Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
6929  *	    was not a shared PMD table.
6930  */
huge_pmd_unshare(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long addr,pte_t * ptep)6931 int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
6932 		unsigned long addr, pte_t *ptep)
6933 {
6934 	unsigned long sz = huge_page_size(hstate_vma(vma));
6935 	struct mm_struct *mm = vma->vm_mm;
6936 	pgd_t *pgd = pgd_offset(mm, addr);
6937 	p4d_t *p4d = p4d_offset(pgd, addr);
6938 	pud_t *pud = pud_offset(p4d, addr);
6939 
6940 	if (sz != PMD_SIZE)
6941 		return 0;
6942 	if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
6943 		return 0;
6944 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6945 	hugetlb_vma_assert_locked(vma);
6946 	pud_clear(pud);
6947 
6948 	tlb_unshare_pmd_ptdesc(tlb, virt_to_ptdesc(ptep), addr);
6949 
6950 	mm_dec_nr_pmds(mm);
6951 	return 1;
6952 }
6953 
6954 /*
6955  * huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
6956  * @tlb: the current mmu_gather.
6957  * @vma: the vma covering the pmd table.
6958  *
6959  * Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
6960  * unsharing with concurrent page table walkers.
6961  *
6962  * This function must be called after a sequence of huge_pmd_unshare()
6963  * calls while still holding the i_mmap_rwsem.
6964  */
huge_pmd_unshare_flush(struct mmu_gather * tlb,struct vm_area_struct * vma)6965 void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
6966 {
6967 	/*
6968 	 * We must synchronize page table unsharing such that nobody will
6969 	 * try reusing a previously-shared page table while it might still
6970 	 * be in use by previous sharers (TLB, GUP_fast).
6971 	 */
6972 	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
6973 
6974 	tlb_flush_unshared_tables(tlb);
6975 }
6976 
6977 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6978 
huge_pmd_share(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long addr,pud_t * pud)6979 pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
6980 		      unsigned long addr, pud_t *pud)
6981 {
6982 	return NULL;
6983 }
6984 
huge_pmd_unshare(struct mmu_gather * tlb,struct vm_area_struct * vma,unsigned long addr,pte_t * ptep)6985 int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
6986 		unsigned long addr, pte_t *ptep)
6987 {
6988 	return 0;
6989 }
6990 
huge_pmd_unshare_flush(struct mmu_gather * tlb,struct vm_area_struct * vma)6991 void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
6992 {
6993 }
6994 
adjust_range_if_pmd_sharing_possible(struct vm_area_struct * vma,unsigned long * start,unsigned long * end)6995 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6996 				unsigned long *start, unsigned long *end)
6997 {
6998 }
6999 
want_pmd_share(struct vm_area_struct * vma,unsigned long addr)7000 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
7001 {
7002 	return false;
7003 }
7004 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
7005 
7006 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
huge_pte_alloc(struct mm_struct * mm,struct vm_area_struct * vma,unsigned long addr,unsigned long sz)7007 pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
7008 			unsigned long addr, unsigned long sz)
7009 {
7010 	pgd_t *pgd;
7011 	p4d_t *p4d;
7012 	pud_t *pud;
7013 	pte_t *pte = NULL;
7014 
7015 	pgd = pgd_offset(mm, addr);
7016 	p4d = p4d_alloc(mm, pgd, addr);
7017 	if (!p4d)
7018 		return NULL;
7019 	pud = pud_alloc(mm, p4d, addr);
7020 	if (pud) {
7021 		if (sz == PUD_SIZE) {
7022 			pte = (pte_t *)pud;
7023 		} else {
7024 			BUG_ON(sz != PMD_SIZE);
7025 			if (want_pmd_share(vma, addr) && pud_none(*pud))
7026 				pte = huge_pmd_share(mm, vma, addr, pud);
7027 			else
7028 				pte = (pte_t *)pmd_alloc(mm, pud, addr);
7029 		}
7030 	}
7031 
7032 	if (pte) {
7033 		pte_t pteval = ptep_get_lockless(pte);
7034 
7035 		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
7036 	}
7037 
7038 	return pte;
7039 }
7040 
7041 /*
7042  * huge_pte_offset() - Walk the page table to resolve the hugepage
7043  * entry at address @addr
7044  *
7045  * Return: Pointer to page table entry (PUD or PMD) for
7046  * address @addr, or NULL if a !p*d_present() entry is encountered and the
7047  * size @sz doesn't match the hugepage size at this level of the page
7048  * table.
7049  */
huge_pte_offset(struct mm_struct * mm,unsigned long addr,unsigned long sz)7050 pte_t *huge_pte_offset(struct mm_struct *mm,
7051 		       unsigned long addr, unsigned long sz)
7052 {
7053 	pgd_t *pgd;
7054 	p4d_t *p4d;
7055 	pud_t *pud;
7056 	pmd_t *pmd;
7057 
7058 	pgd = pgd_offset(mm, addr);
7059 	if (!pgd_present(*pgd))
7060 		return NULL;
7061 	p4d = p4d_offset(pgd, addr);
7062 	if (!p4d_present(*p4d))
7063 		return NULL;
7064 
7065 	pud = pud_offset(p4d, addr);
7066 	if (sz == PUD_SIZE)
7067 		/* must be pud huge, non-present or none */
7068 		return (pte_t *)pud;
7069 	if (!pud_present(*pud))
7070 		return NULL;
7071 	/* must have a valid entry and size to go further */
7072 
7073 	pmd = pmd_offset(pud, addr);
7074 	/* must be pmd huge, non-present or none */
7075 	return (pte_t *)pmd;
7076 }
7077 
7078 /*
7079  * Return a mask that can be used to update an address to the last huge
7080  * page in a page table page mapping size.  Used to skip non-present
7081  * page table entries when linearly scanning address ranges.  Architectures
7082  * with unique huge page to page table relationships can define their own
7083  * version of this routine.
7084  */
hugetlb_mask_last_page(struct hstate * h)7085 unsigned long hugetlb_mask_last_page(struct hstate *h)
7086 {
7087 	unsigned long hp_size = huge_page_size(h);
7088 
7089 	if (hp_size == PUD_SIZE)
7090 		return P4D_SIZE - PUD_SIZE;
7091 	else if (hp_size == PMD_SIZE)
7092 		return PUD_SIZE - PMD_SIZE;
7093 	else
7094 		return 0UL;
7095 }
7096 
7097 #else
7098 
7099 /* See description above.  Architectures can provide their own version. */
hugetlb_mask_last_page(struct hstate * h)7100 __weak unsigned long hugetlb_mask_last_page(struct hstate *h)
7101 {
7102 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
7103 	if (huge_page_size(h) == PMD_SIZE)
7104 		return PUD_SIZE - PMD_SIZE;
7105 #endif
7106 	return 0UL;
7107 }
7108 
7109 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7110 
7111 /**
7112  * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
7113  * @folio: the folio to isolate
7114  * @list: the list to add the folio to on success
7115  *
7116  * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
7117  * isolated/non-migratable, and moving it from the active list to the
7118  * given list.
7119  *
7120  * Isolation will fail if @folio is not an allocated hugetlb folio, or if
7121  * it is already isolated/non-migratable.
7122  *
7123  * On success, an additional folio reference is taken that must be dropped
7124  * using folio_putback_hugetlb() to undo the isolation.
7125  *
7126  * Return: True if isolation worked, otherwise False.
7127  */
folio_isolate_hugetlb(struct folio * folio,struct list_head * list)7128 bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
7129 {
7130 	bool ret = true;
7131 
7132 	spin_lock_irq(&hugetlb_lock);
7133 	if (!folio_test_hugetlb(folio) ||
7134 	    !folio_test_hugetlb_migratable(folio) ||
7135 	    !folio_try_get(folio)) {
7136 		ret = false;
7137 		goto unlock;
7138 	}
7139 	folio_clear_hugetlb_migratable(folio);
7140 	list_move_tail(&folio->lru, list);
7141 unlock:
7142 	spin_unlock_irq(&hugetlb_lock);
7143 	return ret;
7144 }
7145 
get_hwpoison_hugetlb_folio(struct folio * folio,bool * hugetlb,bool unpoison)7146 int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
7147 {
7148 	int ret = 0;
7149 
7150 	*hugetlb = false;
7151 	spin_lock_irq(&hugetlb_lock);
7152 	if (folio_test_hugetlb(folio)) {
7153 		*hugetlb = true;
7154 		if (folio_test_hugetlb_freed(folio))
7155 			ret = 0;
7156 		else if (folio_test_hugetlb_migratable(folio) || unpoison)
7157 			ret = folio_try_get(folio);
7158 		else
7159 			ret = -EBUSY;
7160 	}
7161 	spin_unlock_irq(&hugetlb_lock);
7162 	return ret;
7163 }
7164 
get_huge_page_for_hwpoison(unsigned long pfn,int flags,bool * migratable_cleared)7165 int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
7166 				bool *migratable_cleared)
7167 {
7168 	int ret;
7169 
7170 	spin_lock_irq(&hugetlb_lock);
7171 	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
7172 	spin_unlock_irq(&hugetlb_lock);
7173 	return ret;
7174 }
7175 
7176 /**
7177  * folio_putback_hugetlb - unisolate a hugetlb folio
7178  * @folio: the isolated hugetlb folio
7179  *
7180  * Putback/un-isolate the hugetlb folio that was previous isolated using
7181  * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
7182  * back onto the active list.
7183  *
7184  * Will drop the additional folio reference obtained through
7185  * folio_isolate_hugetlb().
7186  */
folio_putback_hugetlb(struct folio * folio)7187 void folio_putback_hugetlb(struct folio *folio)
7188 {
7189 	spin_lock_irq(&hugetlb_lock);
7190 	folio_set_hugetlb_migratable(folio);
7191 	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
7192 	spin_unlock_irq(&hugetlb_lock);
7193 	folio_put(folio);
7194 }
7195 
move_hugetlb_state(struct folio * old_folio,struct folio * new_folio,int reason)7196 void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
7197 {
7198 	struct hstate *h = folio_hstate(old_folio);
7199 
7200 	hugetlb_cgroup_migrate(old_folio, new_folio);
7201 	folio_set_owner_migrate_reason(new_folio, reason);
7202 
7203 	/*
7204 	 * transfer temporary state of the new hugetlb folio. This is
7205 	 * reverse to other transitions because the newpage is going to
7206 	 * be final while the old one will be freed so it takes over
7207 	 * the temporary status.
7208 	 *
7209 	 * Also note that we have to transfer the per-node surplus state
7210 	 * here as well otherwise the global surplus count will not match
7211 	 * the per-node's.
7212 	 */
7213 	if (folio_test_hugetlb_temporary(new_folio)) {
7214 		int old_nid = folio_nid(old_folio);
7215 		int new_nid = folio_nid(new_folio);
7216 
7217 		folio_set_hugetlb_temporary(old_folio);
7218 		folio_clear_hugetlb_temporary(new_folio);
7219 
7220 
7221 		/*
7222 		 * There is no need to transfer the per-node surplus state
7223 		 * when we do not cross the node.
7224 		 */
7225 		if (new_nid == old_nid)
7226 			return;
7227 		spin_lock_irq(&hugetlb_lock);
7228 		if (h->surplus_huge_pages_node[old_nid]) {
7229 			h->surplus_huge_pages_node[old_nid]--;
7230 			h->surplus_huge_pages_node[new_nid]++;
7231 		}
7232 		spin_unlock_irq(&hugetlb_lock);
7233 	}
7234 
7235 	/*
7236 	 * Our old folio is isolated and has "migratable" cleared until it
7237 	 * is putback. As migration succeeded, set the new folio "migratable"
7238 	 * and add it to the active list.
7239 	 */
7240 	spin_lock_irq(&hugetlb_lock);
7241 	folio_set_hugetlb_migratable(new_folio);
7242 	list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
7243 	spin_unlock_irq(&hugetlb_lock);
7244 }
7245 
7246 /*
7247  * If @take_locks is false, the caller must ensure that no concurrent page table
7248  * access can happen (except for gup_fast() and hardware page walks).
7249  * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
7250  * concurrent page fault handling) and the file rmap lock.
7251  */
hugetlb_unshare_pmds(struct vm_area_struct * vma,unsigned long start,unsigned long end,bool take_locks)7252 static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
7253 				   unsigned long start,
7254 				   unsigned long end,
7255 				   bool take_locks)
7256 {
7257 	struct hstate *h = hstate_vma(vma);
7258 	unsigned long sz = huge_page_size(h);
7259 	struct mm_struct *mm = vma->vm_mm;
7260 	struct mmu_notifier_range range;
7261 	struct mmu_gather tlb;
7262 	unsigned long address;
7263 	spinlock_t *ptl;
7264 	pte_t *ptep;
7265 
7266 	if (!(vma->vm_flags & VM_MAYSHARE))
7267 		return;
7268 
7269 	if (start >= end)
7270 		return;
7271 
7272 	flush_cache_range(vma, start, end);
7273 	tlb_gather_mmu_vma(&tlb, vma);
7274 
7275 	/*
7276 	 * No need to call adjust_range_if_pmd_sharing_possible(), because
7277 	 * we have already done the PUD_SIZE alignment.
7278 	 */
7279 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
7280 				start, end);
7281 	mmu_notifier_invalidate_range_start(&range);
7282 	if (take_locks) {
7283 		hugetlb_vma_lock_write(vma);
7284 		i_mmap_lock_write(vma->vm_file->f_mapping);
7285 	} else {
7286 		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
7287 	}
7288 	for (address = start; address < end; address += PUD_SIZE) {
7289 		ptep = hugetlb_walk(vma, address, sz);
7290 		if (!ptep)
7291 			continue;
7292 		ptl = huge_pte_lock(h, mm, ptep);
7293 		huge_pmd_unshare(&tlb, vma, address, ptep);
7294 		spin_unlock(ptl);
7295 	}
7296 	huge_pmd_unshare_flush(&tlb, vma);
7297 	if (take_locks) {
7298 		i_mmap_unlock_write(vma->vm_file->f_mapping);
7299 		hugetlb_vma_unlock_write(vma);
7300 	}
7301 	/*
7302 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
7303 	 * Documentation/mm/mmu_notifier.rst.
7304 	 */
7305 	mmu_notifier_invalidate_range_end(&range);
7306 	tlb_finish_mmu(&tlb);
7307 }
7308 
7309 /*
7310  * This function will unconditionally remove all the shared pmd pgtable entries
7311  * within the specific vma for a hugetlbfs memory range.
7312  */
hugetlb_unshare_all_pmds(struct vm_area_struct * vma)7313 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7314 {
7315 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7316 			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
7317 			/* take_locks = */ true);
7318 }
7319 
7320 /*
7321  * For hugetlb, mremap() is an odd edge case - while the VMA copying is
7322  * performed, we permit both the old and new VMAs to reference the same
7323  * reservation.
7324  *
7325  * We fix this up after the operation succeeds, or if a newly allocated VMA
7326  * is closed as a result of a failure to allocate memory.
7327  */
fixup_hugetlb_reservations(struct vm_area_struct * vma)7328 void fixup_hugetlb_reservations(struct vm_area_struct *vma)
7329 {
7330 	if (is_vm_hugetlb_page(vma))
7331 		clear_vma_resv_huge_pages(vma);
7332 }
7333