xref: /linux/mm/hugetlb.c (revision c8d0beedf0da06652432354882b95c33a4cb7cfe)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Generic hugetlb support.
4  * (C) Nadia Yvette Chambers, April 2004
5  */
6 #include <linux/list.h>
7 #include <linux/init.h>
8 #include <linux/mm.h>
9 #include <linux/seq_file.h>
10 #include <linux/highmem.h>
11 #include <linux/mmu_notifier.h>
12 #include <linux/nodemask.h>
13 #include <linux/pagemap.h>
14 #include <linux/mempolicy.h>
15 #include <linux/compiler.h>
16 #include <linux/cpumask.h>
17 #include <linux/cpuset.h>
18 #include <linux/mutex.h>
19 #include <linux/memblock.h>
20 #include <linux/minmax.h>
21 #include <linux/slab.h>
22 #include <linux/sched/mm.h>
23 #include <linux/mmdebug.h>
24 #include <linux/sched/signal.h>
25 #include <linux/rmap.h>
26 #include <linux/string_choices.h>
27 #include <linux/string_helpers.h>
28 #include <linux/swap.h>
29 #include <linux/leafops.h>
30 #include <linux/jhash.h>
31 #include <linux/numa.h>
32 #include <linux/llist.h>
33 #include <linux/cma.h>
34 #include <linux/migrate.h>
35 #include <linux/nospec.h>
36 #include <linux/delayacct.h>
37 #include <linux/memory.h>
38 #include <linux/mm_inline.h>
39 #include <linux/padata.h>
40 #include <linux/pgalloc.h>
41 
42 #include <asm/page.h>
43 #include <asm/tlb.h>
44 #include <asm/setup.h>
45 
46 #include <linux/io.h>
47 #include <linux/node.h>
48 #include <linux/page_owner.h>
49 #include "internal.h"
50 #include "hugetlb_vmemmap.h"
51 #include "hugetlb_cma.h"
52 #include "hugetlb_internal.h"
53 #include <linux/page-isolation.h>
54 
/* Count of hstates populated in hstates[] below. */
int hugetlb_max_hstate __read_mostly;
/* Index into hstates[] of the default huge page size. */
unsigned int default_hstate_idx;
struct hstate hstates[HUGE_MAX_HSTATE];

/* Nodes eligible for boot-time huge page allocation. */
__initdata nodemask_t hugetlb_bootmem_nodes;
/* Per-node lists of huge pages allocated from bootmem. */
__initdata struct list_head huge_boot_pages[MAX_NUMNODES];
/* Per-hstate count of invalid boot-time pages (init-time bookkeeping). */
static unsigned long hstate_boot_nrinvalid[HUGE_MAX_HSTATE] __initdata;

/*
 * Due to ordering constraints across the init code for various
 * architectures, hugetlb hstate cmdline parameters can't simply
 * be early_param. early_param might call the setup function
 * before valid hugetlb page sizes are determined, leading to
 * incorrect rejection of valid hugepagesz= options.
 *
 * So, record the parameters early and consume them whenever the
 * init code is ready for them, by calling hugetlb_parse_params().
 */

/* one (hugepagesz=,hugepages=) pair per hstate, one default_hugepagesz */
#define HUGE_MAX_CMDLINE_ARGS	(2 * HUGE_MAX_HSTATE + 1)
/* A recorded-but-not-yet-consumed command line option. */
struct hugetlb_cmdline {
	char *val;			/* raw option value string */
	int (*setup)(char *val);	/* handler to run at parse time */
};

/* for command line parsing */
static struct hstate * __initdata parsed_hstate;
static unsigned long __initdata default_hstate_max_huge_pages;
static bool __initdata parsed_valid_hugepagesz = true;
static bool __initdata parsed_default_hugepagesz;
static unsigned int default_hugepages_in_node[MAX_NUMNODES] __initdata;
static unsigned long hugepage_allocation_threads __initdata;

/* Storage for copies of the recorded option strings. */
static char hstate_cmdline_buf[COMMAND_LINE_SIZE] __initdata;
static int hstate_cmdline_index __initdata;
/* Deferred (value, handler) pairs; consumed by hugetlb_parse_params(). */
static struct hugetlb_cmdline hugetlb_params[HUGE_MAX_CMDLINE_ARGS] __initdata;
static int hugetlb_param_index __initdata;
static __init int hugetlb_add_param(char *s, int (*setup)(char *val));
static __init void hugetlb_parse_params(void);

/*
 * Declare an early_param whose handler merely records the option for
 * later processing (see the ordering-constraints comment above).
 */
#define hugetlb_early_param(str, func) \
static __init int func##args(char *s) \
{ \
	return hugetlb_add_param(s, func); \
} \
early_param(str, func##args)

/*
 * Protects updates to hugepage_freelists, hugepage_activelist, nr_huge_pages,
 * free_huge_pages, and surplus_huge_pages.
 */
__cacheline_aligned_in_smp DEFINE_SPINLOCK(hugetlb_lock);

/*
 * Serializes faults on the same logical page.  This is used to
 * prevent spurious OOMs when the hugepage pool is fully utilized.
 */
static int num_fault_mutexes __ro_after_init;
struct mutex *hugetlb_fault_mutex_table __ro_after_init;

/* Forward declaration */
static int hugetlb_acct_memory(struct hstate *h, long delta);
static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, bool take_locks);
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
123 
subpool_is_free(struct hugepage_subpool * spool)124 static inline bool subpool_is_free(struct hugepage_subpool *spool)
125 {
126 	if (spool->count)
127 		return false;
128 	if (spool->max_hpages != -1)
129 		return spool->used_hpages == 0;
130 	if (spool->min_hpages != -1)
131 		return spool->rsv_hpages == spool->min_hpages;
132 
133 	return true;
134 }
135 
/*
 * Drop spool->lock (irq state saved in @irq_flags by the caller) and,
 * if the subpool is no longer in use, return any minimum-size
 * reservation to the global pool and free the subpool.  Callers must
 * not touch @spool after this returns, as it may have been freed.
 */
static inline void unlock_or_release_subpool(struct hugepage_subpool *spool,
						unsigned long irq_flags)
{
	spin_unlock_irqrestore(&spool->lock, irq_flags);

	/* If no pages are used, and no other handles to the subpool
	 * remain, give up any reservations based on minimum size and
	 * free the subpool */
	if (subpool_is_free(spool)) {
		if (spool->min_hpages != -1)
			/* Negative delta releases the reserved pages. */
			hugetlb_acct_memory(spool->hstate,
						-spool->min_hpages);
		kfree(spool);
	}
}
151 
/*
 * Allocate and initialise a subpool for hstate @h.
 * @max_hpages: cap on pages the subpool may consume, or -1 for no limit.
 * @min_hpages: pages reserved up front from the global pool, or -1 for
 *              no minimum.
 *
 * Returns the subpool with a single reference held, or NULL if the
 * allocation fails or the minimum reservation cannot be charged.
 */
struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages,
						long min_hpages)
{
	struct hugepage_subpool *spool;

	spool = kzalloc_obj(*spool);
	if (!spool)
		return NULL;

	spin_lock_init(&spool->lock);
	spool->count = 1;
	spool->max_hpages = max_hpages;
	spool->hstate = h;
	spool->min_hpages = min_hpages;

	/* Charge the minimum reservation against the global pool now. */
	if (min_hpages != -1 && hugetlb_acct_memory(h, min_hpages)) {
		kfree(spool);
		return NULL;
	}
	spool->rsv_hpages = min_hpages;

	return spool;
}
175 
/*
 * Drop one reference on @spool.  When the last reference goes away the
 * subpool (and its minimum reservation) is released.
 */
void hugepage_put_subpool(struct hugepage_subpool *spool)
{
	unsigned long flags;

	spin_lock_irqsave(&spool->lock, flags);
	BUG_ON(!spool->count);
	spool->count--;
	/* Drops the lock and may free spool; don't touch it afterwards. */
	unlock_or_release_subpool(spool, flags);
}
185 
/*
 * Subpool accounting for allocating and reserving pages.
 * Return -ENOMEM if there are not enough resources to satisfy the
 * request.  Otherwise, return the number of pages by which the
 * global pools must be adjusted (upward).  The returned value may
 * only be different than the passed value (delta) in the case where
 * a subpool minimum size must be maintained.
 */
static long hugepage_subpool_get_pages(struct hugepage_subpool *spool,
				      long delta)
{
	long ret = delta;

	/* No subpool means no limits: the whole delta hits the global pool. */
	if (!spool)
		return ret;

	spin_lock_irq(&spool->lock);

	if (spool->max_hpages != -1) {		/* maximum size accounting */
		if ((spool->used_hpages + delta) <= spool->max_hpages)
			spool->used_hpages += delta;
		else {
			ret = -ENOMEM;
			goto unlock_ret;
		}
	}

	/* minimum size accounting */
	if (spool->min_hpages != -1 && spool->rsv_hpages) {
		if (delta > spool->rsv_hpages) {
			/*
			 * Asking for more reserves than those already taken on
			 * behalf of subpool.  Return difference.
			 */
			ret = delta - spool->rsv_hpages;
			spool->rsv_hpages = 0;
		} else {
			ret = 0;	/* reserves already accounted for */
			spool->rsv_hpages -= delta;
		}
	}

unlock_ret:
	spin_unlock_irq(&spool->lock);
	return ret;
}
232 
/*
 * Subpool accounting for freeing and unreserving pages.
 * Return the number of global page reservations that must be dropped.
 * The return value may only be different than the passed value (delta)
 * in the case where a subpool minimum size must be maintained.
 */
static long hugepage_subpool_put_pages(struct hugepage_subpool *spool,
				       long delta)
{
	long ret = delta;
	unsigned long flags;

	/* No subpool: every freed page drops a global reservation. */
	if (!spool)
		return delta;

	spin_lock_irqsave(&spool->lock, flags);

	if (spool->max_hpages != -1)		/* maximum size accounting */
		spool->used_hpages -= delta;

	 /* minimum size accounting */
	if (spool->min_hpages != -1 && spool->used_hpages < spool->min_hpages) {
		/*
		 * Pages returned below the minimum refill the subpool's
		 * reservation rather than being handed back globally;
		 * only the excess (if any) is reported to the caller.
		 */
		if (spool->rsv_hpages + delta <= spool->min_hpages)
			ret = 0;
		else
			ret = spool->rsv_hpages + delta - spool->min_hpages;

		spool->rsv_hpages += delta;
		if (spool->rsv_hpages > spool->min_hpages)
			spool->rsv_hpages = spool->min_hpages;
	}

	/*
	 * If hugetlbfs_put_super couldn't free spool due to an outstanding
	 * quota reference, free it now.
	 */
	unlock_or_release_subpool(spool, flags);

	return ret;
}
273 
subpool_vma(struct vm_area_struct * vma)274 static inline struct hugepage_subpool *subpool_vma(struct vm_area_struct *vma)
275 {
276 	return subpool_inode(file_inode(vma->vm_file));
277 }
278 
279 /*
280  * hugetlb vma_lock helper routines
281  */
hugetlb_vma_lock_read(struct vm_area_struct * vma)282 void hugetlb_vma_lock_read(struct vm_area_struct *vma)
283 {
284 	if (__vma_shareable_lock(vma)) {
285 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
286 
287 		down_read(&vma_lock->rw_sema);
288 	} else if (__vma_private_lock(vma)) {
289 		struct resv_map *resv_map = vma_resv_map(vma);
290 
291 		down_read(&resv_map->rw_sema);
292 	}
293 }
294 
hugetlb_vma_unlock_read(struct vm_area_struct * vma)295 void hugetlb_vma_unlock_read(struct vm_area_struct *vma)
296 {
297 	if (__vma_shareable_lock(vma)) {
298 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
299 
300 		up_read(&vma_lock->rw_sema);
301 	} else if (__vma_private_lock(vma)) {
302 		struct resv_map *resv_map = vma_resv_map(vma);
303 
304 		up_read(&resv_map->rw_sema);
305 	}
306 }
307 
hugetlb_vma_lock_write(struct vm_area_struct * vma)308 void hugetlb_vma_lock_write(struct vm_area_struct *vma)
309 {
310 	if (__vma_shareable_lock(vma)) {
311 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
312 
313 		down_write(&vma_lock->rw_sema);
314 	} else if (__vma_private_lock(vma)) {
315 		struct resv_map *resv_map = vma_resv_map(vma);
316 
317 		down_write(&resv_map->rw_sema);
318 	}
319 }
320 
hugetlb_vma_unlock_write(struct vm_area_struct * vma)321 void hugetlb_vma_unlock_write(struct vm_area_struct *vma)
322 {
323 	if (__vma_shareable_lock(vma)) {
324 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
325 
326 		up_write(&vma_lock->rw_sema);
327 	} else if (__vma_private_lock(vma)) {
328 		struct resv_map *resv_map = vma_resv_map(vma);
329 
330 		up_write(&resv_map->rw_sema);
331 	}
332 }
333 
hugetlb_vma_trylock_write(struct vm_area_struct * vma)334 int hugetlb_vma_trylock_write(struct vm_area_struct *vma)
335 {
336 
337 	if (__vma_shareable_lock(vma)) {
338 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
339 
340 		return down_write_trylock(&vma_lock->rw_sema);
341 	} else if (__vma_private_lock(vma)) {
342 		struct resv_map *resv_map = vma_resv_map(vma);
343 
344 		return down_write_trylock(&resv_map->rw_sema);
345 	}
346 
347 	return 1;
348 }
349 
hugetlb_vma_assert_locked(struct vm_area_struct * vma)350 void hugetlb_vma_assert_locked(struct vm_area_struct *vma)
351 {
352 	if (__vma_shareable_lock(vma)) {
353 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
354 
355 		lockdep_assert_held(&vma_lock->rw_sema);
356 	} else if (__vma_private_lock(vma)) {
357 		struct resv_map *resv_map = vma_resv_map(vma);
358 
359 		lockdep_assert_held(&resv_map->rw_sema);
360 	}
361 }
362 
/*
 * Final kref release callback for a hugetlb_vma_lock: frees the
 * structure.  Invoked via kref_put() when the last reference drops.
 */
void hugetlb_vma_lock_release(struct kref *kref)
{
	struct hugetlb_vma_lock *vma_lock = container_of(kref,
			struct hugetlb_vma_lock, refs);

	kfree(vma_lock);
}
370 
/*
 * Detach @vma_lock from its vma, release the write lock and drop a
 * reference.  Caller must hold vma_lock->rw_sema for write; the
 * ordering (clear pointers, then up_write, then kref_put) matters.
 */
static void __hugetlb_vma_unlock_write_put(struct hugetlb_vma_lock *vma_lock)
{
	struct vm_area_struct *vma = vma_lock->vma;

	/*
	 * vma_lock structure may or not be released as a result of put,
	 * it certainly will no longer be attached to vma so clear pointer.
	 * Semaphore synchronizes access to vma_lock->vma field.
	 */
	vma_lock->vma = NULL;
	vma->vm_private_data = NULL;
	up_write(&vma_lock->rw_sema);
	kref_put(&vma_lock->refs, hugetlb_vma_lock_release);
}
385 
__hugetlb_vma_unlock_write_free(struct vm_area_struct * vma)386 static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma)
387 {
388 	if (__vma_shareable_lock(vma)) {
389 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
390 
391 		__hugetlb_vma_unlock_write_put(vma_lock);
392 	} else if (__vma_private_lock(vma)) {
393 		struct resv_map *resv_map = vma_resv_map(vma);
394 
395 		/* no free for anon vmas, but still need to unlock */
396 		up_write(&resv_map->rw_sema);
397 	}
398 }
399 
hugetlb_vma_lock_free(struct vm_area_struct * vma)400 static void hugetlb_vma_lock_free(struct vm_area_struct *vma)
401 {
402 	/*
403 	 * Only present in sharable vmas.
404 	 */
405 	if (!vma || !__vma_shareable_lock(vma))
406 		return;
407 
408 	if (vma->vm_private_data) {
409 		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;
410 
411 		down_write(&vma_lock->rw_sema);
412 		__hugetlb_vma_unlock_write_put(vma_lock);
413 	}
414 }
415 
/*
 * vma specific semaphore used for pmd sharing and fault/truncation
 * synchronization
 *
 * Returns 0 on success or when the vma needs no lock; -EINVAL when a
 * lock is already attached or the allocation fails.
 */
int hugetlb_vma_lock_alloc(struct vm_area_struct *vma)
{
	struct hugetlb_vma_lock *vma_lock;

	/* Only establish in (flags) sharable vmas */
	if (!vma || !(vma->vm_flags & VM_MAYSHARE))
		return 0;

	/* Should never get here with non-NULL vm_private_data */
	if (vma->vm_private_data)
		return -EINVAL;

	vma_lock = kmalloc_obj(*vma_lock);
	if (!vma_lock) {
		/*
		 * If we can not allocate structure, then vma can not
		 * participate in pmd sharing.  This is only a possible
		 * performance enhancement and memory saving issue.
		 * However, the lock is also used to synchronize page
		 * faults with truncation.  If the lock is not present,
		 * unlikely races could leave pages in a file past i_size
		 * until the file is removed.  Warn in the unlikely case of
		 * allocation failure.
		 */
		pr_warn_once("HugeTLB: unable to allocate vma specific lock\n");
		return -EINVAL;
	}

	kref_init(&vma_lock->refs);
	init_rwsem(&vma_lock->rw_sema);
	vma_lock->vma = vma;
	vma->vm_private_data = vma_lock;

	return 0;
}
455 
/* Helper that removes a struct file_region from the resv_map cache and returns
 * it for use.  Callers hold resv->lock and must have ensured the cache
 * is non-empty (see the VM_BUG_ON); the entry is initialised to [from, to).
 */
static struct file_region *
get_file_region_entry_from_cache(struct resv_map *resv, long from, long to)
{
	struct file_region *nrg;

	VM_BUG_ON(resv->region_cache_count <= 0);

	resv->region_cache_count--;
	nrg = list_first_entry(&resv->region_cache, struct file_region, link);
	list_del(&nrg->link);

	nrg->from = from;
	nrg->to = to;

	return nrg;
}
475 
/*
 * Copy the cgroup uncharge bookkeeping from @rg to @nrg, taking an
 * extra css reference so each file_region holds its own.  A no-op
 * when CONFIG_CGROUP_HUGETLB is disabled.
 */
static void copy_hugetlb_cgroup_uncharge_info(struct file_region *nrg,
					      struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	nrg->reservation_counter = rg->reservation_counter;
	nrg->css = rg->css;
	if (rg->css)
		css_get(rg->css);
#endif
}
486 
/* Helper that records hugetlb_cgroup uncharge info.  With @h_cg NULL the
 * region's uncharge info is cleared instead.  No-op without
 * CONFIG_CGROUP_HUGETLB.
 */
static void record_hugetlb_cgroup_uncharge_info(struct hugetlb_cgroup *h_cg,
						struct hstate *h,
						struct resv_map *resv,
						struct file_region *nrg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg) {
		nrg->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		nrg->css = &h_cg->css;
		/*
		 * The caller will hold exactly one h_cg->css reference for the
		 * whole contiguous reservation region. But this area might be
		 * scattered when there are already some file_regions reside in
		 * it. As a result, many file_regions may share only one css
		 * reference. In order to ensure that one file_region must hold
		 * exactly one h_cg->css reference, we should do css_get for
		 * each file_region and leave the reference held by caller
		 * untouched.
		 */
		css_get(&h_cg->css);
		if (!resv->pages_per_hpage)
			resv->pages_per_hpage = pages_per_huge_page(h);
		/* pages_per_hpage should be the same for all entries in
		 * a resv_map.
		 */
		VM_BUG_ON(resv->pages_per_hpage != pages_per_huge_page(h));
	} else {
		nrg->reservation_counter = NULL;
		nrg->css = NULL;
	}
#endif
}
521 
/* Drop the css reference held by @rg's uncharge info, if any. */
static void put_uncharge_info(struct file_region *rg)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (rg->css)
		css_put(rg->css);
#endif
}
529 
/*
 * Two regions may only be merged when they would be uncharged against
 * the same counter and css.  Without CONFIG_CGROUP_HUGETLB all regions
 * are considered equivalent.
 */
static bool has_same_uncharge_info(struct file_region *rg,
				   struct file_region *org)
{
#ifdef CONFIG_CGROUP_HUGETLB
	return rg->reservation_counter == org->reservation_counter &&
	       rg->css == org->css;

#else
	return true;
#endif
}
541 
/*
 * Merge @rg with its neighbours in resv->regions when they are exactly
 * adjacent and carry identical uncharge info.  @rg may be freed here;
 * callers must not use it afterwards.
 */
static void coalesce_file_region(struct resv_map *resv, struct file_region *rg)
{
	struct file_region *nrg, *prg;

	prg = list_prev_entry(rg, link);
	if (&prg->link != &resv->regions && prg->to == rg->from &&
	    has_same_uncharge_info(prg, rg)) {
		/* Extend the predecessor over rg's range and drop rg. */
		prg->to = rg->to;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);

		rg = prg;
	}

	nrg = list_next_entry(rg, link);
	if (&nrg->link != &resv->regions && nrg->from == rg->to &&
	    has_same_uncharge_info(nrg, rg)) {
		/* Extend the successor backwards over rg's range. */
		nrg->from = rg->from;

		list_del(&rg->link);
		put_uncharge_info(rg);
		kfree(rg);
	}
}
568 
569 static inline long
hugetlb_resv_map_add(struct resv_map * map,struct list_head * rg,long from,long to,struct hstate * h,struct hugetlb_cgroup * cg,long * regions_needed)570 hugetlb_resv_map_add(struct resv_map *map, struct list_head *rg, long from,
571 		     long to, struct hstate *h, struct hugetlb_cgroup *cg,
572 		     long *regions_needed)
573 {
574 	struct file_region *nrg;
575 
576 	if (!regions_needed) {
577 		nrg = get_file_region_entry_from_cache(map, from, to);
578 		record_hugetlb_cgroup_uncharge_info(cg, h, map, nrg);
579 		list_add(&nrg->link, rg);
580 		coalesce_file_region(map, nrg);
581 	} else {
582 		*regions_needed += 1;
583 	}
584 
585 	return to - from;
586 }
587 
/*
 * Must be called with resv->lock held.
 *
 * Calling this with regions_needed != NULL will count the number of pages
 * to be added but will not modify the linked list. And regions_needed will
 * indicate the number of file_regions needed in the cache to carry out to add
 * the regions for this range.
 *
 * Returns the number of pages in [f, t) that are not already covered
 * by existing file_regions (i.e. the pages that would be newly added).
 */
static long add_reservation_in_range(struct resv_map *resv, long f, long t,
				     struct hugetlb_cgroup *h_cg,
				     struct hstate *h, long *regions_needed)
{
	long add = 0;
	struct list_head *head = &resv->regions;
	long last_accounted_offset = f;
	struct file_region *iter, *trg = NULL;
	struct list_head *rg = NULL;

	if (regions_needed)
		*regions_needed = 0;

	/* In this loop, we essentially handle an entry for the range
	 * [last_accounted_offset, iter->from), at every iteration, with some
	 * bounds checking.
	 */
	list_for_each_entry_safe(iter, trg, head, link) {
		/* Skip irrelevant regions that start before our range. */
		if (iter->from < f) {
			/* If this region ends after the last accounted offset,
			 * then we need to update last_accounted_offset.
			 */
			if (iter->to > last_accounted_offset)
				last_accounted_offset = iter->to;
			continue;
		}

		/* When we find a region that starts beyond our range, we've
		 * finished.  Remember the insertion point for the tail entry.
		 */
		if (iter->from >= t) {
			rg = iter->link.prev;
			break;
		}

		/* Add an entry for last_accounted_offset -> iter->from, and
		 * update last_accounted_offset.
		 */
		if (iter->from > last_accounted_offset)
			add += hugetlb_resv_map_add(resv, iter->link.prev,
						    last_accounted_offset,
						    iter->from, h, h_cg,
						    regions_needed);

		last_accounted_offset = iter->to;
	}

	/* Handle the case where our range extends beyond
	 * last_accounted_offset.
	 */
	if (!rg)
		rg = head->prev;
	if (last_accounted_offset < t)
		add += hugetlb_resv_map_add(resv, rg, last_accounted_offset,
					    t, h, h_cg, regions_needed);

	return add;
}
655 
/* Must be called with resv->lock acquired. Will drop lock to allocate entries.
 *
 * On success returns 0 with resv->lock re-acquired.  On allocation
 * failure returns -ENOMEM with resv->lock *dropped* (the OOM path is
 * reached after the unlock); callers returning -ENOMEM therefore must
 * not unlock again.
 */
static int allocate_file_region_entries(struct resv_map *resv,
					int regions_needed)
	__must_hold(&resv->lock)
{
	LIST_HEAD(allocated_regions);
	int to_allocate = 0, i = 0;
	struct file_region *trg = NULL, *rg = NULL;

	VM_BUG_ON(regions_needed < 0);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * the number of in progress add operations plus regions_needed.
	 *
	 * This is a while loop because when we drop the lock, some other call
	 * to region_add or region_del may have consumed some region_entries,
	 * so we keep looping here until we finally have enough entries for
	 * (adds_in_progress + regions_needed).
	 */
	while (resv->region_cache_count <
	       (resv->adds_in_progress + regions_needed)) {
		to_allocate = resv->adds_in_progress + regions_needed -
			      resv->region_cache_count;

		/* At this point, we should have enough entries in the cache
		 * for all the existing adds_in_progress. We should only be
		 * needing to allocate for regions_needed.
		 */
		VM_BUG_ON(resv->region_cache_count < resv->adds_in_progress);

		spin_unlock(&resv->lock);
		for (i = 0; i < to_allocate; i++) {
			trg = kmalloc_obj(*trg);
			if (!trg)
				goto out_of_memory;
			list_add(&trg->link, &allocated_regions);
		}

		spin_lock(&resv->lock);

		list_splice(&allocated_regions, &resv->region_cache);
		resv->region_cache_count += to_allocate;
	}

	return 0;

out_of_memory:
	/* Reached with resv->lock NOT held; free the partial batch. */
	list_for_each_entry_safe(rg, trg, &allocated_regions, link) {
		list_del(&rg->link);
		kfree(rg);
	}
	return -ENOMEM;
}
711 
/*
 * Add the huge page range represented by [f, t) to the reserve
 * map.  Regions will be taken from the cache to fill in this range.
 * Sufficient regions should exist in the cache due to the previous
 * call to region_chg with the same range, but in some cases the cache will not
 * have sufficient entries due to races with other code doing region_add or
 * region_del.  The extra needed entries will be allocated.
 *
 * regions_needed is the out value provided by a previous call to region_chg.
 *
 * Return the number of new huge pages added to the map.  This number is greater
 * than or equal to zero.  If file_region entries needed to be allocated for
 * this operation and we were not able to allocate, it returns -ENOMEM.
 * region_add of regions of length 1 never allocate file_regions and cannot
 * fail; region_chg will always allocate at least 1 entry and a region_add for
 * 1 page will only require at most 1 entry.
 */
static long region_add(struct resv_map *resv, long f, long t,
		       long in_regions_needed, struct hstate *h,
		       struct hugetlb_cgroup *h_cg)
{
	long add = 0, actual_regions_needed = 0;

	spin_lock(&resv->lock);
retry:

	/* Count how many regions are actually needed to execute this add. */
	add_reservation_in_range(resv, f, t, NULL, NULL,
				 &actual_regions_needed);

	/*
	 * Check for sufficient descriptors in the cache to accommodate
	 * this add operation. Note that actual_regions_needed may be greater
	 * than in_regions_needed, as the resv_map may have been modified since
	 * the region_chg call. In this case, we need to make sure that we
	 * allocate extra entries, such that we have enough for all the
	 * existing adds_in_progress, plus the excess needed for this
	 * operation.
	 */
	if (actual_regions_needed > in_regions_needed &&
	    resv->region_cache_count <
		    resv->adds_in_progress +
			    (actual_regions_needed - in_regions_needed)) {
		/* region_add operation of range 1 should never need to
		 * allocate file_region entries.
		 */
		VM_BUG_ON(t - f <= 1);

		if (allocate_file_region_entries(
			    resv, actual_regions_needed - in_regions_needed)) {
			/* allocate_file_region_entries() dropped resv->lock
			 * on failure, so return without unlocking. */
			return -ENOMEM;
		}

		goto retry;
	}

	/* Second pass actually inserts the regions. */
	add = add_reservation_in_range(resv, f, t, h_cg, h, NULL);

	resv->adds_in_progress -= in_regions_needed;

	spin_unlock(&resv->lock);
	return add;
}
775 
/*
 * Examine the existing reserve map and determine how many
 * huge pages in the specified range [f, t) are NOT currently
 * represented.  This routine is called before a subsequent
 * call to region_add that will actually modify the reserve
 * map to add the specified range [f, t).  region_chg does
 * not change the number of huge pages represented by the
 * map.  A number of new file_region structures is added to the cache as a
 * placeholder, for the subsequent region_add call to use. At least 1
 * file_region structure is added.
 *
 * out_regions_needed is the number of regions added to the
 * resv->adds_in_progress.  This value needs to be provided to a follow up call
 * to region_add or region_abort for proper accounting.
 *
 * Returns the number of huge pages that need to be added to the existing
 * reservation map for the range [f, t).  This number is greater or equal to
 * zero.  -ENOMEM is returned if a new file_region structure or cache entry
 * is needed and can not be allocated.
 */
static long region_chg(struct resv_map *resv, long f, long t,
		       long *out_regions_needed)
{
	long chg = 0;

	spin_lock(&resv->lock);

	/* Count how many hugepages in this range are NOT represented. */
	chg = add_reservation_in_range(resv, f, t, NULL, NULL,
				       out_regions_needed);

	/* Reserve at least one placeholder entry for the follow-up add. */
	if (*out_regions_needed == 0)
		*out_regions_needed = 1;

	if (allocate_file_region_entries(resv, *out_regions_needed))
		/* Lock was dropped by the failed allocation path. */
		return -ENOMEM;

	resv->adds_in_progress += *out_regions_needed;

	spin_unlock(&resv->lock);
	return chg;
}
818 
/*
 * Abort the in progress add operation.  The adds_in_progress field
 * of the resv_map keeps track of the operations in progress between
 * calls to region_chg and region_add.  Operations are sometimes
 * aborted after the call to region_chg.  In such cases, region_abort
 * is called to decrement the adds_in_progress counter. regions_needed
 * is the value returned by the region_chg call, it is used to decrement
 * the adds_in_progress counter.
 *
 * NOTE: The range arguments [f, t) are not needed or used in this
 * routine.  They are kept to make reading the calling code easier as
 * arguments will match the associated region_chg call.
 */
static void region_abort(struct resv_map *resv, long f, long t,
			 long regions_needed)
{
	spin_lock(&resv->lock);
	/* region_chg guaranteed at least one cached entry. */
	VM_BUG_ON(!resv->region_cache_count);
	resv->adds_in_progress -= regions_needed;
	spin_unlock(&resv->lock);
}
840 
/*
 * Delete the specified range [f, t) from the reserve map.  If the
 * t parameter is LONG_MAX, this indicates that ALL regions after f
 * should be deleted.  Locate the regions which intersect [f, t)
 * and either trim, delete or split the existing regions.
 *
 * Returns the number of huge pages deleted from the reserve map.
 * In the normal case, the return value is zero or more.  In the
 * case where a region must be split, a new region descriptor must
 * be allocated.  If the allocation fails, -ENOMEM will be returned.
 * NOTE: If the parameter t == LONG_MAX, then we will never split
 * a region and possibly return -ENOMEM.  Callers specifying
 * t == LONG_MAX do not need to check for -ENOMEM error.
 */
static long region_del(struct resv_map *resv, long f, long t)
{
	struct list_head *head = &resv->regions;
	struct file_region *rg, *trg;
	struct file_region *nrg = NULL;	/* spare entry for a split */
	long del = 0;

retry:
	spin_lock(&resv->lock);
	list_for_each_entry_safe(rg, trg, head, link) {
		/*
		 * Skip regions before the range to be deleted.  file_region
		 * ranges are normally of the form [from, to).  However, there
		 * may be a "placeholder" entry in the map which is of the form
		 * (from, to) with from == to.  Check for placeholder entries
		 * at the beginning of the range to be deleted.
		 */
		if (rg->to <= f && (rg->to != rg->from || rg->to != f))
			continue;

		if (rg->from >= t)
			break;

		if (f > rg->from && t < rg->to) { /* Must split region */
			/*
			 * Check for an entry in the cache before dropping
			 * lock and attempting allocation.
			 */
			if (!nrg &&
			    resv->region_cache_count > resv->adds_in_progress) {
				nrg = list_first_entry(&resv->region_cache,
							struct file_region,
							link);
				list_del(&nrg->link);
				resv->region_cache_count--;
			}

			if (!nrg) {
				/* Allocate without the lock, then restart
				 * the scan since the map may have changed. */
				spin_unlock(&resv->lock);
				nrg = kmalloc_obj(*nrg);
				if (!nrg)
					return -ENOMEM;
				goto retry;
			}

			del += t - f;
			hugetlb_cgroup_uncharge_file_region(
				resv, rg, t - f, false);

			/* New entry for end of split region */
			nrg->from = t;
			nrg->to = rg->to;

			copy_hugetlb_cgroup_uncharge_info(nrg, rg);

			INIT_LIST_HEAD(&nrg->link);

			/* Original entry is trimmed */
			rg->to = f;

			list_add(&nrg->link, &rg->link);
			nrg = NULL;
			/* A split means [f, t) was strictly inside this
			 * region; no later region can intersect it. */
			break;
		}

		if (f <= rg->from && t >= rg->to) { /* Remove entire region */
			del += rg->to - rg->from;
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - rg->from, true);
			list_del(&rg->link);
			kfree(rg);
			continue;
		}

		if (f <= rg->from) {	/* Trim beginning of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    t - rg->from, false);

			del += t - rg->from;
			rg->from = t;
		} else {		/* Trim end of region */
			hugetlb_cgroup_uncharge_file_region(resv, rg,
							    rg->to - f, false);

			del += rg->to - f;
			rg->to = f;
		}
	}

	spin_unlock(&resv->lock);
	/* Free the spare entry if it was never consumed by a split. */
	kfree(nrg);
	return del;
}
948 
949 /*
950  * A rare out of memory error was encountered which prevented removal of
951  * the reserve map region for a page.  The huge page itself was free'ed
952  * and removed from the page cache.  This routine will adjust the subpool
953  * usage count, and the global reserve count if needed.  By incrementing
954  * these counts, the reserve map entry which could not be deleted will
955  * appear as a "reserved" entry instead of simply dangling with incorrect
956  * counts.
957  */
hugetlb_fix_reserve_counts(struct inode * inode)958 void hugetlb_fix_reserve_counts(struct inode *inode)
959 {
960 	struct hugepage_subpool *spool = subpool_inode(inode);
961 	long rsv_adjust;
962 	bool reserved = false;
963 
964 	rsv_adjust = hugepage_subpool_get_pages(spool, 1);
965 	if (rsv_adjust > 0) {
966 		struct hstate *h = hstate_inode(inode);
967 
968 		if (!hugetlb_acct_memory(h, 1))
969 			reserved = true;
970 	} else if (!rsv_adjust) {
971 		reserved = true;
972 	}
973 
974 	if (!reserved)
975 		pr_warn("hugetlb: Huge Page Reserved count may go negative.\n");
976 }
977 
978 /*
979  * Count and return the number of huge pages in the reserve map
980  * that intersect with the range [f, t).
981  */
region_count(struct resv_map * resv,long f,long t)982 static long region_count(struct resv_map *resv, long f, long t)
983 {
984 	struct list_head *head = &resv->regions;
985 	struct file_region *rg;
986 	long chg = 0;
987 
988 	spin_lock(&resv->lock);
989 	/* Locate each segment we overlap with, and count that overlap. */
990 	list_for_each_entry(rg, head, link) {
991 		long seg_from;
992 		long seg_to;
993 
994 		if (rg->to <= f)
995 			continue;
996 		if (rg->from >= t)
997 			break;
998 
999 		seg_from = max(rg->from, f);
1000 		seg_to = min(rg->to, t);
1001 
1002 		chg += seg_to - seg_from;
1003 	}
1004 	spin_unlock(&resv->lock);
1005 
1006 	return chg;
1007 }
1008 
1009 /*
1010  * Convert the address within this vma to the page offset within
1011  * the mapping, huge page units here.
1012  */
vma_hugecache_offset(struct hstate * h,struct vm_area_struct * vma,unsigned long address)1013 static pgoff_t vma_hugecache_offset(struct hstate *h,
1014 			struct vm_area_struct *vma, unsigned long address)
1015 {
1016 	return ((address - vma->vm_start) >> huge_page_shift(h)) +
1017 			(vma->vm_pgoff >> huge_page_order(h));
1018 }
1019 
1020 /*
1021  * Flags for MAP_PRIVATE reservations.  These are stored in the bottom
1022  * bits of the reservation map pointer, which are always clear due to
1023  * alignment.
1024  */
1025 #define HPAGE_RESV_OWNER    (1UL << 0)
1026 #define HPAGE_RESV_UNMAPPED (1UL << 1)
1027 #define HPAGE_RESV_MASK (HPAGE_RESV_OWNER | HPAGE_RESV_UNMAPPED)
1028 
1029 /*
1030  * These helpers are used to track how many pages are reserved for
1031  * faults in a MAP_PRIVATE mapping. Only the process that called mmap()
1032  * is guaranteed to have their future faults succeed.
1033  *
1034  * With the exception of hugetlb_dup_vma_private() which is called at fork(),
1035  * the reserve counters are updated with the hugetlb_lock held. It is safe
1036  * to reset the VMA at fork() time as it is not in use yet and there is no
1037  * chance of the global counters getting corrupted as a result of the values.
1038  *
1039  * The private mapping reservation is represented in a subtly different
1040  * manner to a shared mapping.  A shared mapping has a region map associated
1041  * with the underlying file, this region map represents the backing file
1042  * pages which have ever had a reservation assigned which this persists even
1043  * after the page is instantiated.  A private mapping has a region map
1044  * associated with the original mmap which is attached to all VMAs which
1045  * reference it, this region map represents those offsets which have consumed
1046  * reservation ie. where pages have been instantiated.
1047  */
get_vma_private_data(struct vm_area_struct * vma)1048 static unsigned long get_vma_private_data(struct vm_area_struct *vma)
1049 {
1050 	return (unsigned long)vma->vm_private_data;
1051 }
1052 
set_vma_private_data(struct vm_area_struct * vma,unsigned long value)1053 static void set_vma_private_data(struct vm_area_struct *vma,
1054 							unsigned long value)
1055 {
1056 	vma->vm_private_data = (void *)value;
1057 }
1058 
/*
 * Record (or clear, when @h_cg/@h are NULL) the cgroup information
 * needed to uncharge reservations tracked by @resv_map.
 */
static void
resv_map_set_hugetlb_cgroup_uncharge_info(struct resv_map *resv_map,
					  struct hugetlb_cgroup *h_cg,
					  struct hstate *h)
{
#ifdef CONFIG_CGROUP_HUGETLB
	if (h_cg && h) {
		resv_map->reservation_counter =
			&h_cg->rsvd_hugepage[hstate_index(h)];
		resv_map->pages_per_hpage = pages_per_huge_page(h);
		resv_map->css = &h_cg->css;
	} else {
		resv_map->reservation_counter = NULL;
		resv_map->pages_per_hpage = 0;
		resv_map->css = NULL;
	}
#endif
}
1077 
resv_map_alloc(void)1078 struct resv_map *resv_map_alloc(void)
1079 {
1080 	struct resv_map *resv_map = kmalloc_obj(*resv_map);
1081 	struct file_region *rg = kmalloc_obj(*rg);
1082 
1083 	if (!resv_map || !rg) {
1084 		kfree(resv_map);
1085 		kfree(rg);
1086 		return NULL;
1087 	}
1088 
1089 	kref_init(&resv_map->refs);
1090 	spin_lock_init(&resv_map->lock);
1091 	INIT_LIST_HEAD(&resv_map->regions);
1092 	init_rwsem(&resv_map->rw_sema);
1093 
1094 	resv_map->adds_in_progress = 0;
1095 	/*
1096 	 * Initialize these to 0. On shared mappings, 0's here indicate these
1097 	 * fields don't do cgroup accounting. On private mappings, these will be
1098 	 * re-initialized to the proper values, to indicate that hugetlb cgroup
1099 	 * reservations are to be un-charged from here.
1100 	 */
1101 	resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, NULL, NULL);
1102 
1103 	INIT_LIST_HEAD(&resv_map->region_cache);
1104 	list_add(&rg->link, &resv_map->region_cache);
1105 	resv_map->region_cache_count = 1;
1106 
1107 	return resv_map;
1108 }
1109 
resv_map_release(struct kref * ref)1110 void resv_map_release(struct kref *ref)
1111 {
1112 	struct resv_map *resv_map = container_of(ref, struct resv_map, refs);
1113 	struct list_head *head = &resv_map->region_cache;
1114 	struct file_region *rg, *trg;
1115 
1116 	/* Clear out any active regions before we release the map. */
1117 	region_del(resv_map, 0, LONG_MAX);
1118 
1119 	/* ... and any entries left in the cache */
1120 	list_for_each_entry_safe(rg, trg, head, link) {
1121 		list_del(&rg->link);
1122 		kfree(rg);
1123 	}
1124 
1125 	VM_BUG_ON(resv_map->adds_in_progress);
1126 
1127 	kfree(resv_map);
1128 }
1129 
/* Return the reservation map hanging off a hugetlbfs inode. */
static inline struct resv_map *inode_resv_map(struct inode *inode)
{
	return HUGETLBFS_I(inode)->resv_map;
}
1134 
vma_resv_map(struct vm_area_struct * vma)1135 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1136 {
1137 	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
1138 	if (vma->vm_flags & VM_MAYSHARE) {
1139 		struct address_space *mapping = vma->vm_file->f_mapping;
1140 		struct inode *inode = mapping->host;
1141 
1142 		return inode_resv_map(inode);
1143 
1144 	} else {
1145 		return (struct resv_map *)(get_vma_private_data(vma) &
1146 							~HPAGE_RESV_MASK);
1147 	}
1148 }
1149 
/* OR HPAGE_RESV_* flag bits into a private mapping's resv word. */
static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
{
	VM_WARN_ON_ONCE_VMA(!is_vm_hugetlb_page(vma), vma);
	/* Flags live in vm_private_data; only valid for private mappings. */
	VM_WARN_ON_ONCE_VMA(vma->vm_flags & VM_MAYSHARE, vma);

	set_vma_private_data(vma, get_vma_private_data(vma) | flags);
}
1157 
/* Attach @map as the reservation map on a VMA descriptor (private only). */
static void set_vma_desc_resv_map(struct vm_area_desc *desc, struct resv_map *map)
{
	VM_WARN_ON_ONCE(!is_vma_hugetlb_flags(&desc->vma_flags));
	VM_WARN_ON_ONCE(vma_desc_test(desc, VMA_MAYSHARE_BIT));

	desc->private_data = map;
}
1165 
/* OR HPAGE_RESV_* flag bits into a VMA descriptor's resv word. */
static void set_vma_desc_resv_flags(struct vm_area_desc *desc, unsigned long flags)
{
	VM_WARN_ON_ONCE(!is_vma_hugetlb_flags(&desc->vma_flags));
	VM_WARN_ON_ONCE(vma_desc_test(desc, VMA_MAYSHARE_BIT));

	desc->private_data = (void *)((unsigned long)desc->private_data | flags);
}
1173 
/* Test whether an HPAGE_RESV_* flag bit is set on @vma. */
static int is_vma_resv_set(struct vm_area_struct *vma, unsigned long flag)
{
	unsigned long priv;

	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);

	priv = get_vma_private_data(vma);
	return (priv & flag) != 0;
}
1180 
/* Test whether an HPAGE_RESV_* flag bit is set on a VMA descriptor. */
static bool is_vma_desc_resv_set(struct vm_area_desc *desc, unsigned long flag)
{
	VM_WARN_ON_ONCE(!is_vma_hugetlb_flags(&desc->vma_flags));

	return ((unsigned long)desc->private_data) & flag;
}
1187 
/*
 * True when this VMA carries a private reservation map it owns:
 * MAP_PRIVATE, a non-NULL map pointer (after masking flag bits) and
 * the HPAGE_RESV_OWNER flag set.
 */
bool __vma_private_lock(struct vm_area_struct *vma)
{
	return !(vma->vm_flags & VM_MAYSHARE) &&
		get_vma_private_data(vma) & ~HPAGE_RESV_MASK &&
		is_vma_resv_set(vma, HPAGE_RESV_OWNER);
}
1194 
/* Reset vm_private_data on a hugetlb VMA being duplicated (fork/mremap). */
void hugetlb_dup_vma_private(struct vm_area_struct *vma)
{
	VM_BUG_ON_VMA(!is_vm_hugetlb_page(vma), vma);
	/*
	 * Clear vm_private_data
	 * - For shared mappings this is a per-vma semaphore that may be
	 *   allocated in a subsequent call to hugetlb_vm_op_open.
	 *   Before clearing, make sure pointer is not associated with vma
	 *   as this will leak the structure.  This is the case when called
	 *   via clear_vma_resv_huge_pages() and hugetlb_vm_op_open has already
	 *   been called to allocate a new structure.
	 * - For MAP_PRIVATE mappings, this is the reserve map which does
	 *   not apply to children.  Faults generated by the children are
	 *   not guaranteed to succeed, even if read-only.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock && vma_lock->vma != vma)
			vma->vm_private_data = NULL;
	} else {
		vma->vm_private_data = NULL;
	}
}
1219 
1220 /*
1221  * Reset and decrement one ref on hugepage private reservation.
1222  * Called with mm->mmap_lock writer semaphore held.
1223  * This function should be only used by mremap and operate on
1224  * same sized vma. It should never come here with last ref on the
1225  * reservation.
1226  */
void clear_vma_resv_huge_pages(struct vm_area_struct *vma)
{
	/*
	 * Clear the old hugetlb private page reservation.
	 * It has already been transferred to new_vma.
	 *
	 * During a mremap() operation of a hugetlb vma we call move_vma()
	 * which copies vma into new_vma and unmaps vma. After the copy
	 * operation both new_vma and vma share a reference to the resv_map
	 * struct, and at that point vma is about to be unmapped. We don't
	 * want to return the reservation to the pool at unmap of vma because
	 * the reservation still lives on in new_vma, so simply decrement the
	 * ref here and remove the resv_map reference from this vma.
	 */
	struct resv_map *reservations = vma_resv_map(vma);

	if (reservations && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		/* Drop only this vma's ref; new_vma keeps its own reference. */
		resv_map_put_hugetlb_cgroup_uncharge_info(reservations);
		kref_put(&reservations->refs, resv_map_release);
	}

	hugetlb_dup_vma_private(vma);
}
1250 
/*
 * Put a free, unreferenced hugetlb folio on its node's free list and
 * update the free-page accounting.  Caller must hold hugetlb_lock.
 */
static void enqueue_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	int nid = folio_nid(folio);

	lockdep_assert_held(&hugetlb_lock);
	/* Only folios with a zero refcount may sit on the free lists. */
	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);

	list_move(&folio->lru, &h->hugepage_freelists[nid]);
	h->free_huge_pages++;
	h->free_huge_pages_node[nid]++;
	folio_set_hugetlb_freed(folio);
}
1263 
/*
 * Take a usable folio off @nid's free list: skip folios that are
 * hwpoisoned, sit on an isolated pageblock, or are not long-term
 * pinnable while the task has PF_MEMALLOC_PIN set.  On success the
 * folio is moved to the active list with one reference held.
 * Returns NULL if no suitable folio exists.  Caller holds hugetlb_lock.
 */
static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
								int nid)
{
	struct folio *folio;
	bool pin = !!(current->flags & PF_MEMALLOC_PIN);

	lockdep_assert_held(&hugetlb_lock);
	list_for_each_entry(folio, &h->hugepage_freelists[nid], lru) {
		if (pin && !folio_is_longterm_pinnable(folio))
			continue;

		if (folio_test_hwpoison(folio))
			continue;

		if (is_migrate_isolate_page(&folio->page))
			continue;

		list_move(&folio->lru, &h->hugepage_activelist);
		/* Free folios have a frozen (zero) refcount; give it one. */
		folio_ref_unfreeze(folio, 1);
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
		return folio;
	}

	return NULL;
}
1291 
/*
 * Walk the zonelist for @nid (restricted by @nmask and the caller's
 * cpuset) and dequeue a free folio from the first node that has one.
 * Restarts the walk if the cpuset's mems_allowed changed underneath us.
 */
static struct folio *dequeue_hugetlb_folio_nodemask(struct hstate *h, gfp_t gfp_mask,
							int nid, nodemask_t *nmask)
{
	unsigned int cpuset_mems_cookie;
	struct zonelist *zonelist;
	struct zone *zone;
	struct zoneref *z;
	int node = NUMA_NO_NODE;

	/* 'nid' should not be NUMA_NO_NODE. Try to catch any misuse of it and rectifiy. */
	if (nid == NUMA_NO_NODE)
		nid = numa_node_id();

	zonelist = node_zonelist(nid, gfp_mask);

retry_cpuset:
	cpuset_mems_cookie = read_mems_allowed_begin();
	for_each_zone_zonelist_nodemask(zone, z, zonelist, gfp_zone(gfp_mask), nmask) {
		struct folio *folio;

		if (!cpuset_zone_allowed(zone, gfp_mask))
			continue;
		/*
		 * no need to ask again on the same node. Pool is node rather than
		 * zone aware
		 */
		if (zone_to_nid(zone) == node)
			continue;
		node = zone_to_nid(zone);

		folio = dequeue_hugetlb_folio_node_exact(h, node);
		if (folio)
			return folio;
	}
	if (unlikely(read_mems_allowed_retry(cpuset_mems_cookie)))
		goto retry_cpuset;

	return NULL;
}
1331 
available_huge_pages(struct hstate * h)1332 static unsigned long available_huge_pages(struct hstate *h)
1333 {
1334 	return h->free_huge_pages - h->resv_huge_pages;
1335 }
1336 
dequeue_hugetlb_folio_vma(struct hstate * h,struct vm_area_struct * vma,unsigned long address,long gbl_chg)1337 static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
1338 				struct vm_area_struct *vma,
1339 				unsigned long address, long gbl_chg)
1340 {
1341 	struct folio *folio = NULL;
1342 	struct mempolicy *mpol;
1343 	gfp_t gfp_mask;
1344 	nodemask_t *nodemask;
1345 	int nid;
1346 
1347 	/*
1348 	 * gbl_chg==1 means the allocation requires a new page that was not
1349 	 * reserved before.  Making sure there's at least one free page.
1350 	 */
1351 	if (gbl_chg && !available_huge_pages(h))
1352 		goto err;
1353 
1354 	gfp_mask = htlb_alloc_mask(h);
1355 	nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
1356 
1357 	if (mpol_is_preferred_many(mpol)) {
1358 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1359 							nid, nodemask);
1360 
1361 		/* Fallback to all nodes if page==NULL */
1362 		nodemask = NULL;
1363 	}
1364 
1365 	if (!folio)
1366 		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
1367 							nid, nodemask);
1368 
1369 	mpol_cond_put(mpol);
1370 	return folio;
1371 
1372 err:
1373 	return NULL;
1374 }
1375 
#if defined(CONFIG_ARCH_HAS_GIGANTIC_PAGE) && defined(CONFIG_CONTIG_ALLOC)
/*
 * Allocate a frozen (refcount 0) gigantic folio of 2^@order pages.
 * The hugetlb CMA area is tried first; unless CMA is configured as
 * the exclusive source, fall back to a generic contiguous allocation.
 */
static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask,
		int nid, nodemask_t *nodemask)
{
	struct folio *folio;

	folio = hugetlb_cma_alloc_frozen_folio(order, gfp_mask, nid, nodemask);
	if (folio)
		return folio;

	if (hugetlb_cma_exclusive_alloc())
		return NULL;

	folio = (struct folio *)alloc_contig_frozen_pages(1 << order, gfp_mask,
							  nid, nodemask);
	return folio;
}
#else /* !CONFIG_ARCH_HAS_GIGANTIC_PAGE || !CONFIG_CONTIG_ALLOC */
/* Gigantic page allocation is not possible in this configuration. */
static struct folio *alloc_gigantic_frozen_folio(int order, gfp_t gfp_mask, int nid,
					  nodemask_t *nodemask)
{
	return NULL;
}
#endif
1400 
1401 /*
1402  * Remove hugetlb folio from lists.
1403  * If vmemmap exists for the folio, clear the hugetlb flag so that the
1404  * folio appears as just a compound page.  Otherwise, wait until after
1405  * allocating vmemmap to clear the flag.
1406  *
1407  * Must be called with hugetlb lock held.
1408  */
void remove_hugetlb_folio(struct hstate *h, struct folio *folio,
			  bool adjust_surplus)
{
	int nid = folio_nid(folio);

	/* Cgroup charges must have been released before removal. */
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio(folio), folio);
	VM_BUG_ON_FOLIO(hugetlb_cgroup_from_folio_rsvd(folio), folio);

	lockdep_assert_held(&hugetlb_lock);
	/* Gigantic pages cannot be removed at runtime in this config. */
	if (hstate_is_gigantic_no_runtime(h))
		return;

	list_del(&folio->lru);

	if (folio_test_hugetlb_freed(folio)) {
		folio_clear_hugetlb_freed(folio);
		h->free_huge_pages--;
		h->free_huge_pages_node[nid]--;
	}
	if (adjust_surplus) {
		h->surplus_huge_pages--;
		h->surplus_huge_pages_node[nid]--;
	}

	/*
	 * We can only clear the hugetlb flag after allocating vmemmap
	 * pages.  Otherwise, someone (memory error handling) may try to write
	 * to tail struct pages.
	 */
	if (!folio_test_hugetlb_vmemmap_optimized(folio))
		__folio_clear_hugetlb(folio);

	h->nr_huge_pages--;
	h->nr_huge_pages_node[nid]--;
}
1444 
/*
 * Undo remove_hugetlb_folio(): put a still-vmemmap-optimized folio
 * back into the pool (as a surplus page when @adjust_surplus) after a
 * failed free.  Must be called with hugetlb_lock held.
 */
void add_hugetlb_folio(struct hstate *h, struct folio *folio,
		       bool adjust_surplus)
{
	int nid = folio_nid(folio);

	VM_BUG_ON_FOLIO(!folio_test_hugetlb_vmemmap_optimized(folio), folio);

	lockdep_assert_held(&hugetlb_lock);

	INIT_LIST_HEAD(&folio->lru);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[nid]++;

	if (adjust_surplus) {
		h->surplus_huge_pages++;
		h->surplus_huge_pages_node[nid]++;
	}

	__folio_set_hugetlb(folio);
	folio_change_private(folio, NULL);
	/*
	 * We have to set hugetlb_vmemmap_optimized again as above
	 * folio_change_private(folio, NULL) cleared it.
	 */
	folio_set_hugetlb_vmemmap_optimized(folio);

	arch_clear_hugetlb_flags(folio);
	enqueue_hugetlb_folio(h, folio);
}
1474 
/*
 * Final teardown of a hugetlb folio removed from the pool: restore
 * vmemmap if it was optimized, clear the hugetlb flag, propagate any
 * hwpoison state to the raw error pages, and free the frozen pages.
 * If vmemmap restoration fails the folio is re-added as a surplus page.
 */
static void __update_and_free_hugetlb_folio(struct hstate *h,
						struct folio *folio)
{
	bool clear_flag = folio_test_hugetlb_vmemmap_optimized(folio);

	/* Gigantic pages cannot be freed at runtime in this config. */
	if (hstate_is_gigantic_no_runtime(h))
		return;

	/*
	 * If we don't know which subpages are hwpoisoned, we can't free
	 * the hugepage, so it's leaked intentionally.
	 */
	if (folio_test_hugetlb_raw_hwp_unreliable(folio))
		return;

	/*
	 * If folio is not vmemmap optimized (!clear_flag), then the folio
	 * is no longer identified as a hugetlb page.  hugetlb_vmemmap_restore_folio
	 * can only be passed hugetlb pages and will BUG otherwise.
	 */
	if (clear_flag && hugetlb_vmemmap_restore_folio(h, folio)) {
		spin_lock_irq(&hugetlb_lock);
		/*
		 * If we cannot allocate vmemmap pages, just refuse to free the
		 * page and put the page back on the hugetlb free list and treat
		 * as a surplus page.
		 */
		add_hugetlb_folio(h, folio, true);
		spin_unlock_irq(&hugetlb_lock);
		return;
	}

	/*
	 * If vmemmap pages were allocated above, then we need to clear the
	 * hugetlb flag under the hugetlb lock.
	 */
	if (folio_test_hugetlb(folio)) {
		spin_lock_irq(&hugetlb_lock);
		__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Move PageHWPoison flag from head page to the raw error pages,
	 * which makes any healthy subpages reusable.
	 */
	if (unlikely(folio_test_hwpoison(folio)))
		folio_clear_hugetlb_hwpoison(folio);

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	if (folio_test_hugetlb_cma(folio))
		hugetlb_cma_free_frozen_folio(folio);
	else
		free_frozen_pages(&folio->page, folio_order(folio));
}
1530 
1531 /*
1532  * As update_and_free_hugetlb_folio() can be called under any context, so we cannot
1533  * use GFP_KERNEL to allocate vmemmap pages. However, we can defer the
1534  * actual freeing in a workqueue to prevent from using GFP_ATOMIC to allocate
1535  * the vmemmap pages.
1536  *
1537  * free_hpage_workfn() locklessly retrieves the linked list of pages to be
1538  * freed and frees them one-by-one. As the page->mapping pointer is going
1539  * to be cleared in free_hpage_workfn() anyway, it is reused as the llist_node
1540  * structure of a lockless linked list of huge pages to be freed.
1541  */
static LLIST_HEAD(hpage_freelist);

/* Workqueue handler: drain hpage_freelist, freeing each deferred folio. */
static void free_hpage_workfn(struct work_struct *work)
{
	struct llist_node *node;

	node = llist_del_all(&hpage_freelist);

	while (node) {
		struct folio *folio;
		struct hstate *h;

		/* The llist_node is overlaid on folio->mapping (see above). */
		folio = container_of((struct address_space **)node,
				     struct folio, mapping);
		node = node->next;
		folio->mapping = NULL;
		/*
		 * The VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio) in
		 * folio_hstate() is going to trigger because a previous call to
		 * remove_hugetlb_folio() will clear the hugetlb bit, so do
		 * not use folio_hstate() directly.
		 */
		h = size_to_hstate(folio_size(folio));

		__update_and_free_hugetlb_folio(h, folio);

		cond_resched();
	}
}
static DECLARE_WORK(free_hpage_work, free_hpage_workfn);
1572 
/*
 * Wait for any deferred hugetlb frees to complete.  Only needed when
 * vmemmap optimization may have pushed frees through the workqueue.
 */
static inline void flush_free_hpage_work(struct hstate *h)
{
	if (hugetlb_vmemmap_optimizable(h))
		flush_work(&free_hpage_work);
}
1578 
/*
 * Free @folio.  When @atomic and the folio is vmemmap-optimized, the
 * free is deferred to a workqueue because restoring vmemmap may sleep.
 */
static void update_and_free_hugetlb_folio(struct hstate *h, struct folio *folio,
				 bool atomic)
{
	if (!folio_test_hugetlb_vmemmap_optimized(folio) || !atomic) {
		__update_and_free_hugetlb_folio(h, folio);
		return;
	}

	/*
	 * Defer freeing to avoid using GFP_ATOMIC to allocate vmemmap pages.
	 *
	 * Only call schedule_work() if hpage_freelist is previously
	 * empty. Otherwise, schedule_work() had been called but the workfn
	 * hasn't retrieved the list yet.
	 */
	if (llist_add((struct llist_node *)&folio->mapping, &hpage_freelist))
		schedule_work(&free_hpage_work);
}
1597 
/*
 * Error path for update_and_free_pages_bulk(): a bulk vmemmap restore
 * returned an error (e.g. ENOMEM).  Free whatever can be freed so that
 * the caller's retry has a chance to make progress.
 */
static void bulk_vmemmap_restore_error(struct hstate *h,
					struct list_head *folio_list,
					struct list_head *non_hvo_folios)
{
	struct folio *folio, *t_folio;

	if (!list_empty(non_hvo_folios)) {
		/*
		 * Free any restored hugetlb pages so that restore of the
		 * entire list can be retried.
		 * The idea is that in the common case of ENOMEM errors freeing
		 * hugetlb pages with vmemmap we will free up memory so that we
		 * can allocate vmemmap for more hugetlb pages.
		 */
		list_for_each_entry_safe(folio, t_folio, non_hvo_folios, lru) {
			list_del(&folio->lru);
			spin_lock_irq(&hugetlb_lock);
			__folio_clear_hugetlb(folio);
			spin_unlock_irq(&hugetlb_lock);
			update_and_free_hugetlb_folio(h, folio, false);
			cond_resched();
		}
	} else {
		/*
		 * In the case where there are no folios which can be
		 * immediately freed, we loop through the list trying to restore
		 * vmemmap individually in the hope that someone elsewhere may
		 * have done something to cause success (such as freeing some
		 * memory).  If unable to restore a hugetlb page, the hugetlb
		 * page is made a surplus page and removed from the list.
		 * If are able to restore vmemmap and free one hugetlb page, we
		 * quit processing the list to retry the bulk operation.
		 */
		list_for_each_entry_safe(folio, t_folio, folio_list, lru)
			if (hugetlb_vmemmap_restore_folio(h, folio)) {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, true);
				spin_unlock_irq(&hugetlb_lock);
			} else {
				list_del(&folio->lru);
				spin_lock_irq(&hugetlb_lock);
				__folio_clear_hugetlb(folio);
				spin_unlock_irq(&hugetlb_lock);
				update_and_free_hugetlb_folio(h, folio, false);
				cond_resched();
				break;
			}
	}
}
1648 
/* Free every folio on @folio_list, restoring vmemmap in bulk first. */
static void update_and_free_pages_bulk(struct hstate *h,
						struct list_head *folio_list)
{
	long ret;
	struct folio *folio, *t_folio;
	LIST_HEAD(non_hvo_folios);

	/*
	 * First allocate required vmemmmap (if necessary) for all folios.
	 * Carefully handle errors and free up any available hugetlb pages
	 * in an effort to make forward progress.
	 */
retry:
	ret = hugetlb_vmemmap_restore_folios(h, folio_list, &non_hvo_folios);
	if (ret < 0) {
		bulk_vmemmap_restore_error(h, folio_list, &non_hvo_folios);
		goto retry;
	}

	/*
	 * At this point, list should be empty, ret should be >= 0 and there
	 * should only be pages on the non_hvo_folios list.
	 * Do note that the non_hvo_folios list could be empty.
	 * Without HVO enabled, ret will be 0 and there is no need to call
	 * __folio_clear_hugetlb as this was done previously.
	 */
	VM_WARN_ON(!list_empty(folio_list));
	VM_WARN_ON(ret < 0);
	if (!list_empty(&non_hvo_folios) && ret) {
		spin_lock_irq(&hugetlb_lock);
		list_for_each_entry(folio, &non_hvo_folios, lru)
			__folio_clear_hugetlb(folio);
		spin_unlock_irq(&hugetlb_lock);
	}

	list_for_each_entry_safe(folio, t_folio, &non_hvo_folios, lru) {
		update_and_free_hugetlb_folio(h, folio, false);
		cond_resched();
	}
}
1689 
size_to_hstate(unsigned long size)1690 struct hstate *size_to_hstate(unsigned long size)
1691 {
1692 	struct hstate *h;
1693 
1694 	for_each_hstate(h) {
1695 		if (huge_page_size(h) == size)
1696 			return h;
1697 	}
1698 	return NULL;
1699 }
1700 
/*
 * Release a hugetlb folio whose last reference has been dropped:
 * return it to the pool free list, or tear it down entirely if it is
 * temporary or its node has surplus pages.
 */
void free_huge_folio(struct folio *folio)
{
	/*
	 * Can't pass hstate in here because it is called from the
	 * generic mm code.
	 */
	struct hstate *h = folio_hstate(folio);
	int nid = folio_nid(folio);
	struct hugepage_subpool *spool = hugetlb_folio_subpool(folio);
	bool restore_reserve;
	unsigned long flags;

	VM_BUG_ON_FOLIO(folio_ref_count(folio), folio);
	VM_BUG_ON_FOLIO(folio_mapcount(folio), folio);

	hugetlb_set_folio_subpool(folio, NULL);
	if (folio_test_anon(folio))
		__ClearPageAnonExclusive(&folio->page);
	folio->mapping = NULL;
	restore_reserve = folio_test_hugetlb_restore_reserve(folio);
	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * If HPageRestoreReserve was set on page, page allocation consumed a
	 * reservation.  If the page was associated with a subpool, there
	 * would have been a page reserved in the subpool before allocation
	 * via hugepage_subpool_get_pages().  Since we are 'restoring' the
	 * reservation, do not call hugepage_subpool_put_pages() as this will
	 * remove the reserved page from the subpool.
	 */
	if (!restore_reserve) {
		/*
		 * A return code of zero implies that the subpool will be
		 * under its minimum size if the reservation is not restored
		 * after page is free.  Therefore, force restore_reserve
		 * operation.
		 */
		if (hugepage_subpool_put_pages(spool, 1) == 0)
			restore_reserve = true;
	}

	spin_lock_irqsave(&hugetlb_lock, flags);
	folio_clear_hugetlb_migratable(folio);
	hugetlb_cgroup_uncharge_folio(hstate_index(h),
				     pages_per_huge_page(h), folio);
	hugetlb_cgroup_uncharge_folio_rsvd(hstate_index(h),
					  pages_per_huge_page(h), folio);
	lruvec_stat_mod_folio(folio, NR_HUGETLB, -pages_per_huge_page(h));
	mem_cgroup_uncharge(folio);
	if (restore_reserve)
		h->resv_huge_pages++;

	if (folio_test_hugetlb_temporary(folio)) {
		/* Temporary folios are never pooled; free them outright. */
		remove_hugetlb_folio(h, folio, false);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else if (h->surplus_huge_pages_node[nid]) {
		/* remove the page from active list */
		remove_hugetlb_folio(h, folio, true);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
		update_and_free_hugetlb_folio(h, folio, true);
	} else {
		arch_clear_hugetlb_flags(folio);
		enqueue_hugetlb_folio(h, folio);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}
1768 
1769 /*
1770  * Must be called with the hugetlb lock held
1771  */
/* Accounting only: the folio is not placed on any list here. */
static void account_new_hugetlb_folio(struct hstate *h, struct folio *folio)
{
	lockdep_assert_held(&hugetlb_lock);
	h->nr_huge_pages++;
	h->nr_huge_pages_node[folio_nid(folio)]++;
}
1778 
/* Mark a freshly allocated folio as hugetlb and reset its metadata. */
void init_new_hugetlb_folio(struct folio *folio)
{
	__folio_set_hugetlb(folio);
	INIT_LIST_HEAD(&folio->lru);
	hugetlb_set_folio_subpool(folio, NULL);
	set_hugetlb_cgroup(folio, NULL);
	set_hugetlb_cgroup_rsvd(folio, NULL);
}
1787 
1788 /*
1789  * Find and lock address space (mapping) in write mode.
1790  *
1791  * Upon entry, the folio is locked which means that folio_mapping() is
1792  * stable.  Due to locking order, we can only trylock_write.  If we can
1793  * not get the lock, simply return NULL to caller.
1794  */
hugetlb_folio_mapping_lock_write(struct folio * folio)1795 struct address_space *hugetlb_folio_mapping_lock_write(struct folio *folio)
1796 {
1797 	struct address_space *mapping = folio_mapping(folio);
1798 
1799 	if (!mapping)
1800 		return mapping;
1801 
1802 	if (i_mmap_trylock_write(mapping))
1803 		return mapping;
1804 
1805 	return NULL;
1806 }
1807 
static struct folio *alloc_buddy_frozen_folio(int order, gfp_t gfp_mask,
		int nid, nodemask_t *nmask, nodemask_t *node_alloc_noretry)
{
	struct folio *folio;
	bool retry_hard;

	/*
	 * By default we try hard (__GFP_RETRY_MAYFAIL) to allocate the
	 * folio.  When allocating folios in a loop (to adjust global huge
	 * page counts), a previous failure on this node - recorded in the
	 * node_alloc_noretry bitmap - tells us not to keep trying hard on
	 * the same node.
	 */
	retry_hard = !(node_alloc_noretry &&
		       node_isset(nid, *node_alloc_noretry));
	if (retry_hard)
		gfp_mask |= __GFP_RETRY_MAYFAIL;

	folio = (struct folio *)__alloc_frozen_pages(gfp_mask, order, nid, nmask);

	if (node_alloc_noretry) {
		/*
		 * Getting a folio without __GFP_RETRY_MAYFAIL indicates an
		 * overall state change: resume normal 'try hard'
		 * allocations on this node.  Conversely, failing while
		 * trying hard means subsequent attempts should back off
		 * until the state changes again.
		 */
		if (folio && !retry_hard)
			node_clear(nid, *node_alloc_noretry);
		else if (!folio && retry_hard)
			node_set(nid, *node_alloc_noretry);
	}

	if (!folio) {
		__count_vm_event(HTLB_BUDDY_PGALLOC_FAIL);
		return NULL;
	}

	__count_vm_event(HTLB_BUDDY_PGALLOC);
	return folio;
}
1852 
static struct folio *only_alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask,
		nodemask_t *node_alloc_noretry)
{
	int order = huge_page_order(h);
	struct folio *folio;

	/* NUMA_NO_NODE means "allocate near the current CPU". */
	if (nid == NUMA_NO_NODE)
		nid = numa_mem_id();

	/* Gigantic orders need a dedicated allocator; buddy handles the rest. */
	folio = order_is_gigantic(order) ?
		alloc_gigantic_frozen_folio(order, gfp_mask, nid, nmask) :
		alloc_buddy_frozen_folio(order, gfp_mask, nid, nmask,
					 node_alloc_noretry);
	if (!folio)
		return NULL;

	init_new_hugetlb_folio(folio);
	return folio;
}
1872 
1873 /*
1874  * Common helper to allocate a fresh hugetlb folio. All specific allocators
1875  * should use this function to get new hugetlb folio
1876  *
1877  * Note that returned folio is 'frozen':  ref count of head page and all tail
1878  * pages is zero, and the accounting must be done in the caller.
1879  */
static struct folio *alloc_fresh_hugetlb_folio(struct hstate *h,
		gfp_t gfp_mask, int nid, nodemask_t *nmask)
{
	struct folio *folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
							     nmask, NULL);

	if (!folio)
		return NULL;

	/* Apply vmemmap optimization before handing the folio out. */
	hugetlb_vmemmap_optimize_folio(h, folio);
	return folio;
}
1890 
void prep_and_add_allocated_folios(struct hstate *h,
				   struct list_head *folio_list)
{
	unsigned long flags;
	struct folio *folio, *tmp_f;

	/* Send list for bulk vmemmap optimization processing */
	hugetlb_vmemmap_optimize_folios(h, folio_list);

	/*
	 * Add all new pool pages to free lists in one lock cycle so the
	 * pool counters and free lists change atomically with respect to
	 * other hugetlb_lock holders.
	 */
	spin_lock_irqsave(&hugetlb_lock, flags);
	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
		account_new_hugetlb_folio(h, folio);
		enqueue_hugetlb_folio(h, folio);
	}
	spin_unlock_irqrestore(&hugetlb_lock, flags);
}
1908 
1909 /*
1910  * Allocates a fresh hugetlb page in a node interleaved manner.  The page
1911  * will later be added to the appropriate hugetlb pool.
1912  */
static struct folio *alloc_pool_huge_folio(struct hstate *h,
					nodemask_t *nodes_allowed,
					nodemask_t *node_alloc_noretry,
					int *next_node)
{
	gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
	struct folio *folio = NULL;
	int nr_nodes, node;

	/* Round-robin over the allowed nodes, stopping on first success. */
	for_each_node_mask_to_alloc(next_node, nr_nodes, node, nodes_allowed) {
		folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, node,
					nodes_allowed, node_alloc_noretry);
		if (folio)
			break;
	}

	return folio;
}
1932 
1933 /*
1934  * Remove huge page from pool from next node to free.  Attempt to keep
1935  * persistent huge pages more or less balanced over allowed nodes.
1936  * This routine only 'removes' the hugetlb page.  The caller must make
1937  * an additional call to free the page to low level allocators.
1938  * Called with hugetlb_lock locked.
1939  */
static struct folio *remove_pool_hugetlb_folio(struct hstate *h,
		nodemask_t *nodes_allowed, bool acct_surplus)
{
	struct folio *folio = NULL;
	int nr_nodes, node;

	lockdep_assert_held(&hugetlb_lock);
	for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
		/*
		 * When returning unused surplus pages, only consider
		 * nodes that actually hold surplus pages.
		 */
		if (acct_surplus && !h->surplus_huge_pages_node[node])
			continue;
		if (list_empty(&h->hugepage_freelists[node]))
			continue;

		/* Take the first free folio on this node's freelist. */
		folio = list_entry(h->hugepage_freelists[node].next,
				   struct folio, lru);
		remove_hugetlb_folio(h, folio, acct_surplus);
		break;
	}

	return folio;
}
1963 
1964 /*
1965  * Dissolve a given free hugetlb folio into free buddy pages. This function
1966  * does nothing for in-use hugetlb folios and non-hugetlb folios.
1967  * This function returns values like below:
1968  *
1969  *  -ENOMEM: failed to allocate vmemmap pages to free the freed hugepages
1970  *           when the system is under memory pressure and the feature of
1971  *           freeing unused vmemmap pages associated with each hugetlb page
1972  *           is enabled.
1973  *  -EBUSY:  failed to dissolved free hugepages or the hugepage is in-use
1974  *           (allocated or reserved.)
1975  *       0:  successfully dissolved free hugepages or the page is not a
1976  *           hugepage (considered as already dissolved)
1977  */
int dissolve_free_hugetlb_folio(struct folio *folio)
{
	int rc = -EBUSY;

retry:
	/* Not to disrupt normal path by vainly holding hugetlb_lock */
	if (!folio_test_hugetlb(folio))
		return 0;

	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(folio)) {
		/* Dissolved from under us while we took the lock. */
		rc = 0;
		goto out;
	}

	if (!folio_ref_count(folio)) {
		struct hstate *h = folio_hstate(folio);
		bool adjust_surplus = false;

		/* No free pages left to dissolve in this hstate. */
		if (!available_huge_pages(h))
			goto out;

		/*
		 * We should make sure that the page is already on the free list
		 * when it is dissolved.
		 */
		if (unlikely(!folio_test_hugetlb_freed(folio))) {
			spin_unlock_irq(&hugetlb_lock);
			cond_resched();

			/*
			 * Theoretically, we should return -EBUSY when we
			 * encounter this race. In fact, we have a chance
			 * to successfully dissolve the page if we do a
			 * retry. Because the race window is quite small.
			 * If we seize this opportunity, it is an optimization
			 * for increasing the success rate of dissolving page.
			 */
			goto retry;
		}

		if (h->surplus_huge_pages_node[folio_nid(folio)])
			adjust_surplus = true;
		remove_hugetlb_folio(h, folio, adjust_surplus);
		h->max_huge_pages--;
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Normally update_and_free_hugetlb_folio will allocate required
		 * vmemmap before freeing the page.  update_and_free_hugetlb_folio
		 * will fail to free the page if it can not allocate required
		 * vmemmap.  We need to adjust max_huge_pages if the page is not
		 * freed.  Attempt to allocate vmemmap here so that we can take
		 * appropriate action on failure.
		 *
		 * The folio_test_hugetlb check here is because
		 * remove_hugetlb_folio will clear hugetlb folio flag for
		 * non-vmemmap optimized hugetlb folios.
		 */
		if (folio_test_hugetlb(folio)) {
			rc = hugetlb_vmemmap_restore_folio(h, folio);
			if (rc) {
				/* Restore failed: put the folio back and bail. */
				spin_lock_irq(&hugetlb_lock);
				add_hugetlb_folio(h, folio, adjust_surplus);
				h->max_huge_pages++;
				goto out;
			}
		} else {
			rc = 0;
		}

		update_and_free_hugetlb_folio(h, folio, false);
		return rc;
	}
out:
	spin_unlock_irq(&hugetlb_lock);
	return rc;
}
2056 
2057 /*
2058  * Dissolve free hugepages in a given pfn range. Used by memory hotplug to
2059  * make specified memory blocks removable from the system.
2060  * Note that this will dissolve a free gigantic hugepage completely, if any
2061  * part of it lies within the given range.
2062  * Also note that if dissolve_free_hugetlb_folio() returns with an error, all
2063  * free hugetlb folios that were dissolved before that error are lost.
2064  */
dissolve_free_hugetlb_folios(unsigned long start_pfn,unsigned long end_pfn)2065 int dissolve_free_hugetlb_folios(unsigned long start_pfn, unsigned long end_pfn)
2066 {
2067 	unsigned long pfn;
2068 	struct folio *folio;
2069 	int rc = 0;
2070 	unsigned int order;
2071 	struct hstate *h;
2072 
2073 	if (!hugepages_supported())
2074 		return rc;
2075 
2076 	order = huge_page_order(&default_hstate);
2077 	for_each_hstate(h)
2078 		order = min(order, huge_page_order(h));
2079 
2080 	for (pfn = start_pfn; pfn < end_pfn; pfn += 1 << order) {
2081 		folio = pfn_folio(pfn);
2082 		rc = dissolve_free_hugetlb_folio(folio);
2083 		if (rc)
2084 			break;
2085 	}
2086 
2087 	return rc;
2088 }
2089 
2090 /*
2091  * Allocates a fresh surplus page from the page allocator.
2092  */
static struct folio *alloc_surplus_hugetlb_folio(struct hstate *h,
				gfp_t gfp_mask,	int nid, nodemask_t *nmask)
{
	struct folio *folio = NULL;

	/* Gigantic pages without runtime (de)allocation cannot be surplus. */
	if (hstate_is_gigantic_no_runtime(h))
		return NULL;

	/* Bail out early if the overcommit limit is already reached. */
	spin_lock_irq(&hugetlb_lock);
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages)
		goto out_unlock;
	spin_unlock_irq(&hugetlb_lock);

	/* Allocate with the lock dropped; the allocation may sleep. */
	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * nr_huge_pages needs to be adjusted within the same lock cycle
	 * as surplus_pages, otherwise it might confuse
	 * persistent_huge_pages() momentarily.
	 */
	account_new_hugetlb_folio(h, folio);

	/*
	 * We could have raced with the pool size change.
	 * Double check that and simply deallocate the new page
	 * if we would end up overcommitting the surpluses. Abuse
	 * temporary page to workaround the nasty free_huge_folio
	 * codeflow
	 */
	if (h->surplus_huge_pages >= h->nr_overcommit_huge_pages) {
		folio_set_hugetlb_temporary(folio);
		spin_unlock_irq(&hugetlb_lock);
		free_huge_folio(folio);
		return NULL;
	}

	h->surplus_huge_pages++;
	h->surplus_huge_pages_node[folio_nid(folio)]++;

out_unlock:
	spin_unlock_irq(&hugetlb_lock);

	return folio;
}
2140 
static struct folio *alloc_migrate_hugetlb_folio(struct hstate *h, gfp_t gfp_mask,
				     int nid, nodemask_t *nmask)
{
	struct folio *folio;

	/* Gigantic folios are not allocated here. */
	if (hstate_is_gigantic(h))
		return NULL;

	folio = alloc_fresh_hugetlb_folio(h, gfp_mask, nid, nmask);
	if (!folio)
		return NULL;

	spin_lock_irq(&hugetlb_lock);
	account_new_hugetlb_folio(h, folio);
	spin_unlock_irq(&hugetlb_lock);

	/* fresh huge pages are frozen */
	folio_ref_unfreeze(folio, 1);
	/*
	 * We do not account these pages as surplus because they are only
	 * temporary and will be released properly on the last reference
	 */
	folio_set_hugetlb_temporary(folio);

	return folio;
}
2167 
2168 /*
2169  * Use the VMA's mpolicy to allocate a huge page from the buddy.
2170  */
2171 static
alloc_buddy_hugetlb_folio_with_mpol(struct hstate * h,struct vm_area_struct * vma,unsigned long addr)2172 struct folio *alloc_buddy_hugetlb_folio_with_mpol(struct hstate *h,
2173 		struct vm_area_struct *vma, unsigned long addr)
2174 {
2175 	struct folio *folio = NULL;
2176 	struct mempolicy *mpol;
2177 	gfp_t gfp_mask = htlb_alloc_mask(h);
2178 	int nid;
2179 	nodemask_t *nodemask;
2180 
2181 	nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
2182 	if (mpol_is_preferred_many(mpol)) {
2183 		gfp_t gfp = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_NOFAIL);
2184 
2185 		folio = alloc_surplus_hugetlb_folio(h, gfp, nid, nodemask);
2186 
2187 		/* Fallback to all nodes if page==NULL */
2188 		nodemask = NULL;
2189 	}
2190 
2191 	if (!folio)
2192 		folio = alloc_surplus_hugetlb_folio(h, gfp_mask, nid, nodemask);
2193 	mpol_cond_put(mpol);
2194 	return folio;
2195 }
2196 
struct folio *alloc_hugetlb_folio_reserve(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask)
{
	struct folio *folio = NULL;

	spin_lock_irq(&hugetlb_lock);
	/* Only hand out a folio while reserves remain; consume one on success. */
	if (h->resv_huge_pages) {
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
						       preferred_nid, nmask);
		if (folio)
			h->resv_huge_pages--;
	}
	spin_unlock_irq(&hugetlb_lock);

	return folio;
}
2216 
2217 /* folio migration callback function */
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
		nodemask_t *nmask, gfp_t gfp_mask, bool allow_alloc_fallback)
{
	struct folio *folio = NULL;

	/* First try to take an existing free folio from the pool. */
	spin_lock_irq(&hugetlb_lock);
	if (available_huge_pages(h))
		folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
						       preferred_nid, nmask);
	spin_unlock_irq(&hugetlb_lock);

	if (folio)
		return folio;

	/* We cannot fallback to other nodes, as we could break the per-node pool. */
	if (!allow_alloc_fallback)
		gfp_mask |= __GFP_THISNODE;

	return alloc_migrate_hugetlb_folio(h, gfp_mask, preferred_nid, nmask);
}
2240 
policy_mbind_nodemask(gfp_t gfp)2241 static nodemask_t *policy_mbind_nodemask(gfp_t gfp)
2242 {
2243 #ifdef CONFIG_NUMA
2244 	struct mempolicy *mpol = get_task_policy(current);
2245 
2246 	/*
2247 	 * Only enforce MPOL_BIND policy which overlaps with cpuset policy
2248 	 * (from policy_nodemask) specifically for hugetlb case
2249 	 */
2250 	if (mpol->mode == MPOL_BIND &&
2251 		(apply_policy_zone(mpol, gfp_zone(gfp)) &&
2252 		 cpuset_nodemask_valid_mems_allowed(&mpol->nodes)))
2253 		return &mpol->nodes;
2254 #endif
2255 	return NULL;
2256 }
2257 
2258 /*
2259  * Increase the hugetlb pool such that it can accommodate a reservation
2260  * of size 'delta'.
2261  */
static int gather_surplus_pages(struct hstate *h, long delta)
	__must_hold(&hugetlb_lock)
{
	LIST_HEAD(surplus_list);
	struct folio *folio, *tmp;
	int ret;
	long i;
	long needed, allocated;
	bool alloc_ok = true;
	nodemask_t *mbind_nodemask, alloc_nodemask;

	/* Restrict allocations to the task's mempolicy/cpuset nodes. */
	mbind_nodemask = policy_mbind_nodemask(htlb_alloc_mask(h));
	if (mbind_nodemask)
		nodes_and(alloc_nodemask, *mbind_nodemask, cpuset_current_mems_allowed);
	else
		alloc_nodemask = cpuset_current_mems_allowed;

	lockdep_assert_held(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) - h->free_huge_pages;
	if (needed <= 0) {
		/* Enough free pages already exist to back the reservation. */
		h->resv_huge_pages += delta;
		return 0;
	}

	allocated = 0;

	ret = -ENOMEM;
retry:
	/* Drop the lock across the (possibly sleeping) allocations. */
	spin_unlock_irq(&hugetlb_lock);
	for (i = 0; i < needed; i++) {
		folio = NULL;

		/*
		 * It is okay to use NUMA_NO_NODE because we use numa_mem_id()
		 * down the road to pick the current node if that is the case.
		 */
		folio = alloc_surplus_hugetlb_folio(h, htlb_alloc_mask(h),
						    NUMA_NO_NODE, &alloc_nodemask);
		if (!folio) {
			alloc_ok = false;
			break;
		}
		list_add(&folio->lru, &surplus_list);
		cond_resched();
	}
	allocated += i;

	/*
	 * After retaking hugetlb_lock, we need to recalculate 'needed'
	 * because either resv_huge_pages or free_huge_pages may have changed.
	 */
	spin_lock_irq(&hugetlb_lock);
	needed = (h->resv_huge_pages + delta) -
			(h->free_huge_pages + allocated);
	if (needed > 0) {
		if (alloc_ok)
			goto retry;
		/*
		 * We were not able to allocate enough pages to
		 * satisfy the entire reservation so we free what
		 * we've allocated so far.
		 */
		goto free;
	}
	/*
	 * The surplus_list now contains _at_least_ the number of extra pages
	 * needed to accommodate the reservation.  Add the appropriate number
	 * of pages to the hugetlb pool and free the extras back to the buddy
	 * allocator.  Commit the entire reservation here to prevent another
	 * process from stealing the pages as they are added to the pool but
	 * before they are reserved.
	 */
	needed += allocated;
	h->resv_huge_pages += delta;
	ret = 0;

	/* Free the needed pages to the hugetlb pool */
	list_for_each_entry_safe(folio, tmp, &surplus_list, lru) {
		if ((--needed) < 0)
			break;
		/* Add the page to the hugetlb allocator */
		enqueue_hugetlb_folio(h, folio);
	}
free:
	spin_unlock_irq(&hugetlb_lock);

	/*
	 * Free unnecessary surplus pages to the buddy allocator.
	 * Pages have no ref count, call free_huge_folio directly.
	 */
	list_for_each_entry_safe(folio, tmp, &surplus_list, lru)
		free_huge_folio(folio);
	spin_lock_irq(&hugetlb_lock);

	return ret;
}
2358 
2359 /*
2360  * This routine has two main purposes:
2361  * 1) Decrement the reservation count (resv_huge_pages) by the value passed
2362  *    in unused_resv_pages.  This corresponds to the prior adjustments made
2363  *    to the associated reservation map.
2364  * 2) Free any unused surplus pages that may have been allocated to satisfy
2365  *    the reservation.  As many as unused_resv_pages may be freed.
2366  */
return_unused_surplus_pages(struct hstate * h,unsigned long unused_resv_pages)2367 static void return_unused_surplus_pages(struct hstate *h,
2368 					unsigned long unused_resv_pages)
2369 {
2370 	unsigned long nr_pages;
2371 	LIST_HEAD(page_list);
2372 
2373 	lockdep_assert_held(&hugetlb_lock);
2374 	/* Uncommit the reservation */
2375 	h->resv_huge_pages -= unused_resv_pages;
2376 
2377 	if (hstate_is_gigantic_no_runtime(h))
2378 		goto out;
2379 
2380 	/*
2381 	 * Part (or even all) of the reservation could have been backed
2382 	 * by pre-allocated pages. Only free surplus pages.
2383 	 */
2384 	nr_pages = min(unused_resv_pages, h->surplus_huge_pages);
2385 
2386 	/*
2387 	 * We want to release as many surplus pages as possible, spread
2388 	 * evenly across all nodes with memory. Iterate across these nodes
2389 	 * until we can no longer free unreserved surplus pages. This occurs
2390 	 * when the nodes with surplus pages have no free pages.
2391 	 * remove_pool_hugetlb_folio() will balance the freed pages across the
2392 	 * on-line nodes with memory and will handle the hstate accounting.
2393 	 */
2394 	while (nr_pages--) {
2395 		struct folio *folio;
2396 
2397 		folio = remove_pool_hugetlb_folio(h, &node_states[N_MEMORY], 1);
2398 		if (!folio)
2399 			goto out;
2400 
2401 		list_add(&folio->lru, &page_list);
2402 	}
2403 
2404 out:
2405 	spin_unlock_irq(&hugetlb_lock);
2406 	update_and_free_pages_bulk(h, &page_list);
2407 	spin_lock_irq(&hugetlb_lock);
2408 }
2409 
2410 
2411 /*
2412  * vma_needs_reservation, vma_commit_reservation and vma_end_reservation
2413  * are used by the huge page allocation routines to manage reservations.
2414  *
2415  * vma_needs_reservation is called to determine if the huge page at addr
2416  * within the vma has an associated reservation.  If a reservation is
2417  * needed, the value 1 is returned.  The caller is then responsible for
2418  * managing the global reservation and subpool usage counts.  After
2419  * the huge page has been allocated, vma_commit_reservation is called
2420  * to add the page to the reservation map.  If the page allocation fails,
2421  * the reservation must be ended instead of committed.  vma_end_reservation
2422  * is called in such cases.
2423  *
2424  * In the normal case, vma_commit_reservation returns the same value
2425  * as the preceding vma_needs_reservation call.  The only time this
2426  * is not the case is if a reserve map was changed between calls.  It
2427  * is the responsibility of the caller to notice the difference and
2428  * take appropriate action.
2429  *
2430  * vma_add_reservation is used in error paths where a reservation must
2431  * be restored when a newly allocated huge page must be freed.  It is
2432  * to be called after calling vma_needs_reservation to determine if a
2433  * reservation exists.
2434  *
2435  * vma_del_reservation is used in error paths where an entry in the reserve
2436  * map was created during huge page allocation and must be removed.  It is to
2437  * be called after calling vma_needs_reservation to determine if a reservation
2438  * exists.
2439  */
enum vma_resv_mode {
	VMA_NEEDS_RESV,		/* query: does this page need a reservation? */
	VMA_COMMIT_RESV,	/* commit a reservation after allocation */
	VMA_END_RESV,		/* end a reservation when allocation failed */
	VMA_ADD_RESV,		/* error path: restore a reserve map entry */
	VMA_DEL_RESV,		/* error path: remove a reserve map entry */
};
static long __vma_reservation_common(struct hstate *h,
				struct vm_area_struct *vma, unsigned long addr,
				enum vma_resv_mode mode)
{
	struct resv_map *resv;
	pgoff_t idx;
	long ret;
	long dummy_out_regions_needed;

	/* No reserve map means nothing to track; report as reserved. */
	resv = vma_resv_map(vma);
	if (!resv)
		return 1;

	idx = vma_hugecache_offset(h, vma, addr);
	switch (mode) {
	case VMA_NEEDS_RESV:
		ret = region_chg(resv, idx, idx + 1, &dummy_out_regions_needed);
		/* We assume that vma_reservation_* routines always operate on
		 * 1 page, and that adding to resv map a 1 page entry can only
		 * ever require 1 region.
		 */
		VM_BUG_ON(dummy_out_regions_needed != 1);
		break;
	case VMA_COMMIT_RESV:
		ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
		/* region_add calls of range 1 should never fail. */
		VM_BUG_ON(ret < 0);
		break;
	case VMA_END_RESV:
		region_abort(resv, idx, idx + 1, 1);
		ret = 0;
		break;
	case VMA_ADD_RESV:
		if (vma->vm_flags & VM_MAYSHARE) {
			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
			/* region_add calls of range 1 should never fail. */
			VM_BUG_ON(ret < 0);
		} else {
			region_abort(resv, idx, idx + 1, 1);
			ret = region_del(resv, idx, idx + 1);
		}
		break;
	case VMA_DEL_RESV:
		if (vma->vm_flags & VM_MAYSHARE) {
			region_abort(resv, idx, idx + 1, 1);
			ret = region_del(resv, idx, idx + 1);
		} else {
			ret = region_add(resv, idx, idx + 1, 1, NULL, NULL);
			/* region_add calls of range 1 should never fail. */
			VM_BUG_ON(ret < 0);
		}
		break;
	default:
		BUG();
	}

	if (vma->vm_flags & VM_MAYSHARE || mode == VMA_DEL_RESV)
		return ret;
	/*
	 * We know private mapping must have HPAGE_RESV_OWNER set.
	 *
	 * In most cases, reserves always exist for private mappings.
	 * However, a file associated with mapping could have been
	 * hole punched or truncated after reserves were consumed.
	 * A subsequent fault on such a range will not use reserves.
	 * Subtle - The reserve map for private mappings has the
	 * opposite meaning than that of shared mappings.  If NO
	 * entry is in the reserve map, it means a reservation exists.
	 * If an entry exists in the reserve map, it means the
	 * reservation has already been consumed.  As a result, the
	 * return value of this routine is the opposite of the
	 * value returned from reserve map manipulation routines above.
	 */
	if (ret > 0)
		return 0;
	if (ret == 0)
		return 1;
	return ret;
}
2526 
/* Determine if a reservation is needed for the huge page at @addr. */
static long vma_needs_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_NEEDS_RESV);
}

/* Commit the reservation in the reserve map after a successful allocation. */
static long vma_commit_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_COMMIT_RESV);
}

/* End an uncommitted reservation when the allocation did not happen. */
static void vma_end_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	(void)__vma_reservation_common(h, vma, addr, VMA_END_RESV);
}

/* Error path: restore a reservation entry for a folio being freed. */
static long vma_add_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_ADD_RESV);
}

/* Error path: remove a reserve map entry created during allocation. */
static long vma_del_reservation(struct hstate *h,
			struct vm_area_struct *vma, unsigned long addr)
{
	return __vma_reservation_common(h, vma, addr, VMA_DEL_RESV);
}
2556 
2557 /*
2558  * This routine is called to restore reservation information on error paths.
2559  * It should ONLY be called for folios allocated via alloc_hugetlb_folio(),
2560  * and the hugetlb mutex should remain held when calling this routine.
2561  *
2562  * It handles two specific cases:
2563  * 1) A reservation was in place and the folio consumed the reservation.
2564  *    hugetlb_restore_reserve is set in the folio.
2565  * 2) No reservation was in place for the page, so hugetlb_restore_reserve is
2566  *    not set.  However, alloc_hugetlb_folio always updates the reserve map.
2567  *
2568  * In case 1, free_huge_folio later in the error path will increment the
2569  * global reserve count.  But, free_huge_folio does not have enough context
2570  * to adjust the reservation map.  This case deals primarily with private
2571  * mappings.  Adjust the reserve map here to be consistent with global
2572  * reserve count adjustments to be made by free_huge_folio.  Make sure the
2573  * reserve map indicates there is a reservation present.
2574  *
2575  * In case 2, simply undo reserve map modifications done by alloc_hugetlb_folio.
2576  */
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
			unsigned long address, struct folio *folio)
{
	/* rc > 0: reservation needed, rc == 0: map entry exists, rc < 0: OOM. */
	long rc = vma_needs_reservation(h, vma, address);

	if (folio_test_hugetlb_restore_reserve(folio)) {
		if (unlikely(rc < 0))
			/*
			 * Rare out of memory condition in reserve map
			 * manipulation.  Clear hugetlb_restore_reserve so
			 * that global reserve count will not be incremented
			 * by free_huge_folio.  This will make it appear
			 * as though the reservation for this folio was
			 * consumed.  This may prevent the task from
			 * faulting in the folio at a later time.  This
			 * is better than inconsistent global huge page
			 * accounting of reserve counts.
			 */
			folio_clear_hugetlb_restore_reserve(folio);
		else if (rc)
			/* Case 1: make the reserve map reflect the reservation. */
			(void)vma_add_reservation(h, vma, address);
		else
			vma_end_reservation(h, vma, address);
	} else {
		if (!rc) {
			/*
			 * This indicates there is an entry in the reserve map
			 * not added by alloc_hugetlb_folio.  We know it was added
			 * before the alloc_hugetlb_folio call, otherwise
			 * hugetlb_restore_reserve would be set on the folio.
			 * Remove the entry so that a subsequent allocation
			 * does not consume a reservation.
			 */
			rc = vma_del_reservation(h, vma, address);
			if (rc < 0)
				/*
				 * VERY rare out of memory condition.  Since
				 * we can not delete the entry, set
				 * hugetlb_restore_reserve so that the reserve
				 * count will be incremented when the folio
				 * is freed.  This reserve will be consumed
				 * on a subsequent allocation.
				 */
				folio_set_hugetlb_restore_reserve(folio);
		} else if (rc < 0) {
			/*
			 * Rare out of memory condition from
			 * vma_needs_reservation call.  Memory allocation is
			 * only attempted if a new entry is needed.  Therefore,
			 * this implies there is not an entry in the
			 * reserve map.
			 *
			 * For shared mappings, no entry in the map indicates
			 * no reservation.  We are done.
			 */
			if (!(vma->vm_flags & VM_MAYSHARE))
				/*
				 * For private mappings, no entry indicates
				 * a reservation is present.  Since we can
				 * not add an entry, set hugetlb_restore_reserve
				 * on the folio so reserve count will be
				 * incremented when freed.  This reserve will
				 * be consumed on a subsequent allocation.
				 */
				folio_set_hugetlb_restore_reserve(folio);
		} else {
			/*
			 * No reservation present, do nothing
			 */
			vma_end_reservation(h, vma, address);
		}
	}
}
2650 
/*
 * alloc_and_dissolve_hugetlb_folio - Allocate a new folio and dissolve
 * the old one
 * @old_folio: Old folio to dissolve
 * @list: List to isolate the page in case we need to
 * Returns 0 on success, otherwise negated error.
 *
 * Replaces a free hugetlb folio with a freshly allocated one on the same
 * node, so the memory backing @old_folio can be released without shrinking
 * the hugetlb pool.  If @old_folio turns out to be in use instead, it is
 * isolated onto @list.
 */
static int alloc_and_dissolve_hugetlb_folio(struct folio *old_folio,
			struct list_head *list)
{
	gfp_t gfp_mask;
	struct hstate *h;
	int nid = folio_nid(old_folio);	/* replacement must come from this node */
	struct folio *new_folio = NULL;
	int ret = 0;

retry:
	/*
	 * The old_folio might have been dissolved from under our feet, so make sure
	 * to carefully check the state under the lock.
	 */
	spin_lock_irq(&hugetlb_lock);
	if (!folio_test_hugetlb(old_folio)) {
		/*
		 * Freed from under us. Drop new_folio too.
		 */
		goto free_new;
	} else if (folio_ref_count(old_folio)) {
		bool isolated;

		/*
		 * Someone has grabbed the folio, try to isolate it here.
		 * Fail with -EBUSY if not possible.
		 */
		spin_unlock_irq(&hugetlb_lock);
		isolated = folio_isolate_hugetlb(old_folio, list);
		ret = isolated ? 0 : -EBUSY;
		spin_lock_irq(&hugetlb_lock);
		goto free_new;
	} else if (!folio_test_hugetlb_freed(old_folio)) {
		/*
		 * Folio's refcount is 0 but it has not been enqueued in the
		 * freelist yet. Race window is small, so we can succeed here if
		 * we retry.
		 */
		spin_unlock_irq(&hugetlb_lock);
		cond_resched();
		goto retry;
	} else {
		h = folio_hstate(old_folio);
		if (!new_folio) {
			/*
			 * Allocate the replacement with the lock dropped,
			 * then retake it and re-validate old_folio's state.
			 */
			spin_unlock_irq(&hugetlb_lock);
			gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;
			new_folio = alloc_fresh_hugetlb_folio(h, gfp_mask,
							      nid, NULL);
			if (!new_folio)
				return -ENOMEM;
			goto retry;
		}

		/*
		 * Ok, old_folio is still a genuine free hugepage. Remove it from
		 * the freelist and decrease the counters. These will be
		 * incremented again when calling account_new_hugetlb_folio()
		 * and enqueue_hugetlb_folio() for new_folio. The counters will
		 * remain stable since this happens under the lock.
		 */
		remove_hugetlb_folio(h, old_folio, false);

		/*
		 * Ref count on new_folio is already zero as it was dropped
		 * earlier.  It can be directly added to the pool free list.
		 */
		account_new_hugetlb_folio(h, new_folio);
		enqueue_hugetlb_folio(h, new_folio);

		/*
		 * Folio has been replaced, we can safely free the old one.
		 */
		spin_unlock_irq(&hugetlb_lock);
		update_and_free_hugetlb_folio(h, old_folio, false);
	}

	return ret;

free_new:
	spin_unlock_irq(&hugetlb_lock);
	if (new_folio)
		update_and_free_hugetlb_folio(h, new_folio, false);

	return ret;
}
2743 
isolate_or_dissolve_huge_folio(struct folio * folio,struct list_head * list)2744 int isolate_or_dissolve_huge_folio(struct folio *folio, struct list_head *list)
2745 {
2746 	int ret = -EBUSY;
2747 
2748 	/* Not to disrupt normal path by vainly holding hugetlb_lock */
2749 	if (!folio_test_hugetlb(folio))
2750 		return 0;
2751 
2752 	/*
2753 	 * Fence off gigantic pages as there is a cyclic dependency between
2754 	 * alloc_contig_range and them. Return -ENOMEM as this has the effect
2755 	 * of bailing out right away without further retrying.
2756 	 */
2757 	if (order_is_gigantic(folio_order(folio)))
2758 		return -ENOMEM;
2759 
2760 	if (folio_ref_count(folio) && folio_isolate_hugetlb(folio, list))
2761 		ret = 0;
2762 	else if (!folio_ref_count(folio))
2763 		ret = alloc_and_dissolve_hugetlb_folio(folio, list);
2764 
2765 	return ret;
2766 }
2767 
/*
 *  replace_free_hugepage_folios - Replace free hugepage folios in a given pfn
 *  range with new folios.
 *  @start_pfn: start pfn of the given pfn range
 *  @end_pfn: end pfn of the given pfn range
 *  Returns 0 on success, otherwise negated error.
 *
 *  Walks [start_pfn, end_pfn), replacing any free hugetlb folio it finds
 *  via alloc_and_dissolve_hugetlb_folio(), so the range can be handed to
 *  a contiguous allocator.
 */
int replace_free_hugepage_folios(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long nr = 0;
	struct page *page;
	struct hstate *h;
	LIST_HEAD(list);
	int ret = 0;

	/* Avoid pfn iterations if no free non-gigantic huge pages */
	for_each_hstate(h) {
		if (hstate_is_gigantic(h))
			continue;

		nr += h->free_huge_pages;
		if (nr)
			break;
	}

	if (!nr)
		return 0;

	while (start_pfn < end_pfn) {
		page = pfn_to_page(start_pfn);
		nr = 1;	/* default: advance one page per iteration */

		if (PageHuge(page) || PageCompound(page)) {
			struct folio *folio = page_folio(page);

			/* Skip ahead to the end of this compound page. */
			nr = folio_nr_pages(folio) - folio_page_idx(folio, page);

			/*
			 * Don't disrupt normal path by vainly holding
			 * hugetlb_lock
			 */
			if (folio_test_hugetlb(folio) && !folio_ref_count(folio)) {
				if (order_is_gigantic(folio_order(folio))) {
					ret = -ENOMEM;
					break;
				}

				ret = alloc_and_dissolve_hugetlb_folio(folio, &list);
				if (ret)
					break;

				/*
				 * The folio raced into use and was isolated;
				 * return it to where it came from.
				 */
				putback_movable_pages(&list);
			}
		} else if (PageBuddy(page)) {
			/*
			 * Buddy order check without zone lock is unsafe and
			 * the order is maybe invalid, but race should be
			 * small, and the worst thing is skipping free hugetlb.
			 */
			const unsigned int order = buddy_order_unsafe(page);

			if (order <= MAX_PAGE_ORDER)
				nr = 1UL << order;
		}
		start_pfn += nr;
	}

	return ret;
}
2837 
wait_for_freed_hugetlb_folios(void)2838 void wait_for_freed_hugetlb_folios(void)
2839 {
2840 	if (llist_empty(&hpage_freelist))
2841 		return;
2842 
2843 	flush_work(&free_hpage_work);
2844 }
2845 
/* Result of consulting the per-vma reservation map for an allocation. */
typedef enum {
	/*
	 * For either 0/1: we checked the per-vma resv map, and one resv
	 * count either can be reused (0), or an extra needed (1).
	 */
	MAP_CHG_REUSE = 0,
	MAP_CHG_NEEDED = 1,
	/*
	 * The per-vma resv count cannot be used, hence a new resv
	 * count is enforced.
	 *
	 * NOTE: This is mostly identical to MAP_CHG_NEEDED, except
	 * that currently vma_needs_reservation() has an unwanted side
	 * effect to either use end() or commit() to complete the
	 * transaction. Hence it needs to differentiate from NEEDED.
	 */
	MAP_CHG_ENFORCED = 2,
} map_chg_state;
2864 
/*
 * NOTE! "cow_from_owner" represents a very hacky usage only used in CoW
 * faults of hugetlb private mappings on top of a non-page-cache folio (in
 * which case even if there's a private vma resv map it won't cover such
 * allocation).  New call sites should (probably) never set it to true!!
 * When it's set, the allocation will bypass all vma level reservations.
 *
 * Allocate a hugetlb folio for @vma at @addr, consuming (or creating) the
 * needed vma/subpool/cgroup reservations.  Returns the folio on success or
 * an ERR_PTR (-ENOMEM / -ENOSPC) on failure.
 */
struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
				    unsigned long addr, bool cow_from_owner)
{
	struct hugepage_subpool *spool = subpool_vma(vma);
	struct hstate *h = hstate_vma(vma);
	struct folio *folio;
	long retval, gbl_chg, gbl_reserve;
	map_chg_state map_chg;
	int ret, idx;
	struct hugetlb_cgroup *h_cg = NULL;
	gfp_t gfp = htlb_alloc_mask(h) | __GFP_RETRY_MAYFAIL;

	idx = hstate_index(h);

	/* Whether we need a separate per-vma reservation? */
	if (cow_from_owner) {
		/*
		 * Special case!  Since it's a CoW on top of a reserved
		 * page, the private resv map doesn't count.  So it cannot
		 * consume the per-vma resv map even if it's reserved.
		 */
		map_chg = MAP_CHG_ENFORCED;
	} else {
		/*
		 * Examine the region/reserve map to determine if the process
		 * has a reservation for the page to be allocated.  A return
		 * code of zero indicates a reservation exists (no change).
		 */
		retval = vma_needs_reservation(h, vma, addr);
		if (retval < 0)
			return ERR_PTR(-ENOMEM);
		map_chg = retval ? MAP_CHG_NEEDED : MAP_CHG_REUSE;
	}

	/*
	 * Whether we need a separate global reservation?
	 *
	 * Processes that did not create the mapping will have no
	 * reserves as indicated by the region/reserve map. Check
	 * that the allocation will not exceed the subpool limit.
	 * Or if it can get one from the pool reservation directly.
	 */
	if (map_chg) {
		gbl_chg = hugepage_subpool_get_pages(spool, 1);
		if (gbl_chg < 0)
			goto out_end_reservation;
	} else {
		/*
		 * If we have the vma reservation ready, no need for extra
		 * global reservation.
		 */
		gbl_chg = 0;
	}

	/*
	 * If this allocation is not consuming a per-vma reservation,
	 * charge the hugetlb cgroup now.
	 */
	if (map_chg) {
		ret = hugetlb_cgroup_charge_cgroup_rsvd(
			idx, pages_per_huge_page(h), &h_cg);
		if (ret)
			goto out_subpool_put;
	}

	ret = hugetlb_cgroup_charge_cgroup(idx, pages_per_huge_page(h), &h_cg);
	if (ret)
		goto out_uncharge_cgroup_reservation;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * glb_chg is passed to indicate whether or not a page must be taken
	 * from the global free pool (global change).  gbl_chg == 0 indicates
	 * a reservation exists for the allocation.
	 */
	folio = dequeue_hugetlb_folio_vma(h, vma, addr, gbl_chg);
	if (!folio) {
		/* No free folio in the pool: fall back to the buddy allocator. */
		spin_unlock_irq(&hugetlb_lock);
		folio = alloc_buddy_hugetlb_folio_with_mpol(h, vma, addr);
		if (!folio)
			goto out_uncharge_cgroup;
		spin_lock_irq(&hugetlb_lock);
		list_add(&folio->lru, &h->hugepage_activelist);
		folio_ref_unfreeze(folio, 1);
		/* Fall through */
	}

	/*
	 * Either dequeued or buddy-allocated folio needs to add special
	 * mark to the folio when it consumes a global reservation.
	 */
	if (!gbl_chg) {
		folio_set_hugetlb_restore_reserve(folio);
		h->resv_huge_pages--;
	}

	hugetlb_cgroup_commit_charge(idx, pages_per_huge_page(h), h_cg, folio);
	/* If allocation is not consuming a reservation, also store the
	 * hugetlb_cgroup pointer on the page.
	 */
	if (map_chg) {
		hugetlb_cgroup_commit_charge_rsvd(idx, pages_per_huge_page(h),
						  h_cg, folio);
	}

	spin_unlock_irq(&hugetlb_lock);

	hugetlb_set_folio_subpool(folio, spool);

	if (map_chg != MAP_CHG_ENFORCED) {
		/* commit() is only needed if the map_chg is not enforced */
		retval = vma_commit_reservation(h, vma, addr);
		/*
		 * Check for possible race conditions. When it happens..
		 * The page was added to the reservation map between
		 * vma_needs_reservation and vma_commit_reservation.
		 * This indicates a race with hugetlb_reserve_pages.
		 * Adjust for the subpool count incremented above AND
		 * in hugetlb_reserve_pages for the same page.	Also,
		 * the reservation count added in hugetlb_reserve_pages
		 * no longer applies.
		 */
		if (unlikely(map_chg == MAP_CHG_NEEDED && retval == 0)) {
			long rsv_adjust;

			rsv_adjust = hugepage_subpool_put_pages(spool, 1);
			hugetlb_acct_memory(h, -rsv_adjust);
			spin_lock_irq(&hugetlb_lock);
			hugetlb_cgroup_uncharge_folio_rsvd(
			    hstate_index(h), pages_per_huge_page(h), folio);
			spin_unlock_irq(&hugetlb_lock);
		}
	}

	ret = mem_cgroup_charge_hugetlb(folio, gfp);
	/*
	 * Unconditionally increment NR_HUGETLB here. If it turns out that
	 * mem_cgroup_charge_hugetlb failed, then immediately free the page and
	 * decrement NR_HUGETLB.
	 */
	lruvec_stat_mod_folio(folio, NR_HUGETLB, pages_per_huge_page(h));

	if (ret == -ENOMEM) {
		free_huge_folio(folio);
		return ERR_PTR(-ENOMEM);
	}

	return folio;

out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup(idx, pages_per_huge_page(h), h_cg);
out_uncharge_cgroup_reservation:
	if (map_chg)
		hugetlb_cgroup_uncharge_cgroup_rsvd(idx, pages_per_huge_page(h),
						    h_cg);
out_subpool_put:
	/*
	 * put page to subpool iff the quota of subpool's rsv_hpages is used
	 * during hugepage_subpool_get_pages.
	 */
	if (map_chg && !gbl_chg) {
		gbl_reserve = hugepage_subpool_put_pages(spool, 1);
		hugetlb_acct_memory(h, -gbl_reserve);
	}


out_end_reservation:
	if (map_chg != MAP_CHG_ENFORCED)
		vma_end_reservation(h, vma, addr);
	return ERR_PTR(-ENOSPC);
}
3043 
/*
 * Allocate boot-time memory for one huge page of hstate @h, preferring
 * node @nid (required when @node_exact).  The memory comes from early CMA
 * when configured for @h, otherwise from memblock.  On success the page is
 * queued on the huge_boot_pages list of the node it actually came from.
 * Returns the huge_bootmem_page descriptor, or NULL on failure.
 */
static __init void *alloc_bootmem(struct hstate *h, int nid, bool node_exact)
{
	struct huge_bootmem_page *m;
	int listnode = nid;	/* node whose boot list the page goes on */

	if (hugetlb_early_cma(h))
		m = hugetlb_cma_alloc_bootmem(h, &listnode, node_exact);
	else {
		if (node_exact)
			m = memblock_alloc_exact_nid_raw(huge_page_size(h),
				huge_page_size(h), 0,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
		else {
			m = memblock_alloc_try_nid_raw(huge_page_size(h),
				huge_page_size(h), 0,
				MEMBLOCK_ALLOC_ACCESSIBLE, nid);
			/*
			 * For pre-HVO to work correctly, pages need to be on
			 * the list for the node they were actually allocated
			 * from. That node may be different in the case of
			 * fallback by memblock_alloc_try_nid_raw. So,
			 * extract the actual node first.
			 */
			if (m)
				listnode = early_pfn_to_nid(PHYS_PFN(__pa(m)));
		}

		if (m) {
			m->flags = 0;
			m->cma = NULL;
		}
	}

	if (m) {
		/*
		 * Use the beginning of the huge page to store the
		 * huge_bootmem_page struct (until gather_bootmem
		 * puts them into the mem_map).
		 *
		 * Put them into a private list first because mem_map
		 * is not up yet.
		 */
		INIT_LIST_HEAD(&m->list);
		list_add(&m->list, &huge_boot_pages[listnode]);
		m->hstate = h;
	}

	return m;
}
3093 
/* Arch code may override alloc_bootmem_huge_page; default to the generic one. */
int alloc_bootmem_huge_page(struct hstate *h, int nid)
	__attribute__ ((weak, alias("__alloc_bootmem_huge_page")));
/*
 * Allocate one boot-time huge page for @h.  With a specific @nid the
 * allocation must land on that node; with NUMA_NO_NODE, round-robin over
 * hugetlb_bootmem_nodes.  Returns 1 on success, 0 on failure.
 */
int __alloc_bootmem_huge_page(struct hstate *h, int nid)
{
	struct huge_bootmem_page *m = NULL; /* initialize for clang */
	int nr_nodes, node = nid;

	/* do node specific alloc */
	if (nid != NUMA_NO_NODE) {
		m = alloc_bootmem(h, node, true);
		if (!m)
			return 0;
		goto found;
	}

	/* allocate from next node when distributing huge pages */
	for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node,
				    &hugetlb_bootmem_nodes) {
		m = alloc_bootmem(h, node, false);
		if (!m)
			return 0;
		goto found;
	}

	/*
	 * NOTE(review): if hugetlb_bootmem_nodes were ever empty the loop body
	 * would not run and m would still be NULL here — presumably the mask
	 * is guaranteed non-empty at this point; confirm against init order.
	 */
found:

	/*
	 * Only initialize the head struct page in memmap_init_reserved_pages,
	 * rest of the struct pages will be initialized by the HugeTLB
	 * subsystem itself.
	 * The head struct page is used to get folio information by the HugeTLB
	 * subsystem like zone id and node id.
	 */
	memblock_reserved_mark_noinit(__pa((void *)m + PAGE_SIZE),
		huge_page_size(h) - PAGE_SIZE);

	return 1;
}
3132 
/* Initialize [start_page:end_page_number] tail struct pages of a hugepage */
static void __init hugetlb_folio_init_tail_vmemmap(struct folio *folio,
					struct hstate *h,
					unsigned long start_page_number,
					unsigned long end_page_number)
{
	enum zone_type zone = folio_zonenum(folio);
	int nid = folio_nid(folio);
	struct page *page = folio_page(folio, start_page_number);
	unsigned long head_pfn = folio_pfn(folio);
	unsigned long pfn, end_pfn = head_pfn + end_page_number;
	unsigned int order = huge_page_order(h);

	/*
	 * As we marked all tail pages with memblock_reserved_mark_noinit(),
	 * we must initialize them ourselves here.
	 */
	for (pfn = head_pfn + start_page_number; pfn < end_pfn; page++, pfn++) {
		/* Basic struct page init, then link it as a compound tail. */
		__init_single_page(page, pfn, zone, nid);
		prep_compound_tail(page, &folio->page, order);
		set_page_count(page, 0);
	}
}
3156 
/*
 * Turn a raw boot-allocated range into a hugetlb folio: initialize the
 * first @nr_pages tail struct pages and prepare the compound head, with
 * the refcount frozen at 0.
 */
static void __init hugetlb_folio_init_vmemmap(struct folio *folio,
					      struct hstate *h,
					      unsigned long nr_pages)
{
	int ret;

	/*
	 * This is an open-coded prep_compound_page() whereby we avoid
	 * walking pages twice by initializing/preparing+freezing them in the
	 * same go.
	 */
	__folio_clear_reserved(folio);
	__folio_set_head(folio);
	ret = folio_ref_freeze(folio, 1);
	VM_BUG_ON(!ret);
	/* Tail 0 is the head page; start tail init at index 1. */
	hugetlb_folio_init_tail_vmemmap(folio, h, 1, nr_pages);
	prep_compound_head(&folio->page, huge_page_order(h));
}
3175 
hugetlb_bootmem_page_prehvo(struct huge_bootmem_page * m)3176 static bool __init hugetlb_bootmem_page_prehvo(struct huge_bootmem_page *m)
3177 {
3178 	return m->flags & HUGE_BOOTMEM_HVO;
3179 }
3180 
hugetlb_bootmem_page_earlycma(struct huge_bootmem_page * m)3181 static bool __init hugetlb_bootmem_page_earlycma(struct huge_bootmem_page *m)
3182 {
3183 	return m->flags & HUGE_BOOTMEM_CMA;
3184 }
3185 
3186 /*
3187  * memblock-allocated pageblocks might not have the migrate type set
3188  * if marked with the 'noinit' flag. Set it to the default (MIGRATE_MOVABLE)
3189  * here, or MIGRATE_CMA if this was a page allocated through an early CMA
3190  * reservation.
3191  *
3192  * In case of vmemmap optimized folios, the tail vmemmap pages are mapped
3193  * read-only, but that's ok - for sparse vmemmap this does not write to
3194  * the page structure.
3195  */
hugetlb_bootmem_init_migratetype(struct folio * folio,struct hstate * h)3196 static void __init hugetlb_bootmem_init_migratetype(struct folio *folio,
3197 							  struct hstate *h)
3198 {
3199 	unsigned long nr_pages = pages_per_huge_page(h), i;
3200 
3201 	WARN_ON_ONCE(!pageblock_aligned(folio_pfn(folio)));
3202 
3203 	for (i = 0; i < nr_pages; i += pageblock_nr_pages) {
3204 		if (folio_test_hugetlb_cma(folio))
3205 			init_cma_pageblock(folio_page(folio, i));
3206 		else
3207 			init_pageblock_migratetype(folio_page(folio, i),
3208 					  MIGRATE_MOVABLE, false);
3209 	}
3210 }
3211 
/*
 * Finish setting up boot-allocated folios of @h from @folio_list and add
 * them to the free pool: run (bulk) vmemmap optimization, initialize tail
 * struct pages for any folio HVO could not optimize, set pageblock
 * migratetypes, then account and enqueue each folio.
 */
static void __init prep_and_add_bootmem_folios(struct hstate *h,
					struct list_head *folio_list)
{
	unsigned long flags;
	struct folio *folio, *tmp_f;

	/* Send list for bulk vmemmap optimization processing */
	hugetlb_vmemmap_optimize_bootmem_folios(h, folio_list);

	list_for_each_entry_safe(folio, tmp_f, folio_list, lru) {
		if (!folio_test_hugetlb_vmemmap_optimized(folio)) {
			/*
			 * If HVO fails, initialize all tail struct pages
			 * We do not worry about potential long lock hold
			 * time as this is early in boot and there should
			 * be no contention.
			 */
			hugetlb_folio_init_tail_vmemmap(folio, h,
					HUGETLB_VMEMMAP_RESERVE_PAGES,
					pages_per_huge_page(h));
		}
		hugetlb_bootmem_init_migratetype(folio, h);
		/* Subdivide locks to achieve better parallel performance */
		spin_lock_irqsave(&hugetlb_lock, flags);
		account_new_hugetlb_folio(h, folio);
		enqueue_hugetlb_folio(h, folio);
		spin_unlock_irqrestore(&hugetlb_lock, flags);
	}
}
3241 
/*
 * Check that boot page @m on node @nid lies entirely within one zone; a
 * huge page straddling zones cannot be used.  Invalid pages are counted in
 * hstate_boot_nrinvalid.  Returns true when the page is usable.
 */
bool __init hugetlb_bootmem_page_zones_valid(int nid,
					     struct huge_bootmem_page *m)
{
	unsigned long start_pfn;
	bool valid;

	if (m->flags & HUGE_BOOTMEM_ZONES_VALID) {
		/*
		 * Already validated, skip check.
		 */
		return true;
	}

	if (hugetlb_bootmem_page_earlycma(m)) {
		/* CMA areas carry their own zone validation. */
		valid = cma_validate_zones(m->cma);
		goto out;
	}

	/* @m lives at the start of the huge page itself. */
	start_pfn = virt_to_phys(m) >> PAGE_SHIFT;

	valid = !pfn_range_intersects_zones(nid, start_pfn,
			pages_per_huge_page(m->hstate));
out:
	if (!valid)
		hstate_boot_nrinvalid[hstate_index(m->hstate)]++;

	return valid;
}
3270 
3271 /*
3272  * Free a bootmem page that was found to be invalid (intersecting with
3273  * multiple zones).
3274  *
3275  * Since it intersects with multiple zones, we can't just do a free
3276  * operation on all pages at once, but instead have to walk all
3277  * pages, freeing them one by one.
3278  */
hugetlb_bootmem_free_invalid_page(int nid,struct page * page,struct hstate * h)3279 static void __init hugetlb_bootmem_free_invalid_page(int nid, struct page *page,
3280 					     struct hstate *h)
3281 {
3282 	unsigned long npages = pages_per_huge_page(h);
3283 	unsigned long pfn;
3284 
3285 	while (npages--) {
3286 		pfn = page_to_pfn(page);
3287 		__init_page_from_nid(pfn, nid);
3288 		free_reserved_page(page);
3289 		page++;
3290 	}
3291 }
3292 
/*
 * Put bootmem huge pages into the standard lists after mem_map is up.
 * Note: This only applies to gigantic (order > MAX_PAGE_ORDER) pages.
 */
static void __init gather_bootmem_prealloc_node(unsigned long nid)
{
	LIST_HEAD(folio_list);
	struct huge_bootmem_page *m, *tm;
	struct hstate *h = NULL, *prev_h = NULL;

	list_for_each_entry_safe(m, tm, &huge_boot_pages[nid], list) {
		struct page *page = virt_to_page(m);
		struct folio *folio = (void *)page;

		h = m->hstate;
		if (!hugetlb_bootmem_page_zones_valid(nid, m)) {
			/*
			 * Can't use this page. Initialize the
			 * page structures if that hasn't already
			 * been done, and give them to the page
			 * allocator.
			 */
			hugetlb_bootmem_free_invalid_page(nid, page, h);
			continue;
		}

		/*
		 * It is possible to have multiple huge page sizes (hstates)
		 * in this list.  If so, process each size separately.
		 */
		if (h != prev_h && prev_h != NULL)
			prep_and_add_bootmem_folios(prev_h, &folio_list);
		prev_h = h;

		VM_BUG_ON(!hstate_is_gigantic(h));
		WARN_ON(folio_ref_count(folio) != 1);

		hugetlb_folio_init_vmemmap(folio, h,
					   HUGETLB_VMEMMAP_RESERVE_PAGES);
		init_new_hugetlb_folio(folio);

		if (hugetlb_bootmem_page_prehvo(m))
			/*
			 * If pre-HVO was done, just set the
			 * flag, the HVO code will then skip
			 * this folio.
			 */
			folio_set_hugetlb_vmemmap_optimized(folio);

		if (hugetlb_bootmem_page_earlycma(m))
			folio_set_hugetlb_cma(folio);

		list_add(&folio->lru, &folio_list);

		/*
		 * We need to restore the 'stolen' pages to totalram_pages
		 * in order to fix confusing memory reports from free(1) and
		 * other side-effects, like CommitLimit going negative.
		 *
		 * For CMA pages, this is done in init_cma_pageblock
		 * (via hugetlb_bootmem_init_migratetype), so skip it here.
		 */
		if (!folio_test_hugetlb_cma(folio))
			adjust_managed_page_count(page, pages_per_huge_page(h));
		cond_resched();
	}

	/* Flush the folios accumulated for the last hstate seen. */
	prep_and_add_bootmem_folios(h, &folio_list);
}
3362 
gather_bootmem_prealloc_parallel(unsigned long start,unsigned long end,void * arg)3363 static void __init gather_bootmem_prealloc_parallel(unsigned long start,
3364 						    unsigned long end, void *arg)
3365 {
3366 	int nid;
3367 
3368 	for (nid = start; nid < end; nid++)
3369 		gather_bootmem_prealloc_node(nid);
3370 }
3371 
/*
 * Gather all boot-allocated huge pages into the hugetlb pools, spreading
 * the per-node work across a multithreaded padata job (one unit per node).
 */
static void __init gather_bootmem_prealloc(void)
{
	struct padata_mt_job job = {
		.thread_fn	= gather_bootmem_prealloc_parallel,
		.fn_arg		= NULL,
		.start		= 0,
		.size		= nr_node_ids,	/* iterate every possible node */
		.align		= 1,
		.min_chunk	= 1,
		.max_threads	= num_node_state(N_MEMORY),
		.numa_aware	= true,
	};

	padata_do_multithreaded(&job);
}
3387 
/*
 * Boot-time allocation of max_huge_pages_node[@nid] huge pages of @h on a
 * specific node.  Gigantic pages come from memblock; others from buddy.
 * On shortfall, warns and trims the requested counts to what was obtained.
 */
static void __init hugetlb_hstate_alloc_pages_onenode(struct hstate *h, int nid)
{
	unsigned long i;
	char buf[32];
	LIST_HEAD(folio_list);

	for (i = 0; i < h->max_huge_pages_node[nid]; ++i) {
		if (hstate_is_gigantic(h)) {
			if (!alloc_bootmem_huge_page(h, nid))
				break;
		} else {
			struct folio *folio;
			gfp_t gfp_mask = htlb_alloc_mask(h) | __GFP_THISNODE;

			folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
					&node_states[N_MEMORY], NULL);
			/*
			 * On failure, flush the pending list first: vmemmap
			 * optimization of those folios may free enough memory
			 * for one more allocation attempt.
			 */
			if (!folio && !list_empty(&folio_list) &&
			    hugetlb_vmemmap_optimizable_size(h)) {
				prep_and_add_allocated_folios(h, &folio_list);
				INIT_LIST_HEAD(&folio_list);
				folio = only_alloc_fresh_hugetlb_folio(h, gfp_mask, nid,
						&node_states[N_MEMORY], NULL);
			}
			if (!folio)
				break;
			list_add(&folio->lru, &folio_list);
		}
		cond_resched();
	}

	if (!list_empty(&folio_list))
		prep_and_add_allocated_folios(h, &folio_list);

	if (i == h->max_huge_pages_node[nid])
		return;

	string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
	pr_warn("HugeTLB: allocating %u of page size %s failed node%d.  Only allocated %lu hugepages.\n",
		h->max_huge_pages_node[nid], buf, nid, i);
	h->max_huge_pages -= (h->max_huge_pages_node[nid] - i);
	h->max_huge_pages_node[nid] = i;
}
3430 
hugetlb_hstate_alloc_pages_specific_nodes(struct hstate * h)3431 static bool __init hugetlb_hstate_alloc_pages_specific_nodes(struct hstate *h)
3432 {
3433 	int i;
3434 	bool node_specific_alloc = false;
3435 
3436 	for_each_online_node(i) {
3437 		if (h->max_huge_pages_node[i] > 0) {
3438 			hugetlb_hstate_alloc_pages_onenode(h, i);
3439 			node_specific_alloc = true;
3440 		}
3441 	}
3442 
3443 	return node_specific_alloc;
3444 }
3445 
hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated,struct hstate * h)3446 static void __init hugetlb_hstate_alloc_pages_errcheck(unsigned long allocated, struct hstate *h)
3447 {
3448 	if (allocated < h->max_huge_pages) {
3449 		char buf[32];
3450 
3451 		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
3452 		pr_warn("HugeTLB: allocating %lu of page size %s failed.  Only allocated %lu hugepages.\n",
3453 			h->max_huge_pages, buf, allocated);
3454 		h->max_huge_pages = allocated;
3455 	}
3456 }
3457 
/*
 * padata worker for balanced boot allocation: allocate (end - start) huge
 * pages of the hstate passed in @arg, spread over N_MEMORY nodes, and add
 * them to the pool in batches.
 */
static void __init hugetlb_pages_alloc_boot_node(unsigned long start, unsigned long end, void *arg)
{
	struct hstate *h = (struct hstate *)arg;
	int i, num = end - start;
	nodemask_t node_alloc_noretry;
	LIST_HEAD(folio_list);
	int next_node = first_online_node;

	/* Bit mask controlling how hard we retry per-node allocations.*/
	nodes_clear(node_alloc_noretry);

	for (i = 0; i < num; ++i) {
		struct folio *folio;

		/*
		 * Out of memory: flush the pending list so vmemmap
		 * optimization can return pages to the system before the
		 * next allocation attempt.
		 */
		if (hugetlb_vmemmap_optimizable_size(h) &&
		    (si_mem_available() == 0) && !list_empty(&folio_list)) {
			prep_and_add_allocated_folios(h, &folio_list);
			INIT_LIST_HEAD(&folio_list);
		}
		folio = alloc_pool_huge_folio(h, &node_states[N_MEMORY],
						&node_alloc_noretry, &next_node);
		if (!folio)
			break;

		list_move(&folio->lru, &folio_list);
		cond_resched();
	}

	prep_and_add_allocated_folios(h, &folio_list);
}
3488 
hugetlb_gigantic_pages_alloc_boot(struct hstate * h)3489 static unsigned long __init hugetlb_gigantic_pages_alloc_boot(struct hstate *h)
3490 {
3491 	unsigned long i;
3492 
3493 	for (i = 0; i < h->max_huge_pages; ++i) {
3494 		if (!alloc_bootmem_huge_page(h, NUMA_NO_NODE))
3495 			break;
3496 		cond_resched();
3497 	}
3498 
3499 	return i;
3500 }
3501 
/*
 * Boot-time balanced allocation of h->max_huge_pages non-gigantic huge
 * pages across nodes, parallelized via padata.  Retries while vmemmap
 * optimization keeps freeing memory and progress is being made.  Returns
 * the number of huge pages in the pool afterwards.
 */
static unsigned long __init hugetlb_pages_alloc_boot(struct hstate *h)
{
	struct padata_mt_job job = {
		.fn_arg		= h,
		.align		= 1,
		.numa_aware	= true
	};

	unsigned long jiffies_start;
	unsigned long jiffies_end;
	unsigned long remaining;

	job.thread_fn	= hugetlb_pages_alloc_boot_node;

	/*
	 * job.max_threads is 25% of the available cpu threads by default.
	 *
	 * On large servers with terabytes of memory, huge page allocation
	 * can consume a considerable amount of time.
	 *
	 * Tests below show how long it takes to allocate 1 TiB of memory with
	 * 2MiB huge pages. Using more threads can significantly improve allocation time.
	 *
	 * +-----------------------+-------+-------+-------+-------+-------+
	 * | threads               |   8   |   16  |   32  |   64  |   128 |
	 * +-----------------------+-------+-------+-------+-------+-------+
	 * | skylake      144 cpus |   44s |   22s |   16s |   19s |   20s |
	 * | cascade lake 192 cpus |   39s |   20s |   11s |   10s |    9s |
	 * +-----------------------+-------+-------+-------+-------+-------+
	 */
	if (hugepage_allocation_threads == 0) {
		hugepage_allocation_threads = num_online_cpus() / 4;
		hugepage_allocation_threads = max(hugepage_allocation_threads, 1);
	}

	job.max_threads	= hugepage_allocation_threads;

	jiffies_start = jiffies;
	do {
		remaining = h->max_huge_pages - h->nr_huge_pages;

		job.start     = h->nr_huge_pages;
		job.size      = remaining;
		job.min_chunk = remaining / hugepage_allocation_threads;
		padata_do_multithreaded(&job);

		if (h->nr_huge_pages == h->max_huge_pages)
			break;

		/*
		 * Retry only if the vmemmap optimization might have been able to free
		 * some memory back to the system.
		 */
		if (!hugetlb_vmemmap_optimizable(h))
			break;

		/* Continue if progress was made in last iteration */
	} while (remaining != (h->max_huge_pages - h->nr_huge_pages));

	jiffies_end = jiffies;

	pr_info("HugeTLB: allocation took %dms with hugepage_allocation_threads=%ld\n",
		jiffies_to_msecs(jiffies_end - jiffies_start),
		hugepage_allocation_threads);

	return h->nr_huge_pages;
}
3569 
/*
 * NOTE: this routine is called in different contexts for gigantic and
 * non-gigantic pages.
 * - For gigantic pages, this is called early in the boot process and
 *   pages are allocated from memblock allocated or something similar.
 *   Gigantic pages are actually added to pools later with the routine
 *   gather_bootmem_prealloc.
 * - For non-gigantic pages, this is called later in the boot process after
 *   all of mm is up and functional.  Pages are allocated from buddy and
 *   then added to hugetlb pools.
 */
static void __init hugetlb_hstate_alloc_pages(struct hstate *h)
{
	unsigned long allocated;

	/*
	 * Skip gigantic hugepages allocation if early CMA
	 * reservations are not available.
	 */
	if (hstate_is_gigantic(h) && hugetlb_cma_total_size() &&
	    !hugetlb_early_cma(h)) {
		pr_warn_once("HugeTLB: hugetlb_cma is enabled, skip boot time allocation\n");
		return;
	}

	/* Nothing requested for this hstate. */
	if (!h->max_huge_pages)
		return;

	/* do node specific alloc */
	if (hugetlb_hstate_alloc_pages_specific_nodes(h))
		return;

	/* below will do all node balanced alloc */
	if (hstate_is_gigantic(h))
		allocated = hugetlb_gigantic_pages_alloc_boot(h);
	else
		allocated = hugetlb_pages_alloc_boot(h);

	hugetlb_hstate_alloc_pages_errcheck(allocated, h);
}
3610 
/*
 * For each registered hstate: reset the round-robin node hints, perform
 * the (non-gigantic) boot-time pool allocation, and compute the demotion
 * target order.
 */
static void __init hugetlb_init_hstates(void)
{
	struct hstate *h, *h2;

	for_each_hstate(h) {
		/*
		 * Always reset to first_memory_node here, even if
		 * next_nid_to_alloc was set before - we can't
		 * reference hugetlb_bootmem_nodes after init, and
		 * first_memory_node is right for all further allocations.
		 */
		h->next_nid_to_alloc = first_memory_node;
		h->next_nid_to_free = first_memory_node;

		/* oversize hugepages were init'ed in early boot */
		if (!hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);

		/*
		 * Set demote order for each hstate.  Note that
		 * h->demote_order is initially 0.
		 * - We can not demote gigantic pages if runtime freeing
		 *   is not supported, so skip this.
		 * - If CMA allocation is possible, we can not demote
		 *   HUGETLB_PAGE_ORDER or smaller size pages.
		 */
		if (hstate_is_gigantic_no_runtime(h))
			continue;
		if (hugetlb_cma_total_size() && h->order <= HUGETLB_PAGE_ORDER)
			continue;
		/* Pick the largest hstate strictly smaller than h. */
		for_each_hstate(h2) {
			if (h2 == h)
				continue;
			if (h2->order < h->order &&
			    h2->order > h->demote_order)
				h->demote_order = h2->order;
		}
	}
}
3650 
/*
 * Log the registered page sizes and pre-allocated counts.  Also discounts
 * from max_huge_pages any bootmem pages found invalid during early boot
 * (tracked in hstate_boot_nrinvalid[]).
 */
static void __init report_hugepages(void)
{
	struct hstate *h;
	unsigned long nrinvalid;

	for_each_hstate(h) {
		char buf[32];

		/* Adjust the request downward by the discarded invalid pages. */
		nrinvalid = hstate_boot_nrinvalid[hstate_index(h)];
		h->max_huge_pages -= nrinvalid;

		string_get_size(huge_page_size(h), 1, STRING_UNITS_2, buf, 32);
		pr_info("HugeTLB: registered %s page size, pre-allocated %ld pages\n",
			buf, h->nr_huge_pages);
		if (nrinvalid)
			pr_info("HugeTLB: %s page size: %lu invalid page%s discarded\n",
					buf, nrinvalid, str_plural(nrinvalid));
		pr_info("HugeTLB: %d KiB vmemmap can be freed for a %s page\n",
			hugetlb_vmemmap_optimizable_size(h) / SZ_1K, buf);
	}
}
3672 
#ifdef CONFIG_HIGHMEM
/*
 * Free non-highmem huge pages from the per-node free lists until the pool
 * has shrunk to @count pages (or no more candidates remain).  Called with
 * hugetlb_lock held; the lock is dropped around the actual freeing and
 * re-acquired before returning.  No-op for gigantic hstates.
 */
static void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
	int i;
	LIST_HEAD(page_list);

	lockdep_assert_held(&hugetlb_lock);
	if (hstate_is_gigantic(h))
		return;

	/*
	 * Collect pages to be freed on a list, and free after dropping lock
	 */
	for_each_node_mask(i, *nodes_allowed) {
		struct folio *folio, *next;
		struct list_head *freel = &h->hugepage_freelists[i];
		list_for_each_entry_safe(folio, next, freel, lru) {
			if (count >= h->nr_huge_pages)
				goto out;
			/* Only lowmem pages are candidates here. */
			if (folio_test_highmem(folio))
				continue;
			remove_hugetlb_folio(h, folio, false);
			list_add(&folio->lru, &page_list);
		}
	}

out:
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	spin_lock_irq(&hugetlb_lock);
}
#else
static inline void try_to_free_low(struct hstate *h, unsigned long count,
						nodemask_t *nodes_allowed)
{
}
#endif
3711 
/*
 * Increment or decrement surplus_huge_pages.  Keep node-specific counters
 * balanced by operating on them in a round-robin fashion.
 * Returns 1 if an adjustment was made.  Caller must hold hugetlb_lock.
 */
static int adjust_pool_surplus(struct hstate *h, nodemask_t *nodes_allowed,
				int delta)
{
	int nr_nodes, node;

	lockdep_assert_held(&hugetlb_lock);
	VM_BUG_ON(delta != -1 && delta != 1);

	if (delta < 0) {
		/* Decrement: find a node that has surplus pages to convert. */
		for_each_node_mask_to_alloc(&h->next_nid_to_alloc, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node])
				goto found;
		}
	} else {
		/* Increment: find a node that still has non-surplus pages. */
		for_each_node_mask_to_free(h, nr_nodes, node, nodes_allowed) {
			if (h->surplus_huge_pages_node[node] <
					h->nr_huge_pages_node[node])
				goto found;
		}
	}
	return 0;

found:
	h->surplus_huge_pages += delta;
	h->surplus_huge_pages_node[node] += delta;
	return 1;
}
3744 
/* Pages owned by the pool itself, i.e. excluding surplus pages. */
#define persistent_huge_pages(h) (h->nr_huge_pages - h->surplus_huge_pages)
/*
 * Resize the persistent huge page pool of @h to @count pages, restricted
 * to @nodes_allowed.  @nid is NUMA_NO_NODE for a global request, or a
 * specific node for a per-node sysfs request (in which case @count is a
 * per-node target that is translated to a global one below).
 * Returns 0 on success or -errno; serialized by h->resize_lock.
 */
static int set_max_huge_pages(struct hstate *h, unsigned long count, int nid,
			      nodemask_t *nodes_allowed)
{
	unsigned long persistent_free_count;
	unsigned long min_count;
	unsigned long allocated;
	struct folio *folio;
	LIST_HEAD(page_list);
	NODEMASK_ALLOC(nodemask_t, node_alloc_noretry, GFP_KERNEL);

	/*
	 * Bit mask controlling how hard we retry per-node allocations.
	 * If we can not allocate the bit mask, do not attempt to allocate
	 * the requested huge pages.
	 */
	if (node_alloc_noretry)
		nodes_clear(*node_alloc_noretry);
	else
		return -ENOMEM;

	/*
	 * resize_lock mutex prevents concurrent adjustments to number of
	 * pages in hstate via the proc/sysfs interfaces.
	 */
	mutex_lock(&h->resize_lock);
	flush_free_hpage_work(h);
	spin_lock_irq(&hugetlb_lock);

	/*
	 * Check for a node specific request.
	 * Changing node specific huge page count may require a corresponding
	 * change to the global count.  In any case, the passed node mask
	 * (nodes_allowed) will restrict alloc/free to the specified node.
	 */
	if (nid != NUMA_NO_NODE) {
		unsigned long old_count = count;

		/* Translate the per-node target into a global target. */
		count += persistent_huge_pages(h) -
			 (h->nr_huge_pages_node[nid] -
			  h->surplus_huge_pages_node[nid]);
		/*
		 * User may have specified a large count value which caused the
		 * above calculation to overflow.  In this case, they wanted
		 * to allocate as many huge pages as possible.  Set count to
		 * largest possible value to align with their intention.
		 */
		if (count < old_count)
			count = ULONG_MAX;
	}

	/*
	 * Gigantic pages runtime allocation depend on the capability for large
	 * page range allocation.
	 * If the system does not provide this feature, return an error when
	 * the user tries to allocate gigantic pages but let the user free the
	 * boottime allocated gigantic pages.
	 */
	if (hstate_is_gigantic(h) && !IS_ENABLED(CONFIG_CONTIG_ALLOC)) {
		if (count > persistent_huge_pages(h)) {
			spin_unlock_irq(&hugetlb_lock);
			mutex_unlock(&h->resize_lock);
			NODEMASK_FREE(node_alloc_noretry);
			return -EINVAL;
		}
		/* Fall through to decrease pool */
	}

	/*
	 * Increase the pool size
	 * First take pages out of surplus state.  Then make up the
	 * remaining difference by allocating fresh huge pages.
	 *
	 * We might race with alloc_surplus_hugetlb_folio() here and be unable
	 * to convert a surplus huge page to a normal huge page. That is
	 * not critical, though, it just means the overall size of the
	 * pool might be one hugepage larger than it needs to be, but
	 * within all the constraints specified by the sysctls.
	 */
	while (h->surplus_huge_pages && count > persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, -1))
			break;
	}

	allocated = 0;
	while (count > (persistent_huge_pages(h) + allocated)) {
		/*
		 * If this allocation races such that we no longer need the
		 * page, free_huge_folio will handle it by freeing the page
		 * and reducing the surplus.
		 */
		spin_unlock_irq(&hugetlb_lock);

		/* yield cpu to avoid soft lockup */
		cond_resched();

		folio = alloc_pool_huge_folio(h, nodes_allowed,
						node_alloc_noretry,
						&h->next_nid_to_alloc);
		if (!folio) {
			/* Allocation failed: add what we have, then stop. */
			prep_and_add_allocated_folios(h, &page_list);
			spin_lock_irq(&hugetlb_lock);
			goto out;
		}

		list_add(&folio->lru, &page_list);
		allocated++;

		/* Bail for signals. Probably ctrl-c from user */
		if (signal_pending(current)) {
			prep_and_add_allocated_folios(h, &page_list);
			spin_lock_irq(&hugetlb_lock);
			goto out;
		}

		spin_lock_irq(&hugetlb_lock);
	}

	/* Add allocated pages to the pool */
	if (!list_empty(&page_list)) {
		spin_unlock_irq(&hugetlb_lock);
		prep_and_add_allocated_folios(h, &page_list);
		spin_lock_irq(&hugetlb_lock);
	}

	/*
	 * Decrease the pool size
	 * First return free pages to the buddy allocator (being careful
	 * to keep enough around to satisfy reservations).  Then place
	 * pages into surplus state as needed so the pool will shrink
	 * to the desired size as pages become free.
	 *
	 * By placing pages into the surplus state independent of the
	 * overcommit value, we are allowing the surplus pool size to
	 * exceed overcommit. There are few sane options here. Since
	 * alloc_surplus_hugetlb_folio() is checking the global counter,
	 * though, we'll note that we're not allowed to exceed surplus
	 * and won't grow the pool anywhere else. Not until one of the
	 * sysctls are changed, or the surplus pages go out of use.
	 *
	 * min_count is the expected number of persistent pages, we
	 * shouldn't calculate min_count by using
	 * resv_huge_pages + persistent_huge_pages() - free_huge_pages,
	 * because there may exist free surplus huge pages, and this will
	 * lead to subtracting twice. Free surplus huge pages come from HVO
	 * failing to restore vmemmap, see comments in the callers of
	 * hugetlb_vmemmap_restore_folio(). Thus, we should calculate
	 * persistent free count first.
	 */
	persistent_free_count = h->free_huge_pages;
	if (h->free_huge_pages > persistent_huge_pages(h)) {
		if (h->free_huge_pages > h->surplus_huge_pages)
			persistent_free_count -= h->surplus_huge_pages;
		else
			persistent_free_count = 0;
	}
	min_count = h->resv_huge_pages + persistent_huge_pages(h) - persistent_free_count;
	min_count = max(count, min_count);
	try_to_free_low(h, min_count, nodes_allowed);

	/*
	 * Collect pages to be removed on list without dropping lock
	 */
	while (min_count < persistent_huge_pages(h)) {
		folio = remove_pool_hugetlb_folio(h, nodes_allowed, 0);
		if (!folio)
			break;

		list_add(&folio->lru, &page_list);
	}
	/* free the pages after dropping lock */
	spin_unlock_irq(&hugetlb_lock);
	update_and_free_pages_bulk(h, &page_list);
	flush_free_hpage_work(h);
	spin_lock_irq(&hugetlb_lock);

	/* Mark remaining excess pages surplus; they shrink the pool as freed. */
	while (count < persistent_huge_pages(h)) {
		if (!adjust_pool_surplus(h, nodes_allowed, 1))
			break;
	}
out:
	h->max_huge_pages = persistent_huge_pages(h);
	spin_unlock_irq(&hugetlb_lock);
	mutex_unlock(&h->resize_lock);

	NODEMASK_FREE(node_alloc_noretry);

	return 0;
}
3934 
/*
 * Demote the folios on @src_list from hstate @src into smaller folios of
 * hstate @dst and add them to @dst's pool.  Folios whose vmemmap could not
 * be restored are left on @src_list for the caller to return to @src.
 * Returns the result of hugetlb_vmemmap_restore_folios() (negative errno
 * on failure).  Called without hugetlb_lock, with src->resize_lock held.
 */
static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,
				       struct list_head *src_list)
{
	long rc;
	struct folio *folio, *next;
	LIST_HEAD(dst_list);
	LIST_HEAD(ret_list);

	/* Folios that failed restore come back on ret_list; keep them on src_list. */
	rc = hugetlb_vmemmap_restore_folios(src, src_list, &ret_list);
	list_splice_init(&ret_list, src_list);

	/*
	 * Taking target hstate mutex synchronizes with set_max_huge_pages.
	 * Without the mutex, pages added to target hstate could be marked
	 * as surplus.
	 *
	 * Note that we already hold src->resize_lock.  To prevent deadlock,
	 * use the convention of always taking larger size hstate mutex first.
	 */
	mutex_lock(&dst->resize_lock);

	list_for_each_entry_safe(folio, next, src_list, lru) {
		int i;
		bool cma;

		/* Skip folios whose vmemmap restore failed above. */
		if (folio_test_hugetlb_vmemmap_optimized(folio))
			continue;

		cma = folio_test_hugetlb_cma(folio);

		list_del(&folio->lru);

		split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
		pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));

		/* Carve the large folio into dst-sized compound pages. */
		for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
			struct page *page = folio_page(folio, i);
			/* Careful: see __split_huge_page_tail() */
			struct folio *new_folio = (struct folio *)page;

			clear_compound_head(page);
			prep_compound_page(page, dst->order);

			new_folio->mapping = NULL;
			init_new_hugetlb_folio(new_folio);
			/* Copy the CMA flag so that it is freed correctly */
			if (cma)
				folio_set_hugetlb_cma(new_folio);
			list_add(&new_folio->lru, &dst_list);
		}
	}

	prep_and_add_allocated_folios(dst, &dst_list);

	mutex_unlock(&dst->resize_lock);

	return rc;
}
3993 
/*
 * Demote up to @nr_to_demote free huge pages of @src (restricted to
 * @nodes_allowed) into pages of the hstate matching src->demote_order.
 * Returns the number of pages demoted, or a negative errno; -EBUSY means
 * only hwpoisoned pages were available.  Called with hugetlb_lock held;
 * the lock is dropped around the demote work and re-taken.
 */
long demote_pool_huge_page(struct hstate *src, nodemask_t *nodes_allowed,
			   unsigned long nr_to_demote)
	__must_hold(&hugetlb_lock)
{
	int nr_nodes, node;
	struct hstate *dst;
	long rc = 0;
	long nr_demoted = 0;

	lockdep_assert_held(&hugetlb_lock);

	/* We should never get here if no demote order */
	if (!src->demote_order) {
		pr_warn("HugeTLB: NULL demote order passed to demote_pool_huge_page.\n");
		return -EINVAL;		/* internal error */
	}
	dst = size_to_hstate(PAGE_SIZE << src->demote_order);

	for_each_node_mask_to_free(src, nr_nodes, node, nodes_allowed) {
		LIST_HEAD(list);
		struct folio *folio, *next;

		/* Pull demotion candidates off this node's free list. */
		list_for_each_entry_safe(folio, next, &src->hugepage_freelists[node], lru) {
			if (folio_test_hwpoison(folio))
				continue;

			remove_hugetlb_folio(src, folio, false);
			list_add(&folio->lru, &list);

			if (++nr_demoted == nr_to_demote)
				break;
		}

		spin_unlock_irq(&hugetlb_lock);

		rc = demote_free_hugetlb_folios(src, dst, &list);

		spin_lock_irq(&hugetlb_lock);

		/* Anything left on the list failed demotion: put it back. */
		list_for_each_entry_safe(folio, next, &list, lru) {
			list_del(&folio->lru);
			add_hugetlb_folio(src, folio, false);

			nr_demoted--;
		}

		if (rc < 0 || nr_demoted == nr_to_demote)
			break;
	}

	/*
	 * Not absolutely necessary, but for consistency update max_huge_pages
	 * based on pool changes for the demoted page.
	 */
	src->max_huge_pages -= nr_demoted;
	dst->max_huge_pages += nr_demoted << (huge_page_order(src) - huge_page_order(dst));

	if (rc < 0)
		return rc;

	if (nr_demoted)
		return nr_demoted;
	/*
	 * Only way to get here is if all pages on free lists are poisoned.
	 * Return -EBUSY so that caller will not retry.
	 */
	return -EBUSY;
}
4062 
__nr_hugepages_store_common(bool obey_mempolicy,struct hstate * h,int nid,unsigned long count,size_t len)4063 ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
4064 					   struct hstate *h, int nid,
4065 					   unsigned long count, size_t len)
4066 {
4067 	int err;
4068 	nodemask_t nodes_allowed, *n_mask;
4069 
4070 	if (hstate_is_gigantic_no_runtime(h))
4071 		return -EINVAL;
4072 
4073 	if (nid == NUMA_NO_NODE) {
4074 		/*
4075 		 * global hstate attribute
4076 		 */
4077 		if (!(obey_mempolicy &&
4078 				init_nodemask_of_mempolicy(&nodes_allowed)))
4079 			n_mask = &node_states[N_MEMORY];
4080 		else
4081 			n_mask = &nodes_allowed;
4082 	} else {
4083 		/*
4084 		 * Node specific request.  count adjustment happens in
4085 		 * set_max_huge_pages() after acquiring hugetlb_lock.
4086 		 */
4087 		init_nodemask_of_node(&nodes_allowed, nid);
4088 		n_mask = &nodes_allowed;
4089 	}
4090 
4091 	err = set_max_huge_pages(h, count, nid, n_mask);
4092 
4093 	return err ? err : len;
4094 }
4095 
/*
 * Late-boot hugetlb initialization: ensure the default hstate exists,
 * apply deferred command-line requests, allocate boot-time pools, and
 * set up sysfs/cgroup/sysctl interfaces and the fault mutex table.
 */
static int __init hugetlb_init(void)
{
	int i;

	/* Folio hugetlb flags are stored in page.private; make sure they fit. */
	BUILD_BUG_ON(sizeof_field(struct page, private) * BITS_PER_BYTE <
			__NR_HPAGEFLAGS);
	BUILD_BUG_ON_INVALID(HUGETLB_PAGE_ORDER > MAX_FOLIO_ORDER);

	if (!hugepages_supported()) {
		if (hugetlb_max_hstate || default_hstate_max_huge_pages)
			pr_warn("HugeTLB: huge pages not supported, ignoring associated command-line parameters\n");
		return 0;
	}

	/*
	 * Make sure HPAGE_SIZE (HUGETLB_PAGE_ORDER) hstate exists.  Some
	 * architectures depend on setup being done here.
	 */
	hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
	if (!parsed_default_hugepagesz) {
		/*
		 * If we did not parse a default huge page size, set
		 * default_hstate_idx to HPAGE_SIZE hstate. And, if the
		 * number of huge pages for this default size was implicitly
		 * specified, set that here as well.
		 * Note that the implicit setting will overwrite an explicit
		 * setting.  A warning will be printed in this case.
		 */
		default_hstate_idx = hstate_index(size_to_hstate(HPAGE_SIZE));
		if (default_hstate_max_huge_pages) {
			if (default_hstate.max_huge_pages) {
				char buf[32];

				string_get_size(huge_page_size(&default_hstate),
					1, STRING_UNITS_2, buf, 32);
				pr_warn("HugeTLB: Ignoring hugepages=%lu associated with %s page size\n",
					default_hstate.max_huge_pages, buf);
				pr_warn("HugeTLB: Using hugepages=%lu for number of default huge pages\n",
					default_hstate_max_huge_pages);
			}
			default_hstate.max_huge_pages =
				default_hstate_max_huge_pages;

			for_each_online_node(i)
				default_hstate.max_huge_pages_node[i] =
					default_hugepages_in_node[i];
		}
	}

	hugetlb_init_hstates();
	gather_bootmem_prealloc();
	report_hugepages();

	hugetlb_sysfs_init();
	hugetlb_cgroup_file_init();
	hugetlb_sysctl_init();

	/* Size the fault mutex table to reduce contention on SMP. */
#ifdef CONFIG_SMP
	num_fault_mutexes = roundup_pow_of_two(8 * num_possible_cpus());
#else
	num_fault_mutexes = 1;
#endif
	hugetlb_fault_mutex_table =
		kmalloc_objs(struct mutex, num_fault_mutexes);
	BUG_ON(!hugetlb_fault_mutex_table);

	for (i = 0; i < num_fault_mutexes; i++)
		mutex_init(&hugetlb_fault_mutex_table[i]);
	return 0;
}
subsys_initcall(hugetlb_init);
4167 
/* Overwritten by architectures with more huge page sizes */
bool __init __attribute((weak)) arch_hugetlb_valid_size(unsigned long size)
{
	/* Generic fallback: only the base huge page size is valid. */
	return size == HPAGE_SIZE;
}
4173 
/*
 * Register a new hstate for huge pages of the given @order, initializing
 * its lock, free lists and name.  No-op if an hstate of that size already
 * exists.  Also records the new hstate as parsed_hstate so a following
 * "hugepages=" parameter applies to it.
 */
void __init hugetlb_add_hstate(unsigned int order)
{
	struct hstate *h;
	unsigned long i;

	/* Already registered: nothing to do. */
	if (size_to_hstate(PAGE_SIZE << order)) {
		return;
	}
	BUG_ON(hugetlb_max_hstate >= HUGE_MAX_HSTATE);
	BUG_ON(order < order_base_2(__NR_USED_SUBPAGE));
	WARN_ON(order > MAX_FOLIO_ORDER);
	h = &hstates[hugetlb_max_hstate++];
	__mutex_init(&h->resize_lock, "resize mutex", &h->resize_key);
	h->order = order;
	h->mask = ~(huge_page_size(h) - 1);
	for (i = 0; i < MAX_NUMNODES; ++i)
		INIT_LIST_HEAD(&h->hugepage_freelists[i]);
	INIT_LIST_HEAD(&h->hugepage_activelist);
	snprintf(h->name, HSTATE_NAME_LEN, "hugepages-%lukB",
					huge_page_size(h)/SZ_1K);

	parsed_hstate = h;
}
4197 
/*
 * Whether "hugepages=<node>:<count>" node-specific boot-time allocation
 * is supported.  Weak default says yes; architectures may override.
 */
bool __init __weak hugetlb_node_alloc_supported(void)
{
	return true;
}
4202 
hugepages_clear_pages_in_node(void)4203 static void __init hugepages_clear_pages_in_node(void)
4204 {
4205 	if (!hugetlb_max_hstate) {
4206 		default_hstate_max_huge_pages = 0;
4207 		memset(default_hugepages_in_node, 0,
4208 			sizeof(default_hugepages_in_node));
4209 	} else {
4210 		parsed_hstate->max_huge_pages = 0;
4211 		memset(parsed_hstate->max_huge_pages_node, 0,
4212 			sizeof(parsed_hstate->max_huge_pages_node));
4213 	}
4214 }
4215 
hugetlb_add_param(char * s,int (* setup)(char *))4216 static __init int hugetlb_add_param(char *s, int (*setup)(char *))
4217 {
4218 	size_t len;
4219 	char *p;
4220 
4221 	if (hugetlb_param_index >= HUGE_MAX_CMDLINE_ARGS)
4222 		return -EINVAL;
4223 
4224 	len = strlen(s) + 1;
4225 	if (len + hstate_cmdline_index > sizeof(hstate_cmdline_buf))
4226 		return -EINVAL;
4227 
4228 	p = &hstate_cmdline_buf[hstate_cmdline_index];
4229 	memcpy(p, s, len);
4230 	hstate_cmdline_index += len;
4231 
4232 	hugetlb_params[hugetlb_param_index].val = p;
4233 	hugetlb_params[hugetlb_param_index].setup = setup;
4234 
4235 	hugetlb_param_index++;
4236 
4237 	return 0;
4238 }
4239 
hugetlb_parse_params(void)4240 static __init void hugetlb_parse_params(void)
4241 {
4242 	int i;
4243 	struct hugetlb_cmdline *hcp;
4244 
4245 	for (i = 0; i < hugetlb_param_index; i++) {
4246 		hcp = &hugetlb_params[i];
4247 
4248 		hcp->setup(hcp->val);
4249 	}
4250 
4251 	hugetlb_cma_validate_params();
4252 }
4253 
/*
 * hugepages command line processing
 * hugepages normally follows a valid hugepagsz or default_hugepagsz
 * specification.  If not, ignore the hugepages value.  hugepages can also
 * be the first huge page command line  option in which case it implicitly
 * specifies the number of huge pages for the default size.
 * Accepts either a plain count ("hugepages=N") or a node-specific list
 * ("hugepages=0:N,1:M,...").
 */
static int __init hugepages_setup(char *s)
{
	unsigned long *mhp;
	static unsigned long *last_mhp;	/* detects repeated hugepages= */
	int node = NUMA_NO_NODE;
	int count;
	unsigned long tmp;
	char *p = s;

	if (!hugepages_supported()) {
		pr_warn("HugeTLB: hugepages unsupported, ignoring hugepages=%s cmdline\n", s);
		return 0;
	}

	if (!parsed_valid_hugepagesz) {
		pr_warn("HugeTLB: hugepages=%s does not follow a valid hugepagesz, ignoring\n", s);
		parsed_valid_hugepagesz = true;
		return -EINVAL;
	}

	/*
	 * !hugetlb_max_hstate means we haven't parsed a hugepagesz= parameter
	 * yet, so this hugepages= parameter goes to the "default hstate".
	 * Otherwise, it goes with the previously parsed hugepagesz or
	 * default_hugepagesz.
	 */
	else if (!hugetlb_max_hstate)
		mhp = &default_hstate_max_huge_pages;
	else
		mhp = &parsed_hstate->max_huge_pages;

	if (mhp == last_mhp) {
		pr_warn("HugeTLB: hugepages= specified twice without interleaving hugepagesz=, ignoring hugepages=%s\n", s);
		return 1;
	}

	while (*p) {
		count = 0;
		if (sscanf(p, "%lu%n", &tmp, &count) != 1)
			goto invalid;
		/* Parameter is node format */
		if (p[count] == ':') {
			if (!hugetlb_node_alloc_supported()) {
				pr_warn("HugeTLB: architecture can't support node specific alloc, ignoring!\n");
				return 1;
			}
			if (tmp >= MAX_NUMNODES || !node_online(tmp))
				goto invalid;
			/* nospec: tmp came straight from the command line */
			node = array_index_nospec(tmp, MAX_NUMNODES);
			p += count + 1;
			/* Parse hugepages */
			if (sscanf(p, "%lu%n", &tmp, &count) != 1)
				goto invalid;
			if (!hugetlb_max_hstate)
				default_hugepages_in_node[node] = tmp;
			else
				parsed_hstate->max_huge_pages_node[node] = tmp;
			/* Global count is the sum of the per-node counts. */
			*mhp += tmp;
			/* Go to parse next node*/
			if (p[count] == ',')
				p += count + 1;
			else
				break;
		} else {
			/* Plain count form is only valid at the start. */
			if (p != s)
				goto invalid;
			*mhp = tmp;
			break;
		}
	}

	last_mhp = mhp;

	return 0;

invalid:
	pr_warn("HugeTLB: Invalid hugepages parameter %s\n", p);
	hugepages_clear_pages_in_node();
	return -EINVAL;
}
hugetlb_early_param("hugepages", hugepages_setup);
4342 
/*
 * hugepagesz command line processing
 * A specific huge page size can only be specified once with hugepagesz.
 * hugepagesz is followed by hugepages on the command line.  The global
 * variable 'parsed_valid_hugepagesz' is used to determine if prior
 * hugepagesz argument was valid.
 */
static int __init hugepagesz_setup(char *s)
{
	unsigned long size;
	struct hstate *h;

	if (!hugepages_supported()) {
		pr_warn("HugeTLB: hugepages unsupported, ignoring hugepagesz=%s cmdline\n", s);
		return 0;
	}

	/* Assume invalid until proven otherwise. */
	parsed_valid_hugepagesz = false;
	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported hugepagesz=%s\n", s);
		return -EINVAL;
	}

	h = size_to_hstate(size);
	if (h) {
		/*
		 * hstate for this size already exists.  This is normally
		 * an error, but is allowed if the existing hstate is the
		 * default hstate.  More specifically, it is only allowed if
		 * the number of huge pages for the default hstate was not
		 * previously specified.
		 */
		if (!parsed_default_hugepagesz ||  h != &default_hstate ||
		    default_hstate.max_huge_pages) {
			pr_warn("HugeTLB: hugepagesz=%s specified twice, ignoring\n", s);
			return -EINVAL;
		}

		/*
		 * No need to call hugetlb_add_hstate() as hstate already
		 * exists.  But, do set parsed_hstate so that a following
		 * hugepages= parameter will be applied to this hstate.
		 */
		parsed_hstate = h;
		parsed_valid_hugepagesz = true;
		return 0;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	return 0;
}
hugetlb_early_param("hugepagesz", hugepagesz_setup);
4398 
/*
 * default_hugepagesz command line input
 * Only one instance of default_hugepagesz allowed on command line.
 */
static int __init default_hugepagesz_setup(char *s)
{
	unsigned long size;
	int i;

	if (!hugepages_supported()) {
		pr_warn("HugeTLB: hugepages unsupported, ignoring default_hugepagesz=%s cmdline\n",
			s);
		return 0;
	}

	/* Assume invalid until proven otherwise. */
	parsed_valid_hugepagesz = false;
	if (parsed_default_hugepagesz) {
		pr_err("HugeTLB: default_hugepagesz previously specified, ignoring %s\n", s);
		return -EINVAL;
	}

	size = (unsigned long)memparse(s, NULL);

	if (!arch_hugetlb_valid_size(size)) {
		pr_err("HugeTLB: unsupported default_hugepagesz=%s\n", s);
		return -EINVAL;
	}

	hugetlb_add_hstate(ilog2(size) - PAGE_SHIFT);
	parsed_valid_hugepagesz = true;
	parsed_default_hugepagesz = true;
	default_hstate_idx = hstate_index(size_to_hstate(size));

	/*
	 * The number of default huge pages (for this size) could have been
	 * specified as the first hugetlb parameter: hugepages=X.  If so,
	 * then default_hstate_max_huge_pages is set.  If the default huge
	 * page size is gigantic (> MAX_PAGE_ORDER), then the pages must be
	 * allocated here from bootmem allocator.
	 */
	if (default_hstate_max_huge_pages) {
		default_hstate.max_huge_pages = default_hstate_max_huge_pages;
		/*
		 * Since this is an early parameter, we can't check
		 * NUMA node state yet, so loop through MAX_NUMNODES.
		 */
		for (i = 0; i < MAX_NUMNODES; i++) {
			if (default_hugepages_in_node[i] != 0)
				default_hstate.max_huge_pages_node[i] =
					default_hugepages_in_node[i];
		}
		/* Counts have been transferred to the default hstate. */
		default_hstate_max_huge_pages = 0;
	}

	return 0;
}
hugetlb_early_param("default_hugepagesz", default_hugepagesz_setup);
4456 
/*
 * Populate hugetlb_bootmem_nodes with every node that has at least one
 * non-empty memblock PFN range.  No-op if the mask was already filled in.
 */
void __init hugetlb_bootmem_set_nodes(void)
{
	int i, nid;
	unsigned long start_pfn, end_pfn;

	if (!nodes_empty(hugetlb_bootmem_nodes))
		return;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
		if (end_pfn > start_pfn)
			node_set(nid, hugetlb_bootmem_nodes);
	}
}
4470 
/*
 * Early-boot entry point: set up the bootmem node mask and per-node boot
 * page lists, process deferred command-line parameters, and allocate the
 * gigantic pools from bootmem (non-gigantic pools are allocated later in
 * hugetlb_init_hstates()).
 */
void __init hugetlb_bootmem_alloc(void)
{
	struct hstate *h;
	int i;

	hugetlb_bootmem_set_nodes();

	for (i = 0; i < MAX_NUMNODES; i++)
		INIT_LIST_HEAD(&huge_boot_pages[i]);

	/* Run deferred hugepagesz=/hugepages= handlers before allocating. */
	hugetlb_parse_params();

	for_each_hstate(h) {
		h->next_nid_to_alloc = first_online_node;

		if (hstate_is_gigantic(h))
			hugetlb_hstate_alloc_pages(h);
	}
}
4490 
4491 /*
4492  * hugepage_alloc_threads command line parsing.
4493  *
4494  * When set, use this specific number of threads for the boot
4495  * allocation of hugepages.
4496  */
hugepage_alloc_threads_setup(char * s)4497 static int __init hugepage_alloc_threads_setup(char *s)
4498 {
4499 	unsigned long allocation_threads;
4500 
4501 	if (kstrtoul(s, 0, &allocation_threads) != 0)
4502 		return 1;
4503 
4504 	if (allocation_threads == 0)
4505 		return 1;
4506 
4507 	hugepage_allocation_threads = allocation_threads;
4508 
4509 	return 1;
4510 }
4511 __setup("hugepage_alloc_threads=", hugepage_alloc_threads_setup);
4512 
/*
 * Number of free huge pages of @h on the nodes the current task may use:
 * the cpuset mems_allowed, further restricted by any mbind nodemask
 * derived from the hstate's allocation mask.
 */
static unsigned int allowed_mems_nr(struct hstate *h)
{
	int node;
	unsigned int nr = 0;
	nodemask_t *mbind_nodemask;
	unsigned int *array = h->free_huge_pages_node;
	gfp_t gfp_mask = htlb_alloc_mask(h);

	mbind_nodemask = policy_mbind_nodemask(gfp_mask);
	for_each_node_mask(node, cpuset_current_mems_allowed) {
		/* NULL mbind mask means no mbind restriction applies. */
		if (!mbind_nodemask || node_isset(node, *mbind_nodemask))
			nr += array[node];
	}

	return nr;
}
4529 
/*
 * Emit the hugetlb section of /proc/meminfo: detailed counters for the
 * default hstate, plus a combined "Hugetlb:" total over all hstates.
 */
void hugetlb_report_meminfo(struct seq_file *m)
{
	struct hstate *h;
	unsigned long total = 0;	/* bytes across all hstates */

	if (!hugepages_supported())
		return;

	for_each_hstate(h) {
		unsigned long count = h->nr_huge_pages;

		total += huge_page_size(h) * count;

		/* Only the default hstate gets the per-field breakdown. */
		if (h == &default_hstate)
			seq_printf(m,
				   "HugePages_Total:   %5lu\n"
				   "HugePages_Free:    %5lu\n"
				   "HugePages_Rsvd:    %5lu\n"
				   "HugePages_Surp:    %5lu\n"
				   "Hugepagesize:   %8lu kB\n",
				   count,
				   h->free_huge_pages,
				   h->resv_huge_pages,
				   h->surplus_huge_pages,
				   huge_page_size(h) / SZ_1K);
	}

	seq_printf(m, "Hugetlb:        %8lu kB\n", total / SZ_1K);
}
4559 
/*
 * Emit per-node hugetlb counters (default hstate only) into @buf for the
 * node sysfs meminfo file.  Returns the number of characters written.
 */
int hugetlb_report_node_meminfo(char *buf, int len, int nid)
{
	struct hstate *h = &default_hstate;

	if (!hugepages_supported())
		return 0;

	return sysfs_emit_at(buf, len,
			     "Node %d HugePages_Total: %5u\n"
			     "Node %d HugePages_Free:  %5u\n"
			     "Node %d HugePages_Surp:  %5u\n",
			     nid, h->nr_huge_pages_node[nid],
			     nid, h->free_huge_pages_node[nid],
			     nid, h->surplus_huge_pages_node[nid]);
}
4575 
/*
 * Log the per-node hugetlb counters for every hstate on @nid via printk
 * (e.g. for the show_mem() OOM/diagnostic dump).
 */
void hugetlb_show_meminfo_node(int nid)
{
	struct hstate *h;

	if (!hugepages_supported())
		return;

	for_each_hstate(h)
		printk("Node %d hugepages_total=%u hugepages_free=%u hugepages_surp=%u hugepages_size=%lukB\n",
			nid,
			h->nr_huge_pages_node[nid],
			h->free_huge_pages_node[nid],
			h->surplus_huge_pages_node[nid],
			huge_page_size(h) / SZ_1K);
}
4591 
/*
 * Emit the per-mm hugetlb usage line for /proc/<pid>/status, converting
 * the page count in mm->hugetlb_usage to kB.
 */
void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm)
{
	seq_printf(m, "HugetlbPages:\t%8lu kB\n",
		   K(atomic_long_read(&mm->hugetlb_usage)));
}
4597 
/* Return the number of pages of memory we physically have, in PAGE_SIZE units. */
hugetlb_total_pages(void)4599 unsigned long hugetlb_total_pages(void)
4600 {
4601 	struct hstate *h;
4602 	unsigned long nr_total_pages = 0;
4603 
4604 	for_each_hstate(h)
4605 		nr_total_pages += h->nr_huge_pages * pages_per_huge_page(h);
4606 	return nr_total_pages;
4607 }
4608 
/*
 * Adjust the global reservation accounting of @h by @delta huge pages.
 * A positive @delta takes out a reservation (growing the pool with
 * surplus pages if necessary); a negative @delta releases one.
 * Returns 0 on success, -ENOMEM when the reservation cannot be backed.
 */
static int hugetlb_acct_memory(struct hstate *h, long delta)
{
	int ret = -ENOMEM;

	if (!delta)
		return 0;

	spin_lock_irq(&hugetlb_lock);
	/*
	 * When cpuset is configured, it breaks the strict hugetlb page
	 * reservation as the accounting is done on a global variable. Such
	 * reservation is completely rubbish in the presence of cpuset because
	 * the reservation is not checked against page availability for the
	 * current cpuset. Application can still potentially OOM'ed by kernel
	 * with lack of free htlb page in cpuset that the task is in.
	 * Attempt to enforce strict accounting with cpuset is almost
	 * impossible (or too ugly) because cpuset is too fluid that
	 * task or memory node can be dynamically moved between cpusets.
	 *
	 * The change of semantics for shared hugetlb mapping with cpuset is
	 * undesirable. However, in order to preserve some of the semantics,
	 * we fall back to check against current free page availability as
	 * a best attempt and hopefully to minimize the impact of changing
	 * semantics that cpuset has.
	 *
	 * Apart from cpuset, we also have memory policy mechanism that
	 * also determines from which node the kernel will allocate memory
	 * in a NUMA system. So similar to cpuset, we also should consider
	 * the memory policy of the current task. Similar to the description
	 * above.
	 */
	if (delta > 0) {
		if (gather_surplus_pages(h, delta) < 0)
			goto out;

		/*
		 * Best-effort sanity check against the free pages on the
		 * nodes this task may actually allocate from; undo the
		 * surplus growth if the reservation cannot be satisfied.
		 */
		if (delta > allowed_mems_nr(h)) {
			return_unused_surplus_pages(h, delta);
			goto out;
		}
	}

	ret = 0;
	if (delta < 0)
		return_unused_surplus_pages(h, (unsigned long) -delta);

out:
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
4658 
/*
 * vm_operations_struct ->open for hugetlb VMAs: take a reference on a
 * shared private reservation map and give the new VMA its own vma_lock
 * (shared mappings only), since the lock copied by vm_area_dup() still
 * belongs to the source VMA.
 */
static void hugetlb_vm_op_open(struct vm_area_struct *vma)
{
	struct resv_map *resv = vma_resv_map(vma);

	/*
	 * HPAGE_RESV_OWNER indicates a private mapping.
	 * This new VMA should share its siblings reservation map if present.
	 * The VMA will only ever have a valid reservation map pointer where
	 * it is being copied for another still existing VMA.  As that VMA
	 * has a reference to the reservation map it cannot disappear until
	 * after this open call completes.  It is therefore safe to take a
	 * new reference here without additional locking.
	 */
	if (resv && is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
		resv_map_dup_hugetlb_cgroup_uncharge_info(resv);
		kref_get(&resv->refs);
	}

	/*
	 * vma_lock structure for sharable mappings is vma specific.
	 * Clear old pointer (if copied via vm_area_dup) and allocate
	 * new structure.  Before clearing, make sure vma_lock is not
	 * for this vma.
	 */
	if (vma->vm_flags & VM_MAYSHARE) {
		struct hugetlb_vma_lock *vma_lock = vma->vm_private_data;

		if (vma_lock) {
			if (vma_lock->vma != vma) {
				/* Stale pointer copied from the source VMA. */
				vma->vm_private_data = NULL;
				hugetlb_vma_lock_alloc(vma);
			} else {
				/* Unexpected: this VMA already owns a lock. */
				pr_warn("HugeTLB: vma_lock already exists in %s.\n", __func__);
			}
		} else {
			hugetlb_vma_lock_alloc(vma);
		}
	}
}
4698 
/*
 * vm_operations_struct ->close for hugetlb VMAs: free the vma_lock and,
 * for the reservation owner of a private mapping, give back the unused
 * part of the reservation (subpool and global counts, plus any cgroup
 * uncharge) and drop the reservation map reference.
 */
static void hugetlb_vm_op_close(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);
	struct resv_map *resv;
	struct hugepage_subpool *spool = subpool_vma(vma);
	unsigned long reserve, start, end;
	long gbl_reserve;

	hugetlb_vma_lock_free(vma);

	resv = vma_resv_map(vma);
	/* Only the owner of a private reservation has anything to release. */
	if (!resv || !is_vma_resv_set(vma, HPAGE_RESV_OWNER))
		return;

	start = vma_hugecache_offset(h, vma, vma->vm_start);
	end = vma_hugecache_offset(h, vma, vma->vm_end);

	/* Reserved-but-never-faulted pages within the VMA's range. */
	reserve = (end - start) - region_count(resv, start, end);
	hugetlb_cgroup_uncharge_counter(resv, start, end);
	if (reserve) {
		/*
		 * Decrement reserve counts.  The global reserve count may be
		 * adjusted if the subpool has a minimum size.
		 */
		gbl_reserve = hugepage_subpool_put_pages(spool, reserve);
		hugetlb_acct_memory(h, -gbl_reserve);
	}

	kref_put(&resv->refs, resv_map_release);
}
4729 
hugetlb_vm_op_split(struct vm_area_struct * vma,unsigned long addr)4730 static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
4731 {
4732 	if (addr & ~(huge_page_mask(hstate_vma(vma))))
4733 		return -EINVAL;
4734 	return 0;
4735 }
4736 
/*
 * Prepare a hugetlb VMA for being split at @addr by unsharing any shared
 * PMDs in the surrounding PUD_SIZE interval when the split would break
 * PUD_SIZE alignment.  Must be called with the VMA write-locked and the
 * file rmap lock held (asserted below).
 */
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
{
	/*
	 * PMD sharing is only possible for PUD_SIZE-aligned address ranges
	 * in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
	 * split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
	 * This function is called in the middle of a VMA split operation, with
	 * MM, VMA and rmap all write-locked to prevent concurrent page table
	 * walks (except hardware and gup_fast()).
	 */
	vma_assert_write_locked(vma);
	i_mmap_assert_write_locked(vma->vm_file->f_mapping);

	/* Only a non-PUD-aligned split can break sharing eligibility. */
	if (addr & ~PUD_MASK) {
		unsigned long floor = addr & PUD_MASK;
		unsigned long ceil = floor + PUD_SIZE;

		if (floor >= vma->vm_start && ceil <= vma->vm_end) {
			/*
			 * Locking:
			 * Use take_locks=false here.
			 * The file rmap lock is already held.
			 * The hugetlb VMA lock can't be taken when we already
			 * hold the file rmap lock, and we don't need it because
			 * its purpose is to synchronize against concurrent page
			 * table walks, which are not possible thanks to the
			 * locks held by our caller.
			 */
			hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
		}
	}
}
4769 
/* ->pagesize handler: report the hstate's huge page size for this VMA. */
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
{
	struct hstate *h = hstate_vma(vma);

	return huge_page_size(h);
}
4774 
4775 /*
4776  * We cannot handle pagefaults against hugetlb pages at all.  They cause
4777  * handle_mm_fault() to try to instantiate regular-sized pages in the
 * hugepage VMA.  do_page_fault() is supposed to trap this, so BUG if we get
4779  * this far.
4780  */
static vm_fault_t hugetlb_vm_op_fault(struct vm_fault *vmf)
{
	/* Regular faults must never be routed to a hugetlb VMA. */
	BUG();
	return 0;
}
4786 
4787 /*
4788  * When a new function is introduced to vm_operations_struct and added
4789  * to hugetlb_vm_ops, please consider adding the function to shm_vm_ops.
4790  * This is because under System V memory model, mappings created via
4791  * shmget/shmat with "huge page" specified are backed by hugetlbfs files,
4792  * their original vm_ops are overwritten with shm_vm_ops.
4793  */
const struct vm_operations_struct hugetlb_vm_ops = {
	.fault = hugetlb_vm_op_fault,		/* never legitimately reached; BUG()s */
	.open = hugetlb_vm_op_open,
	.close = hugetlb_vm_op_close,
	.may_split = hugetlb_vm_op_split,	/* enforce huge-page aligned splits */
	.pagesize = hugetlb_vm_op_pagesize,
};
4801 
make_huge_pte(struct vm_area_struct * vma,struct folio * folio,bool try_mkwrite)4802 static pte_t make_huge_pte(struct vm_area_struct *vma, struct folio *folio,
4803 		bool try_mkwrite)
4804 {
4805 	pte_t entry = folio_mk_pte(folio, vma->vm_page_prot);
4806 	unsigned int shift = huge_page_shift(hstate_vma(vma));
4807 
4808 	if (try_mkwrite && (vma->vm_flags & VM_WRITE)) {
4809 		entry = pte_mkwrite_novma(pte_mkdirty(entry));
4810 	} else {
4811 		entry = pte_wrprotect(entry);
4812 	}
4813 	entry = pte_mkyoung(entry);
4814 	entry = arch_make_huge_pte(entry, shift, vma->vm_flags);
4815 
4816 	return entry;
4817 }
4818 
/*
 * Mark the huge PTE at @ptep writable and dirty, flushing the MMU cache
 * if the access-flags update actually changed the entry.
 */
static void set_huge_ptep_writable(struct vm_area_struct *vma,
				   unsigned long address, pte_t *ptep)
{
	pte_t entry;

	entry = huge_pte_mkwrite(huge_pte_mkdirty(huge_ptep_get(vma->vm_mm, address, ptep)));
	if (huge_ptep_set_access_flags(vma, address, ptep, entry, 1))
		update_mmu_cache(vma, address, ptep);
}
4828 
/* Make the huge PTE writable, but only if the VMA allows writes at all. */
static void set_huge_ptep_maybe_writable(struct vm_area_struct *vma,
					 unsigned long address, pte_t *ptep)
{
	if (!(vma->vm_flags & VM_WRITE))
		return;

	set_huge_ptep_writable(vma, address, ptep);
}
4835 
/*
 * Install @new_folio as a new anonymous huge page at @addr: mark it
 * uptodate, add the anon rmap, write the PTE (propagating the uffd-wp
 * bit from @old when the VMA tracks it), bump the mm's hugetlb counter
 * and flag the folio as migratable.  Caller holds the PTE lock.
 */
static void
hugetlb_install_folio(struct vm_area_struct *vma, pte_t *ptep, unsigned long addr,
		      struct folio *new_folio, pte_t old, unsigned long sz)
{
	pte_t newpte = make_huge_pte(vma, new_folio, true);

	__folio_mark_uptodate(new_folio);
	hugetlb_add_new_anon_rmap(new_folio, vma, addr);
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(old))
		newpte = huge_pte_mkuffd_wp(newpte);
	set_huge_pte_at(vma->vm_mm, addr, ptep, newpte, sz);
	hugetlb_count_add(pages_per_huge_page(hstate_vma(vma)), vma->vm_mm);
	folio_set_hugetlb_migratable(new_folio);
}
4850 
/*
 * Copy the huge page table entries of @src_vma into @dst_vma at fork
 * time.  Present entries are shared (write-protected in both mms for
 * COW mappings); migration/hwpoison entries and PTE markers are copied
 * specially; and when duplicating the anon rmap fails (pinned pages) the
 * page is copied eagerly into a freshly allocated folio.  Returns 0 or
 * a negative errno (-ENOMEM or a copy failure).
 */
int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
			    struct vm_area_struct *dst_vma,
			    struct vm_area_struct *src_vma)
{
	pte_t *src_pte, *dst_pte, entry;
	struct folio *pte_folio;
	unsigned long addr;
	bool cow = is_cow_mapping(src_vma->vm_flags);
	struct hstate *h = hstate_vma(src_vma);
	unsigned long sz = huge_page_size(h);
	unsigned long npages = pages_per_huge_page(h);
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	softleaf_t softleaf;
	int ret = 0;

	if (cow) {
		/*
		 * For COW we will write-protect the parent's PTEs, so
		 * notify and take the write_protect_seq to serialize
		 * against fast-GUP.
		 */
		mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
					src_vma->vm_start,
					src_vma->vm_end);
		mmu_notifier_invalidate_range_start(&range);
		vma_assert_write_locked(src_vma);
		raw_write_seqcount_begin(&src->write_protect_seq);
	} else {
		/*
		 * For shared mappings the vma lock must be held before
		 * calling hugetlb_walk() in the src vma. Otherwise, the
		 * returned ptep could go away if part of a shared pmd and
		 * another thread calls huge_pmd_unshare.
		 */
		hugetlb_vma_lock_read(src_vma);
	}

	last_addr_mask = hugetlb_mask_last_page(h);
	for (addr = src_vma->vm_start; addr < src_vma->vm_end; addr += sz) {
		spinlock_t *src_ptl, *dst_ptl;
		src_pte = hugetlb_walk(src_vma, addr, sz);
		if (!src_pte) {
			/* No table here; skip to the end of this region. */
			addr |= last_addr_mask;
			continue;
		}
		dst_pte = huge_pte_alloc(dst, dst_vma, addr, sz);
		if (!dst_pte) {
			ret = -ENOMEM;
			break;
		}

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
		/* If the pagetables are shared, there is nothing to do */
		if (ptdesc_pmd_is_shared(virt_to_ptdesc(dst_pte))) {
			addr |= last_addr_mask;
			continue;
		}
#endif

		dst_ptl = huge_pte_lock(h, dst, dst_pte);
		src_ptl = huge_pte_lockptr(h, src, src_pte);
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
		entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
again:
		if (huge_pte_none(entry)) {
			/* Skip if src entry none. */
			goto next;
		}

		softleaf = softleaf_from_pte(entry);
		if (unlikely(softleaf_is_hwpoison(softleaf))) {
			/* Propagate the hwpoison entry, minus uffd-wp if unused. */
			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);
			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
		} else if (unlikely(softleaf_is_migration(softleaf))) {
			bool uffd_wp = pte_swp_uffd_wp(entry);

			if (!softleaf_is_migration_read(softleaf) && cow) {
				/*
				 * COW mappings require pages in both
				 * parent and child to be set to read.
				 */
				softleaf = make_readable_migration_entry(
							swp_offset(softleaf));
				entry = swp_entry_to_pte(softleaf);
				if (userfaultfd_wp(src_vma) && uffd_wp)
					entry = pte_swp_mkuffd_wp(entry);
				set_huge_pte_at(src, addr, src_pte, entry, sz);
			}
			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);
			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
		} else if (unlikely(pte_is_marker(entry))) {
			/* Copy the marker only if the child should keep it. */
			const pte_marker marker = copy_pte_marker(softleaf, dst_vma);

			if (marker)
				set_huge_pte_at(dst, addr, dst_pte,
						make_pte_marker(marker), sz);
		} else {
			entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
			pte_folio = page_folio(pte_page(entry));
			folio_get(pte_folio);

			/*
			 * Failing to duplicate the anon rmap is a rare case
			 * where we see pinned hugetlb pages while they're
			 * prone to COW. We need to do the COW earlier during
			 * fork.
			 *
			 * When pre-allocating the page or copying data, we
			 * need to be without the pgtable locks since we could
			 * sleep during the process.
			 */
			if (!folio_test_anon(pte_folio)) {
				hugetlb_add_file_rmap(pte_folio);
			} else if (hugetlb_try_dup_anon_rmap(pte_folio, src_vma)) {
				pte_t src_pte_old = entry;
				struct folio *new_folio;

				/* Drop both PTE locks: allocation/copy may sleep. */
				spin_unlock(src_ptl);
				spin_unlock(dst_ptl);
				/* Do not use reserve as it's private owned */
				new_folio = alloc_hugetlb_folio(dst_vma, addr, false);
				if (IS_ERR(new_folio)) {
					folio_put(pte_folio);
					ret = PTR_ERR(new_folio);
					break;
				}
				ret = copy_user_large_folio(new_folio, pte_folio,
							    addr, dst_vma);
				folio_put(pte_folio);
				if (ret) {
					folio_put(new_folio);
					break;
				}

				/* Install the new hugetlb folio if src pte stable */
				dst_ptl = huge_pte_lock(h, dst, dst_pte);
				src_ptl = huge_pte_lockptr(h, src, src_pte);
				spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
				entry = huge_ptep_get(src_vma->vm_mm, addr, src_pte);
				if (!pte_same(src_pte_old, entry)) {
					/* Source changed while unlocked: retry this PTE. */
					restore_reserve_on_error(h, dst_vma, addr,
								new_folio);
					folio_put(new_folio);
					/* huge_ptep of dst_pte won't change as in child */
					goto again;
				}
				hugetlb_install_folio(dst_vma, dst_pte, addr,
						      new_folio, src_pte_old, sz);
				goto next;
			}

			if (cow) {
				/*
				 * No need to notify as we are downgrading page
				 * table protection not changing it to point
				 * to a new page.
				 *
				 * See Documentation/mm/mmu_notifier.rst
				 */
				huge_ptep_set_wrprotect(src, addr, src_pte);
				entry = huge_pte_wrprotect(entry);
			}

			if (!userfaultfd_wp(dst_vma))
				entry = huge_pte_clear_uffd_wp(entry);

			set_huge_pte_at(dst, addr, dst_pte, entry, sz);
			hugetlb_count_add(npages, dst);
		}

next:
		spin_unlock(src_ptl);
		spin_unlock(dst_ptl);
	}

	if (cow) {
		raw_write_seqcount_end(&src->write_protect_seq);
		mmu_notifier_invalidate_range_end(&range);
	} else {
		hugetlb_vma_unlock_read(src_vma);
	}

	return ret;
}
5033 
/*
 * Move one huge PTE from @src_pte (@old_addr) to @dst_pte (@new_addr)
 * for mremap(), clearing uffd-wp state when the VMA has no uffd event
 * remapping.  Takes both PTE locks (nested when they differ).
 */
static void move_huge_pte(struct vm_area_struct *vma, unsigned long old_addr,
			  unsigned long new_addr, pte_t *src_pte, pte_t *dst_pte,
			  unsigned long sz)
{
	bool need_clear_uffd_wp = vma_has_uffd_without_event_remap(vma);
	struct hstate *h = hstate_vma(vma);
	struct mm_struct *mm = vma->vm_mm;
	spinlock_t *src_ptl, *dst_ptl;
	pte_t pte;

	dst_ptl = huge_pte_lock(h, mm, dst_pte);
	src_ptl = huge_pte_lockptr(h, mm, src_pte);

	/*
	 * We don't have to worry about the ordering of src and dst ptlocks
	 * because exclusive mmap_lock (or the i_mmap_lock) prevents deadlock.
	 */
	if (src_ptl != dst_ptl)
		spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);

	pte = huge_ptep_get_and_clear(mm, old_addr, src_pte, sz);

	if (need_clear_uffd_wp && pte_is_uffd_wp_marker(pte)) {
		/* A bare uffd-wp marker is simply dropped at the destination. */
		huge_pte_clear(mm, new_addr, dst_pte, sz);
	} else {
		if (need_clear_uffd_wp) {
			/* Strip the wp bit from present and swap forms alike. */
			if (pte_present(pte))
				pte = huge_pte_clear_uffd_wp(pte);
			else
				pte = pte_swp_clear_uffd_wp(pte);
		}
		set_huge_pte_at(mm, new_addr, dst_pte, pte, sz);
	}

	if (src_ptl != dst_ptl)
		spin_unlock(src_ptl);
	spin_unlock(dst_ptl);
}
5072 
/*
 * Relocate the huge page table entries of @vma from [old_addr, old_addr
 * + len) to @new_addr in @new_vma for mremap().  Shared PMDs are
 * unshared rather than moved.  Returns the number of bytes successfully
 * moved (len on full success, less if huge_pte_alloc() fails part way).
 */
int move_hugetlb_page_tables(struct vm_area_struct *vma,
			     struct vm_area_struct *new_vma,
			     unsigned long old_addr, unsigned long new_addr,
			     unsigned long len)
{
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping = vma->vm_file->f_mapping;
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	unsigned long old_end = old_addr + len;
	unsigned long last_addr_mask;
	pte_t *src_pte, *dst_pte;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
				old_end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	/*
	 * In case of shared PMDs, we should cover the maximum possible
	 * range.
	 */
	flush_cache_range(vma, range.start, range.end);
	tlb_gather_mmu_vma(&tlb, vma);

	mmu_notifier_invalidate_range_start(&range);
	last_addr_mask = hugetlb_mask_last_page(h);
	/* Prevent race with file truncation */
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(mapping);
	for (; old_addr < old_end; old_addr += sz, new_addr += sz) {
		src_pte = hugetlb_walk(vma, old_addr, sz);
		if (!src_pte) {
			/* No table here; advance both cursors past the region. */
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}
		if (huge_pte_none(huge_ptep_get(mm, old_addr, src_pte)))
			continue;

		if (huge_pmd_unshare(&tlb, vma, old_addr, src_pte)) {
			/* The whole shared PMD range is handled at once. */
			old_addr |= last_addr_mask;
			new_addr |= last_addr_mask;
			continue;
		}

		dst_pte = huge_pte_alloc(mm, new_vma, new_addr, sz);
		if (!dst_pte)
			break;

		move_huge_pte(vma, old_addr, new_addr, src_pte, dst_pte, sz);
		tlb_remove_huge_tlb_entry(h, &tlb, src_pte, old_addr);
	}

	tlb_flush_mmu_tlbonly(&tlb);
	huge_pmd_unshare_flush(&tlb, vma);

	mmu_notifier_invalidate_range_end(&range);
	i_mmap_unlock_write(mapping);
	hugetlb_vma_unlock_write(vma);
	tlb_finish_mmu(&tlb);

	return len + old_addr - old_end;
}
5137 
/*
 * Unmap the huge PTEs of @vma in [start, end).  When @folio is supplied,
 * only PTEs mapping that specific folio are unmapped (and the loop stops
 * after the first hit, marking the VMA HPAGE_RESV_UNMAPPED).  Handles
 * shared-PMD unsharing, uffd-wp marker preservation, rmap/dirty
 * bookkeeping and restoring reservations for private anon pages.
 * Caller provides the mmu_gather and has issued the mmu notifier start.
 */
void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
			    unsigned long start, unsigned long end,
			    struct folio *folio, zap_flags_t zap_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	const bool folio_provided = !!folio;
	unsigned long address;
	pte_t *ptep;
	pte_t pte;
	spinlock_t *ptl;
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	bool adjust_reservation;
	unsigned long last_addr_mask;

	WARN_ON(!is_vm_hugetlb_page(vma));
	BUG_ON(start & ~huge_page_mask(h));
	BUG_ON(end & ~huge_page_mask(h));

	/*
	 * This is a hugetlb vma, all the pte entries should point
	 * to huge page.
	 */
	tlb_change_page_size(tlb, sz);
	tlb_start_vma(tlb, vma);

	last_addr_mask = hugetlb_mask_last_page(h);
	address = start;
	for (; address < end; address += sz) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep) {
			address |= last_addr_mask;
			continue;
		}

		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(tlb, vma, address, ptep)) {
			/* Unsharing covers the whole PMD range at once. */
			spin_unlock(ptl);
			address |= last_addr_mask;
			continue;
		}

		pte = huge_ptep_get(mm, address, ptep);
		if (huge_pte_none(pte)) {
			spin_unlock(ptl);
			continue;
		}

		/*
		 * Migrating hugepage or HWPoisoned hugepage is already
		 * unmapped and its refcount is dropped, so just clear pte here.
		 */
		if (unlikely(!pte_present(pte))) {
			/*
			 * If the pte was wr-protected by uffd-wp in any of the
			 * swap forms, meanwhile the caller does not want to
			 * drop the uffd-wp bit in this zap, then replace the
			 * pte with a marker.
			 */
			if (pte_swp_uffd_wp_any(pte) &&
			    !(zap_flags & ZAP_FLAG_DROP_MARKER))
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP),
						sz);
			else
				huge_pte_clear(mm, address, ptep, sz);
			spin_unlock(ptl);
			continue;
		}

		/*
		 * If a folio is supplied, it is because a specific
		 * folio is being unmapped, not a range. Ensure the folio we
		 * are about to unmap is the actual folio of interest.
		 */
		if (folio_provided) {
			if (folio != page_folio(pte_page(pte))) {
				spin_unlock(ptl);
				continue;
			}
			/*
			 * Mark the VMA as having unmapped its page so that
			 * future faults in this VMA will fail rather than
			 * looking like data was lost
			 */
			set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
		} else {
			folio = page_folio(pte_page(pte));
		}

		pte = huge_ptep_get_and_clear(mm, address, ptep, sz);
		tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
		if (huge_pte_dirty(pte))
			folio_mark_dirty(folio);
		/* Leave a uffd-wp pte marker if needed */
		if (huge_pte_uffd_wp(pte) &&
		    !(zap_flags & ZAP_FLAG_DROP_MARKER))
			set_huge_pte_at(mm, address, ptep,
					make_pte_marker(PTE_MARKER_UFFD_WP),
					sz);
		hugetlb_count_sub(pages_per_huge_page(h), mm);
		hugetlb_remove_rmap(folio);
		spin_unlock(ptl);

		/*
		 * Restore the reservation for anonymous page, otherwise the
		 * backing page could be stolen by someone.
		 * If there we are freeing a surplus, do not set the restore
		 * reservation bit.
		 */
		adjust_reservation = false;

		spin_lock_irq(&hugetlb_lock);
		if (!h->surplus_huge_pages && __vma_private_lock(vma) &&
		    folio_test_anon(folio)) {
			folio_set_hugetlb_restore_reserve(folio);
			/* Reservation to be adjusted after the spin lock */
			adjust_reservation = true;
		}
		spin_unlock_irq(&hugetlb_lock);

		/*
		 * Adjust the reservation for the region that will have the
		 * reserve restored. Keep in mind that vma_needs_reservation() changes
		 * resv->adds_in_progress if it succeeds. If this is not done,
		 * do_exit() will not see it, and will keep the reservation
		 * forever.
		 */
		if (adjust_reservation) {
			int rc = vma_needs_reservation(h, vma, address);

			if (rc < 0)
				/* Presumably allocate_file_region_entries failed
				 * to allocate a file_region struct. Clear
				 * hugetlb_restore_reserve so that global reserve
				 * count will not be incremented by free_huge_folio.
				 * Act as if we consumed the reservation.
				 */
				folio_clear_hugetlb_restore_reserve(folio);
			else if (rc)
				vma_add_reservation(h, vma, address);
		}

		tlb_remove_page_size(tlb, folio_page(folio, 0),
				     folio_size(folio));
		/*
		 * If we were instructed to unmap a specific folio, we're done.
		 */
		if (folio_provided)
			break;
	}
	tlb_end_vma(tlb, vma);

	huge_pmd_unshare_flush(tlb, vma);
}
5293 
/*
 * Prepare a hugetlb VMA for zapping: widen [start, end) to cover a
 * possibly shared PMD range and take the vma_lock plus the file's
 * i_mmap lock.  Paired with __hugetlb_zap_end().
 */
void __hugetlb_zap_begin(struct vm_area_struct *vma,
			 unsigned long *start, unsigned long *end)
{
	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
		return;

	adjust_range_if_pmd_sharing_possible(vma, start, end);
	hugetlb_vma_lock_write(vma);
	if (vma->vm_file)
		i_mmap_lock_write(vma->vm_file->f_mapping);
}
5305 
/*
 * Release the locks taken by __hugetlb_zap_begin().  On the final unmap
 * (ZAP_FLAG_UNMAP) the vma_lock is freed as well, making the VMA
 * ineligible for future PMD sharing.
 */
void __hugetlb_zap_end(struct vm_area_struct *vma,
		       struct zap_details *details)
{
	zap_flags_t zap_flags = details ? details->zap_flags : 0;

	if (!vma->vm_file)	/* hugetlbfs_file_mmap error */
		return;

	if (zap_flags & ZAP_FLAG_UNMAP) {	/* final unmap */
		/*
		 * Unlock and free the vma lock before releasing i_mmap_rwsem.
		 * When the vma_lock is freed, this makes the vma ineligible
		 * for pmd sharing.  And, i_mmap_rwsem is required to set up
		 * pmd sharing.  This is important as page tables for this
		 * unmapped range will be asynchronously deleted.  If the page
		 * tables are shared, there will be issues when accessed by
		 * someone else.
		 */
		__hugetlb_vma_unlock_write_free(vma);
	} else {
		hugetlb_vma_unlock_write(vma);
	}

	if (vma->vm_file)
		i_mmap_unlock_write(vma->vm_file->f_mapping);
}
5332 
/*
 * Standalone wrapper around __unmap_hugepage_range(): sets up the mmu
 * notifier range (widened for possible shared PMDs) and an mmu_gather,
 * then unmaps [start, end) — or just @folio within it when supplied.
 */
void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
			  unsigned long end, struct folio *folio,
			  zap_flags_t zap_flags)
{
	struct mmu_notifier_range range;
	struct mmu_gather tlb;

	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
				start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
	mmu_notifier_invalidate_range_start(&range);
	tlb_gather_mmu(&tlb, vma->vm_mm);

	__unmap_hugepage_range(&tlb, vma, start, end,
			       folio, zap_flags);

	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}
5352 
5353 /*
5354  * This is called when the original mapper is failing to COW a MAP_PRIVATE
5355  * mapping it owns the reserve page for. The intention is to unmap the page
5356  * from other VMAs and let the children be SIGKILLed if they are faulting the
5357  * same region.
5358  */
static void unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
			      struct folio *folio, unsigned long address)
{
	struct hstate *h = hstate_vma(vma);
	struct vm_area_struct *iter_vma;
	struct address_space *mapping;
	pgoff_t pgoff;

	/*
	 * vm_pgoff is in PAGE_SIZE units, hence the different calculation
	 * from page cache lookup which is in HPAGE_SIZE units.
	 */
	address = address & huge_page_mask(h);
	pgoff = ((address - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	mapping = vma->vm_file->f_mapping;

	/*
	 * Take the mapping lock for the duration of the table walk. As
	 * this mapping should be shared between all the VMAs,
	 * __unmap_hugepage_range() is called as the lock is already held
	 */
	i_mmap_lock_write(mapping);
	vma_interval_tree_foreach(iter_vma, &mapping->i_mmap, pgoff, pgoff) {
		/* Do not unmap the current VMA */
		if (iter_vma == vma)
			continue;

		/*
		 * Shared VMAs have their own reserves and do not affect
		 * MAP_PRIVATE accounting but it is possible that a shared
		 * VMA is using the same page so check and skip such VMAs.
		 */
		if (iter_vma->vm_flags & VM_MAYSHARE)
			continue;

		/*
		 * Unmap the page from other VMAs without their own reserves.
		 * They get marked to be SIGKILLed if they fault in these
		 * areas. This is because a future no-page fault on this VMA
		 * could insert a zeroed page instead of the data existing
		 * from the time of fork. This would look like data corruption
		 */
		if (!is_vma_resv_set(iter_vma, HPAGE_RESV_OWNER))
			unmap_hugepage_range(iter_vma, address,
					     address + huge_page_size(h),
					     folio, 0);
	}
	i_mmap_unlock_write(mapping);
}
5409 
5410 /*
5411  * hugetlb_wp() should be called with page lock of the original hugepage held.
5412  * Called with hugetlb_fault_mutex_table held and pte_page locked so we
5413  * cannot race with other handlers or page migration.
5414  * Keep the pte_same checks anyway to make transition from the mutex easier.
5415  */
hugetlb_wp(struct vm_fault * vmf)5416 static vm_fault_t hugetlb_wp(struct vm_fault *vmf)
5417 {
5418 	struct vm_area_struct *vma = vmf->vma;
5419 	struct mm_struct *mm = vma->vm_mm;
5420 	const bool unshare = vmf->flags & FAULT_FLAG_UNSHARE;
5421 	pte_t pte = huge_ptep_get(mm, vmf->address, vmf->pte);
5422 	struct hstate *h = hstate_vma(vma);
5423 	struct folio *old_folio;
5424 	struct folio *new_folio;
5425 	bool cow_from_owner = 0;
5426 	vm_fault_t ret = 0;
5427 	struct mmu_notifier_range range;
5428 
5429 	/*
5430 	 * Never handle CoW for uffd-wp protected pages.  It should be only
5431 	 * handled when the uffd-wp protection is removed.
5432 	 *
5433 	 * Note that only the CoW optimization path (in hugetlb_no_page())
5434 	 * can trigger this, because hugetlb_fault() will always resolve
5435 	 * uffd-wp bit first.
5436 	 */
5437 	if (!unshare && huge_pte_uffd_wp(pte))
5438 		return 0;
5439 
5440 	/* Let's take out MAP_SHARED mappings first. */
5441 	if (vma->vm_flags & VM_MAYSHARE) {
5442 		set_huge_ptep_writable(vma, vmf->address, vmf->pte);
5443 		return 0;
5444 	}
5445 
5446 	old_folio = page_folio(pte_page(pte));
5447 
5448 	delayacct_wpcopy_start();
5449 
5450 retry_avoidcopy:
5451 	/*
5452 	 * If no-one else is actually using this page, we're the exclusive
5453 	 * owner and can reuse this page.
5454 	 *
5455 	 * Note that we don't rely on the (safer) folio refcount here, because
5456 	 * copying the hugetlb folio when there are unexpected (temporary)
5457 	 * folio references could harm simple fork()+exit() users when
5458 	 * we run out of free hugetlb folios: we would have to kill processes
5459 	 * in scenarios that used to work. As a side effect, there can still
5460 	 * be leaks between processes, for example, with FOLL_GET users.
5461 	 */
5462 	if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
5463 		if (!PageAnonExclusive(&old_folio->page)) {
5464 			folio_move_anon_rmap(old_folio, vma);
5465 			SetPageAnonExclusive(&old_folio->page);
5466 		}
5467 		if (likely(!unshare))
5468 			set_huge_ptep_maybe_writable(vma, vmf->address,
5469 						     vmf->pte);
5470 
5471 		delayacct_wpcopy_end();
5472 		return 0;
5473 	}
5474 	VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
5475 		       PageAnonExclusive(&old_folio->page), &old_folio->page);
5476 
5477 	/*
5478 	 * If the process that created a MAP_PRIVATE mapping is about to perform
5479 	 * a COW due to a shared page count, attempt to satisfy the allocation
5480 	 * without using the existing reserves.
5481 	 * In order to determine where this is a COW on a MAP_PRIVATE mapping it
5482 	 * is enough to check whether the old_folio is anonymous. This means that
5483 	 * the reserve for this address was consumed. If reserves were used, a
5484 	 * partial faulted mapping at the fime of fork() could consume its reserves
5485 	 * on COW instead of the full address range.
5486 	 */
5487 	if (is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
5488 	    folio_test_anon(old_folio))
5489 		cow_from_owner = true;
5490 
5491 	folio_get(old_folio);
5492 
5493 	/*
5494 	 * Drop page table lock as buddy allocator may be called. It will
5495 	 * be acquired again before returning to the caller, as expected.
5496 	 */
5497 	spin_unlock(vmf->ptl);
5498 	new_folio = alloc_hugetlb_folio(vma, vmf->address, cow_from_owner);
5499 
5500 	if (IS_ERR(new_folio)) {
5501 		/*
5502 		 * If a process owning a MAP_PRIVATE mapping fails to COW,
5503 		 * it is due to references held by a child and an insufficient
5504 		 * huge page pool. To guarantee the original mappers
5505 		 * reliability, unmap the page from child processes. The child
5506 		 * may get SIGKILLed if it later faults.
5507 		 */
5508 		if (cow_from_owner) {
5509 			struct address_space *mapping = vma->vm_file->f_mapping;
5510 			pgoff_t idx;
5511 			u32 hash;
5512 
5513 			folio_put(old_folio);
5514 			/*
5515 			 * Drop hugetlb_fault_mutex and vma_lock before
5516 			 * unmapping.  unmapping needs to hold vma_lock
5517 			 * in write mode.  Dropping vma_lock in read mode
5518 			 * here is OK as COW mappings do not interact with
5519 			 * PMD sharing.
5520 			 *
5521 			 * Reacquire both after unmap operation.
5522 			 */
5523 			idx = vma_hugecache_offset(h, vma, vmf->address);
5524 			hash = hugetlb_fault_mutex_hash(mapping, idx);
5525 			hugetlb_vma_unlock_read(vma);
5526 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5527 
5528 			unmap_ref_private(mm, vma, old_folio, vmf->address);
5529 
5530 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
5531 			hugetlb_vma_lock_read(vma);
5532 			spin_lock(vmf->ptl);
5533 			vmf->pte = hugetlb_walk(vma, vmf->address,
5534 					huge_page_size(h));
5535 			if (likely(vmf->pte &&
5536 				   pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte)))
5537 				goto retry_avoidcopy;
5538 			/*
5539 			 * race occurs while re-acquiring page table
5540 			 * lock, and our job is done.
5541 			 */
5542 			delayacct_wpcopy_end();
5543 			return 0;
5544 		}
5545 
5546 		ret = vmf_error(PTR_ERR(new_folio));
5547 		goto out_release_old;
5548 	}
5549 
5550 	/*
5551 	 * When the original hugepage is shared one, it does not have
5552 	 * anon_vma prepared.
5553 	 */
5554 	ret = __vmf_anon_prepare(vmf);
5555 	if (unlikely(ret))
5556 		goto out_release_all;
5557 
5558 	if (copy_user_large_folio(new_folio, old_folio, vmf->real_address, vma)) {
5559 		ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hstate_index(h));
5560 		goto out_release_all;
5561 	}
5562 	__folio_mark_uptodate(new_folio);
5563 
5564 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, vmf->address,
5565 				vmf->address + huge_page_size(h));
5566 	mmu_notifier_invalidate_range_start(&range);
5567 
5568 	/*
5569 	 * Retake the page table lock to check for racing updates
5570 	 * before the page tables are altered
5571 	 */
5572 	spin_lock(vmf->ptl);
5573 	vmf->pte = hugetlb_walk(vma, vmf->address, huge_page_size(h));
5574 	if (likely(vmf->pte && pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), pte))) {
5575 		pte_t newpte = make_huge_pte(vma, new_folio, !unshare);
5576 
5577 		/* Break COW or unshare */
5578 		huge_ptep_clear_flush(vma, vmf->address, vmf->pte);
5579 		hugetlb_remove_rmap(old_folio);
5580 		hugetlb_add_new_anon_rmap(new_folio, vma, vmf->address);
5581 		if (huge_pte_uffd_wp(pte))
5582 			newpte = huge_pte_mkuffd_wp(newpte);
5583 		set_huge_pte_at(mm, vmf->address, vmf->pte, newpte,
5584 				huge_page_size(h));
5585 		folio_set_hugetlb_migratable(new_folio);
5586 		/* Make the old page be freed below */
5587 		new_folio = old_folio;
5588 	}
5589 	spin_unlock(vmf->ptl);
5590 	mmu_notifier_invalidate_range_end(&range);
5591 out_release_all:
5592 	/*
5593 	 * No restore in case of successful pagetable update (Break COW or
5594 	 * unshare)
5595 	 */
5596 	if (new_folio != old_folio)
5597 		restore_reserve_on_error(h, vma, vmf->address, new_folio);
5598 	folio_put(new_folio);
5599 out_release_old:
5600 	folio_put(old_folio);
5601 
5602 	spin_lock(vmf->ptl); /* Caller expects lock to be held */
5603 
5604 	delayacct_wpcopy_end();
5605 	return ret;
5606 }
5607 
5608 /*
5609  * Return whether there is a pagecache page to back given address within VMA.
5610  */
hugetlbfs_pagecache_present(struct hstate * h,struct vm_area_struct * vma,unsigned long address)5611 bool hugetlbfs_pagecache_present(struct hstate *h,
5612 				 struct vm_area_struct *vma, unsigned long address)
5613 {
5614 	struct address_space *mapping = vma->vm_file->f_mapping;
5615 	pgoff_t idx = linear_page_index(vma, address);
5616 	struct folio *folio;
5617 
5618 	folio = filemap_get_folio(mapping, idx);
5619 	if (IS_ERR(folio))
5620 		return false;
5621 	folio_put(folio);
5622 	return true;
5623 }
5624 
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
			   pgoff_t idx)
{
	struct inode *inode = mapping->host;
	struct hstate *h = hstate_inode(inode);
	/* Convert from huge-page index to the PAGE_SIZE-based cache index. */
	pgoff_t index = idx << huge_page_order(h);
	int ret;

	__folio_set_locked(folio);
	ret = __filemap_add_folio(mapping, folio, index, GFP_KERNEL, NULL);
	if (unlikely(ret)) {
		__folio_clear_locked(folio);
		return ret;
	}

	folio_clear_hugetlb_restore_reserve(folio);

	/*
	 * mark folio dirty so that it will not be removed from cache/file
	 * by non-hugetlbfs specific code paths.
	 */
	folio_mark_dirty(folio);

	/* Account the huge page against the inode's block count. */
	spin_lock(&inode->i_lock);
	inode->i_blocks += blocks_per_huge_page(h);
	spin_unlock(&inode->i_lock);

	return 0;
}
5653 
hugetlb_handle_userfault(struct vm_fault * vmf,struct address_space * mapping,unsigned long reason)5654 static inline vm_fault_t hugetlb_handle_userfault(struct vm_fault *vmf,
5655 						  struct address_space *mapping,
5656 						  unsigned long reason)
5657 {
5658 	u32 hash;
5659 
5660 	/*
5661 	 * vma_lock and hugetlb_fault_mutex must be dropped before handling
5662 	 * userfault. Also mmap_lock could be dropped due to handling
5663 	 * userfault, any vma operation should be careful from here.
5664 	 */
5665 	hugetlb_vma_unlock_read(vmf->vma);
5666 	hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
5667 	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
5668 	return handle_userfault(vmf, reason);
5669 }
5670 
5671 /*
5672  * Recheck pte with pgtable lock.  Returns true if pte didn't change, or
5673  * false if pte changed or is changing.
5674  */
hugetlb_pte_stable(struct hstate * h,struct mm_struct * mm,unsigned long addr,pte_t * ptep,pte_t old_pte)5675 static bool hugetlb_pte_stable(struct hstate *h, struct mm_struct *mm, unsigned long addr,
5676 			       pte_t *ptep, pte_t old_pte)
5677 {
5678 	spinlock_t *ptl;
5679 	bool same;
5680 
5681 	ptl = huge_pte_lock(h, mm, ptep);
5682 	same = pte_same(huge_ptep_get(mm, addr, ptep), old_pte);
5683 	spin_unlock(ptl);
5684 
5685 	return same;
5686 }
5687 
/*
 * Handle a hugetlb fault for an address with no page installed yet (pte
 * none, or a uffd marker — see hugetlb_fault()).  Entered with the hugetlb
 * fault mutex and the vma lock held in read mode; both are dropped here
 * before returning (either at "out" or inside hugetlb_handle_userfault()).
 */
static vm_fault_t hugetlb_no_page(struct address_space *mapping,
			struct vm_fault *vmf)
{
	u32 hash = hugetlb_fault_mutex_hash(mapping, vmf->pgoff);
	bool new_folio, new_anon_folio = false;
	struct vm_area_struct *vma = vmf->vma;
	struct mm_struct *mm = vma->vm_mm;
	struct hstate *h = hstate_vma(vma);
	vm_fault_t ret = VM_FAULT_SIGBUS;
	bool folio_locked = true;
	struct folio *folio;
	unsigned long size;
	pte_t new_pte;

	/*
	 * Currently, we are forced to kill the process in the event the
	 * original mapper has unmapped pages from the child due to a failed
	 * COW/unsharing. Warn that such a situation has occurred as it may not
	 * be obvious.
	 */
	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
			   current->pid);
		goto out;
	}

	/*
	 * Use page lock to guard against racing truncation
	 * before we get page_table_lock.
	 */
	new_folio = false;
	folio = filemap_lock_hugetlb_folio(h, mapping, vmf->pgoff);
	if (IS_ERR(folio)) {
		/* No folio in the page cache: allocate (or hand to uffd). */
		size = i_size_read(mapping->host) >> huge_page_shift(h);
		if (vmf->pgoff >= size)
			goto out;
		/* Check for page in userfault range */
		if (userfaultfd_missing(vma)) {
			/*
			 * Since hugetlb_no_page() was examining pte
			 * without pgtable lock, we need to re-test under
			 * lock because the pte may not be stable and could
			 * have changed from under us.  Try to detect
			 * either changed or during-changing ptes and retry
			 * properly when needed.
			 *
			 * Note that userfaultfd is actually fine with
			 * false positives (e.g. caused by pte changed),
			 * but not wrong logical events (e.g. caused by
			 * reading a pte during changing).  The latter can
			 * confuse the userspace, so the strictness is very
			 * much preferred.  E.g., MISSING event should
			 * never happen on the page after UFFDIO_COPY has
			 * correctly installed the page and returned.
			 */
			if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
				ret = 0;
				goto out;
			}

			return hugetlb_handle_userfault(vmf, mapping,
							VM_UFFD_MISSING);
		}

		/* Private mappings need anon_vma before installing an anon folio. */
		if (!(vma->vm_flags & VM_MAYSHARE)) {
			ret = __vmf_anon_prepare(vmf);
			if (unlikely(ret))
				goto out;
		}

		folio = alloc_hugetlb_folio(vma, vmf->address, false);
		if (IS_ERR(folio)) {
			/*
			 * Returning error will result in faulting task being
			 * sent SIGBUS.  The hugetlb fault mutex prevents two
			 * tasks from racing to fault in the same page which
			 * could result in false unable to allocate errors.
			 * Page migration does not take the fault mutex, but
			 * does a clear then write of pte's under page table
			 * lock.  Page fault code could race with migration,
			 * notice the clear pte and try to allocate a page
			 * here.  Before returning error, get ptl and make
			 * sure there really is no pte entry.
			 */
			if (hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte))
				ret = vmf_error(PTR_ERR(folio));
			else
				ret = 0;
			goto out;
		}
		folio_zero_user(folio, vmf->real_address);
		__folio_mark_uptodate(folio);
		new_folio = true;

		if (vma->vm_flags & VM_MAYSHARE) {
			int err = hugetlb_add_to_page_cache(folio, mapping,
							vmf->pgoff);
			if (err) {
				/*
				 * err can't be -EEXIST which implies someone
				 * else consumed the reservation since hugetlb
				 * fault mutex is held when add a hugetlb page
				 * to the page cache. So it's safe to call
				 * restore_reserve_on_error() here.
				 */
				restore_reserve_on_error(h, vma, vmf->address,
							folio);
				folio_put(folio);
				ret = VM_FAULT_SIGBUS;
				goto out;
			}
		} else {
			/* Anonymous folio: lock it like the pagecache path did. */
			new_anon_folio = true;
			folio_lock(folio);
		}
	} else {
		/*
		 * If memory error occurs between mmap() and fault, some process
		 * don't have hwpoisoned swap entry for errored virtual address.
		 * So we need to block hugepage fault by PG_hwpoison bit check.
		 */
		if (unlikely(folio_test_hwpoison(folio))) {
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto backout_unlocked;
		}

		/* Check for page in userfault range. */
		if (userfaultfd_minor(vma)) {
			folio_unlock(folio);
			folio_put(folio);
			/* See comment in userfaultfd_missing() block above */
			if (!hugetlb_pte_stable(h, mm, vmf->address, vmf->pte, vmf->orig_pte)) {
				ret = 0;
				goto out;
			}
			return hugetlb_handle_userfault(vmf, mapping,
							VM_UFFD_MINOR);
		}
	}

	/*
	 * If we are going to COW a private mapping later, we examine the
	 * pending reservations for this page now. This will ensure that
	 * any allocations necessary to record that reservation occur outside
	 * the spinlock.
	 */
	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		if (vma_needs_reservation(h, vma, vmf->address) < 0) {
			ret = VM_FAULT_OOM;
			goto backout_unlocked;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, vmf->address);
	}

	vmf->ptl = huge_pte_lock(h, mm, vmf->pte);
	ret = 0;
	/* If pte changed from under us, retry */
	if (!pte_same(huge_ptep_get(mm, vmf->address, vmf->pte), vmf->orig_pte))
		goto backout;

	if (new_anon_folio)
		hugetlb_add_new_anon_rmap(folio, vma, vmf->address);
	else
		hugetlb_add_file_rmap(folio);
	new_pte = make_huge_pte(vma, folio, vma->vm_flags & VM_SHARED);
	/*
	 * If this pte was previously wr-protected, keep it wr-protected even
	 * if populated.
	 */
	if (unlikely(pte_is_uffd_wp_marker(vmf->orig_pte)))
		new_pte = huge_pte_mkuffd_wp(new_pte);
	set_huge_pte_at(mm, vmf->address, vmf->pte, new_pte, huge_page_size(h));

	hugetlb_count_add(pages_per_huge_page(h), mm);
	if ((vmf->flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
		/*
		 * No need to keep file folios locked. See comment in
		 * hugetlb_fault().
		 */
		if (!new_anon_folio) {
			folio_locked = false;
			folio_unlock(folio);
		}
		/* Optimization, do the COW without a second fault */
		ret = hugetlb_wp(vmf);
	}

	spin_unlock(vmf->ptl);

	/*
	 * Only set hugetlb_migratable in newly allocated pages.  Existing pages
	 * found in the pagecache may not have hugetlb_migratable if they have
	 * been isolated for migration.
	 */
	if (new_folio)
		folio_set_hugetlb_migratable(folio);

	if (folio_locked)
		folio_unlock(folio);
out:
	hugetlb_vma_unlock_read(vma);

	/*
	 * We must check to release the per-VMA lock. __vmf_anon_prepare() is
	 * the only way ret can be set to VM_FAULT_RETRY.
	 */
	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vma);

	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	return ret;

backout:
	spin_unlock(vmf->ptl);
backout_unlocked:
	/* We only need to restore reservations for private mappings */
	if (new_anon_folio)
		restore_reserve_on_error(h, vma, vmf->address, folio);

	folio_unlock(folio);
	folio_put(folio);
	goto out;
}
5913 
5914 #ifdef CONFIG_SMP
hugetlb_fault_mutex_hash(struct address_space * mapping,pgoff_t idx)5915 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5916 {
5917 	unsigned long key[2];
5918 	u32 hash;
5919 
5920 	key[0] = (unsigned long) mapping;
5921 	key[1] = idx;
5922 
5923 	hash = jhash2((u32 *)&key, sizeof(key)/(sizeof(u32)), 0);
5924 
5925 	return hash & (num_fault_mutexes - 1);
5926 }
5927 #else
5928 /*
5929  * For uniprocessor systems we always use a single mutex, so just
5930  * return 0 and avoid the hashing overhead.
5931  */
hugetlb_fault_mutex_hash(struct address_space * mapping,pgoff_t idx)5932 u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx)
5933 {
5934 	return 0;
5935 }
5936 #endif
5937 
/*
 * Top-level hugetlb page fault handler.
 *
 * Serializes against concurrent faults on the same page via the hugetlb
 * fault mutex, and holds the vma lock in read mode while vmf.pte is in use.
 * Missing pages and uffd markers are handed to hugetlb_no_page(), which
 * drops both locks itself; write/unshare faults on present ptes go through
 * hugetlb_wp().
 */
vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long address, unsigned int flags)
{
	vm_fault_t ret;
	u32 hash;
	struct folio *folio = NULL;
	struct hstate *h = hstate_vma(vma);
	struct address_space *mapping;
	bool need_wait_lock = false;
	struct vm_fault vmf = {
		.vma = vma,
		.address = address & huge_page_mask(h),
		.real_address = address,
		.flags = flags,
		.pgoff = vma_hugecache_offset(h, vma,
				address & huge_page_mask(h)),
		/* TODO: Track hugetlb faults using vm_fault */

		/*
		 * Some fields may not be initialized, be careful as it may
		 * be hard to debug if called functions make assumptions
		 */
	};

	/*
	 * Serialize hugepage allocation and instantiation, so that we don't
	 * get spurious allocation failures if two CPUs race to instantiate
	 * the same page in the page cache.
	 */
	mapping = vma->vm_file->f_mapping;
	hash = hugetlb_fault_mutex_hash(mapping, vmf.pgoff);
	mutex_lock(&hugetlb_fault_mutex_table[hash]);

	/*
	 * Acquire vma lock before calling huge_pte_alloc and hold
	 * until finished with vmf.pte.  This prevents huge_pmd_unshare from
	 * being called elsewhere and making the vmf.pte no longer valid.
	 */
	hugetlb_vma_lock_read(vma);
	vmf.pte = huge_pte_alloc(mm, vma, vmf.address, huge_page_size(h));
	if (!vmf.pte) {
		hugetlb_vma_unlock_read(vma);
		mutex_unlock(&hugetlb_fault_mutex_table[hash]);
		return VM_FAULT_OOM;
	}

	vmf.orig_pte = huge_ptep_get(mm, vmf.address, vmf.pte);
	if (huge_pte_none(vmf.orig_pte))
		/*
		 * hugetlb_no_page will drop vma lock and hugetlb fault
		 * mutex internally, which make us return immediately.
		 */
		return hugetlb_no_page(mapping, &vmf);

	/* Marker ptes: poisoned ranges or (unsupported) guard regions. */
	if (pte_is_marker(vmf.orig_pte)) {
		const pte_marker marker =
			softleaf_to_marker(softleaf_from_pte(vmf.orig_pte));

		if (marker & PTE_MARKER_POISONED) {
			ret = VM_FAULT_HWPOISON_LARGE |
				VM_FAULT_SET_HINDEX(hstate_index(h));
			goto out_mutex;
		} else if (WARN_ON_ONCE(marker & PTE_MARKER_GUARD)) {
			/* This isn't supported in hugetlb. */
			ret = VM_FAULT_SIGSEGV;
			goto out_mutex;
		}

		/* Any other marker is treated like a missing page. */
		return hugetlb_no_page(mapping, &vmf);
	}

	ret = 0;

	/* Not present, either a migration or a hwpoisoned entry */
	if (!pte_present(vmf.orig_pte) && !huge_pte_none(vmf.orig_pte)) {
		const softleaf_t softleaf = softleaf_from_pte(vmf.orig_pte);

		if (softleaf_is_migration(softleaf)) {
			/*
			 * Release the hugetlb fault lock now, but retain
			 * the vma lock, because it is needed to guard the
			 * huge_pte_lockptr() later in
			 * migration_entry_wait_huge(). The vma lock will
			 * be released there.
			 */
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			migration_entry_wait_huge(vma, vmf.address, vmf.pte);
			return 0;
		}
		if (softleaf_is_hwpoison(softleaf)) {
			ret = VM_FAULT_HWPOISON_LARGE |
			    VM_FAULT_SET_HINDEX(hstate_index(h));
		}

		goto out_mutex;
	}

	/*
	 * If we are going to COW/unshare the mapping later, we examine the
	 * pending reservations for this page now. This will ensure that any
	 * allocations necessary to record that reservation occur outside the
	 * spinlock.
	 */
	if ((flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) &&
	    !(vma->vm_flags & VM_MAYSHARE) && !huge_pte_write(vmf.orig_pte)) {
		if (vma_needs_reservation(h, vma, vmf.address) < 0) {
			ret = VM_FAULT_OOM;
			goto out_mutex;
		}
		/* Just decrements count, does not deallocate */
		vma_end_reservation(h, vma, vmf.address);
	}

	vmf.ptl = huge_pte_lock(h, mm, vmf.pte);

	/* Check for a racing update before calling hugetlb_wp() */
	if (unlikely(!pte_same(vmf.orig_pte, huge_ptep_get(mm, vmf.address, vmf.pte))))
		goto out_ptl;

	/* Handle userfault-wp first, before trying to lock more pages */
	if (userfaultfd_wp(vma) && huge_pte_uffd_wp(huge_ptep_get(mm, vmf.address, vmf.pte)) &&
	    (flags & FAULT_FLAG_WRITE) && !huge_pte_write(vmf.orig_pte)) {
		if (!userfaultfd_wp_async(vma)) {
			spin_unlock(vmf.ptl);
			hugetlb_vma_unlock_read(vma);
			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
			return handle_userfault(&vmf, VM_UFFD_WP);
		}

		/* Async wp mode: clear the uffd-wp bit and resolve locally. */
		vmf.orig_pte = huge_pte_clear_uffd_wp(vmf.orig_pte);
		set_huge_pte_at(mm, vmf.address, vmf.pte, vmf.orig_pte,
				huge_page_size(hstate_vma(vma)));
		/* Fallthrough to CoW */
	}

	if (flags & (FAULT_FLAG_WRITE|FAULT_FLAG_UNSHARE)) {
		if (!huge_pte_write(vmf.orig_pte)) {
			/*
			 * Anonymous folios need to be lock since hugetlb_wp()
			 * checks whether we can re-use the folio exclusively
			 * for us in case we are the only user of it.
			 */
			folio = page_folio(pte_page(vmf.orig_pte));
			if (folio_test_anon(folio) && !folio_trylock(folio)) {
				need_wait_lock = true;
				goto out_ptl;
			}
			folio_get(folio);
			ret = hugetlb_wp(&vmf);
			if (folio_test_anon(folio))
				folio_unlock(folio);
			folio_put(folio);
			goto out_ptl;
		} else if (likely(flags & FAULT_FLAG_WRITE)) {
			vmf.orig_pte = huge_pte_mkdirty(vmf.orig_pte);
		}
	}
	vmf.orig_pte = pte_mkyoung(vmf.orig_pte);
	if (huge_ptep_set_access_flags(vma, vmf.address, vmf.pte, vmf.orig_pte,
						flags & FAULT_FLAG_WRITE))
		update_mmu_cache(vma, vmf.address, vmf.pte);
out_ptl:
	spin_unlock(vmf.ptl);
out_mutex:
	hugetlb_vma_unlock_read(vma);

	/*
	 * We must check to release the per-VMA lock. __vmf_anon_prepare() in
	 * hugetlb_wp() is the only way ret can be set to VM_FAULT_RETRY.
	 */
	if (unlikely(ret & VM_FAULT_RETRY))
		vma_end_read(vma);

	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
	/*
	 * hugetlb_wp drops all the locks, but the folio lock, before trying to
	 * unmap the folio from other processes. During that window, if another
	 * process mapping that folio faults in, it will take the mutex and then
	 * it will wait on folio_lock, causing an ABBA deadlock.
	 * Use trylock instead and bail out if we fail.
	 *
	 * Ideally, we should hold a refcount on the folio we wait for, but we do
	 * not want to use the folio after it becomes unlocked, but rather just
	 * wait for it to become unlocked, so hopefully next fault succeeds on
	 * the trylock.
	 */
	if (need_wait_lock)
		folio_wait_locked(folio);
	return ret;
}
6128 
6129 #ifdef CONFIG_USERFAULTFD
6130 /*
6131  * Can probably be eliminated, but still used by hugetlb_mfill_atomic_pte().
6132  */
alloc_hugetlb_folio_vma(struct hstate * h,struct vm_area_struct * vma,unsigned long address)6133 static struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
6134 		struct vm_area_struct *vma, unsigned long address)
6135 {
6136 	struct mempolicy *mpol;
6137 	nodemask_t *nodemask;
6138 	struct folio *folio;
6139 	gfp_t gfp_mask;
6140 	int node;
6141 
6142 	gfp_mask = htlb_alloc_mask(h);
6143 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
6144 	/*
6145 	 * This is used to allocate a temporary hugetlb to hold the copied
6146 	 * content, which will then be copied again to the final hugetlb
6147 	 * consuming a reservation. Set the alloc_fallback to false to indicate
6148 	 * that breaking the per-node hugetlb pool is not allowed in this case.
6149 	 */
6150 	folio = alloc_hugetlb_folio_nodemask(h, node, nodemask, gfp_mask, false);
6151 	mpol_cond_put(mpol);
6152 
6153 	return folio;
6154 }
6155 
6156 /*
6157  * Used by userfaultfd UFFDIO_* ioctls. Based on userfaultfd's mfill_atomic_pte
6158  * with modifications for hugetlb pages.
6159  */
hugetlb_mfill_atomic_pte(pte_t * dst_pte,struct vm_area_struct * dst_vma,unsigned long dst_addr,unsigned long src_addr,uffd_flags_t flags,struct folio ** foliop)6160 int hugetlb_mfill_atomic_pte(pte_t *dst_pte,
6161 			     struct vm_area_struct *dst_vma,
6162 			     unsigned long dst_addr,
6163 			     unsigned long src_addr,
6164 			     uffd_flags_t flags,
6165 			     struct folio **foliop)
6166 {
6167 	struct mm_struct *dst_mm = dst_vma->vm_mm;
6168 	bool is_continue = uffd_flags_mode_is(flags, MFILL_ATOMIC_CONTINUE);
6169 	bool wp_enabled = (flags & MFILL_ATOMIC_WP);
6170 	struct hstate *h = hstate_vma(dst_vma);
6171 	struct address_space *mapping = dst_vma->vm_file->f_mapping;
6172 	pgoff_t idx = vma_hugecache_offset(h, dst_vma, dst_addr);
6173 	unsigned long size = huge_page_size(h);
6174 	int vm_shared = dst_vma->vm_flags & VM_SHARED;
6175 	pte_t _dst_pte;
6176 	spinlock_t *ptl;
6177 	int ret = -ENOMEM;
6178 	struct folio *folio;
6179 	bool folio_in_pagecache = false;
6180 	pte_t dst_ptep;
6181 
6182 	if (uffd_flags_mode_is(flags, MFILL_ATOMIC_POISON)) {
6183 		ptl = huge_pte_lock(h, dst_mm, dst_pte);
6184 
6185 		/* Don't overwrite any existing PTEs (even markers) */
6186 		if (!huge_pte_none(huge_ptep_get(dst_mm, dst_addr, dst_pte))) {
6187 			spin_unlock(ptl);
6188 			return -EEXIST;
6189 		}
6190 
6191 		_dst_pte = make_pte_marker(PTE_MARKER_POISONED);
6192 		set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6193 
6194 		/* No need to invalidate - it was non-present before */
6195 		update_mmu_cache(dst_vma, dst_addr, dst_pte);
6196 
6197 		spin_unlock(ptl);
6198 		return 0;
6199 	}
6200 
6201 	if (is_continue) {
6202 		ret = -EFAULT;
6203 		folio = filemap_lock_hugetlb_folio(h, mapping, idx);
6204 		if (IS_ERR(folio))
6205 			goto out;
6206 		folio_in_pagecache = true;
6207 	} else if (!*foliop) {
6208 		/* If a folio already exists, then it's UFFDIO_COPY for
6209 		 * a non-missing case. Return -EEXIST.
6210 		 */
6211 		if (vm_shared &&
6212 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6213 			ret = -EEXIST;
6214 			goto out;
6215 		}
6216 
6217 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6218 		if (IS_ERR(folio)) {
6219 			pte_t *actual_pte = hugetlb_walk(dst_vma, dst_addr, PMD_SIZE);
6220 			if (actual_pte) {
6221 				ret = -EEXIST;
6222 				goto out;
6223 			}
6224 			ret = -ENOMEM;
6225 			goto out;
6226 		}
6227 
6228 		ret = copy_folio_from_user(folio, (const void __user *) src_addr,
6229 					   false);
6230 
6231 		/* fallback to copy_from_user outside mmap_lock */
6232 		if (unlikely(ret)) {
6233 			ret = -ENOENT;
6234 			/* Free the allocated folio which may have
6235 			 * consumed a reservation.
6236 			 */
6237 			restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6238 			folio_put(folio);
6239 
6240 			/* Allocate a temporary folio to hold the copied
6241 			 * contents.
6242 			 */
6243 			folio = alloc_hugetlb_folio_vma(h, dst_vma, dst_addr);
6244 			if (!folio) {
6245 				ret = -ENOMEM;
6246 				goto out;
6247 			}
6248 			*foliop = folio;
6249 			/* Set the outparam foliop and return to the caller to
6250 			 * copy the contents outside the lock. Don't free the
6251 			 * folio.
6252 			 */
6253 			goto out;
6254 		}
6255 	} else {
6256 		if (vm_shared &&
6257 		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
6258 			folio_put(*foliop);
6259 			ret = -EEXIST;
6260 			*foliop = NULL;
6261 			goto out;
6262 		}
6263 
6264 		folio = alloc_hugetlb_folio(dst_vma, dst_addr, false);
6265 		if (IS_ERR(folio)) {
6266 			folio_put(*foliop);
6267 			ret = -ENOMEM;
6268 			*foliop = NULL;
6269 			goto out;
6270 		}
6271 		ret = copy_user_large_folio(folio, *foliop, dst_addr, dst_vma);
6272 		folio_put(*foliop);
6273 		*foliop = NULL;
6274 		if (ret) {
6275 			folio_put(folio);
6276 			goto out;
6277 		}
6278 	}
6279 
6280 	/*
6281 	 * If we just allocated a new page, we need a memory barrier to ensure
6282 	 * that preceding stores to the page become visible before the
6283 	 * set_pte_at() write. The memory barrier inside __folio_mark_uptodate
6284 	 * is what we need.
6285 	 *
6286 	 * In the case where we have not allocated a new page (is_continue),
6287 	 * the page must already be uptodate. UFFDIO_CONTINUE already includes
6288 	 * an earlier smp_wmb() to ensure that prior stores will be visible
6289 	 * before the set_pte_at() write.
6290 	 */
6291 	if (!is_continue)
6292 		__folio_mark_uptodate(folio);
6293 	else
6294 		WARN_ON_ONCE(!folio_test_uptodate(folio));
6295 
6296 	/* Add shared, newly allocated pages to the page cache. */
6297 	if (vm_shared && !is_continue) {
6298 		ret = -EFAULT;
6299 		if (idx >= (i_size_read(mapping->host) >> huge_page_shift(h)))
6300 			goto out_release_nounlock;
6301 
		/*
		 * Serialization between remove_inode_hugepages() and
		 * hugetlb_add_to_page_cache() below happens through the
		 * hugetlb_fault_mutex_table that here must be held by
		 * the caller.
		 */
6308 		ret = hugetlb_add_to_page_cache(folio, mapping, idx);
6309 		if (ret)
6310 			goto out_release_nounlock;
6311 		folio_in_pagecache = true;
6312 	}
6313 
6314 	ptl = huge_pte_lock(h, dst_mm, dst_pte);
6315 
6316 	ret = -EIO;
6317 	if (folio_test_hwpoison(folio))
6318 		goto out_release_unlock;
6319 
6320 	ret = -EEXIST;
6321 
6322 	dst_ptep = huge_ptep_get(dst_mm, dst_addr, dst_pte);
6323 	/*
6324 	 * See comment about UFFD marker overwriting in
6325 	 * mfill_atomic_install_pte().
6326 	 */
6327 	if (!huge_pte_none(dst_ptep) && !pte_is_uffd_marker(dst_ptep))
6328 		goto out_release_unlock;
6329 
6330 	if (folio_in_pagecache)
6331 		hugetlb_add_file_rmap(folio);
6332 	else
6333 		hugetlb_add_new_anon_rmap(folio, dst_vma, dst_addr);
6334 
6335 	/*
6336 	 * For either: (1) CONTINUE on a non-shared VMA, or (2) UFFDIO_COPY
6337 	 * with wp flag set, don't set pte write bit.
6338 	 */
6339 	_dst_pte = make_huge_pte(dst_vma, folio,
6340 				 !wp_enabled && !(is_continue && !vm_shared));
6341 	/*
6342 	 * Always mark UFFDIO_COPY page dirty; note that this may not be
6343 	 * extremely important for hugetlbfs for now since swapping is not
6344 	 * supported, but we should still be clear in that this page cannot be
6345 	 * thrown away at will, even if write bit not set.
6346 	 */
6347 	_dst_pte = huge_pte_mkdirty(_dst_pte);
6348 	_dst_pte = pte_mkyoung(_dst_pte);
6349 
6350 	if (wp_enabled)
6351 		_dst_pte = huge_pte_mkuffd_wp(_dst_pte);
6352 
6353 	set_huge_pte_at(dst_mm, dst_addr, dst_pte, _dst_pte, size);
6354 
6355 	hugetlb_count_add(pages_per_huge_page(h), dst_mm);
6356 
6357 	/* No need to invalidate - it was non-present before */
6358 	update_mmu_cache(dst_vma, dst_addr, dst_pte);
6359 
6360 	spin_unlock(ptl);
6361 	if (!is_continue)
6362 		folio_set_hugetlb_migratable(folio);
6363 	if (vm_shared || is_continue)
6364 		folio_unlock(folio);
6365 	ret = 0;
6366 out:
6367 	return ret;
6368 out_release_unlock:
6369 	spin_unlock(ptl);
6370 	if (vm_shared || is_continue)
6371 		folio_unlock(folio);
6372 out_release_nounlock:
6373 	if (!folio_in_pagecache)
6374 		restore_reserve_on_error(h, dst_vma, dst_addr, folio);
6375 	folio_put(folio);
6376 	goto out;
6377 }
6378 #endif /* CONFIG_USERFAULTFD */
6379 
/*
 * Change the protection of every hugetlb leaf entry in [address, end) to
 * @newprot, additionally installing or clearing userfaultfd write-protect
 * state depending on @cp_flags (MM_CP_UFFD_WP / MM_CP_UFFD_WP_RESOLVE).
 *
 * Returns the number of base pages whose protection changed, or a negative
 * errno (-ENOMEM if a page table needed for installing a uffd-wp marker
 * could not be allocated).
 */
long hugetlb_change_protection(struct vm_area_struct *vma,
		unsigned long address, unsigned long end,
		pgprot_t newprot, unsigned long cp_flags)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long start = address;
	pte_t *ptep;
	pte_t pte;
	struct hstate *h = hstate_vma(vma);
	long pages = 0, psize = huge_page_size(h);
	struct mmu_notifier_range range;
	unsigned long last_addr_mask;
	bool uffd_wp = cp_flags & MM_CP_UFFD_WP;
	bool uffd_wp_resolve = cp_flags & MM_CP_UFFD_WP_RESOLVE;
	struct mmu_gather tlb;

	/*
	 * In the case of shared PMDs, the area to flush could be beyond
	 * start/end.  Set range.start/range.end to cover the maximum possible
	 * range if PMD sharing is possible.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
				0, mm, start, end);
	adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);

	BUG_ON(address >= end);
	flush_cache_range(vma, range.start, range.end);
	tlb_gather_mmu_vma(&tlb, vma);

	mmu_notifier_invalidate_range_start(&range);
	hugetlb_vma_lock_write(vma);
	i_mmap_lock_write(vma->vm_file->f_mapping);
	last_addr_mask = hugetlb_mask_last_page(h);
	for (; address < end; address += psize) {
		softleaf_t entry;
		spinlock_t *ptl;

		ptep = hugetlb_walk(vma, address, psize);
		if (!ptep) {
			if (!uffd_wp) {
				/* Skip to the last entry this table level covers. */
				address |= last_addr_mask;
				continue;
			}
			/*
			 * Userfaultfd wr-protect requires pgtable
			 * pre-allocations to install pte markers.
			 */
			ptep = huge_pte_alloc(mm, vma, address, psize);
			if (!ptep) {
				pages = -ENOMEM;
				break;
			}
		}
		ptl = huge_pte_lock(h, mm, ptep);
		if (huge_pmd_unshare(&tlb, vma, address, ptep)) {
			/*
			 * When uffd-wp is enabled on the vma, unshare
			 * shouldn't happen at all.  Warn about it if it
			 * happened due to some reason.
			 */
			WARN_ON_ONCE(uffd_wp || uffd_wp_resolve);
			pages++;
			spin_unlock(ptl);
			address |= last_addr_mask;
			continue;
		}
		pte = huge_ptep_get(mm, address, ptep);
		if (huge_pte_none(pte)) {
			if (unlikely(uffd_wp))
				/* Safe to modify directly (none->non-present). */
				set_huge_pte_at(mm, address, ptep,
						make_pte_marker(PTE_MARKER_UFFD_WP),
						psize);
			goto next;
		}

		entry = softleaf_from_pte(pte);
		if (unlikely(softleaf_is_hwpoison(entry))) {
			/* Nothing to do. */
		} else if (unlikely(softleaf_is_migration(entry))) {
			struct folio *folio = softleaf_to_folio(entry);
			pte_t newpte = pte;

			if (softleaf_is_migration_write(entry)) {
				/* Downgrade a writable migration entry to readable. */
				if (folio_test_anon(folio))
					entry = make_readable_exclusive_migration_entry(
								swp_offset(entry));
				else
					entry = make_readable_migration_entry(
								swp_offset(entry));
				newpte = swp_entry_to_pte(entry);
				pages++;
			}

			if (uffd_wp)
				newpte = pte_swp_mkuffd_wp(newpte);
			else if (uffd_wp_resolve)
				newpte = pte_swp_clear_uffd_wp(newpte);
			if (!pte_same(pte, newpte))
				set_huge_pte_at(mm, address, ptep, newpte, psize);
		} else if (unlikely(pte_is_marker(pte))) {
			/*
			 * Do nothing on a poison marker; page is
			 * corrupted, permissions do not apply. Here
			 * pte_marker_uffd_wp()==true implies !poison
			 * because they're mutually exclusive.
			 */
			if (pte_is_uffd_wp_marker(pte) && uffd_wp_resolve)
				/* Safe to modify directly (non-present->none). */
				huge_pte_clear(mm, address, ptep, psize);
		} else {
			pte_t old_pte;
			/* Reuse the local hstate rather than re-deriving it. */
			unsigned int shift = huge_page_shift(h);

			old_pte = huge_ptep_modify_prot_start(vma, address, ptep);
			pte = huge_pte_modify(old_pte, newprot);
			pte = arch_make_huge_pte(pte, shift, vma->vm_flags);
			if (uffd_wp)
				pte = huge_pte_mkuffd_wp(pte);
			else if (uffd_wp_resolve)
				pte = huge_pte_clear_uffd_wp(pte);
			huge_ptep_modify_prot_commit(vma, address, ptep, old_pte, pte);
			pages++;
			tlb_remove_huge_tlb_entry(h, &tlb, ptep, address);
		}

next:
		spin_unlock(ptl);
		cond_resched();
	}

	tlb_flush_mmu_tlbonly(&tlb);
	huge_pmd_unshare_flush(&tlb, vma);
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs() we are
	 * downgrading page table protection not changing it to point to a new
	 * page.
	 *
	 * See Documentation/mm/mmu_notifier.rst
	 */
	i_mmap_unlock_write(vma->vm_file->f_mapping);
	hugetlb_vma_unlock_write(vma);
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);

	return pages > 0 ? (pages << h->order) : pages;
}
6527 
6528 /*
6529  * Update the reservation map for the range [from, to].
6530  *
6531  * Returns the number of entries that would be added to the reservation map
6532  * associated with the range [from, to].  This number is greater or equal to
6533  * zero. -EINVAL or -ENOMEM is returned in case of any errors.
6534  */
6535 
long hugetlb_reserve_pages(struct inode *inode,
		long from, long to,
		struct vm_area_desc *desc,
		vma_flags_t vma_flags)
{
	/* -1 flags that region_chg()/region_add() have not (successfully) run. */
	long chg = -1, add = -1, spool_resv, gbl_resv;
	struct hstate *h = hstate_inode(inode);
	struct hugepage_subpool *spool = subpool_inode(inode);
	struct resv_map *resv_map;
	struct hugetlb_cgroup *h_cg = NULL;
	long gbl_reserve, regions_needed = 0;
	int err;

	/* This should never happen */
	if (from > to) {
		VM_WARN(1, "%s called with a negative range\n", __func__);
		return -EINVAL;
	}

	/*
	 * Only apply hugepage reservation if asked. At fault time, an
	 * attempt will be made for VM_NORESERVE to allocate a page
	 * without using reserves
	 */
	if (vma_flags_test(&vma_flags, VMA_NORESERVE_BIT))
		return 0;

	/*
	 * Shared mappings base their reservation on the number of pages that
	 * are already allocated on behalf of the file. Private mappings need
	 * to reserve the full area even if read-only as mprotect() may be
	 * called to make the mapping read-write. Assume !desc is a shm mapping
	 */
	if (!desc || vma_desc_test(desc, VMA_MAYSHARE_BIT)) {
		/*
		 * resv_map can not be NULL as hugetlb_reserve_pages is only
		 * called for inodes for which resv_maps were created (see
		 * hugetlbfs_get_inode).
		 */
		resv_map = inode_resv_map(inode);

		chg = region_chg(resv_map, from, to, &regions_needed);
	} else {
		/* Private mapping. */
		resv_map = resv_map_alloc();
		if (!resv_map) {
			err = -ENOMEM;
			goto out_err;
		}

		/* Private mappings always reserve the full range. */
		chg = to - from;

		set_vma_desc_resv_map(desc, resv_map);
		set_vma_desc_resv_flags(desc, HPAGE_RESV_OWNER);
	}

	if (chg < 0) {
		/* region_chg() above can return -ENOMEM */
		err = (chg == -ENOMEM) ? -ENOMEM : -EINVAL;
		goto out_err;
	}

	/* Charge the reservation against the hugetlb cgroup up front. */
	err = hugetlb_cgroup_charge_cgroup_rsvd(hstate_index(h),
				chg * pages_per_huge_page(h), &h_cg);
	if (err < 0)
		goto out_err;

	if (desc && !vma_desc_test(desc, VMA_MAYSHARE_BIT) && h_cg) {
		/* For private mappings, the hugetlb_cgroup uncharge info hangs
		 * of the resv_map.
		 */
		resv_map_set_hugetlb_cgroup_uncharge_info(resv_map, h_cg, h);
	}

	/*
	 * There must be enough pages in the subpool for the mapping. If
	 * the subpool has a minimum size, there may be some global
	 * reservations already in place (gbl_reserve).
	 */
	gbl_reserve = hugepage_subpool_get_pages(spool, chg);
	if (gbl_reserve < 0) {
		err = gbl_reserve;
		goto out_uncharge_cgroup;
	}

	/*
	 * Check enough hugepages are available for the reservation.
	 * Hand the pages back to the subpool if there are not
	 */
	err = hugetlb_acct_memory(h, gbl_reserve);
	if (err < 0)
		goto out_put_pages;

	/*
	 * Account for the reservations made. Shared mappings record regions
	 * that have reservations as they are shared by multiple VMAs.
	 * When the last VMA disappears, the region map says how much
	 * the reservation was and the page cache tells how much of
	 * the reservation was consumed. Private mappings are per-VMA and
	 * only the consumed reservations are tracked. When the VMA
	 * disappears, the original reservation is the VMA size and the
	 * consumed reservations are stored in the map. Hence, nothing
	 * else has to be done for private mappings here
	 */
	if (!desc || vma_desc_test(desc, VMA_MAYSHARE_BIT)) {
		add = region_add(resv_map, from, to, regions_needed, h, h_cg);

		if (unlikely(add < 0)) {
			hugetlb_acct_memory(h, -gbl_reserve);
			err = add;
			goto out_put_pages;
		} else if (unlikely(chg > add)) {
			/*
			 * pages in this range were added to the reserve
			 * map between region_chg and region_add.  This
			 * indicates a race with alloc_hugetlb_folio.  Adjust
			 * the subpool and reserve counts modified above
			 * based on the difference.
			 */
			long rsv_adjust;

			/*
			 * hugetlb_cgroup_uncharge_cgroup_rsvd() will put the
			 * reference to h_cg->css. See comment below for detail.
			 */
			hugetlb_cgroup_uncharge_cgroup_rsvd(
				hstate_index(h),
				(chg - add) * pages_per_huge_page(h), h_cg);

			rsv_adjust = hugepage_subpool_put_pages(spool,
								chg - add);
			hugetlb_acct_memory(h, -rsv_adjust);
		} else if (h_cg) {
			/*
			 * The file_regions will hold their own reference to
			 * h_cg->css. So we should release the reference held
			 * via hugetlb_cgroup_charge_cgroup_rsvd() when we are
			 * done.
			 */
			hugetlb_cgroup_put_rsvd_cgroup(h_cg);
		}
	}
	return chg;

	/* Error unwind: undo the accounting in reverse order of acquisition. */
out_put_pages:
	spool_resv = chg - gbl_reserve;
	if (spool_resv) {
		/* put sub pool's reservation back, chg - gbl_reserve */
		gbl_resv = hugepage_subpool_put_pages(spool, spool_resv);
		/*
		 * subpool's reserved pages can not be put back due to race,
		 * return to hstate.
		 */
		hugetlb_acct_memory(h, -gbl_resv);
	}
	/* Restore used_hpages for pages that failed global reservation */
	if (gbl_reserve && spool) {
		unsigned long flags;

		spin_lock_irqsave(&spool->lock, flags);
		if (spool->max_hpages != -1)
			spool->used_hpages -= gbl_reserve;
		unlock_or_release_subpool(spool, flags);
	}
out_uncharge_cgroup:
	hugetlb_cgroup_uncharge_cgroup_rsvd(hstate_index(h),
					    chg * pages_per_huge_page(h), h_cg);
out_err:
	if (!desc || vma_desc_test(desc, VMA_MAYSHARE_BIT))
		/* Only call region_abort if the region_chg succeeded but the
		 * region_add failed or didn't run.
		 */
		if (chg >= 0 && add < 0)
			region_abort(resv_map, from, to, regions_needed);
	if (desc && is_vma_desc_resv_set(desc, HPAGE_RESV_OWNER)) {
		/* Private mapping owns the resv_map we allocated; drop it. */
		kref_put(&resv_map->refs, resv_map_release);
		set_vma_desc_resv_map(desc, NULL);
	}
	return err;
}
6716 
/*
 * Release reservation-map entries for [start, end) on @inode and return
 * @freed pages worth of reservation to the subpool / global pool.
 * Returns 0 on success or a negative errno if region_del() fails.
 */
long hugetlb_unreserve_pages(struct inode *inode, long start, long end,
								long freed)
{
	struct hstate *h = hstate_inode(inode);
	struct resv_map *resv_map = inode_resv_map(inode);
	long chg = 0;
	struct hugepage_subpool *spool = subpool_inode(inode);
	long gbl_reserve;

	/*
	 * Since this routine can be called in the evict inode path for all
	 * hugetlbfs inodes, resv_map could be NULL.
	 */
	if (resv_map) {
		chg = region_del(resv_map, start, end);
		/*
		 * region_del() can fail in the rare case where a region
		 * must be split and another region descriptor can not be
		 * allocated.  If end == LONG_MAX, it will not fail.
		 */
		if (chg < 0)
			return chg;
	}

	/* Uncharge the inode's block accounting for the freed pages. */
	spin_lock(&inode->i_lock);
	inode->i_blocks -= (blocks_per_huge_page(h) * freed);
	spin_unlock(&inode->i_lock);

	/*
	 * If the subpool has a minimum size, the number of global
	 * reservations to be released may be adjusted.
	 *
	 * Note that !resv_map implies freed == 0. So (chg - freed)
	 * won't go negative.
	 */
	gbl_reserve = hugepage_subpool_put_pages(spool, (chg - freed));
	hugetlb_acct_memory(h, -gbl_reserve);

	return 0;
}
6757 
6758 #ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
/*
 * Return the address in @svma corresponding to @idx if @svma's pmd table
 * for that range may be shared with @vma, or 0 if sharing is not possible.
 */
static unsigned long page_table_shareable(struct vm_area_struct *svma,
				struct vm_area_struct *vma,
				unsigned long addr, pgoff_t idx)
{
	unsigned long saddr = ((idx - svma->vm_pgoff) << PAGE_SHIFT) +
				svma->vm_start;
	unsigned long sbase = saddr & PUD_MASK;

	/* Both mappings must place the page in the same pmd slot. */
	if (pmd_index(addr) != pmd_index(saddr))
		return 0;

	/* Allow segments to share if only one is marked locked */
	if ((vma->vm_flags & ~VM_LOCKED_MASK) !=
	    (svma->vm_flags & ~VM_LOCKED_MASK))
		return 0;

	/* The candidate vma must span the whole PUD-aligned region. */
	if (!range_in_vma(svma, sbase, sbase + PUD_SIZE))
		return 0;

	/* vma_lock (vm_private_data) is required for sharing. */
	if (!svma->vm_private_data)
		return 0;

	return saddr;
}
6786 
want_pmd_share(struct vm_area_struct * vma,unsigned long addr)6787 bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
6788 {
6789 	unsigned long start = addr & PUD_MASK;
6790 	unsigned long end = start + PUD_SIZE;
6791 
6792 #ifdef CONFIG_USERFAULTFD
6793 	if (uffd_disable_huge_pmd_share(vma))
6794 		return false;
6795 #endif
6796 	/*
6797 	 * check on proper vm_flags and page table alignment
6798 	 */
6799 	if (!(vma->vm_flags & VM_MAYSHARE))
6800 		return false;
6801 	if (!vma->vm_private_data)	/* vma lock required for sharing */
6802 		return false;
6803 	if (!range_in_vma(vma, start, end))
6804 		return false;
6805 	return true;
6806 }
6807 
6808 /*
6809  * Determine if start,end range within vma could be mapped by shared pmd.
6810  * If yes, adjust start and end to cover range associated with possible
6811  * shared pmd mappings.
6812  */
adjust_range_if_pmd_sharing_possible(struct vm_area_struct * vma,unsigned long * start,unsigned long * end)6813 void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
6814 				unsigned long *start, unsigned long *end)
6815 {
6816 	unsigned long v_start = ALIGN(vma->vm_start, PUD_SIZE),
6817 		v_end = ALIGN_DOWN(vma->vm_end, PUD_SIZE);
6818 
6819 	/*
6820 	 * vma needs to span at least one aligned PUD size, and the range
6821 	 * must be at least partially within in.
6822 	 */
6823 	if (!(vma->vm_flags & VM_MAYSHARE) || !(v_end > v_start) ||
6824 		(*end <= v_start) || (*start >= v_end))
6825 		return;
6826 
6827 	/* Extend the range to be PUD aligned for a worst case scenario */
6828 	if (*start > v_start)
6829 		*start = ALIGN_DOWN(*start, PUD_SIZE);
6830 
6831 	if (*end < v_end)
6832 		*end = ALIGN(*end, PUD_SIZE);
6833 }
6834 
6835 /*
6836  * Search for a shareable pmd page for hugetlb. In any case calls pmd_alloc()
6837  * and returns the corresponding pte. While this is not necessary for the
6838  * !shared pmd case because we can allocate the pmd later as well, it makes the
6839  * code much cleaner. pmd allocation is essential for the shared case because
6840  * pud has to be populated inside the same i_mmap_rwsem section - otherwise
6841  * racing tasks could either miss the sharing (see huge_pte_offset) or select a
6842  * bad pmd for sharing.
6843  */
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	struct address_space *mapping = vma->vm_file->f_mapping;
	/* File page offset of @addr, in base-page units. */
	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) +
			vma->vm_pgoff;
	struct vm_area_struct *svma;
	unsigned long saddr;
	pte_t *spte = NULL;
	pte_t *pte;

	/*
	 * Scan all other vmas mapping the same file offset for an
	 * already-instantiated pmd table that we are allowed to share.
	 */
	i_mmap_lock_read(mapping);
	vma_interval_tree_foreach(svma, &mapping->i_mmap, idx, idx) {
		if (svma == vma)
			continue;

		saddr = page_table_shareable(svma, vma, addr, idx);
		if (saddr) {
			spte = hugetlb_walk(svma, saddr,
					    vma_mmu_pagesize(svma));
			if (spte) {
				/* Take a sharer reference on the pmd table. */
				ptdesc_pmd_pts_inc(virt_to_ptdesc(spte));
				break;
			}
		}
	}

	if (!spte)
		goto out;

	spin_lock(&mm->page_table_lock);
	if (pud_none(*pud)) {
		/* Install the shared pmd table into our pud. */
		pud_populate(mm, pud,
				(pmd_t *)((unsigned long)spte & PAGE_MASK));
		mm_inc_nr_pmds(mm);
	} else {
		/* Lost the race to populate the pud; drop our sharer ref. */
		ptdesc_pmd_pts_dec(virt_to_ptdesc(spte));
	}
	spin_unlock(&mm->page_table_lock);
out:
	/* Whether shared or not, make sure a pmd exists below the pud. */
	pte = (pte_t *)pmd_alloc(mm, pud, addr);
	i_mmap_unlock_read(mapping);
	return pte;
}
6888 
6889 /**
6890  * huge_pmd_unshare - Unmap a pmd table if it is shared by multiple users
6891  * @tlb: the current mmu_gather.
6892  * @vma: the vma covering the pmd table.
6893  * @addr: the address we are trying to unshare.
6894  * @ptep: pointer into the (pmd) page table.
6895  *
6896  * Called with the page table lock held, the i_mmap_rwsem held in write mode
6897  * and the hugetlb vma lock held in write mode.
6898  *
6899  * Note: The caller must call huge_pmd_unshare_flush() before dropping the
6900  * i_mmap_rwsem.
6901  *
6902  * Returns: 1 if it was a shared PMD table and it got unmapped, or 0 if it
6903  *	    was not a shared PMD table.
6904  */
int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
	unsigned long sz = huge_page_size(hstate_vma(vma));
	struct mm_struct *mm = vma->vm_mm;
	pgd_t *pgd = pgd_offset(mm, addr);
	p4d_t *p4d = p4d_offset(pgd, addr);
	pud_t *pud = pud_offset(p4d, addr);

	/* Only PMD-sized hugetlb mappings can share pmd tables. */
	if (sz != PMD_SIZE)
		return 0;
	if (!ptdesc_pmd_is_shared(virt_to_ptdesc(ptep)))
		return 0;
	i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	hugetlb_vma_assert_locked(vma);
	/* Detach the shared pmd table from this mm. */
	pud_clear(pud);

	/*
	 * Defer the TLB flush / reuse synchronization to
	 * huge_pmd_unshare_flush() via the mmu_gather.
	 */
	tlb_unshare_pmd_ptdesc(tlb, virt_to_ptdesc(ptep), addr);

	mm_dec_nr_pmds(mm);
	return 1;
}
6927 
6928 /*
6929  * huge_pmd_unshare_flush - Complete a sequence of huge_pmd_unshare() calls
6930  * @tlb: the current mmu_gather.
6931  * @vma: the vma covering the pmd table.
6932  *
6933  * Perform necessary TLB flushes or IPI broadcasts to synchronize PMD table
6934  * unsharing with concurrent page table walkers.
6935  *
6936  * This function must be called after a sequence of huge_pmd_unshare()
6937  * calls while still holding the i_mmap_rwsem.
6938  */
void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * We must synchronize page table unsharing such that nobody will
	 * try reusing a previously-shared page table while it might still
	 * be in use by previous sharers (TLB, GUP_fast).
	 */
	i_mmap_assert_write_locked(vma->vm_file->f_mapping);

	/* Flush everything queued by tlb_unshare_pmd_ptdesc() calls. */
	tlb_flush_unshared_tables(tlb);
}
6950 
6951 #else /* !CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6952 
pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma,
		      unsigned long addr, pud_t *pud)
{
	/* PMD page table sharing is compiled out: never share. */
	return NULL;
}
6958 
int huge_pmd_unshare(struct mmu_gather *tlb, struct vm_area_struct *vma,
		unsigned long addr, pte_t *ptep)
{
	/* No sharing support, so nothing can ever be unshared. */
	return 0;
}
6964 
void huge_pmd_unshare_flush(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/* Nothing to flush without PMD page table sharing. */
}
6968 
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
				unsigned long *start, unsigned long *end)
{
	/* Without PMD sharing the range never needs widening. */
}
6973 
bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr)
{
	/* PMD sharing is compiled out. */
	return false;
}
6978 #endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */
6979 
6980 #ifdef CONFIG_ARCH_WANT_GENERAL_HUGETLB
pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	p4d = p4d_alloc(mm, pgd, addr);
	if (!p4d)
		return NULL;
	pud = pud_alloc(mm, p4d, addr);
	if (pud) {
		if (sz == PUD_SIZE) {
			/* PUD-sized pages are mapped at the pud level itself. */
			pte = (pte_t *)pud;
		} else {
			BUG_ON(sz != PMD_SIZE);
			/* Prefer reusing another mapping's pmd table. */
			if (want_pmd_share(vma, addr) && pud_none(*pud))
				pte = huge_pmd_share(mm, vma, addr, pud);
			else
				pte = (pte_t *)pmd_alloc(mm, pud, addr);
		}
	}

	if (pte) {
		pte_t pteval = ptep_get_lockless(pte);

		/* A present entry at this level must be a huge mapping. */
		BUG_ON(pte_present(pteval) && !pte_huge(pteval));
	}

	return pte;
}
7014 
7015 /*
7016  * huge_pte_offset() - Walk the page table to resolve the hugepage
7017  * entry at address @addr
7018  *
7019  * Return: Pointer to page table entry (PUD or PMD) for
7020  * address @addr, or NULL if a !p*d_present() entry is encountered and the
7021  * size @sz doesn't match the hugepage size at this level of the page
7022  * table.
7023  */
huge_pte_offset(struct mm_struct * mm,unsigned long addr,unsigned long sz)7024 pte_t *huge_pte_offset(struct mm_struct *mm,
7025 		       unsigned long addr, unsigned long sz)
7026 {
7027 	pgd_t *pgd;
7028 	p4d_t *p4d;
7029 	pud_t *pud;
7030 	pmd_t *pmd;
7031 
7032 	pgd = pgd_offset(mm, addr);
7033 	if (!pgd_present(*pgd))
7034 		return NULL;
7035 	p4d = p4d_offset(pgd, addr);
7036 	if (!p4d_present(*p4d))
7037 		return NULL;
7038 
7039 	pud = pud_offset(p4d, addr);
7040 	if (sz == PUD_SIZE)
7041 		/* must be pud huge, non-present or none */
7042 		return (pte_t *)pud;
7043 	if (!pud_present(*pud))
7044 		return NULL;
7045 	/* must have a valid entry and size to go further */
7046 
7047 	pmd = pmd_offset(pud, addr);
7048 	/* must be pmd huge, non-present or none */
7049 	return (pte_t *)pmd;
7050 }
7051 
7052 /*
7053  * Return a mask that can be used to update an address to the last huge
7054  * page in a page table page mapping size.  Used to skip non-present
7055  * page table entries when linearly scanning address ranges.  Architectures
7056  * with unique huge page to page table relationships can define their own
7057  * version of this routine.
7058  */
hugetlb_mask_last_page(struct hstate * h)7059 unsigned long hugetlb_mask_last_page(struct hstate *h)
7060 {
7061 	unsigned long hp_size = huge_page_size(h);
7062 
7063 	if (hp_size == PUD_SIZE)
7064 		return P4D_SIZE - PUD_SIZE;
7065 	else if (hp_size == PMD_SIZE)
7066 		return PUD_SIZE - PMD_SIZE;
7067 	else
7068 		return 0UL;
7069 }
7070 
7071 #else
7072 
7073 /* See description above.  Architectures can provide their own version. */
__weak unsigned long hugetlb_mask_last_page(struct hstate *h)
{
#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
	/* Shared PMD tables cover PUD-sized units; allow skipping in them. */
	if (huge_page_size(h) == PMD_SIZE)
		return PUD_SIZE - PMD_SIZE;
#endif
	return 0UL;
}
7082 
7083 #endif /* CONFIG_ARCH_WANT_GENERAL_HUGETLB */
7084 
7085 /**
7086  * folio_isolate_hugetlb - try to isolate an allocated hugetlb folio
7087  * @folio: the folio to isolate
7088  * @list: the list to add the folio to on success
7089  *
7090  * Isolate an allocated (refcount > 0) hugetlb folio, marking it as
7091  * isolated/non-migratable, and moving it from the active list to the
7092  * given list.
7093  *
7094  * Isolation will fail if @folio is not an allocated hugetlb folio, or if
7095  * it is already isolated/non-migratable.
7096  *
7097  * On success, an additional folio reference is taken that must be dropped
7098  * using folio_putback_hugetlb() to undo the isolation.
7099  *
7100  * Return: True if isolation worked, otherwise False.
7101  */
folio_isolate_hugetlb(struct folio * folio,struct list_head * list)7102 bool folio_isolate_hugetlb(struct folio *folio, struct list_head *list)
7103 {
7104 	bool ret = true;
7105 
7106 	spin_lock_irq(&hugetlb_lock);
7107 	if (!folio_test_hugetlb(folio) ||
7108 	    !folio_test_hugetlb_migratable(folio) ||
7109 	    !folio_try_get(folio)) {
7110 		ret = false;
7111 		goto unlock;
7112 	}
7113 	folio_clear_hugetlb_migratable(folio);
7114 	list_move_tail(&folio->lru, list);
7115 unlock:
7116 	spin_unlock_irq(&hugetlb_lock);
7117 	return ret;
7118 }
7119 
/*
 * Try to take a reference on a (possibly hwpoisoned) hugetlb folio for the
 * memory-failure code.  *hugetlb reports whether @folio was a hugetlb folio
 * under hugetlb_lock.  Returns 0 for a freed hugetlb folio, the result of
 * folio_try_get() when the folio is migratable (or @unpoison is set), or
 * -EBUSY otherwise.
 */
int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison)
{
	int ret = 0;

	*hugetlb = false;
	spin_lock_irq(&hugetlb_lock);
	if (folio_test_hugetlb(folio)) {
		*hugetlb = true;
		if (folio_test_hugetlb_freed(folio))
			ret = 0;
		else if (folio_test_hugetlb_migratable(folio) || unpoison)
			ret = folio_try_get(folio);
		else
			ret = -EBUSY;
	}
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
7138 
/*
 * Lock-taking wrapper: run __get_huge_page_for_hwpoison() with
 * hugetlb_lock held so the folio state cannot change underneath it.
 */
int get_huge_page_for_hwpoison(unsigned long pfn, int flags,
				bool *migratable_cleared)
{
	int ret;

	spin_lock_irq(&hugetlb_lock);
	ret = __get_huge_page_for_hwpoison(pfn, flags, migratable_cleared);
	spin_unlock_irq(&hugetlb_lock);
	return ret;
}
7149 
7150 /**
7151  * folio_putback_hugetlb - unisolate a hugetlb folio
7152  * @folio: the isolated hugetlb folio
7153  *
7154  * Putback/un-isolate the hugetlb folio that was previous isolated using
7155  * folio_isolate_hugetlb(): marking it non-isolated/migratable and putting it
7156  * back onto the active list.
7157  *
7158  * Will drop the additional folio reference obtained through
7159  * folio_isolate_hugetlb().
7160  */
void folio_putback_hugetlb(struct folio *folio)
{
	spin_lock_irq(&hugetlb_lock);
	/* Make the folio migratable again and return it to the active list. */
	folio_set_hugetlb_migratable(folio);
	list_move_tail(&folio->lru, &(folio_hstate(folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
	/* Drop the reference taken by folio_isolate_hugetlb(). */
	folio_put(folio);
}
7169 
/*
 * Transfer hugetlb-specific state from @old_folio to @new_folio after a
 * successful migration for @reason: cgroup accounting, temporary status
 * (plus per-node surplus counts), and the "migratable" flag.
 */
void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason)
{
	struct hstate *h = folio_hstate(old_folio);

	hugetlb_cgroup_migrate(old_folio, new_folio);
	folio_set_owner_migrate_reason(new_folio, reason);

	/*
	 * transfer temporary state of the new hugetlb folio. This is
	 * reverse to other transitions because the newpage is going to
	 * be final while the old one will be freed so it takes over
	 * the temporary status.
	 *
	 * Also note that we have to transfer the per-node surplus state
	 * here as well otherwise the global surplus count will not match
	 * the per-node's.
	 */
	if (folio_test_hugetlb_temporary(new_folio)) {
		int old_nid = folio_nid(old_folio);
		int new_nid = folio_nid(new_folio);

		folio_set_hugetlb_temporary(old_folio);
		folio_clear_hugetlb_temporary(new_folio);


		/*
		 * There is no need to transfer the per-node surplus state
		 * when we do not cross the node.
		 */
		if (new_nid == old_nid)
			return;
		spin_lock_irq(&hugetlb_lock);
		if (h->surplus_huge_pages_node[old_nid]) {
			h->surplus_huge_pages_node[old_nid]--;
			h->surplus_huge_pages_node[new_nid]++;
		}
		spin_unlock_irq(&hugetlb_lock);
	}

	/*
	 * Our old folio is isolated and has "migratable" cleared until it
	 * is putback. As migration succeeded, set the new folio "migratable"
	 * and add it to the active list.
	 */
	spin_lock_irq(&hugetlb_lock);
	folio_set_hugetlb_migratable(new_folio);
	list_move_tail(&new_folio->lru, &(folio_hstate(new_folio))->hugepage_activelist);
	spin_unlock_irq(&hugetlb_lock);
}
7219 
/*
 * hugetlb_unshare_pmds - unshare all shared PMD page tables in [start, end).
 * @vma:        the hugetlb VMA to operate on (must be VM_MAYSHARE to do work)
 * @start:      range start; expected to be PUD_SIZE-aligned by the caller
 * @end:        range end; expected to be PUD_SIZE-aligned by the caller
 * @take_locks: whether this function should take/release the needed locks
 *
 * If @take_locks is false, the caller must ensure that no concurrent page table
 * access can happen (except for gup_fast() and hardware page walks).
 * If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
 * concurrent page fault handling) and the file rmap lock.
 */
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end,
				   bool take_locks)
{
	struct hstate *h = hstate_vma(vma);
	unsigned long sz = huge_page_size(h);
	struct mm_struct *mm = vma->vm_mm;
	struct mmu_notifier_range range;
	struct mmu_gather tlb;
	unsigned long address;
	spinlock_t *ptl;
	pte_t *ptep;

	/* Only shared (VM_MAYSHARE) hugetlb mappings can have shared PMDs. */
	if (!(vma->vm_flags & VM_MAYSHARE))
		return;

	/* Empty (or inverted) range: nothing to unshare. */
	if (start >= end)
		return;

	flush_cache_range(vma, start, end);
	tlb_gather_mmu_vma(&tlb, vma);

	/*
	 * No need to call adjust_range_if_pmd_sharing_possible(), because
	 * we have already done the PUD_SIZE alignment.
	 */
	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
				start, end);
	mmu_notifier_invalidate_range_start(&range);
	if (take_locks) {
		hugetlb_vma_lock_write(vma);
		i_mmap_lock_write(vma->vm_file->f_mapping);
	} else {
		/* Caller promised to hold the rmap lock; verify that. */
		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
	}
	/* Walk the range one PUD at a time, since sharing is at PMD-table
	 * (i.e. PUD-entry) granularity. */
	for (address = start; address < end; address += PUD_SIZE) {
		ptep = hugetlb_walk(vma, address, sz);
		if (!ptep)
			continue;
		ptl = huge_pte_lock(h, mm, ptep);
		huge_pmd_unshare(&tlb, vma, address, ptep);
		spin_unlock(ptl);
	}
	/* Flush the unshared entries gathered above before dropping locks. */
	huge_pmd_unshare_flush(&tlb, vma);
	if (take_locks) {
		i_mmap_unlock_write(vma->vm_file->f_mapping);
		hugetlb_vma_unlock_write(vma);
	}
	/*
	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
	 * Documentation/mm/mmu_notifier.rst.
	 */
	mmu_notifier_invalidate_range_end(&range);
	tlb_finish_mmu(&tlb);
}
7282 
7283 /*
7284  * This function will unconditionally remove all the shared pmd pgtable entries
7285  * within the specific vma for a hugetlbfs memory range.
7286  */
hugetlb_unshare_all_pmds(struct vm_area_struct * vma)7287 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
7288 {
7289 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
7290 			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
7291 			/* take_locks = */ true);
7292 }
7293 
/*
 * For hugetlb, mremap() is an odd edge case - while the VMA copying is
 * performed, we permit both the old and new VMAs to reference the same
 * reservation.
 *
 * We fix this up after the operation succeeds, or if a newly allocated VMA
 * is closed as a result of a failure to allocate memory.
 */
void fixup_hugetlb_reservations(struct vm_area_struct *vma)
{
	/* Non-hugetlb VMAs carry no hugetlb reservation to drop. */
	if (!is_vm_hugetlb_page(vma))
		return;

	clear_vma_resv_huge_pages(vma);
}
7307