/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * vma.h
 *
 * Core VMA manipulation API implemented in vma.c.
 */
#ifndef __MM_VMA_H
#define __MM_VMA_H

/*
 * VMA lock generalization
 */
struct vma_prepare {
	struct vm_area_struct *vma;
	struct vm_area_struct *adj_next;
	struct file *file;
	struct address_space *mapping;
	struct anon_vma *anon_vma;
	struct vm_area_struct *insert;
	struct vm_area_struct *remove;
	struct vm_area_struct *remove2;
};

struct unlink_vma_file_batch {
	int count;
	struct vm_area_struct *vmas[8];
};

/*
 * vma munmap operation
 */
struct vma_munmap_struct {
	struct vma_iterator *vmi;
	struct vm_area_struct *vma;     /* The first vma to munmap */
	struct vm_area_struct *prev;    /* vma before the munmap area */
	struct vm_area_struct *next;    /* vma after the munmap area */
	struct list_head *uf;           /* Userfaultfd list_head */
	unsigned long start;            /* Aligned start addr (inclusive) */
	unsigned long end;              /* Aligned end addr (exclusive) */
	unsigned long unmap_start;      /* Unmap PTE start */
	unsigned long unmap_end;        /* Unmap PTE end */
	int vma_count;                  /* Number of vmas that will be removed */
	bool unlock;                    /* Unlock after the munmap */
	bool clear_ptes;                /* If there are outstanding PTEs to be cleared */
	/* 2 byte hole */
	unsigned long nr_pages;         /* Number of pages being removed */
	unsigned long locked_vm;        /* Number of locked pages */
	unsigned long nr_accounted;     /* Number of VM_ACCOUNT pages */
	unsigned long exec_vm;
	unsigned long stack_vm;
	unsigned long data_vm;
};

enum vma_merge_state {
	VMA_MERGE_START,
	VMA_MERGE_ERROR_NOMEM,
	VMA_MERGE_NOMERGE,
	VMA_MERGE_SUCCESS,
};

/*
 * Describes a VMA merge operation and is threaded throughout it.
 *
 * Any of the fields may be mutated by the merge operation, so no guarantees are
 * made as to the contents of this structure after a merge operation has
 * completed.
 */
struct vma_merge_struct {
	struct mm_struct *mm;
	struct vma_iterator *vmi;
	/*
	 * Adjacent VMAs, any of which may be NULL if not present:
	 *
	 * |------|--------|------|
	 * | prev | middle | next |
	 * |------|--------|------|
	 *
	 * middle may not yet exist in the case of a proposed new VMA being
	 * merged, or it may be an existing VMA.
	 *
	 * next may be assigned by the caller.
	 */
	struct vm_area_struct *prev;
	struct vm_area_struct *middle;
	struct vm_area_struct *next;
	/* This is the VMA we ultimately target to become the merged VMA. */
	struct vm_area_struct *target;
	/*
	 * Initially, the start, end, pgoff fields are provided by the caller
	 * and describe the proposed new VMA range, whether modifying an
	 * existing VMA (which will be 'middle'), or adding a new one.
	 *
	 * During the merge process these fields are updated to describe the new
	 * range _including those VMAs which will be merged_.
	 */
	unsigned long start;
	unsigned long end;
	pgoff_t pgoff;

	unsigned long flags;
	struct file *file;
	struct anon_vma *anon_vma;
	struct mempolicy *policy;
	struct vm_userfaultfd_ctx uffd_ctx;
	struct anon_vma_name *anon_name;
	enum vma_merge_state state;

	/* Flags which callers can use to modify merge behaviour: */

	/*
	 * If we can expand, simply do so. We know there is nothing to merge to
	 * the right. Does not reset state upon failure to merge. The VMA
	 * iterator is assumed to be positioned at the previous VMA, rather than
	 * at the gap.
	 */
	bool just_expand :1;

	/*
	 * If a merge is possible, but an OOM error occurs, give up and don't
	 * execute the merge, returning NULL.
	 */
	bool give_up_on_oom :1;

	/* Internal flags set during merge process: */

	/*
	 * Internal flag indicating the merge increases vmg->middle->vm_start
	 * (and thereby, vmg->prev->vm_end).
	 */
	bool __adjust_middle_start :1;
	/*
	 * Internal flag indicating the merge decreases vmg->next->vm_start
	 * (and thereby, vmg->middle->vm_end).
	 */
	bool __adjust_next_start :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->middle.
	 */
	bool __remove_middle :1;
	/*
	 * Internal flag used during the merge operation to indicate we will
	 * remove vmg->next.
	 */
	bool __remove_next :1;
};

static inline bool vmg_nomem(struct vma_merge_struct *vmg)
{
	return vmg->state == VMA_MERGE_ERROR_NOMEM;
}

/* Assumes addr >= vma->vm_start. */
static inline pgoff_t vma_pgoff_offset(struct vm_area_struct *vma,
				       unsigned long addr)
{
	return vma->vm_pgoff + PHYS_PFN(addr - vma->vm_start);
}
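
/*
 * Illustrative example only (the values are hypothetical): for a VMA whose
 * vm_pgoff is 10, an address two pages past vm_start corresponds to page
 * offset 12 within the backing object:
 *
 *	pgoff_t pgoff = vma_pgoff_offset(vma, vma->vm_start + 2 * PAGE_SIZE);
 *	// pgoff == vma->vm_pgoff + 2 == 12
 */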

#define VMG_STATE(name, mm_, vmi_, start_, end_, flags_, pgoff_)	\
	struct vma_merge_struct name = {				\
		.mm = mm_,						\
		.vmi = vmi_,						\
		.start = start_,					\
		.end = end_,						\
		.flags = flags_,					\
		.pgoff = pgoff_,					\
		.state = VMA_MERGE_START,				\
	}

#define VMG_VMA_STATE(name, vmi_, prev_, vma_, start_, end_)	\
	struct vma_merge_struct name = {			\
		.mm = vma_->vm_mm,				\
		.vmi = vmi_,					\
		.prev = prev_,					\
		.middle = vma_,					\
		.next = NULL,					\
		.start = start_,				\
		.end = end_,					\
		.flags = vma_->vm_flags,			\
		.pgoff = vma_pgoff_offset(vma_, start_),	\
		.file = vma_->vm_file,				\
		.anon_vma = vma_->anon_vma,			\
		.policy = vma_policy(vma_),			\
		.uffd_ctx = vma_->vm_userfaultfd_ctx,		\
		.anon_name = anon_vma_name(vma_),		\
		.state = VMA_MERGE_START,			\
	}
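
/*
 * Sketch of how a caller might drive a merge for a proposed new range
 * (illustrative only; mm, vmi, addr, end, vm_flags and pgoff are assumed to
 * have already been set up by the caller):
 *
 *	VMG_STATE(vmg, mm, &vmi, addr, end, vm_flags, pgoff);
 *	struct vm_area_struct *vma = vma_merge_new_range(&vmg);
 *
 *	if (vma)
 *		;	// merged into an adjacent VMA
 *	else if (vmg_nomem(&vmg))
 *		;	// a merge was possible but allocation failed
 *	else
 *		;	// no merge possible; allocate a new VMA instead
 */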

#ifdef CONFIG_DEBUG_VM_MAPLE_TREE
void validate_mm(struct mm_struct *mm);
#else
#define validate_mm(mm) do { } while (0)
#endif

__must_check int vma_expand(struct vma_merge_struct *vmg);
__must_check int vma_shrink(struct vma_iterator *vmi,
		struct vm_area_struct *vma,
		unsigned long start, unsigned long end, pgoff_t pgoff);

/*
 * Store @vma over [vm_start, vm_end) in the maple tree, allocating any nodes
 * required with @gfp, and mark the VMA attached. Returns -ENOMEM if node
 * allocation fails.
 */
static inline int vma_iter_store_gfp(struct vma_iterator *vmi,
			struct vm_area_struct *vma, gfp_t gfp)
{
	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_gfp(&vmi->mas, vma, gfp);
	if (unlikely(mas_is_err(&vmi->mas)))
		return -ENOMEM;

	vma_mark_attached(vma);
	return 0;
}

int
do_vmi_align_munmap(struct vma_iterator *vmi, struct vm_area_struct *vma,
		    struct mm_struct *mm, unsigned long start,
		    unsigned long end, struct list_head *uf, bool unlock);

int do_vmi_munmap(struct vma_iterator *vmi, struct mm_struct *mm,
		  unsigned long start, size_t len, struct list_head *uf,
		  bool unlock);

void remove_vma(struct vm_area_struct *vma);

void unmap_region(struct ma_state *mas, struct vm_area_struct *vma,
		struct vm_area_struct *prev, struct vm_area_struct *next);

/* We are about to modify the VMA's flags. */
__must_check struct vm_area_struct
*vma_modify_flags(struct vma_iterator *vmi,
		struct vm_area_struct *prev, struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		unsigned long new_flags);

/* We are about to modify the VMA's flags and/or anon_name. */
__must_check struct vm_area_struct
*vma_modify_flags_name(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start,
		       unsigned long end,
		       unsigned long new_flags,
		       struct anon_vma_name *new_name);

/* We are about to modify the VMA's memory policy. */
__must_check struct vm_area_struct
*vma_modify_policy(struct vma_iterator *vmi,
		   struct vm_area_struct *prev,
		   struct vm_area_struct *vma,
		   unsigned long start, unsigned long end,
		   struct mempolicy *new_pol);

/* We are about to modify the VMA's flags and/or uffd context. */
__must_check struct vm_area_struct
*vma_modify_flags_uffd(struct vma_iterator *vmi,
		       struct vm_area_struct *prev,
		       struct vm_area_struct *vma,
		       unsigned long start, unsigned long end,
		       unsigned long new_flags,
		       struct vm_userfaultfd_ctx new_ctx,
		       bool give_up_on_oom);

__must_check struct vm_area_struct
*vma_merge_new_range(struct vma_merge_struct *vmg);

__must_check struct vm_area_struct
*vma_merge_extend(struct vma_iterator *vmi,
		  struct vm_area_struct *vma,
		  unsigned long delta);

void unlink_file_vma_batch_init(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_final(struct unlink_vma_file_batch *vb);

void unlink_file_vma_batch_add(struct unlink_vma_file_batch *vb,
			       struct vm_area_struct *vma);
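
/*
 * Illustrative batching pattern only (the loop is pseudocode): unlink a run
 * of file-backed VMAs from their address_space in batches of up to eight:
 *
 *	struct unlink_vma_file_batch vb;
 *
 *	unlink_file_vma_batch_init(&vb);
 *	// for each file-backed VMA being torn down:
 *	//	unlink_file_vma_batch_add(&vb, vma);
 *	unlink_file_vma_batch_final(&vb);
 */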

void unlink_file_vma(struct vm_area_struct *vma);

void vma_link_file(struct vm_area_struct *vma);

int vma_link(struct mm_struct *mm, struct vm_area_struct *vma);

struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
	unsigned long addr, unsigned long len, pgoff_t pgoff,
	bool *need_rmap_locks);

struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma);

bool vma_needs_dirty_tracking(struct vm_area_struct *vma);
bool vma_wants_writenotify(struct vm_area_struct *vma, pgprot_t vm_page_prot);

int mm_take_all_locks(struct mm_struct *mm);
void mm_drop_all_locks(struct mm_struct *mm);

unsigned long mmap_region(struct file *file, unsigned long addr,
		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
		struct list_head *uf);

int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *brkvma,
		 unsigned long addr, unsigned long request, unsigned long flags);

unsigned long unmapped_area(struct vm_unmapped_area_info *info);
unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);

static inline bool vma_wants_manual_pte_write_upgrade(struct vm_area_struct *vma)
{
	/*
	 * We want to check manually if we can change individual PTEs writable
	 * if we can't do that automatically for all PTEs in a mapping. For
	 * private mappings, that's always the case when we have write
	 * permissions as we properly have to handle COW.
	 */
	if (vma->vm_flags & VM_SHARED)
		return vma_wants_writenotify(vma, vma->vm_page_prot);
	return !!(vma->vm_flags & VM_WRITE);
}

#ifdef CONFIG_MMU
static inline pgprot_t vm_pgprot_modify(pgprot_t oldprot, unsigned long vm_flags)
{
	return pgprot_modify(oldprot, vm_get_page_prot(vm_flags));
}
#endif

static inline struct vm_area_struct *vma_prev_limit(struct vma_iterator *vmi,
						    unsigned long min)
{
	return mas_prev(&vmi->mas, min);
}

/*
 * These three helpers classify VMAs for virtual memory accounting (see the
 * examples after is_data_mapping() below).
 */

/*
 * Executable code area - executable, not writable, not stack
 */
static inline bool is_exec_mapping(vm_flags_t flags)
{
	return (flags & (VM_EXEC | VM_WRITE | VM_STACK)) == VM_EXEC;
}

/*
 * Stack area (including shadow stacks)
 *
 * VM_GROWSUP / VM_GROWSDOWN VMAs are always private anonymous:
 * do_mmap() forbids all other combinations.
 */
static inline bool is_stack_mapping(vm_flags_t flags)
{
	return ((flags & VM_STACK) == VM_STACK) || (flags & VM_SHADOW_STACK);
}

/*
 * Data area - private, writable, not stack
 */
static inline bool is_data_mapping(vm_flags_t flags)
{
	return (flags & (VM_WRITE | VM_SHARED | VM_STACK)) == VM_WRITE;
}
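
/*
 * Examples (illustrative flag combinations only): a PROT_READ|PROT_EXEC file
 * mapping (VM_READ | VM_EXEC) is counted as exec, a private
 * PROT_READ|PROT_WRITE anonymous mapping (VM_READ | VM_WRITE) as data, and a
 * growable stack or shadow stack as stack:
 *
 *	is_exec_mapping(VM_READ | VM_EXEC);			// true
 *	is_data_mapping(VM_READ | VM_WRITE);			// true
 *	is_stack_mapping(VM_STACK | VM_READ | VM_WRITE);	// true
 */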

/* Configure the iterator to operate over [index, last); last is exclusive. */
static inline void vma_iter_config(struct vma_iterator *vmi,
		unsigned long index, unsigned long last)
{
	__mas_set_range(&vmi->mas, index, last - 1);
}

static inline void vma_iter_reset(struct vma_iterator *vmi)
{
	mas_reset(&vmi->mas);
}

static inline
struct vm_area_struct *vma_iter_prev_range_limit(struct vma_iterator *vmi, unsigned long min)
{
	return mas_prev_range(&vmi->mas, min);
}

static inline
struct vm_area_struct *vma_iter_next_range_limit(struct vma_iterator *vmi, unsigned long max)
{
	return mas_next_range(&vmi->mas, max);
}

static inline int vma_iter_area_lowest(struct vma_iterator *vmi, unsigned long min,
				       unsigned long max, unsigned long size)
{
	return mas_empty_area(&vmi->mas, min, max - 1, size);
}

static inline int vma_iter_area_highest(struct vma_iterator *vmi, unsigned long min,
					unsigned long max, unsigned long size)
{
	return mas_empty_area_rev(&vmi->mas, min, max - 1, size);
}

/*
 * VMA Iterator functions shared between nommu and mmap
 */
static inline int vma_iter_prealloc(struct vma_iterator *vmi,
		struct vm_area_struct *vma)
{
	return mas_preallocate(&vmi->mas, vma, GFP_KERNEL);
}

static inline void vma_iter_clear(struct vma_iterator *vmi)
{
	mas_store_prealloc(&vmi->mas, NULL);
}

static inline struct vm_area_struct *vma_iter_load(struct vma_iterator *vmi)
{
	return mas_walk(&vmi->mas);
}

/* Store a VMA with preallocated memory */
static inline void vma_iter_store_overwrite(struct vma_iterator *vmi,
					    struct vm_area_struct *vma)
{
	vma_assert_attached(vma);

#if defined(CONFIG_DEBUG_VM_MAPLE_TREE)
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.index > vma->vm_start)) {
		pr_warn("%lx > %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
			vmi->mas.index, vma->vm_start, vma->vm_start,
			vma->vm_end, vmi->mas.index, vmi->mas.last);
	}
	if (MAS_WARN_ON(&vmi->mas, vmi->mas.status != ma_start &&
			vmi->mas.last < vma->vm_start)) {
		pr_warn("%lx < %lx\nstore vma %lx-%lx\ninto slot %lx-%lx\n",
		       vmi->mas.last, vma->vm_start, vma->vm_start, vma->vm_end,
		       vmi->mas.index, vmi->mas.last);
	}
#endif

	if (vmi->mas.status != ma_start &&
	    ((vmi->mas.index > vma->vm_start) || (vmi->mas.last < vma->vm_start)))
		vma_iter_invalidate(vmi);

	__mas_set_range(&vmi->mas, vma->vm_start, vma->vm_end - 1);
	mas_store_prealloc(&vmi->mas, vma);
}

static inline void vma_iter_store_new(struct vma_iterator *vmi,
				      struct vm_area_struct *vma)
{
	vma_mark_attached(vma);
	vma_iter_store_overwrite(vmi, vma);
}
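
/*
 * Illustrative pairing of preallocation with a store (sketch only, error
 * handling elided):
 *
 *	if (vma_iter_prealloc(&vmi, vma))
 *		return -ENOMEM;
 *	vma_iter_store_new(&vmi, vma);	// consumes the preallocated nodes
 */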

static inline unsigned long vma_iter_addr(struct vma_iterator *vmi)
{
	return vmi->mas.index;
}

static inline unsigned long vma_iter_end(struct vma_iterator *vmi)
{
	return vmi->mas.last + 1;
}

static inline int vma_iter_bulk_alloc(struct vma_iterator *vmi,
				      unsigned long count)
{
	return mas_expected_entries(&vmi->mas, count);
}

static inline
struct vm_area_struct *vma_iter_prev_range(struct vma_iterator *vmi)
{
	return mas_prev_range(&vmi->mas, 0);
}

/*
 * Retrieve the next VMA and rewind the iterator to the end of the previous
 * VMA, or if no previous VMA, to index 0.
 */
static inline
struct vm_area_struct *vma_iter_next_rewind(struct vma_iterator *vmi,
		struct vm_area_struct **pprev)
{
	struct vm_area_struct *next = vma_next(vmi);
	struct vm_area_struct *prev = vma_prev(vmi);

	/*
	 * Consider the case where no previous VMA exists. We advance to the
	 * next VMA, skipping any gap, then rewind to the start of the range.
	 *
	 * If we were to unconditionally advance to the next range we'd wind up
	 * at the next VMA again, so we check to ensure there is a previous VMA
	 * to skip over.
	 */
	if (prev)
		vma_iter_next_range(vmi);

	if (pprev)
		*pprev = prev;

	return next;
}
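
/*
 * Illustrative use only: locate the VMAs on either side of a range, assuming
 * the iterator is already positioned at the range's start address:
 *
 *	struct vm_area_struct *prev, *next;
 *
 *	next = vma_iter_next_rewind(&vmi, &prev);
 *	// next: first VMA at or after the start address (or NULL)
 *	// prev: the VMA preceding it (or NULL); iterator rewound behind next
 */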

#ifdef CONFIG_64BIT

static inline bool vma_is_sealed(struct vm_area_struct *vma)
{
	return (vma->vm_flags & VM_SEALED);
}

/*
 * Check whether a VMA is sealed against modification.
 * Returns true if modification is allowed.
 */
static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	if (unlikely(vma_is_sealed(vma)))
		return false;

	return true;
}

bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior);

#else

static inline bool can_modify_vma(struct vm_area_struct *vma)
{
	return true;
}

static inline bool can_modify_vma_madv(struct vm_area_struct *vma, int behavior)
{
	return true;
}

#endif
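
/*
 * Typical caller pattern (illustrative only), e.g. before unmapping or
 * changing protections on a VMA:
 *
 *	if (!can_modify_vma(vma))
 *		return -EPERM;
 */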

#if defined(CONFIG_STACK_GROWSUP)
int expand_upwards(struct vm_area_struct *vma, unsigned long address);
#endif

int expand_downwards(struct vm_area_struct *vma, unsigned long address);

int __vm_munmap(unsigned long start, size_t len, bool unlock);

#endif	/* __MM_VMA_H */