/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/userfaultfd_k.h
 *
 * Copyright (C) 2015 Red Hat, Inc.
 *
 */

#ifndef _LINUX_USERFAULTFD_K_H
#define _LINUX_USERFAULTFD_K_H

#ifdef CONFIG_USERFAULTFD

#include <linux/userfaultfd.h> /* linux/include/uapi/linux/userfaultfd.h */

#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <asm-generic/pgtable_uffd.h>
#include <linux/hugetlb_inline.h>

/* The set of all possible UFFD-related VM flags. */
#define __VM_UFFD_FLAGS (VM_UFFD_MISSING | VM_UFFD_WP | VM_UFFD_MINOR)

/*
 * CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
 * new flags, since they might collide with O_* ones. We want
 * to re-use O_* flags that couldn't possibly have a meaning
 * for userfaultfd, in order to leave a free define-space for
 * shared O_* flags.
 */
#define UFFD_CLOEXEC O_CLOEXEC
#define UFFD_NONBLOCK O_NONBLOCK

#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
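
/*
 * Illustrative userspace sketch (not part of this header): because the
 * UFFD_* fcntl flags alias the O_* values, a userfaultfd can be created
 * close-on-exec and non-blocking in a single call:
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int uffd = syscall(__NR_userfaultfd, O_CLOEXEC | O_NONBLOCK);
 */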

extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);

/* A combined operation mode + behavior flags. */
typedef unsigned int __bitwise uffd_flags_t;

/* Mutually exclusive modes of operation. */
enum mfill_atomic_mode {
	MFILL_ATOMIC_COPY,
	MFILL_ATOMIC_ZEROPAGE,
	MFILL_ATOMIC_CONTINUE,
	MFILL_ATOMIC_POISON,
	NR_MFILL_ATOMIC_MODES,
};

#define MFILL_ATOMIC_MODE_BITS (const_ilog2(NR_MFILL_ATOMIC_MODES - 1) + 1)
#define MFILL_ATOMIC_BIT(nr) BIT(MFILL_ATOMIC_MODE_BITS + (nr))
#define MFILL_ATOMIC_FLAG(nr) ((__force uffd_flags_t) MFILL_ATOMIC_BIT(nr))
#define MFILL_ATOMIC_MODE_MASK ((__force uffd_flags_t) (MFILL_ATOMIC_BIT(0) - 1))
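
/*
 * Worked example of the layout above, assuming the current four modes:
 * NR_MFILL_ATOMIC_MODES is 4, so MFILL_ATOMIC_MODE_BITS is
 * const_ilog2(3) + 1 == 2. The mode therefore lives in bits 0-1
 * (MFILL_ATOMIC_MODE_MASK == 0x3), and behavior flags start at
 * MFILL_ATOMIC_BIT(0) == BIT(2).
 */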

static inline bool uffd_flags_mode_is(uffd_flags_t flags, enum mfill_atomic_mode expected)
{
	return (flags & MFILL_ATOMIC_MODE_MASK) == ((__force uffd_flags_t) expected);
}

static inline uffd_flags_t uffd_flags_set_mode(uffd_flags_t flags, enum mfill_atomic_mode mode)
{
	flags &= ~MFILL_ATOMIC_MODE_MASK;
	return flags | ((__force uffd_flags_t) mode);
}

/* Flags controlling behavior. These behavior changes are mode-independent. */
#define MFILL_ATOMIC_WP MFILL_ATOMIC_FLAG(0)
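
/*
 * Illustrative sketch (hypothetical caller, not part of this header):
 * a write-protected atomic copy would compose its flags word as
 *
 *	uffd_flags_t flags = uffd_flags_set_mode(MFILL_ATOMIC_WP,
 *						 MFILL_ATOMIC_COPY);
 *
 * after which uffd_flags_mode_is(flags, MFILL_ATOMIC_COPY) is true and
 * the MFILL_ATOMIC_WP behavior bit remains set.
 */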

extern int mfill_atomic_install_pte(pmd_t *dst_pmd,
				    struct vm_area_struct *dst_vma,
				    unsigned long dst_addr, struct page *page,
				    bool newly_allocated, uffd_flags_t flags);

extern ssize_t mfill_atomic_copy(struct mm_struct *dst_mm, unsigned long dst_start,
				 unsigned long src_start, unsigned long len,
				 atomic_t *mmap_changing, uffd_flags_t flags);
extern ssize_t mfill_atomic_zeropage(struct mm_struct *dst_mm,
				     unsigned long dst_start,
				     unsigned long len,
				     atomic_t *mmap_changing);
extern ssize_t mfill_atomic_continue(struct mm_struct *dst_mm, unsigned long dst_start,
				     unsigned long len, atomic_t *mmap_changing,
				     uffd_flags_t flags);
extern ssize_t mfill_atomic_poison(struct mm_struct *dst_mm, unsigned long start,
				   unsigned long len, atomic_t *mmap_changing,
				   uffd_flags_t flags);
extern int mwriteprotect_range(struct mm_struct *dst_mm,
			       unsigned long start, unsigned long len,
			       bool enable_wp, atomic_t *mmap_changing);
extern long uffd_wp_range(struct vm_area_struct *vma,
			  unsigned long start, unsigned long len, bool enable_wp);

/* move_pages */
void double_pt_lock(spinlock_t *ptl1, spinlock_t *ptl2);
void double_pt_unlock(spinlock_t *ptl1, spinlock_t *ptl2);
ssize_t move_pages(struct userfaultfd_ctx *ctx, struct mm_struct *mm,
		   unsigned long dst_start, unsigned long src_start,
		   unsigned long len, __u64 flags);
int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pmd_t dst_pmdval,
			struct vm_area_struct *dst_vma,
			struct vm_area_struct *src_vma,
			unsigned long dst_addr, unsigned long src_addr);

/* mm helpers */
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return vma->vm_userfaultfd_ctx.ctx == vm_ctx.ctx;
}

/*
 * Never enable huge pmd sharing on certain uffd-registered vmas:
 *
 * - VM_UFFD_WP VMAs, because write protect information is per pgtable entry.
 *
 * - VM_UFFD_MINOR VMAs, because otherwise we would never get minor faults for
 *   VMAs which share huge pmds. (If you have two mappings to the same
 *   underlying pages, and fault in the non-UFFD-registered one with a write,
 *   with huge pmd sharing this would *also* set up the second UFFD-registered
 *   mapping, and we'd not get minor faults.)
 */
static inline bool uffd_disable_huge_pmd_share(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

/*
 * Don't do fault around for either WP or MINOR registered uffd ranges. For a
 * MINOR registered range, fault around would be a total disaster: ptes could
 * be installed without the expected notifications. For WP it should mostly be
 * fine as long as the fault around code checks for pte_none() before the
 * installation, but to be super safe we just forbid it.
 */
static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return vma->vm_flags & (VM_UFFD_WP | VM_UFFD_MINOR);
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MISSING;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_WP;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return vma->vm_flags & VM_UFFD_MINOR;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return userfaultfd_wp(vma) && pte_uffd_wp(pte);
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return userfaultfd_wp(vma) && pmd_uffd_wp(pmd);
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return vma->vm_flags & __VM_UFFD_FLAGS;
}

static inline bool vma_can_userfault(struct vm_area_struct *vma,
				     unsigned long vm_flags,
				     bool wp_async)
{
	vm_flags &= __VM_UFFD_FLAGS;

	if ((vm_flags & VM_UFFD_MINOR) &&
	    (!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
		return false;

	/*
	 * If wp async is enabled, and WP is the only mode requested, allow
	 * any memory type.
	 */
	if (wp_async && (vm_flags == VM_UFFD_WP))
		return true;

#ifndef CONFIG_PTE_MARKER_UFFD_WP
	/*
	 * If the user requested uffd-wp but pte markers are not enabled for
	 * uffd-wp, then only anonymous memory is supported; shmem and
	 * hugetlbfs are not.
	 */
	if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
		return false;
#endif

	/* By default, allow any of anon|shmem|hugetlb */
	return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
	       vma_is_shmem(vma);
}

extern int dup_userfaultfd(struct vm_area_struct *, struct list_head *);
extern void dup_userfaultfd_complete(struct list_head *);

extern void mremap_userfaultfd_prep(struct vm_area_struct *,
				    struct vm_userfaultfd_ctx *);
extern void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *,
					unsigned long from, unsigned long to,
					unsigned long len);

extern bool userfaultfd_remove(struct vm_area_struct *vma,
			       unsigned long start,
			       unsigned long end);

extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
		unsigned long start, unsigned long end, struct list_head *uf);
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
				       struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
extern bool userfaultfd_wp_async(struct vm_area_struct *vma);

#else /* CONFIG_USERFAULTFD */

/* mm helpers */
static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
					  unsigned long reason)
{
	return VM_FAULT_SIGBUS;
}

static inline long uffd_wp_range(struct vm_area_struct *vma,
				 unsigned long start, unsigned long len,
				 bool enable_wp)
{
	return 0;
}

static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
						   struct vm_userfaultfd_ctx vm_ctx)
{
	return true;
}

static inline bool userfaultfd_missing(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_minor(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_pte_wp(struct vm_area_struct *vma,
				      pte_t pte)
{
	return false;
}

static inline bool userfaultfd_huge_pmd_wp(struct vm_area_struct *vma,
					   pmd_t pmd)
{
	return false;
}

static inline bool userfaultfd_armed(struct vm_area_struct *vma)
{
	return false;
}

static inline int dup_userfaultfd(struct vm_area_struct *vma,
				  struct list_head *l)
{
	return 0;
}

static inline void dup_userfaultfd_complete(struct list_head *l)
{
}

static inline void mremap_userfaultfd_prep(struct vm_area_struct *vma,
					   struct vm_userfaultfd_ctx *ctx)
{
}

static inline void mremap_userfaultfd_complete(struct vm_userfaultfd_ctx *ctx,
					       unsigned long from,
					       unsigned long to,
					       unsigned long len)
{
}

static inline bool userfaultfd_remove(struct vm_area_struct *vma,
				      unsigned long start,
				      unsigned long end)
{
	return true;
}

static inline int userfaultfd_unmap_prep(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end,
					 struct list_head *uf)
{
	return 0;
}

static inline void userfaultfd_unmap_complete(struct mm_struct *mm,
					      struct list_head *uf)
{
}

static inline bool uffd_disable_fault_around(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
{
	return false;
}

static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
{
	return false;
}

#endif /* CONFIG_USERFAULTFD */

static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
{
	/* Only wr-protect mode uses pte markers */
	if (!userfaultfd_wp(vma))
		return false;

	/* File-based uffd-wp always needs markers */
	if (!vma_is_anonymous(vma))
		return true;

	/*
	 * Anonymous uffd-wp only needs the markers if WP_UNPOPULATED is
	 * enabled (to apply markers on zero pages).
	 */
	return userfaultfd_wp_unpopulated(vma);
}
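
/*
 * Illustrative sketch (assumed caller context, not a helper defined here):
 * when zapping a present pte in a VMA for which userfaultfd_wp_use_markers()
 * is true, the wr-protect state is preserved by leaving a marker behind
 * rather than a none pte:
 *
 *	if (userfaultfd_wp_use_markers(vma))
 *		set_pte_at(mm, addr, ptep,
 *			   make_pte_marker(PTE_MARKER_UFFD_WP));
 */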

static inline bool pte_marker_entry_uffd_wp(swp_entry_t entry)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	return is_pte_marker_entry(entry) &&
	    (pte_marker_get(entry) & PTE_MARKER_UFFD_WP);
#else
	return false;
#endif
}

static inline bool pte_marker_uffd_wp(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	swp_entry_t entry;

	if (!is_swap_pte(pte))
		return false;

	entry = pte_to_swp_entry(pte);

	return pte_marker_entry_uffd_wp(entry);
#else
	return false;
#endif
}

/*
 * Returns true if this is a swap pte and was uffd-wp wr-protected in either
 * form (pte marker or a normal swap pte), false otherwise.
 */
static inline bool pte_swp_uffd_wp_any(pte_t pte)
{
#ifdef CONFIG_PTE_MARKER_UFFD_WP
	if (!is_swap_pte(pte))
		return false;

	if (pte_swp_uffd_wp(pte))
		return true;

	if (pte_marker_uffd_wp(pte))
		return true;
#endif
	return false;
}
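
/*
 * Illustrative sketch (hypothetical caller, e.g. a fork/mremap style copy
 * path): pte_swp_uffd_wp_any() lets a page table walker carry the
 * wr-protect bit over to a newly built pte regardless of which swap form
 * held it:
 *
 *	if (pte_swp_uffd_wp_any(old_pte))
 *		new_pte = pte_mkuffd_wp(new_pte);
 */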

#endif /* _LINUX_USERFAULTFD_K_H */