// SPDX-License-Identifier: GPL-2.0
/*
 * mm/pgtable-generic.c
 *
 * Generic pgtable methods declared in linux/pgtable.h
 *
 * Copyright (C) 2010 Linus Torvalds
 */

#include <linux/pagemap.h>
#include <linux/hugetlb.h>
#include <linux/pgtable.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/mm_inline.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
/*
 * If a p?d_bad entry is found while walking page tables, report
 * the error before resetting the entry to p?d_none. These are
 * usually called, though very seldom, from the
 * p?d_none_or_clear_bad macros.
 */

void pgd_clear_bad(pgd_t *pgd)
{
        pgd_ERROR(*pgd);
        pgd_clear(pgd);
}

#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *p4d)
{
        p4d_ERROR(*p4d);
        p4d_clear(p4d);
}
#endif

#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *pud)
{
        pud_ERROR(*pud);
        pud_clear(pud);
}
#endif

/*
 * Note that the pmd variant below can't be stubbed out just as for
 * p4d/pud above: pmd folding is special, and typically the pmd_*
 * macros refer to the upper level even when folded.
 */
void pmd_clear_bad(pmd_t *pmd)
{
        pmd_ERROR(*pmd);
        pmd_clear(pmd);
}
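
/*
 * For context, a hedged sketch of how the p?d_none_or_clear_bad helpers
 * in include/linux/pgtable.h call into the functions above (simplified,
 * not code from this file):
 *
 *	static inline int pmd_none_or_clear_bad(pmd_t *pmd)
 *	{
 *		if (pmd_none(*pmd))
 *			return 1;
 *		if (unlikely(pmd_bad(*pmd))) {
 *			pmd_clear_bad(pmd);
 *			return 1;
 *		}
 *		return 0;
 *	}
 */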

#ifndef __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
/*
 * Only sets the access flags (dirty, accessed), as well as write
 * permission. Furthermore, we know it always gets set to a "more
 * permissive" setting, which allows most architectures to optimize
 * this. We return whether the PTE actually changed, which in turn
 * instructs the caller to do things like update_mmu_cache(). This
 * used to be done in the caller, but sparc needs minor faults to
 * force that call on sun4c, so we changed this macro slightly.
 */
int ptep_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pte_t *ptep,
                          pte_t entry, int dirty)
{
        int changed = !pte_same(ptep_get(ptep), entry);
        if (changed) {
                set_pte_at(vma->vm_mm, address, ptep, entry);
                flush_tlb_fix_spurious_fault(vma, address, ptep);
        }
        return changed;
}
#endif
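
/*
 * Hedged usage sketch (not code from this file): a fault path that has
 * computed a more-permissive 'entry' would typically pair the call with
 * the update_mmu_cache() hook mentioned above, roughly:
 *
 *	if (ptep_set_access_flags(vma, address, ptep, entry, dirty))
 *		update_mmu_cache(vma, address, ptep);
 *
 * Whether update_mmu_cache() actually does anything is
 * architecture-specific.
 */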

#ifndef __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
int ptep_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pte_t *ptep)
{
        int young;
        young = ptep_test_and_clear_young(vma, address, ptep);
        if (young)
                flush_tlb_page(vma, address);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PTEP_CLEAR_FLUSH
pte_t ptep_clear_flush(struct vm_area_struct *vma, unsigned long address,
                       pte_t *ptep)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t pte;
        pte = ptep_get_and_clear(mm, address, ptep);
        if (pte_accessible(mm, pte))
                flush_tlb_page(vma, address);
        return pte;
}
#endif

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

#ifndef __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
int pmdp_set_access_flags(struct vm_area_struct *vma,
                          unsigned long address, pmd_t *pmdp,
                          pmd_t entry, int dirty)
{
        int changed = !pmd_same(*pmdp, entry);
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        if (changed) {
                set_pmd_at(vma->vm_mm, address, pmdp, entry);
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        }
        return changed;
}
#endif

#ifndef __HAVE_ARCH_PMDP_CLEAR_YOUNG_FLUSH
int pmdp_clear_flush_young(struct vm_area_struct *vma,
                           unsigned long address, pmd_t *pmdp)
{
        int young;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        young = pmdp_test_and_clear_young(vma, address, pmdp);
        if (young)
                flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return young;
}
#endif

#ifndef __HAVE_ARCH_PMDP_HUGE_CLEAR_FLUSH
pmd_t pmdp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp)
{
        pmd_t pmd;
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_present(*pmdp) && !pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
pud_t pudp_huge_clear_flush(struct vm_area_struct *vma, unsigned long address,
                            pud_t *pudp)
{
        pud_t pud;

        VM_BUG_ON(address & ~HPAGE_PUD_MASK);
        VM_BUG_ON(!pud_trans_huge(*pudp));
        pud = pudp_huge_get_and_clear(vma->vm_mm, address, pudp);
        flush_pud_tlb_range(vma, address, address + HPAGE_PUD_SIZE);
        return pud;
}
#endif
#endif

#ifndef __HAVE_ARCH_PGTABLE_DEPOSIT
void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                pgtable_t pgtable)
{
        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        if (!pmd_huge_pte(mm, pmdp))
                INIT_LIST_HEAD(&pgtable->lru);
        else
                list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
        pmd_huge_pte(mm, pmdp) = pgtable;
}
#endif

#ifndef __HAVE_ARCH_PGTABLE_WITHDRAW
/* no "address" argument, so this destroys the page coloring used by some archs */
pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp)
{
        pgtable_t pgtable;

        assert_spin_locked(pmd_lockptr(mm, pmdp));

        /* FIFO */
        pgtable = pmd_huge_pte(mm, pmdp);
        pmd_huge_pte(mm, pmdp) = list_first_entry_or_null(&pgtable->lru,
                                                          struct page, lru);
        if (pmd_huge_pte(mm, pmdp))
                list_del(&pgtable->lru);
        return pgtable;
}
#endif
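
/*
 * Hedged usage sketch (simplified, not code from this file): the THP
 * code pairs deposit and withdraw under the pmd lock, so that a
 * preallocated page table is always available if a huge pmd must later
 * be split back into ptes:
 *
 *	ptl = pmd_lock(mm, pmdp);
 *	pgtable_trans_huge_deposit(mm, pmdp, pgtable);    (at collapse)
 *	...
 *	pgtable = pgtable_trans_huge_withdraw(mm, pmdp);  (at split/zap)
 *	spin_unlock(ptl);
 */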

#ifndef __HAVE_ARCH_PMDP_INVALIDATE
pmd_t pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                      pmd_t *pmdp)
{
        VM_WARN_ON_ONCE(!pmd_present(*pmdp));
        pmd_t old = pmdp_establish(vma, address, pmdp, pmd_mkinvalid(*pmdp));
        flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return old;
}
#endif

#ifndef __HAVE_ARCH_PMDP_INVALIDATE_AD
pmd_t pmdp_invalidate_ad(struct vm_area_struct *vma, unsigned long address,
                         pmd_t *pmdp)
{
        VM_WARN_ON_ONCE(!pmd_present(*pmdp));
        return pmdp_invalidate(vma, address, pmdp);
}
#endif

#ifndef pmdp_collapse_flush
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                          pmd_t *pmdp)
{
        /*
         * The pmd and hugepage pte formats are the same, so we can
         * use the same function.
         */
        pmd_t pmd;

        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
        VM_BUG_ON(pmd_trans_huge(*pmdp));
        pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);

        /* collapse entails shooting down the ptes, not the pmd */
        flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
        return pmd;
}
#endif

/* an arch may define pte_free_defer in asm/pgalloc.h to provide its own implementation */
#ifndef pte_free_defer
static void pte_free_now(struct rcu_head *head)
{
        struct page *page;

        page = container_of(head, struct page, rcu_head);
        pte_free(NULL /* mm not passed and not used */, (pgtable_t)page);
}

void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable)
{
        struct page *page;

        page = pgtable;
        call_rcu(&page->rcu_head, pte_free_now);
}
#endif /* pte_free_defer */
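
/*
 * Hedged usage sketch (simplified, not code from this file): a caller
 * that has just cleared a pmd entry can defer freeing its page table
 * past any lockless walkers still inside an RCU read-side section:
 *
 *	pmdval = pmdp_collapse_flush(vma, addr, pmd);
 *	pte_free_defer(mm, pmd_pgtable(pmdval));
 *
 * pmd_pgtable() recovers the page table from the old pmd value; the RCU
 * grace period then covers __pte_offset_map()'s rcu_read_lock() users.
 */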
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

#if defined(CONFIG_GUP_GET_PXX_LOW_HIGH) && \
        (defined(CONFIG_SMP) || defined(CONFIG_PREEMPT_RCU))
/*
 * See the comment above ptep_get_lockless() in include/linux/pgtable.h:
 * the barriers in pmdp_get_lockless() cannot guarantee that the value in
 * pmd_high actually belongs with the value in pmd_low; but holding interrupts
 * off blocks the TLB flush between present updates, which guarantees that a
 * successful __pte_offset_map() points to a page from matched halves.
 */
static unsigned long pmdp_get_lockless_start(void)
{
        unsigned long irqflags;

        local_irq_save(irqflags);
        return irqflags;
}
static void pmdp_get_lockless_end(unsigned long irqflags)
{
        local_irq_restore(irqflags);
}
#else
static unsigned long pmdp_get_lockless_start(void) { return 0; }
static void pmdp_get_lockless_end(unsigned long irqflags) { }
#endif

pte_t *___pte_offset_map(pmd_t *pmd, unsigned long addr, pmd_t *pmdvalp)
{
        unsigned long irqflags;
        pmd_t pmdval;

        rcu_read_lock();
        irqflags = pmdp_get_lockless_start();
        pmdval = pmdp_get_lockless(pmd);
        pmdp_get_lockless_end(irqflags);

        if (pmdvalp)
                *pmdvalp = pmdval;
        if (unlikely(pmd_none(pmdval) || is_pmd_migration_entry(pmdval)))
                goto nomap;
        if (unlikely(pmd_trans_huge(pmdval)))
                goto nomap;
        if (unlikely(pmd_bad(pmdval))) {
                pmd_clear_bad(pmd);
                goto nomap;
        }
        return __pte_map(&pmdval, addr);
nomap:
        rcu_read_unlock();
        return NULL;
}

pte_t *pte_offset_map_ro_nolock(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long addr, spinlock_t **ptlp)
{
        pmd_t pmdval;
        pte_t *pte;

        pte = __pte_offset_map(pmd, addr, &pmdval);
        if (likely(pte))
                *ptlp = pte_lockptr(mm, &pmdval);
        return pte;
}

pte_t *pte_offset_map_rw_nolock(struct mm_struct *mm, pmd_t *pmd,
                                unsigned long addr, pmd_t *pmdvalp,
                                spinlock_t **ptlp)
{
        pte_t *pte;

        VM_WARN_ON_ONCE(!pmdvalp);
        pte = __pte_offset_map(pmd, addr, pmdvalp);
        if (likely(pte))
                *ptlp = pte_lockptr(mm, pmdvalp);
        return pte;
}

/*
 * pte_offset_map_lock(mm, pmd, addr, ptlp), and its internal implementation
 * __pte_offset_map_lock() below, is usually called with the pmd pointer for
 * addr, reached by walking down the mm's pgd, p4d, pud for addr: either while
 * holding mmap_lock or vma lock for read or for write; or in truncate or rmap
 * context, while holding file's i_mmap_lock or anon_vma lock for read (or for
 * write). In a few cases, it may be used with pmd pointing to a pmd_t already
 * copied to or constructed on the stack.
 *
 * When successful, it returns the pte pointer for addr, with its page table
 * kmapped if necessary (when CONFIG_HIGHPTE), and locked against concurrent
 * modification by software, with a pointer to that spinlock in ptlp (in some
 * configs mm->page_table_lock, in SPLIT_PTLOCK configs a spinlock in the
 * table's struct page). Call pte_unmap_unlock(pte, ptl) to unlock and unmap
 * afterwards.
 *
 * But it is unsuccessful, returning NULL with *ptlp unchanged, if there is no
 * page table at *pmd: if, for example, the page table has just been removed,
 * or replaced by the huge pmd of a THP. (When successful, *pmd is rechecked
 * after acquiring the ptlock, and retried internally if it changed: so that a
 * page table can be safely removed or replaced by THP while holding its lock.)
 *
 * pte_offset_map(pmd, addr), and its internal helper __pte_offset_map() above,
 * just returns the pte pointer for addr, its page table kmapped if necessary;
 * or NULL if there is no page table at *pmd. It does not attempt to lock the
 * page table, so cannot normally be used when the page table is to be updated,
 * or when entries read must be stable. But it does take rcu_read_lock(): so
 * that even when the page table is racily removed, it remains a valid though
 * empty and disconnected table, until pte_unmap(pte) unmaps it and
 * rcu_read_unlock()s afterwards.
 *
 * pte_offset_map_ro_nolock(mm, pmd, addr, ptlp), above, is like
 * pte_offset_map(); but when successful, it also outputs a pointer to the
 * spinlock in ptlp - as pte_offset_map_lock() does, but in this case without
 * locking it. This helps the caller to avoid a later pte_lockptr(mm, *pmd),
 * which might by that time act on a changed *pmd: pte_offset_map_ro_nolock()
 * provides the correct spinlock pointer for the page table that it returns.
 * Even after grabbing the spinlock, we might be looking either at a page
 * table that is still mapped or one that was unmapped and is about to get
 * freed. But for R/O access this is sufficient. So it is only applicable to
 * read-only cases, where no modification of the page table is allowed even
 * if the corresponding spinlock is held afterwards.
 *
 * pte_offset_map_rw_nolock(mm, pmd, addr, pmdvalp, ptlp), above, is like
 * pte_offset_map_ro_nolock(); but when successful, it also outputs the
 * pmdval. It is applicable to may-write cases, where modifications of the
 * page table may happen after the corresponding spinlock is held. But the
 * caller must make sure the page table is stable, e.g. by checking pte_same()
 * or by checking pmd_same() against the output pmdval, before performing the
 * write operations.
 *
 * Note: "RO" / "RW" expresses the intended semantics, not that the *kmap*
 * will be read-only/read-write protected.
 *
 * Note that free_pgtables(), used after unmapping detached vmas, or when
 * exiting the whole mm, does not take page table lock before freeing a page
 * table, and may not use RCU at all: "outsiders" like khugepaged should avoid
 * pte_offset_map() and co once the vma is detached from mm or mm_users is zero.
 */
pte_t *__pte_offset_map_lock(struct mm_struct *mm, pmd_t *pmd,
                             unsigned long addr, spinlock_t **ptlp)
{
        spinlock_t *ptl;
        pmd_t pmdval;
        pte_t *pte;
again:
        pte = __pte_offset_map(pmd, addr, &pmdval);
        if (unlikely(!pte))
                return pte;
        ptl = pte_lockptr(mm, &pmdval);
        spin_lock(ptl);
        if (likely(pmd_same(pmdval, pmdp_get_lockless(pmd)))) {
                *ptlp = ptl;
                return pte;
        }
        pte_unmap_unlock(pte, ptl);
        goto again;
}
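
/*
 * Hedged usage sketch (not code from this file): a typical caller of the
 * locked variant documented above looks roughly like:
 *
 *	pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
 *	if (!pte)
 *		return;			(no page table at *pmd)
 *	entry = ptep_get(pte);
 *	... read or modify the entry under ptl ...
 *	pte_unmap_unlock(pte, ptl);
 *
 * The NULL check is mandatory: the page table may have been removed, or
 * replaced by a huge pmd, between walking to the pmd and taking the lock.
 */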