/* SPDX-License-Identifier: GPL-2.0-or-later */
/* include/asm-generic/tlb.h
 *
 * Generic TLB shootdown code
 *
 * Copyright 2001 Red Hat, Inc.
 * Based on code from mm/memory.c Copyright Linus Torvalds and others.
 *
 * Copyright 2011 Red Hat, Inc., Peter Zijlstra
 */
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H

#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <linux/hugetlb_inline.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

/*
 * Blindly accessing user memory from NMI context can be dangerous
 * if we're in the middle of switching the current user task or switching
 * the loaded mm.
 */
#ifndef nmi_uaccess_okay
# define nmi_uaccess_okay() true
#endif

#ifdef CONFIG_MMU

/*
 * Generic MMU-gather implementation.
 *
 * The mmu_gather data structure is used by the mm code to implement the
 * correct and efficient ordering of freeing pages and TLB invalidations.
 *
 * This correct ordering is:
 *
 *  1) unhook page
 *  2) TLB invalidate page
 *  3) free page
 *
 * That is, we must never free a page before we have ensured there are no live
 * translations left to it. Otherwise it might be possible to observe (or
 * worse, change) the page content after it has been reused.
 *
 * The mmu_gather API consists of (an illustrative usage sketch follows this
 * comment):
 *
 *  - tlb_gather_mmu() / tlb_gather_mmu_fullmm() / tlb_gather_mmu_vma() /
 *    tlb_finish_mmu()
 *
 *    start and finish a mmu_gather
 *
 *    Finish in particular will issue a (final) TLB invalidate and free
 *    all (remaining) queued pages.
 *
 *  - tlb_start_vma() / tlb_end_vma(); marks the start / end of a VMA
 *
 *    Defaults to flushing at tlb_end_vma() to reset the range; helps when
 *    there are large holes between the VMAs.
 *
 *  - tlb_free_vmas()
 *
 *    tlb_free_vmas() marks the start of unlinking of one or more vmas
 *    and freeing page-tables.
 *
 *  - tlb_remove_table()
 *
 *    tlb_remove_table() is the basic primitive to free page-table directories
 *    (__p*_free_tlb()). In its most primitive form it is an alias for
 *    tlb_remove_page() below, for when page directories are pages and have no
 *    additional constraints.
 *
 *    See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
 *
 *  - tlb_remove_page() / tlb_remove_page_size()
 *  - __tlb_remove_folio_pages() / __tlb_remove_page_size()
 *  - __tlb_remove_folio_pages_size()
 *
 *    __tlb_remove_folio_pages_size() is the basic primitive that queues pages
 *    for freeing. It will return a boolean indicating if the queue is (now)
 *    full and a call to tlb_flush_mmu() is required.
 *
 *    tlb_remove_page() and tlb_remove_page_size() imply the call to
 *    tlb_flush_mmu() when required and have no return value.
 *
 *    __tlb_remove_folio_pages() is similar to __tlb_remove_page_size(),
 *    however, instead of removing a single page, assume PAGE_SIZE and remove
 *    the given number of consecutive pages that are all part of the
 *    same (large) folio.
 *
 *  - tlb_change_page_size()
 *
 *    call before __tlb_remove_page*() to set the current page-size; implies a
 *    possible tlb_flush_mmu() call.
 *
 *  - tlb_flush_mmu() / tlb_flush_mmu_tlbonly()
 *
 *    tlb_flush_mmu_tlbonly() - does the TLB invalidate (and resets
 *                              related state, like the range)
 *
 *    tlb_flush_mmu() - in addition to the above TLB invalidate, also frees
 *                      whatever pages are still batched.
 *
 *  - mmu_gather::fullmm
 *
 *    A flag set by tlb_gather_mmu_fullmm() to indicate we're going to free
 *    the entire mm; this allows a number of optimizations.
 *
 *    - We can ignore tlb_{start,end}_vma(), because we don't
 *      care about ranges. Everything will be shot down.
 *
 *    - (RISC) architectures that use ASIDs can cycle to a new ASID
 *      and delay the invalidation until ASID space runs out.
 *
 *  - mmu_gather::need_flush_all
 *
 *    A flag that can be set by the arch code if it wants to force
 *    flush the entire TLB irrespective of the range. For instance
 *    x86-PAE needs this when changing top-level entries.
 *
 * And allows the architecture to provide and implement tlb_flush():
 *
 * tlb_flush() may, in addition to the above mentioned mmu_gather fields, make
 * use of:
 *
 *  - mmu_gather::start / mmu_gather::end
 *
 *    which provides the range that needs to be flushed to cover the pages to
 *    be freed.
 *
 *  - mmu_gather::freed_tables
 *
 *    set when we freed page table pages
 *
 *  - tlb_get_unmap_shift() / tlb_get_unmap_size()
 *
 *    returns the smallest TLB entry size unmapped in this range.
 *
 * If an architecture does not provide tlb_flush() a default implementation
 * based on flush_tlb_range() will be used, unless MMU_GATHER_NO_RANGE is
 * specified, in which case we'll default to flush_tlb_mm().
 *
 * Additionally there are a few opt-in features:
 *
 *  MMU_GATHER_PAGE_SIZE
 *
 *  This ensures we call tlb_flush() every time tlb_change_page_size() actually
 *  changes the size and provides mmu_gather::page_size to tlb_flush().
 *
 *  This might be useful if your architecture has size specific TLB
 *  invalidation instructions.
 *
 *  MMU_GATHER_TABLE_FREE
 *
 *  This provides tlb_remove_table(), to be used instead of tlb_remove_page()
 *  for page directories (__p*_free_tlb()).
 *
 *  Useful if your architecture has non-page page directories.
 *
 *  When used, an architecture is expected to provide __tlb_remove_table() or
 *  use the generic __tlb_remove_table(), which does the actual freeing of these
 *  pages.
 *
 *  MMU_GATHER_RCU_TABLE_FREE
 *
 *  Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
 *  comment below).
 *
 *  Useful if your architecture doesn't use IPIs for remote TLB invalidates
 *  and therefore doesn't naturally serialize with software page-table walkers.
 *
 *  MMU_GATHER_NO_FLUSH_CACHE
 *
 *  Indicates the architecture has flush_cache_range() but it need *NOT* be called
 *  before unmapping a VMA.
 *
 *  NOTE: strictly speaking we shouldn't have this knob and instead rely on
 *	  flush_cache_range() being a NOP, except Sparc64 seems to be
 *	  different here.
 *
 *  MMU_GATHER_MERGE_VMAS
 *
 *  Indicates the architecture wants to merge ranges over VMAs; typical when
 *  multiple range invalidates are more expensive than a full invalidate.
 *
 *  MMU_GATHER_NO_RANGE
 *
 *  Use this if your architecture lacks an efficient flush_tlb_range(). This
 *  option implies MMU_GATHER_MERGE_VMAS above.
 *
 *  MMU_GATHER_NO_GATHER
 *
 *  If the option is set the mmu_gather will not track individual pages for
 *  delayed page free anymore. A platform that enables the option needs to
 *  provide its own implementation of the __tlb_remove_page_size() function to
 *  free pages.
 *
 *  This is useful if your architecture already flushes TLB entries in the
 *  various ptep_get_and_clear() functions.
 */
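
/*
 * Illustrative sketch (not compiled) of the typical mmu_gather flow when
 * tearing down part of an address space, loosely following the core unmap
 * paths. The for_each_*() iterators are pseudo-code; real callers hold the
 * page-table locks and handle batching, delayed rmap and error cases:
 *
 *	struct mmu_gather tlb;
 *
 *	tlb_gather_mmu(&tlb, mm);
 *	for_each_vma_in_range(vma) {			// pseudo-code
 *		tlb_start_vma(&tlb, vma);
 *		for_each_present_pte(ptep, addr) {	// pseudo-code
 *			pte_t ptent = ptep_get_and_clear(mm, addr, ptep);
 *
 *			tlb_remove_tlb_entry(&tlb, ptep, addr);	// 1) unhook page
 *			tlb_remove_page(&tlb, pte_page(ptent));	// queue for freeing
 *		}
 *		tlb_end_vma(&tlb, vma);		// 2) TLB invalidate, reset range
 *	}
 *	tlb_finish_mmu(&tlb);			// final invalidate + 3) free pages
 */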

#ifdef CONFIG_MMU_GATHER_TABLE_FREE

struct mmu_table_batch {
#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
	struct rcu_head		rcu;
#endif
	unsigned int		nr;
	void			*tables[];
};

#define MAX_TABLE_BATCH		\
	((PAGE_SIZE - sizeof(struct mmu_table_batch)) / sizeof(void *))

#ifndef CONFIG_HAVE_ARCH_TLB_REMOVE_TABLE
static inline void __tlb_remove_table(void *table)
{
	struct ptdesc *ptdesc = (struct ptdesc *)table;

	pagetable_dtor_free(ptdesc);
}
#endif

extern void tlb_remove_table(struct mmu_gather *tlb, void *table);

#else /* !CONFIG_MMU_GATHER_TABLE_FREE */

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page);
/*
 * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
 * page directories and we can use the normal page batching to free them.
 */
static inline void tlb_remove_table(struct mmu_gather *tlb, void *table)
{
	struct ptdesc *ptdesc = (struct ptdesc *)table;

	pagetable_dtor(ptdesc);
	tlb_remove_page(tlb, ptdesc_page(ptdesc));
}
#endif /* CONFIG_MMU_GATHER_TABLE_FREE */

#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
/*
 * This allows an architecture that does not use the linux page-tables for
 * hardware to skip the TLBI when freeing page tables.
 */
#ifndef tlb_needs_table_invalidate
#define tlb_needs_table_invalidate() (true)
#endif

void tlb_remove_table_sync_one(void);

void tlb_remove_table_sync_rcu(void);

#else

#ifdef tlb_needs_table_invalidate
#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif

static inline void tlb_remove_table_sync_one(void) { }

static inline void tlb_remove_table_sync_rcu(void) { }

#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */


#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
 * If we can't allocate a page to make a big batch of page pointers
 * to work on, then just handle a few from the on-stack structure.
 */
#define MMU_GATHER_BUNDLE	8

struct mmu_gather_batch {
	struct mmu_gather_batch	*next;
	unsigned int		nr;
	unsigned int		max;
	struct encoded_page	*encoded_pages[];
};

#define MAX_GATHER_BATCH	\
	((PAGE_SIZE - sizeof(struct mmu_gather_batch)) / sizeof(void *))

/*
 * Limit the maximum number of mmu_gather batches to reduce a risk of soft
 * lockups for non-preemptible kernels on huge machines when a lot of memory
 * is zapped during unmapping.
 * 10K pages freed at once should be safe even without a preemption point.
 */
#define MAX_GATHER_BATCH_COUNT	(10000UL/MAX_GATHER_BATCH)

extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page, int page_size);
bool __tlb_remove_folio_pages(struct mmu_gather *tlb, struct page *page,
		unsigned int nr_pages, bool delay_rmap);

#ifdef CONFIG_SMP
/*
 * This both sets 'delayed_rmap', and returns true. It would be an inline
 * function, except we define it before the 'struct mmu_gather'.
 */
#define tlb_delay_rmap(tlb)	(((tlb)->delayed_rmap = 1), true)
extern void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma);
#endif

#endif

/*
 * We have a no-op version of the rmap removal that doesn't
 * delay anything. That is used on S390, which flushes remote
 * TLBs synchronously, and on UP, which doesn't have any
 * remote TLBs to flush and is not preemptible due to this
 * all happening under the page table lock.
 */
#ifndef tlb_delay_rmap
#define tlb_delay_rmap(tlb) (false)
static inline void tlb_flush_rmaps(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
#endif

/*
 * struct mmu_gather is an opaque type used by the mm code for passing around
 * any data needed by arch specific code for tlb_remove_page.
 */
struct mmu_gather {
	struct mm_struct	*mm;

#ifdef CONFIG_MMU_GATHER_TABLE_FREE
	struct mmu_table_batch	*batch;
#endif

	unsigned long		start;
	unsigned long		end;
	/*
	 * we are in the middle of an operation to clear
	 * a full mm and can make some optimizations
	 */
	unsigned int		fullmm : 1;

	/*
	 * we have performed an operation which
	 * requires a complete flush of the tlb
	 */
	unsigned int		need_flush_all : 1;

	/*
	 * we have removed page directories
	 */
	unsigned int		freed_tables : 1;

	/*
	 * Do we have pending delayed rmap removals?
	 */
	unsigned int		delayed_rmap : 1;

	/*
	 * at which levels have we cleared entries?
	 */
	unsigned int		cleared_ptes : 1;
	unsigned int		cleared_pmds : 1;
	unsigned int		cleared_puds : 1;
	unsigned int		cleared_p4ds : 1;

	/*
	 * tracks VM_EXEC | VM_HUGETLB in tlb_start_vma
	 */
	unsigned int		vma_exec : 1;
	unsigned int		vma_huge : 1;
	unsigned int		vma_pfn  : 1;

	/*
	 * Did we unshare (unmap) any shared page tables? For now only
	 * used for hugetlb PMD table sharing.
	 */
	unsigned int		unshared_tables : 1;

	/*
	 * Did we unshare any page tables such that they are now exclusive
	 * and could get reused+modified by the new owner? When setting this
	 * flag, "unshared_tables" will be set as well. For now only used
	 * for hugetlb PMD table sharing.
	 */
	unsigned int		fully_unshared_tables : 1;

	unsigned int		batch_count;

#ifndef CONFIG_MMU_GATHER_NO_GATHER
	struct mmu_gather_batch *active;
	struct mmu_gather_batch	local;
	struct page		*__pages[MMU_GATHER_BUNDLE];

#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	unsigned int page_size;
#endif
#endif
};

void tlb_flush_mmu(struct mmu_gather *tlb);

static inline void __tlb_adjust_range(struct mmu_gather *tlb,
				      unsigned long address,
				      unsigned int range_size)
{
	tlb->start = min(tlb->start, address);
	tlb->end = max(tlb->end, address + range_size);
}

static inline void __tlb_reset_range(struct mmu_gather *tlb)
{
	if (tlb->fullmm) {
		tlb->start = tlb->end = ~0;
	} else {
		tlb->start = TASK_SIZE;
		tlb->end = 0;
	}
	tlb->freed_tables = 0;
	tlb->cleared_ptes = 0;
	tlb->cleared_pmds = 0;
	tlb->cleared_puds = 0;
	tlb->cleared_p4ds = 0;
	tlb->unshared_tables = 0;
	/*
	 * Do not reset mmu_gather::vma_* fields here, we do not
	 * call into tlb_start_vma() again to set them if there is an
	 * intermediate flush.
	 */
}

#ifdef CONFIG_MMU_GATHER_NO_RANGE

#if defined(tlb_flush)
#error MMU_GATHER_NO_RANGE relies on default tlb_flush()
#endif

/*
 * When an architecture does not have efficient means of range flushing TLBs
 * there is no point in doing intermediate flushes on tlb_end_vma() to keep the
 * range small. We equally don't have to worry about page granularity or other
 * things.
 *
 * All we need to do is issue a full flush for any !0 range.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->end)
		flush_tlb_mm(tlb->mm);
}

#else /* CONFIG_MMU_GATHER_NO_RANGE */

#ifndef tlb_flush
/*
 * When an architecture does not provide its own tlb_flush() implementation
 * but does have a reasonably efficient flush_tlb_range() implementation
 * use that.
 */
static inline void tlb_flush(struct mmu_gather *tlb)
{
	if (tlb->fullmm || tlb->need_flush_all) {
		flush_tlb_mm(tlb->mm);
	} else if (tlb->end) {
		struct vm_area_struct vma = {
			.vm_mm = tlb->mm,
			.vm_flags = (tlb->vma_exec ? VM_EXEC : 0) |
				    (tlb->vma_huge ? VM_HUGETLB : 0),
		};

		flush_tlb_range(&vma, tlb->start, tlb->end);
	}
}
#endif

#endif /* CONFIG_MMU_GATHER_NO_RANGE */

static inline void
tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	/*
	 * flush_tlb_range() implementations that look at VM_HUGETLB (tile,
	 * mips-4k) flush only large pages.
	 *
	 * flush_tlb_range() implementations that flush I-TLB also flush D-TLB
	 * (tile, xtensa, arm), so it's ok to just add VM_EXEC to an existing
	 * range.
	 *
	 * We rely on tlb_end_vma() to issue a flush, such that when we reset
	 * these values the batch is empty.
	 */
	tlb->vma_huge = is_vm_hugetlb_page(vma);
	tlb->vma_exec = !!(vma->vm_flags & VM_EXEC);

	/*
	 * Track if there's at least one VM_PFNMAP/VM_MIXEDMAP vma
	 * in the tracked range, see tlb_free_vmas().
	 */
	tlb->vma_pfn |= !!(vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP));
}

static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
	/*
	 * Anything calling __tlb_adjust_range() also sets at least one of
	 * these bits.
	 */
	if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
	      tlb->cleared_puds || tlb->cleared_p4ds || tlb->unshared_tables))
		return;

	tlb_flush(tlb);
	__tlb_reset_range(tlb);
}

static inline void tlb_remove_page_size(struct mmu_gather *tlb,
					struct page *page, int page_size)
{
	if (__tlb_remove_page_size(tlb, page, page_size))
		tlb_flush_mmu(tlb);
}

static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
{
	return tlb_remove_page_size(tlb, page, PAGE_SIZE);
}

static inline void tlb_remove_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt)
{
	tlb_remove_table(tlb, pt);
}

static inline void tlb_change_page_size(struct mmu_gather *tlb,
					unsigned int page_size)
{
#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
	if (tlb->page_size && tlb->page_size != page_size) {
		if (!tlb->fullmm && !tlb->need_flush_all)
			tlb_flush_mmu(tlb);
	}

	tlb->page_size = page_size;
#endif
}

static inline unsigned long tlb_get_unmap_shift(struct mmu_gather *tlb)
{
	if (tlb->cleared_ptes)
		return PAGE_SHIFT;
	if (tlb->cleared_pmds)
		return PMD_SHIFT;
	if (tlb->cleared_puds)
		return PUD_SHIFT;
	if (tlb->cleared_p4ds)
		return P4D_SHIFT;

	return PAGE_SHIFT;
}

static inline unsigned long tlb_get_unmap_size(struct mmu_gather *tlb)
{
	return 1UL << tlb_get_unmap_shift(tlb);
}
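
/*
 * For illustration only: an architecture that overrides tlb_flush() typically
 * consumes the fields described above. This is roughly the shape of the x86
 * override, where flush_tlb_mm_range() and TLB_FLUSH_ALL are x86-private and
 * the stride shift tells the flush code the smallest entry size involved:
 *
 *	static inline void tlb_flush(struct mmu_gather *tlb)
 *	{
 *		unsigned long start = 0UL, end = TLB_FLUSH_ALL;
 *		unsigned int stride_shift = tlb_get_unmap_shift(tlb);
 *
 *		if (!tlb->fullmm && !tlb->need_flush_all) {
 *			start = tlb->start;
 *			end = tlb->end;
 *		}
 *
 *		flush_tlb_mm_range(tlb->mm, start, end, stride_shift,
 *				   tlb->freed_tables);
 *	}
 */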

/*
 * In the case of tlb vma handling, we can optimise these away in the
 * case where we're doing a full MM flush. When we're doing a munmap,
 * the vmas are adjusted to only cover the region to be torn down.
 */
static inline void tlb_start_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm)
		return;

	tlb_update_vma_flags(tlb, vma);
#ifndef CONFIG_MMU_GATHER_NO_FLUSH_CACHE
	flush_cache_range(vma, vma->vm_start, vma->vm_end);
#endif
}

static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vma)
{
	if (tlb->fullmm || IS_ENABLED(CONFIG_MMU_GATHER_MERGE_VMAS))
		return;

	/*
	 * Do a TLB flush and reset the range at VMA boundaries; this avoids
	 * the ranges growing with the unused space between consecutive VMAs,
	 * but also the mmu_gather::vma_* flags from tlb_start_vma() rely on
	 * this.
	 */
	tlb_flush_mmu_tlbonly(tlb);
}

static inline void tlb_free_vmas(struct mmu_gather *tlb)
{
	if (tlb->fullmm)
		return;

	/*
	 * VM_PFNMAP is more fragile because the core mm will not track the
	 * page mapcount -- there might not be page-frames for these PFNs
	 * after all.
	 *
	 * Specifically, there is a race between munmap() and
	 * unmap_mapping_range(), where munmap() will unlink the VMA, such
	 * that unmap_mapping_range() will no longer observe the VMA and
	 * no-op, without observing the TLBI, returning prematurely.
	 *
	 * So if we're about to unlink such a VMA, and we have pending
	 * TLBI for such a vma, flush things now.
	 */
	if (tlb->vma_pfn)
		tlb_flush_mmu_tlbonly(tlb);
}

/*
 * tlb_flush_{pte|pmd|pud|p4d}_range() adjust the tlb->start and tlb->end,
 * and set corresponding cleared_*.
 */
static inline void tlb_flush_pte_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_ptes = 1;
}

static inline void tlb_flush_pmd_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_pmds = 1;
}

static inline void tlb_flush_pud_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_puds = 1;
}

static inline void tlb_flush_p4d_range(struct mmu_gather *tlb,
				       unsigned long address, unsigned long size)
{
	__tlb_adjust_range(tlb, address, size);
	tlb->cleared_p4ds = 1;
}

#ifndef __tlb_remove_tlb_entry
static inline void __tlb_remove_tlb_entry(struct mmu_gather *tlb, pte_t *ptep, unsigned long address)
{
}
#endif

/**
 * tlb_remove_tlb_entry - remember a pte unmapping for later tlb invalidation.
 *
 * Record the fact that pte's were really unmapped by updating the range,
 * so we can later optimise away the tlb invalidate. This helps when
 * userspace is unmapping already-unmapped pages, which happens quite a lot.
 */
#define tlb_remove_tlb_entry(tlb, ptep, address)		\
	do {							\
		tlb_flush_pte_range(tlb, address, PAGE_SIZE);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_tlb_entries - remember unmapping of multiple consecutive ptes for
 * later tlb invalidation.
 *
 * Similar to tlb_remove_tlb_entry(), but remember unmapping of multiple
 * consecutive ptes instead of only a single one.
 */
static inline void tlb_remove_tlb_entries(struct mmu_gather *tlb,
		pte_t *ptep, unsigned int nr, unsigned long address)
{
	tlb_flush_pte_range(tlb, address, PAGE_SIZE * nr);
	for (;;) {
		__tlb_remove_tlb_entry(tlb, ptep, address);
		if (--nr == 0)
			break;
		ptep++;
		address += PAGE_SIZE;
	}
}
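
/*
 * A rough sketch of how a caller removes several ptes that all map one large
 * folio, loosely following the core zap path; "clear the ptes" stands in for
 * the pte clearing helpers and is not part of this API:
 *
 *	// nr = number of consecutive ptes mapping the folio
 *	... clear the nr ptes starting at ptep ...
 *	tlb_remove_tlb_entries(tlb, ptep, nr, addr);	// grow the flush range
 *	if (__tlb_remove_folio_pages(tlb, page, nr, false))
 *		tlb_flush_mmu(tlb);			// batch full: flush, then free
 */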

#define tlb_remove_huge_tlb_entry(h, tlb, ptep, address)	\
	do {							\
		unsigned long _sz = huge_page_size(h);		\
		if (_sz >= P4D_SIZE)				\
			tlb_flush_p4d_range(tlb, address, _sz);	\
		else if (_sz >= PUD_SIZE)			\
			tlb_flush_pud_range(tlb, address, _sz);	\
		else if (_sz >= PMD_SIZE)			\
			tlb_flush_pmd_range(tlb, address, _sz);	\
		else						\
			tlb_flush_pte_range(tlb, address, _sz);	\
		__tlb_remove_tlb_entry(tlb, ptep, address);	\
	} while (0)

/**
 * tlb_remove_pmd_tlb_entry - remember a pmd mapping for later tlb invalidation
 * This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pmd_tlb_entry
#define __tlb_remove_pmd_tlb_entry(tlb, pmdp, address) do {} while (0)
#endif

#define tlb_remove_pmd_tlb_entry(tlb, pmdp, address)			\
	do {								\
		tlb_flush_pmd_range(tlb, address, HPAGE_PMD_SIZE);	\
		__tlb_remove_pmd_tlb_entry(tlb, pmdp, address);		\
	} while (0)

/**
 * tlb_remove_pud_tlb_entry - remember a pud mapping for later tlb
 * invalidation. This is a nop so far, because only x86 needs it.
 */
#ifndef __tlb_remove_pud_tlb_entry
#define __tlb_remove_pud_tlb_entry(tlb, pudp, address) do {} while (0)
#endif

#define tlb_remove_pud_tlb_entry(tlb, pudp, address)			\
	do {								\
		tlb_flush_pud_range(tlb, address, HPAGE_PUD_SIZE);	\
		__tlb_remove_pud_tlb_entry(tlb, pudp, address);		\
	} while (0)

/*
 * For things like page tables caches (ie caching addresses "inside" the
 * page tables, like x86 does), for legacy reasons, flushing an
 * individual page had better flush the page table caches behind it. This
 * is definitely how x86 works, for example. And if you have an
 * architected non-legacy page table cache (which I'm not aware of
 * anybody actually doing), you're going to have some architecturally
 * explicit flushing for that, likely *separate* from a regular TLB entry
 * flush, and thus you'd need more than just some range expansion..
 *
 * So if we ever find an architecture
 * that would want something that odd, I think it is up to that
 * architecture to do its own odd thing, not cause pain for others
 * http://lkml.kernel.org/r/CA+55aFzBggoXtNXQeng5d_mRoDnaMBE5Y+URs+PHR67nUpMtaw@mail.gmail.com
 *
 * For now w.r.t page table cache, mark the range_size as PAGE_SIZE
 */

#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		tlb_flush_pmd_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)
#endif
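
/*
 * For reference, the matching arch hook usually just hands the now-unlinked
 * table to the mmu_gather. A minimal sketch, assuming page-backed page tables
 * (i.e. pgtable_t is a struct page pointer, as on most architectures):
 *
 *	static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 *					  unsigned long addr)
 *	{
 *		tlb_remove_ptdesc(tlb, page_ptdesc(pte));
 *	}
 */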

#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address)			\
	do {							\
		tlb_flush_pud_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pmd_free_tlb(tlb, pmdp, address);		\
	} while (0)
#endif

#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address)			\
	do {							\
		tlb_flush_p4d_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__pud_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		tlb->freed_tables = 1;				\
		__p4d_free_tlb(tlb, pudp, address);		\
	} while (0)
#endif

#ifndef pte_needs_flush
static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
{
	return true;
}
#endif

#ifndef huge_pmd_needs_flush
static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
{
	return true;
}
#endif

#ifdef CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING
static inline void tlb_unshare_pmd_ptdesc(struct mmu_gather *tlb, struct ptdesc *pt,
					   unsigned long addr)
{
	/*
	 * The caller must make sure that concurrent unsharing + exclusive
	 * reuse is impossible until tlb_flush_unshared_tables() was called.
	 */
	VM_WARN_ON_ONCE(!ptdesc_pmd_is_shared(pt));
	ptdesc_pmd_pts_dec(pt);

	/* Clearing a PUD pointing at a PMD table with PMD leaves. */
	tlb_flush_pmd_range(tlb, addr & PUD_MASK, PUD_SIZE);

	/*
	 * If the page table is now exclusively owned, we fully unshared
	 * a page table.
	 */
	if (!ptdesc_pmd_is_shared(pt))
		tlb->fully_unshared_tables = true;
	tlb->unshared_tables = true;
}

static inline void tlb_flush_unshared_tables(struct mmu_gather *tlb)
{
	/*
	 * As soon as the caller drops locks to allow for reuse of
	 * previously-shared tables, these tables could get modified and
	 * even reused outside of hugetlb context, so we have to make sure that
	 * any page table walkers (incl. TLB, GUP-fast) are aware of that
	 * change.
	 *
	 * Even if we are not fully unsharing a PMD table, we must
	 * flush the TLB for the unsharer now.
	 */
	if (tlb->unshared_tables)
		tlb_flush_mmu_tlbonly(tlb);

	/*
	 * Similarly, we must make sure that concurrent GUP-fast will not
	 * walk previously-shared page tables that are getting modified+reused
	 * elsewhere. So broadcast an IPI to wait for any concurrent GUP-fast.
	 *
	 * We only perform this when we are the last sharer of a page table,
	 * as the IPI will reach all CPUs and thus also any concurrent
	 * GUP-fast walkers.
	 *
	 * Note that on configs where tlb_remove_table_sync_one() is a NOP,
	 * the expectation is that the tlb_flush_mmu_tlbonly() would have issued
	 * required IPIs already for us.
	 */
	if (tlb->fully_unshared_tables) {
		tlb_remove_table_sync_one();
		tlb->fully_unshared_tables = false;
	}
}
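
/*
 * Hedged sketch of the intended calling order (pseudo-code; the exact
 * locking belongs to the hugetlb caller and is only hinted at here):
 *
 *	// with the locks held that prevent exclusive reuse of the table
 *	... clear the PUD entry pointing at the shared PMD table ...
 *	tlb_unshare_pmd_ptdesc(tlb, ptdesc, addr);
 *	...
 *	tlb_flush_unshared_tables(tlb);		// before dropping those locks
 *	// only now may the locks be dropped and the table reused exclusively
 */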
#endif /* CONFIG_HUGETLB_PMD_PAGE_TABLE_SHARING */

#endif /* CONFIG_MMU */

#endif /* _ASM_GENERIC__TLB_H */