/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H

#include <linux/cpufeature.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <asm/processor.h>
#include <asm/machine.h>

/*
 * Flush all TLB entries on the local CPU.
 */
static inline void __tlb_flush_local(void)
{
	asm volatile("ptlb" : : : "memory");
}

/*
 * Flush TLB entries for a specific ASCE on all CPUs.
 */
static inline void __tlb_flush_idte(unsigned long asce)
{
	unsigned long opt;

	opt = IDTE_PTOA;
	if (machine_has_tlb_guest())
		opt |= IDTE_GUEST_ASCE;
	/* Global TLB flush for the mm */
	asm volatile("idte 0,%1,%0" : : "a" (opt), "a" (asce) : "cc");
}

/*
 * Flush all TLB entries on all CPUs.
 */
static inline void __tlb_flush_global(void)
{
	unsigned int dummy = 0;
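
	/*
	 * Compare and swap and purge (csp) on a dummy word: the swap is of
	 * no interest here, the purge clears TLB entries on all CPUs.
	 */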
	csp(&dummy, 0, 0);
}

/*
 * Flush TLB entries for a specific mm on all CPUs (in case gmap is used
 * this may involve multiple ASCEs!).
 */
static inline void __tlb_flush_mm(struct mm_struct *mm)
{
	unsigned long gmap_asce;

	preempt_disable();
	atomic_inc(&mm->context.flush_count);
	/* Reset TLB flush mask */
	cpumask_copy(mm_cpumask(mm), &mm->context.cpu_attach_mask);
	barrier();
	gmap_asce = READ_ONCE(mm->context.gmap_asce);
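	/*
	 * gmap_asce == -1UL means that more than one guest ASCE may be
	 * attached to this mm; a per-ASCE idte flush would not cover them
	 * all, so fall back to a global flush in that case.
	 */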
	if (cpu_has_idte() && gmap_asce != -1UL) {
		if (gmap_asce)
			__tlb_flush_idte(gmap_asce);
		__tlb_flush_idte(mm->context.asce);
	} else {
		/* Global TLB flush */
		__tlb_flush_global();
	}
	atomic_dec(&mm->context.flush_count);
	preempt_enable();
}
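
/*
 * Flush TLB entries for the kernel address space (init_mm) on all CPUs.
 */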
static inline void __tlb_flush_kernel(void)
{
	if (cpu_has_idte())
		__tlb_flush_idte(init_mm.context.asce);
	else
		__tlb_flush_global();
}
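
/*
 * Flush the mm only if a flush was deferred: the pte/pmd primitives set
 * mm->context.flush_mm instead of flushing right away when the flush can
 * be postponed (see the comment on flush_tlb_mm/flush_tlb_range below).
 */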
static inline void __tlb_flush_mm_lazy(struct mm_struct *mm)
{
	spin_lock(&mm->context.lock);
	if (mm->context.flush_mm) {
		mm->context.flush_mm = 0;
		__tlb_flush_mm(mm);
	}
	spin_unlock(&mm->context.lock);
}

/*
 * TLB flushing:
 *  flush_tlb() - flushes the current mm struct TLBs
 *  flush_tlb_all() - flushes all processes' TLBs
 *  flush_tlb_mm(mm) - flushes the specified mm context's TLBs
 *  flush_tlb_page(vma, vmaddr) - flushes one page
 *  flush_tlb_range(vma, start, end) - flushes a range of pages
 *  flush_tlb_kernel_range(start, end) - flushes a range of kernel pages
 */

/*
 * flush_tlb_mm goes together with ptep_set_wrprotect for the
 * copy_page_range operation and flush_tlb_range is related to
 * ptep_get_and_clear for change_protection. ptep_set_wrprotect and
 * ptep_get_and_clear do not flush the TLBs directly if the mm has
 * only one user. At the end of the update, the flush_tlb_mm and
 * flush_tlb_range functions need to do the flush.
 */
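/*
 * flush_tlb(), flush_tlb_all() and flush_tlb_page() are intentionally
 * no-ops here: single page invalidation is done by the pte primitives,
 * and wider flushes go through __tlb_flush_mm_lazy() below.
 */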
#define flush_tlb()				do { } while (0)
#define flush_tlb_all()				do { } while (0)
#define flush_tlb_page(vma, addr)		do { } while (0)

static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__tlb_flush_mm_lazy(mm);
}
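
/* A range flush simply flushes the whole mm, lazily. */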
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__tlb_flush_mm_lazy(vma->vm_mm);
}
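
/* Likewise, a kernel range flush flushes the whole kernel address space. */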
static inline void flush_tlb_kernel_range(unsigned long start,
					  unsigned long end)
{
	__tlb_flush_kernel();
}

#endif /* _S390_TLBFLUSH_H */