/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_TLB_H
#define _ASM_X86_TLB_H

#define tlb_flush tlb_flush
static inline void tlb_flush(struct mmu_gather *tlb);

#include <asm-generic/tlb.h>
#include <linux/kernel.h>
#include <vdso/bits.h>
#include <vdso/page.h>

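/*
 * Flush the range gathered in @tlb; when the whole mm is going away or
 * when need_flush_all is set, flush the entire address space instead.
 */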
static inline void tlb_flush(struct mmu_gather *tlb)
{
	unsigned long start = 0UL, end = TLB_FLUSH_ALL;
	unsigned int stride_shift = tlb_get_unmap_shift(tlb);

	if (!tlb->fullmm && !tlb->need_flush_all) {
		start = tlb->start;
		end = tlb->end;
	}

	flush_tlb_mm_range(tlb->mm, start, end, stride_shift, tlb->freed_tables);
}

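/* Flush the TLB entry mapping @addr on the local CPU only. */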
static inline void invlpg(unsigned long addr)
{
	asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}

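/*
 * Stride for a ranged INVLPGB: step through the region in 4KB (PTE) or
 * 2MB (PMD) sized pages. The value feeds the page-size bit in ECX.
 */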
enum addr_stride {
	PTE_STRIDE = 0,
	PMD_STRIDE = 1
};

/*
 * INVLPGB can be targeted by virtual address, PCID, ASID, or any combination
 * of the three. For example:
 * - FLAG_VA | FLAG_INCLUDE_GLOBAL: invalidate all TLB entries at the address
 * - FLAG_PCID: invalidate all TLB entries matching the PCID
 *
 * The former is used to invalidate (kernel) mappings at a particular
 * address across all processes.
 *
 * The latter invalidates all TLB entries matching a PCID.
 */
#define INVLPGB_FLAG_VA			BIT(0)
#define INVLPGB_FLAG_PCID		BIT(1)
#define INVLPGB_FLAG_ASID		BIT(2)
#define INVLPGB_FLAG_INCLUDE_GLOBAL	BIT(3)
#define INVLPGB_FLAG_FINAL_ONLY		BIT(4)
#define INVLPGB_FLAG_INCLUDE_NESTED	BIT(5)

/* The implied mode when all bits are clear: */
#define INVLPGB_MODE_ALL_NONGLOBALS	0UL

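/*
 * For example (illustrative only; these mirror the helpers defined below):
 *
 *	// One address, including global entries, for every PCID:
 *	__invlpgb(0, 0, addr, 1, PTE_STRIDE,
 *		  INVLPGB_FLAG_VA | INVLPGB_FLAG_INCLUDE_GLOBAL);
 *
 *	// Everything (except globals) for a single PCID:
 *	__invlpgb_all(0, pcid, INVLPGB_FLAG_PCID);
 */
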
#ifdef CONFIG_BROADCAST_TLB_FLUSH
/*
 * INVLPGB does broadcast TLB invalidation across all the CPUs in the system.
 *
 * The INVLPGB instruction is weakly ordered, and a batch of invalidations can
 * be done in a parallel fashion.
 *
 * The instruction takes the number of extra pages to invalidate, beyond the
 * first page, while __invlpgb gets the more human readable number of pages to
 * invalidate.
 *
 * The bits in rax[0:2] determine respectively which components of the address
 * (VA, PCID, ASID) get compared when flushing. If none of the bits are set,
 * *any* address in the specified range matches.
 *
 * Since only TLB entries for the ASID that is executing the instruction (a
 * host/hypervisor or a guest) should be flushed, the ASID valid bit should
 * always be set. On a host/hypervisor, the hardware will use the ASID value
 * specified in EDX[15:0] (which should be 0). On a guest, the hardware will
 * use the actual ASID value of the guest.
 *
 * TLBSYNC is used to ensure that pending INVLPGB invalidations initiated from
 * this CPU have completed.
 */
static inline void __invlpgb(unsigned long asid, unsigned long pcid,
			     unsigned long addr, u16 nr_pages,
			     enum addr_stride stride, u8 flags)
{
	u64 rax = addr | flags | INVLPGB_FLAG_ASID;
	/* ECX[31] is the stride (page size); ECX[15:0] the extra-page count. */
	u32 ecx = (stride << 31) | (nr_pages - 1);
	/* EDX[27:16] is the PCID; EDX[15:0] the ASID (0 on the host). */
	u32 edx = (pcid << 16) | asid;

	/* The low bits in rax are for flags. Verify addr is clean. */
	VM_WARN_ON_ONCE(addr & ~PAGE_MASK);

	/* INVLPGB: supported in binutils >= 2.36. */
	asm volatile(".byte 0x0f, 0x01, 0xfe" :: "a" (rax), "c" (ecx), "d" (edx));
}
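
/*
 * A worked example (illustrative only): __invlpgb(0, pcid, addr, 512,
 * PTE_STRIDE, INVLPGB_FLAG_PCID | INVLPGB_FLAG_VA) invalidates 512 4KB
 * pages starting at @addr for one PCID, and encodes to:
 *
 *	rax = addr | 0x7	(VA | PCID | ASID)
 *	ecx = 511		(stride 0, 511 extra pages)
 *	edx = pcid << 16	(ASID 0)
 */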

static inline void __invlpgb_all(unsigned long asid, unsigned long pcid, u8 flags)
{
	__invlpgb(asid, pcid, 0, 1, PTE_STRIDE, flags);
}

static inline void __tlbsync(void)
{
	/*
	 * TLBSYNC waits for INVLPGB instructions originating on the same CPU
	 * to have completed. Warn if the task can be migrated, in which case
	 * it might not wait on all the INVLPGBs issued during this TLB
	 * invalidation sequence.
	 */
	cant_migrate();

	/* TLBSYNC: supported in binutils >= 2.36. */
	asm volatile(".byte 0x0f, 0x01, 0xff" ::: "memory");
}
#else
/* Some compilers (I'm looking at you, clang!) simply can't do DCE */
static inline void __invlpgb(unsigned long asid, unsigned long pcid,
			     unsigned long addr, u16 nr_pages,
			     enum addr_stride s, u8 flags) { }
static inline void __invlpgb_all(unsigned long asid, unsigned long pcid, u8 flags) { }
static inline void __tlbsync(void) { }
#endif

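/*
 * Flush @nr pages starting at @addr for the given PCID. Like all _nosync
 * helpers here, this does not wait for completion: the caller is expected
 * to issue a __tlbsync() afterwards, without migrating in between.
 */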
static inline void invlpgb_flush_user_nr_nosync(unsigned long pcid,
						unsigned long addr,
						u16 nr, bool stride)
{
	enum addr_stride str = stride ? PMD_STRIDE : PTE_STRIDE;
	u8 flags = INVLPGB_FLAG_PCID | INVLPGB_FLAG_VA;

	__invlpgb(0, pcid, addr, nr, str, flags);
}

/* Flush all mappings for a given PCID, not including globals. */
static inline void invlpgb_flush_single_pcid_nosync(unsigned long pcid)
{
	__invlpgb_all(0, pcid, INVLPGB_FLAG_PCID);
}

/* Flush all mappings, including globals, for all PCIDs. */
static inline void invlpgb_flush_all(void)
{
	/*
	 * The TLBSYNC at the end needs to make sure all flushes done on the
	 * current CPU have been executed system-wide. Therefore the task must
	 * not migrate in between; disable preemption, which guarantees that
	 * and is cheaper than disabling migration.
	 */
	guard(preempt)();
	__invlpgb_all(0, 0, INVLPGB_FLAG_INCLUDE_GLOBAL);
	__tlbsync();
}

/* Flush addr, including globals, for all PCIDs. */
static inline void invlpgb_flush_addr_nosync(unsigned long addr, u16 nr)
{
	__invlpgb(0, 0, addr, nr, PTE_STRIDE, INVLPGB_FLAG_INCLUDE_GLOBAL);
}

/* Flush all mappings for all PCIDs except globals. */
static inline void invlpgb_flush_all_nonglobals(void)
{
	guard(preempt)();
	__invlpgb_all(0, 0, INVLPGB_MODE_ALL_NONGLOBALS);
	__tlbsync();
}
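
/*
 * A sketch of the intended pattern (illustrative only): batch several
 * _nosync flushes and wait for all of them with a single TLBSYNC, with
 * preemption disabled so the task stays on the CPU that issued them:
 *
 *	guard(preempt)();
 *	invlpgb_flush_single_pcid_nosync(pcid);
 *	invlpgb_flush_addr_nosync(addr, nr);
 *	__tlbsync();
 */
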
#endif /* _ASM_X86_TLB_H */