1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _ASM_X86_TLBFLUSH_H
3 #define _ASM_X86_TLBFLUSH_H
4
5 #include <linux/mm_types.h>
6 #include <linux/mmu_notifier.h>
7 #include <linux/sched.h>
8
9 #include <asm/barrier.h>
10 #include <asm/processor.h>
11 #include <asm/cpufeature.h>
12 #include <asm/special_insns.h>
13 #include <asm/smp.h>
14 #include <asm/invpcid.h>
15 #include <asm/pti.h>
16 #include <asm/processor-flags.h>
17 #include <asm/pgtable.h>
18
19 DECLARE_PER_CPU(u64, tlbstate_untag_mask);
20
21 void __flush_tlb_all(void);
22
23 #define TLB_FLUSH_ALL -1UL
24 #define TLB_GENERATION_INVALID 0
25
26 void cr4_update_irqsoff(unsigned long set, unsigned long clear);
27 unsigned long cr4_read_shadow(void);
28
/* Set bits in this CPU's CR4; interrupts must already be disabled. */
static inline void cr4_set_bits_irqsoff(unsigned long bits)
{
	cr4_update_irqsoff(bits, 0);
}
34
/* Clear bits in this CPU's CR4; interrupts must already be disabled. */
static inline void cr4_clear_bits_irqsoff(unsigned long bits)
{
	cr4_update_irqsoff(0, bits);
}
40
/* Set in this cpu's CR4: IRQ-safe wrapper around cr4_set_bits_irqsoff(). */
static inline void cr4_set_bits(unsigned long mask)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	cr4_set_bits_irqsoff(mask);
	local_irq_restore(irqflags);
}
50
/* Clear in this cpu's CR4: IRQ-safe wrapper around cr4_clear_bits_irqsoff(). */
static inline void cr4_clear_bits(unsigned long mask)
{
	unsigned long irqflags;

	local_irq_save(irqflags);
	cr4_clear_bits_irqsoff(mask);
	local_irq_restore(irqflags);
}
60
61 #ifndef MODULE
62 /*
63 * 6 because 6 should be plenty and struct tlb_state will fit in two cache
64 * lines.
65 */
66 #define TLB_NR_DYN_ASIDS 6
67
/* One cached (mm, generation) pair; see the ctxs[] comment in tlb_state. */
struct tlb_context {
	u64 ctx_id;	/* identifies which mm the TLB's user entries came from */
	u64 tlb_gen;	/* the mm's tlb_gen this CPU has caught up to */
};
72
/* Per-CPU TLB bookkeeping; instantiated as cpu_tlbstate below. */
struct tlb_state {
	/*
	 * cpu_tlbstate.loaded_mm should match CR3 whenever interrupts
	 * are on.  This means that it may not match current->active_mm,
	 * which will contain the previous user mm when we're in lazy TLB
	 * mode even if we've already switched back to swapper_pg_dir.
	 *
	 * During switch_mm_irqs_off(), loaded_mm will be set to
	 * LOADED_MM_SWITCHING during the brief interrupts-off window
	 * when CR3 and loaded_mm would otherwise be inconsistent.  This
	 * is for nmi_uaccess_okay()'s benefit.
	 */
	struct mm_struct *loaded_mm;

#define LOADED_MM_SWITCHING ((struct mm_struct *)1UL)

	/* Last user mm for optimizing IBPB */
	union {
		struct mm_struct *last_user_mm;
		unsigned long last_user_mm_spec;
	};

	/* ASID (what the CPU calls PCID) of loaded_mm; index into ctxs[]. */
	u16 loaded_mm_asid;
	/*
	 * Candidate slot for the next dynamic ASID allocation.
	 * NOTE(review): the allocation policy lives outside this header —
	 * confirm in mm/tlb.c.
	 */
	u16 next_asid;

	/*
	 * If set we changed the page tables in such a way that we
	 * needed an invalidation of all contexts (aka. PCIDs / ASIDs).
	 * This tells us to go invalidate all the non-loaded ctxs[]
	 * on the next context switch.
	 *
	 * The current ctx was kept up-to-date as it ran and does not
	 * need to be invalidated.
	 */
	bool invalidate_other;

#ifdef CONFIG_ADDRESS_MASKING
	/*
	 * Active LAM mode.
	 *
	 * X86_CR3_LAM_U57/U48 shifted right by X86_CR3_LAM_U57_BIT or 0 if LAM
	 * disabled.
	 */
	u8 lam;
#endif

	/*
	 * Mask that contains TLB_NR_DYN_ASIDS+1 bits to indicate
	 * the corresponding user PCID needs a flush next time we
	 * switch to it; see SWITCH_TO_USER_CR3.
	 */
	unsigned short user_pcid_flush_mask;

	/*
	 * Access to this CR4 shadow and to H/W CR4 is protected by
	 * disabling interrupts when modifying either one.
	 */
	unsigned long cr4;

	/*
	 * This is a list of all contexts that might exist in the TLB.
	 * There is one per ASID that we use, and the ASID (what the
	 * CPU calls PCID) is the index into ctxs.
	 *
	 * For each context, ctx_id indicates which mm the TLB's user
	 * entries came from.  As an invariant, the TLB will never
	 * contain entries that are out-of-date as when that mm reached
	 * the tlb_gen in the list.
	 *
	 * To be clear, this means that it's legal for the TLB code to
	 * flush the TLB without updating tlb_gen.  This can happen
	 * (for now, at least) due to paravirt remote flushes.
	 *
	 * NB: context 0 is a bit special, since it's also used by
	 * various bits of init code.  This is fine -- code that
	 * isn't aware of PCID will end up harmlessly flushing
	 * context 0.
	 */
	struct tlb_context ctxs[TLB_NR_DYN_ASIDS];
};
153 DECLARE_PER_CPU_ALIGNED(struct tlb_state, cpu_tlbstate);
154
/*
 * NOTE(review): kept separate from struct tlb_state and declared
 * DECLARE_PER_CPU_SHARED_ALIGNED — presumably because is_lazy is read by
 * other CPUs; confirm against the users in mm/tlb.c.
 */
struct tlb_state_shared {
	/*
	 * We can be in one of several states:
	 *
	 *  - Actively using an mm.  Our CPU's bit will be set in
	 *    mm_cpumask(loaded_mm) and is_lazy == false;
	 *
	 *  - Not using a real mm.  loaded_mm == &init_mm.  Our CPU's bit
	 *    will not be set in mm_cpumask(&init_mm) and is_lazy == false.
	 *
	 *  - Lazily using a real mm.  loaded_mm != &init_mm, our bit
	 *    is set in mm_cpumask(loaded_mm), but is_lazy == true.
	 *    We're heuristically guessing that the CR3 load we
	 *    skipped more than makes up for the overhead added by
	 *    lazy mode.
	 */
	bool is_lazy;
};
173 DECLARE_PER_CPU_SHARED_ALIGNED(struct tlb_state_shared, cpu_tlbstate_shared);
174
175 /*
176 * Please ignore the name of this function. It should be called
177 * switch_to_kernel_thread().
178 *
179 * enter_lazy_tlb() is a hint from the scheduler that we are entering a
180 * kernel thread or other context without an mm. Acceptable implementations
181 * include doing nothing whatsoever, switching to init_mm, or various clever
182 * lazy tricks to try to minimize TLB flushes.
183 *
184 * The scheduler reserves the right to call enter_lazy_tlb() several times
185 * in a row. It will notify us that we're going back to a real mm by
186 * calling switch_mm_irqs_off().
187 */
188 #define enter_lazy_tlb enter_lazy_tlb
enter_lazy_tlb(struct mm_struct * mm,struct task_struct * tsk)189 static __always_inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
190 {
191 if (this_cpu_read(cpu_tlbstate.loaded_mm) == &init_mm)
192 return;
193
194 this_cpu_write(cpu_tlbstate_shared.is_lazy, true);
195 }
196
197 bool nmi_uaccess_okay(void);
198 #define nmi_uaccess_okay nmi_uaccess_okay
199
200 /* Initialize cr4 shadow for this CPU. */
cr4_init_shadow(void)201 static inline void cr4_init_shadow(void)
202 {
203 this_cpu_write(cpu_tlbstate.cr4, __read_cr4());
204 }
205
206 extern unsigned long mmu_cr4_features;
207 extern u32 *trampoline_cr4_features;
208
209 /* How many pages can be invalidated with one INVLPGB. */
210 extern u16 invlpgb_count_max;
211
212 extern void initialize_tlbstate_and_flush(void);
213
214 /*
215 * TLB flushing:
216 *
217 * - flush_tlb_all() flushes all processes TLBs
218 * - flush_tlb_mm(mm) flushes the specified mm context TLB's
219 * - flush_tlb_page(vma, vmaddr) flushes one page
220 * - flush_tlb_range(vma, start, end) flushes a range of pages
221 * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
222 * - flush_tlb_multi(cpumask, info) flushes TLBs on multiple cpus
223 *
224 * ..but the i386 has somewhat limited tlb flushing capabilities,
225 * and page-granular flushes are available only on i486 and up.
226 */
/* Describes one TLB flush request; passed to flush_tlb_multi() et al. */
struct flush_tlb_info {
	/*
	 * We support several kinds of flushes.
	 *
	 * - Fully flush a single mm.  .mm will be set, .end will be
	 *   TLB_FLUSH_ALL, and .new_tlb_gen will be the tlb_gen to
	 *   which the IPI sender is trying to catch us up.
	 *
	 * - Partially flush a single mm.  .mm will be set, .start and
	 *   .end will indicate the range, and .new_tlb_gen will be set
	 *   such that the changes between generation .new_tlb_gen-1 and
	 *   .new_tlb_gen are entirely contained in the indicated range.
	 *
	 * - Fully flush all mms whose tlb_gens have been updated.  .mm
	 *   will be NULL, .end will be TLB_FLUSH_ALL, and .new_tlb_gen
	 *   will be zero.
	 */
	struct mm_struct *mm;		/* target mm, or NULL (see above) */
	unsigned long start;		/* first address of the range */
	unsigned long end;		/* end of range, or TLB_FLUSH_ALL */
	u64 new_tlb_gen;		/* tlb_gen this flush catches us up to */
	unsigned int initiating_cpu;	/* CPU that requested the flush */
	u8 stride_shift;		/* log2 of stride, e.g. PAGE_SHIFT or
					   huge_page_shift(); see flush_tlb_range() */
	u8 freed_tables;		/* boolean: flush_tlb_mm() passes true */
	u8 trim_cpumask;		/* NOTE(review): presumably requests trimming
					   stale CPUs from the mask — confirm in tlb.c */
};
253
254 void flush_tlb_local(void);
255 void flush_tlb_one_user(unsigned long addr);
256 void flush_tlb_one_kernel(unsigned long addr);
257 void flush_tlb_multi(const struct cpumask *cpumask,
258 const struct flush_tlb_info *info);
259
is_dyn_asid(u16 asid)260 static inline bool is_dyn_asid(u16 asid)
261 {
262 return asid < TLB_NR_DYN_ASIDS;
263 }
264
is_global_asid(u16 asid)265 static inline bool is_global_asid(u16 asid)
266 {
267 return !is_dyn_asid(asid);
268 }
269
270 #ifdef CONFIG_BROADCAST_TLB_FLUSH
/* Return @mm's global ASID, or 0 when it has none (or INVLPGB is absent). */
static inline u16 mm_global_asid(struct mm_struct *mm)
{
	u16 asid;

	if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
		return 0;

	/* Pairs with the smp_store_release() in mm_assign_global_asid(). */
	asid = smp_load_acquire(&mm->context.global_asid);

	/* mm->context.global_asid is either 0, or a global ASID */
	VM_WARN_ON_ONCE(asid && is_dyn_asid(asid));

	return asid;
}
285
mm_init_global_asid(struct mm_struct * mm)286 static inline void mm_init_global_asid(struct mm_struct *mm)
287 {
288 if (cpu_feature_enabled(X86_FEATURE_INVLPGB)) {
289 mm->context.global_asid = 0;
290 mm->context.asid_transition = false;
291 }
292 }
293
/* Publish a newly allocated global ASID for @mm. */
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid)
{
	/*
	 * Notably flush_tlb_mm_range() -> broadcast_tlb_flush() ->
	 * finish_asid_transition() needs to observe asid_transition = true
	 * once it observes global_asid.  The smp_store_release() orders the
	 * asid_transition store before the global_asid store; it pairs with
	 * the smp_load_acquire() in mm_global_asid().
	 */
	mm->context.asid_transition = true;
	smp_store_release(&mm->context.global_asid, asid);
}
304
/* Mark @mm's global-ASID transition as finished. */
static inline void mm_clear_asid_transition(struct mm_struct *mm)
{
	WRITE_ONCE(mm->context.asid_transition, false);
}
309
mm_in_asid_transition(struct mm_struct * mm)310 static inline bool mm_in_asid_transition(struct mm_struct *mm)
311 {
312 if (!cpu_feature_enabled(X86_FEATURE_INVLPGB))
313 return false;
314
315 return mm && READ_ONCE(mm->context.asid_transition);
316 }
317
318 extern void mm_free_global_asid(struct mm_struct *mm);
319 #else
/* !CONFIG_BROADCAST_TLB_FLUSH: no global ASIDs; all helpers are no-ops. */
static inline u16 mm_global_asid(struct mm_struct *mm) { return 0; }
static inline void mm_init_global_asid(struct mm_struct *mm) { }
static inline void mm_free_global_asid(struct mm_struct *mm) { }
static inline void mm_assign_global_asid(struct mm_struct *mm, u16 asid) { }
static inline void mm_clear_asid_transition(struct mm_struct *mm) { }
static inline bool mm_in_asid_transition(struct mm_struct *mm) { return false; }
326 #endif /* CONFIG_BROADCAST_TLB_FLUSH */
327
328 #define flush_tlb_mm(mm) \
329 flush_tlb_mm_range(mm, 0UL, TLB_FLUSH_ALL, 0UL, true)
330
331 #define flush_tlb_range(vma, start, end) \
332 flush_tlb_mm_range((vma)->vm_mm, start, end, \
333 ((vma)->vm_flags & VM_HUGETLB) \
334 ? huge_page_shift(hstate_vma(vma)) \
335 : PAGE_SHIFT, true)
336
337 extern void flush_tlb_all(void);
338 extern void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
339 unsigned long end, unsigned int stride_shift,
340 bool freed_tables);
341 extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
342
flush_tlb_page(struct vm_area_struct * vma,unsigned long a)343 static inline void flush_tlb_page(struct vm_area_struct *vma, unsigned long a)
344 {
345 flush_tlb_mm_range(vma->vm_mm, a, a + PAGE_SIZE, PAGE_SHIFT, false);
346 }
347
arch_tlbbatch_should_defer(struct mm_struct * mm)348 static inline bool arch_tlbbatch_should_defer(struct mm_struct *mm)
349 {
350 bool should_defer = false;
351
352 /* If remote CPUs need to be flushed then defer batch the flush */
353 if (cpumask_any_but(mm_cpumask(mm), get_cpu()) < nr_cpu_ids)
354 should_defer = true;
355 put_cpu();
356
357 return should_defer;
358 }
359
/* Advance @mm's TLB generation counter and return the new value. */
static inline u64 inc_mm_tlb_gen(struct mm_struct *mm)
{
	/*
	 * Bump the generation count.  This also serves as a full barrier
	 * that synchronizes with switch_mm(): callers are required to order
	 * their read of mm_cpumask after their writes to the paging
	 * structures.
	 */
	return atomic64_inc_return(&mm->context.tlb_gen);
}
370
/*
 * Record a deferred TLB flush for @mm in @batch.
 *
 * Note: @start/@end are accepted but not used here — the secondary-TLB
 * notifier below is always issued for the full range (0, -1UL).
 */
static inline void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
		struct mm_struct *mm, unsigned long start, unsigned long end)
{
	/* Bump tlb_gen first; also acts as a full barrier (see inc_mm_tlb_gen()). */
	inc_mm_tlb_gen(mm);
	/* Accumulate every CPU that may hold stale entries for @mm. */
	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
	batch->unmapped_pages = true;
	mmu_notifier_arch_invalidate_secondary_tlbs(mm, 0, -1UL);
}
379
380 extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
381
/*
 * Decide whether a change of PTE flags from 'oldflags' to 'newflags'
 * requires a TLB flush.  @ignore_access skips the accessed bit; see the
 * callers pte_needs_flush() and huge_pmd_needs_flush().
 */
static inline bool pte_flags_need_flush(unsigned long oldflags,
					unsigned long newflags,
					bool ignore_access)
{
	/*
	 * Flags that require a flush when cleared but not when they are set.
	 * Only include flags that would not trigger spurious page-faults.
	 * Non-present entries are not cached.  Hardware would set the
	 * dirty/access bit if needed without a fault.
	 */
	const pteval_t flush_on_clear = _PAGE_DIRTY | _PAGE_PRESENT |
					_PAGE_ACCESSED;
	/* Software-only bits are never cached in the TLB; ignore them. */
	const pteval_t software_flags = _PAGE_SOFTW1 | _PAGE_SOFTW2 |
					_PAGE_SOFTW3 | _PAGE_SOFTW4 |
					_PAGE_SAVED_DIRTY;
	/* Flags whose change in either direction requires a flush. */
	const pteval_t flush_on_change = _PAGE_RW | _PAGE_USER | _PAGE_PWT |
			  _PAGE_PCD | _PAGE_PSE | _PAGE_GLOBAL | _PAGE_PAT |
			  _PAGE_PAT_LARGE | _PAGE_PKEY_BIT0 | _PAGE_PKEY_BIT1 |
			  _PAGE_PKEY_BIT2 | _PAGE_PKEY_BIT3 | _PAGE_NX;
	unsigned long diff = oldflags ^ newflags;

	/* The three categories must be mutually disjoint. */
	BUILD_BUG_ON(flush_on_clear & software_flags);
	BUILD_BUG_ON(flush_on_clear & flush_on_change);
	BUILD_BUG_ON(flush_on_change & software_flags);

	/* Ignore software flags */
	diff &= ~software_flags;

	if (ignore_access)
		diff &= ~_PAGE_ACCESSED;

	/*
	 * Did any of the 'flush_on_clear' flags get cleared between
	 * 'oldflags' and 'newflags'?
	 */
	if (diff & oldflags & flush_on_clear)
		return true;

	/* Flush on modified flags. */
	if (diff & flush_on_change)
		return true;

	/* Ensure there are no flags that were left behind */
	if (IS_ENABLED(CONFIG_DEBUG_VM) &&
	    (diff & ~(flush_on_clear | software_flags | flush_on_change))) {
		VM_WARN_ON_ONCE(1);
		return true;
	}

	return false;
}
433
434 /*
435 * pte_needs_flush() checks whether permissions were demoted and require a
436 * flush. It should only be used for userspace PTEs.
437 */
pte_needs_flush(pte_t oldpte,pte_t newpte)438 static inline bool pte_needs_flush(pte_t oldpte, pte_t newpte)
439 {
440 /* !PRESENT -> * ; no need for flush */
441 if (!(pte_flags(oldpte) & _PAGE_PRESENT))
442 return false;
443
444 /* PFN changed ; needs flush */
445 if (pte_pfn(oldpte) != pte_pfn(newpte))
446 return true;
447
448 /*
449 * check PTE flags; ignore access-bit; see comment in
450 * ptep_clear_flush_young().
451 */
452 return pte_flags_need_flush(pte_flags(oldpte), pte_flags(newpte),
453 true);
454 }
455 #define pte_needs_flush pte_needs_flush
456
457 /*
458 * huge_pmd_needs_flush() checks whether permissions were demoted and require a
459 * flush. It should only be used for userspace huge PMDs.
460 */
huge_pmd_needs_flush(pmd_t oldpmd,pmd_t newpmd)461 static inline bool huge_pmd_needs_flush(pmd_t oldpmd, pmd_t newpmd)
462 {
463 /* !PRESENT -> * ; no need for flush */
464 if (!(pmd_flags(oldpmd) & _PAGE_PRESENT))
465 return false;
466
467 /* PFN changed ; needs flush */
468 if (pmd_pfn(oldpmd) != pmd_pfn(newpmd))
469 return true;
470
471 /*
472 * check PMD flags; do not ignore access-bit; see
473 * pmdp_clear_flush_young().
474 */
475 return pte_flags_need_flush(pmd_flags(oldpmd), pmd_flags(newpmd),
476 false);
477 }
478 #define huge_pmd_needs_flush huge_pmd_needs_flush
479
480 #ifdef CONFIG_ADDRESS_MASKING
tlbstate_lam_cr3_mask(void)481 static inline u64 tlbstate_lam_cr3_mask(void)
482 {
483 u64 lam = this_cpu_read(cpu_tlbstate.lam);
484
485 return lam << X86_CR3_LAM_U57_BIT;
486 }
487
/* Record the active LAM mode (stored shifted down) and its untag mask. */
static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
{
	unsigned long shifted_lam = lam >> X86_CR3_LAM_U57_BIT;

	this_cpu_write(cpu_tlbstate.lam, shifted_lam);
	this_cpu_write(tlbstate_untag_mask, untag_mask);
}
493
494 #else
495
/* !CONFIG_ADDRESS_MASKING: LAM is compiled out, so no CR3 LAM bits exist. */
static inline u64 tlbstate_lam_cr3_mask(void)
{
	return 0;
}

/* !CONFIG_ADDRESS_MASKING: nothing to record. */
static inline void cpu_tlbstate_update_lam(unsigned long lam, u64 untag_mask)
{
}
504 #endif
505 #else /* !MODULE */
506 #define enter_lazy_tlb enter_lazy_tlb
507 extern void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
508 __compiletime_error("enter_lazy_tlb() should not be used in modules");
509 #endif /* !MODULE */
510
/*
 * Flush the entire TLB, including global entries, by toggling CR4.PGE:
 * write CR4 with PGE flipped, then restore the original value (per the
 * SDM, changing CR4.PGE invalidates all TLB entries).  @cr4 must hold
 * the current CR4 value; the two writes must happen in this exact order.
 */
static inline void __native_tlb_flush_global(unsigned long cr4)
{
	native_write_cr4(cr4 ^ X86_CR4_PGE);
	native_write_cr4(cr4);
}
516 #endif /* _ASM_X86_TLBFLUSH_H */
517