// SPDX-License-Identifier: GPL-2.0
/*
 * Alpha TLB shootdown helpers
 *
 * Copyright (C) 2025 Magnus Lindholm <linmag7@gmail.com>
 *
 * Alpha-specific TLB flush helpers that cannot be expressed purely
 * as inline functions.
 *
 * These helpers provide combined MM context handling (ASN rollover)
 * and immediate TLB invalidation for page migration and memory
 * compaction paths, where lazy shootdowns are insufficient.
 */

#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <asm/tlbflush.h>
#include <asm/pal.h>
#include <asm/mmu_context.h>

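/*
 * Mirror of the asn_locked() test in arch/alpha/kernel/smp.c: nonzero while
 * this CPU is in the middle of switching mm contexts, in which case an IPI
 * handler must not reload the context underneath it.
 */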
#define asn_locked() (cpu_data[smp_processor_id()].asn_lock)

/*
 * Migration/compaction helper: combine mm-context (ASN) handling with an
 * immediate per-page TLB invalidate and, for exec mappings, an ITB
 * invalidate as well.
 *
 * This mirrors the combined SMP IPI handler semantics, but runs locally on UP.
 */
#ifndef CONFIG_SMP
void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	int tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2;

	/*
	 * First do the mm-context side:
	 * If we're currently running this mm, reload a fresh context ASN.
	 * Otherwise, mark the context invalid.
	 *
	 * On UP, this is mostly about matching the SMP semantics and ensuring
	 * exec/i-cache tagging assumptions hold when compaction migrates pages.
	 */
	if (mm == current->active_mm)
		flush_tlb_current(mm);
	else
		flush_tlb_other(mm);

	/*
	 * Then do the immediate translation kill for this VA.
	 * For exec mappings, invalidate the ITB entry as well (tbi type 3).
	 */
	tbi(tbi_type, addr);
}

#else
struct tlb_mm_and_addr {
	struct mm_struct *mm;
	unsigned long addr;
	int tbi_type;		/* 2 = DTB, 3 = ITB+DTB */
};

static void ipi_flush_mm_and_page(void *x)
{
	struct tlb_mm_and_addr *d = x;

	/* Part 1: mm-context side (Alpha tags TLB entries with the ASN). */
	if (d->mm == current->active_mm && !asn_locked())
		__load_new_mm_context(d->mm);
	else
		flush_tlb_other(d->mm);

	/* Part 2: immediate per-VA invalidation on this CPU. */
	tbi(d->tbi_type, d->addr);
}

void migrate_flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
{
	struct mm_struct *mm = vma->vm_mm;
	struct tlb_mm_and_addr d = {
		.mm = mm,
		.addr = addr,
		.tbi_type = (vma->vm_flags & VM_EXEC) ? 3 : 2,
	};

	/*
	 * One synchronous rendezvous: every CPU runs ipi_flush_mm_and_page().
	 * This is the "combined" version of flush_tlb_mm + per-page invalidate.
	 */
	preempt_disable();
	on_each_cpu(ipi_flush_mm_and_page, &d, 1);

	/*
	 * Mimic flush_tlb_mm()'s mm_users <= 1 optimization: nobody else can
	 * be using this mm, so drop the other CPUs' contexts and force a
	 * fresh ASN the next time they pick it up.
	 */
	if (atomic_read(&mm->mm_users) <= 1) {
		int cpu, this_cpu = smp_processor_id();

		for (cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!cpu_online(cpu) || cpu == this_cpu)
				continue;
			if (READ_ONCE(mm->context[cpu]))
				WRITE_ONCE(mm->context[cpu], 0);
		}
	}
	preempt_enable();
}


#endif	/* CONFIG_SMP */
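
/*
 * Usage sketch (illustrative only, not part of this file): a migration path
 * that has just rewritten a PTE could flush the stale translation right away
 * instead of waiting for a lazy shootdown.  The call site below is an assumed
 * example; the PTE update and the surrounding locking are the caller's
 * responsibility.
 *
 *	set_pte_at(mm, addr, ptep, newpte);
 *	migrate_flush_tlb_page(vma, addr);
 */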