// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 SiFive
 */

#include <asm/cacheflush.h>

#ifdef CONFIG_SMP

#include <asm/sbi.h>

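/*
 * IPI callback: runs on each targeted CPU and executes a FENCE.I there via
 * local_flush_icache_all().  The info argument is unused.
 */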
static void ipi_remote_fence_i(void *info)
{
	local_flush_icache_all();
}

void flush_icache_all(void)
{
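	/*
	 * Prefer a single SBI call that asks the firmware to execute FENCE.I
	 * on all harts; without SBI support, fall back to an IPI that runs
	 * the fence locally on each online CPU.
	 */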
	if (IS_ENABLED(CONFIG_RISCV_SBI))
		sbi_remote_fence_i(NULL);
	else
		on_each_cpu(ipi_remote_fence_i, NULL, 1);
}
EXPORT_SYMBOL(flush_icache_all);

/*
 * Performs an icache flush for the given MM context.  RISC-V has no direct
 * mechanism for instruction cache shootdowns, so instead we send an IPI that
 * informs the remote harts they need to flush their local instruction caches.
 * To avoid pathologically slow behavior in a common case (a bunch of
 * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
 * IPIs for harts that are not currently executing an MM context and instead
 * schedule a deferred local instruction cache flush to be performed before
 * execution resumes on each hart.
 */
void flush_icache_mm(struct mm_struct *mm, bool local)
{
	unsigned int cpu;
	cpumask_t others, *mask;

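	/*
	 * Pin this task to the current hart: smp_processor_id() and the
	 * cpumask updates below assume we cannot migrate mid-flush.
	 */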
	preempt_disable();

	/* Mark every hart's icache as needing a flush for this MM. */
	mask = &mm->context.icache_stale_mask;
	cpumask_setall(mask);
	/* Flush this hart's I$ now, and mark it as flushed. */
	cpu = smp_processor_id();
	cpumask_clear_cpu(cpu, mask);
	local_flush_icache_all();

	/*
	 * Flush the I$ of other harts concurrently executing, and mark them as
	 * flushed.
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
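	/*
	 * If no other hart is currently running this mm, a purely local
	 * flush suffices regardless of what the caller requested.
	 */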
	local |= cpumask_empty(&others);
	if (mm == current->active_mm && local) {
		/*
		 * It's assumed that at least one strongly ordered operation is
		 * performed on this hart between setting a hart's cpumask bit
		 * and scheduling this MM context on that hart.  Sending an SBI
		 * remote message will do this, but in the case where no
		 * messages are sent we still need to order this hart's writes
		 * with flush_icache_deferred().
		 */
		smp_mb();
	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
		cpumask_t hartid_mask;

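		/*
		 * The SBI firmware identifies harts by hartid rather than by
		 * Linux CPU number, so translate the mask before passing it
		 * down.
		 */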
		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
	} else {
		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
	}

	preempt_enable();
}

#endif /* CONFIG_SMP */

#ifdef CONFIG_MMU
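/*
 * Keep the icache coherent when a page is mapped: PG_dcache_clean records
 * that the icache has already been synchronized with this page's contents,
 * so only the first mapping of a given page triggers a global flush.
 */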
void flush_icache_pte(pte_t pte)
{
	struct page *page = pte_page(pte);

	if (!test_and_set_bit(PG_dcache_clean, &page->flags))
		flush_icache_all();
}
#endif /* CONFIG_MMU */