/* SMP TLB support routines.
 *
 * Copyright (C) 2006-2008 Panasonic Corporation
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/profile.h>
#include <linux/smp.h>
#include <asm/tlbflush.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <asm/processor.h>
#include <asm/bug.h>
#include <asm/exceptions.h>
#include <asm/hardirq.h>
#include <asm/fpu.h>
#include <asm/mmu_context.h>
#include <asm/thread_info.h>
#include <asm/cpu-regs.h>
#include <asm/intctl-regs.h>

/*
 * Pseudo virtual address passed in place of a real one to request that the
 * whole TLB be flushed rather than a single page
 */
#define FLUSH_ALL	0xffffffff

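/* State shared between the initiator of a remote flush and the CPUs that
 * service it: the initiator publishes flush_mm/flush_va and the set of target
 * CPUs in flush_cpumask (all under tlbstate_lock); each target CPU clears its
 * own bit in flush_cpumask once it has flushed.
 */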
static cpumask_t flush_cpumask;
static struct mm_struct *flush_mm;
static unsigned long flush_va;
static DEFINE_SPINLOCK(tlbstate_lock);

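/* Per-CPU TLB state; at boot every CPU starts out running init_mm. */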
DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) = {
	&init_mm, 0
};

static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va);
static void do_flush_tlb_all(void *info);

/**
 * smp_flush_tlb - Callback to invalidate the TLB.
 * @unused: Callback context (ignored).
 */
void smp_flush_tlb(void *unused)
{
	unsigned long cpu_id;

	cpu_id = get_cpu();

	if (!cpumask_test_cpu(cpu_id, &flush_cpumask))
		/* This was a BUG() but until someone can quote me the line
		 * from the Intel manual that guarantees an IPI to multiple
		 * CPUs is retried _only_ on the erroring CPUs, it's staying
		 * as a return
		 *
		 * BUG();
		 */
		goto out;

	if (flush_va == FLUSH_ALL)
		local_flush_tlb();
	else
		local_flush_tlb_page(flush_mm, flush_va);

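	/* The barriers order the flush above before the bit clear below, so
	 * that the initiator busy-waiting in flush_tlb_others() cannot see
	 * our bit drop out of flush_cpumask before the flush is done.
	 */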
	smp_mb__before_clear_bit();
	cpumask_clear_cpu(cpu_id, &flush_cpumask);
	smp_mb__after_clear_bit();
out:
	put_cpu();
}

/**
 * flush_tlb_others - Tell the specified CPUs to invalidate their TLBs
 * @cpumask: The list of CPUs to target.
 * @mm: The VM context to flush from (if va != FLUSH_ALL).
 * @va: Virtual address to flush or FLUSH_ALL to flush everything.
 */
static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
			     unsigned long va)
{
	cpumask_t tmp;

	/* A couple of sanity checks (to be removed):
	 * - mask must not be empty
	 * - current CPU must not be in mask
	 * - we do not send IPIs to as-yet unbooted CPUs.
	 */
	BUG_ON(!mm);
	BUG_ON(cpumask_empty(&cpumask));
	BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask));

	cpumask_and(&tmp, &cpumask, cpu_online_mask);
	BUG_ON(!cpumask_equal(&cpumask, &tmp));

	/* I'm not happy about this global shared spinlock in the MM hot path,
	 * but we'll see how contended it is.
	 *
	 * Note that spin_lock() leaves IRQs enabled, so this CPU can still
	 * service cross-CPU function-call IPIs while it busy-waits below.
	 */
	spin_lock(&tlbstate_lock);

	flush_mm = mm;
	flush_va = va;
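	/* Atomically merge the targets into flush_cpumask so that each
	 * responding CPU can clear its own bit without racing with us. */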
#if NR_CPUS <= BITS_PER_LONG
	atomic_set_mask(cpumask.bits[0], &flush_cpumask.bits[0]);
#else
#error Not supported.
#endif

	/* FIXME: if NR_CPUS>=3, change send_IPI_mask */
	smp_call_function(smp_flush_tlb, NULL, 1);

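	/* Wait for every targeted CPU to clear its bit, i.e. to complete its
	 * local flush. */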
	while (!cpumask_empty(&flush_cpumask))
		/* Lockup detection does not belong here */
		smp_mb();

	flush_mm = NULL;
	flush_va = 0;
	spin_unlock(&tlbstate_lock);
}

/**
 * flush_tlb_mm - Invalidate TLB of specified VM context
 * @mm: The VM context to invalidate.
 */
void flush_tlb_mm(struct mm_struct *mm)
{
	cpumask_t cpu_mask;

	preempt_disable();
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

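	/* Flush our own TLB directly, then IPI only those other CPUs that
	 * have actually run this mm (the flushers below use the same
	 * pattern). */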
	local_flush_tlb();
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

/**
 * flush_tlb_current_task - Invalidate TLB of current task
 */
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	local_flush_tlb();
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	preempt_enable();
}

/**
 * flush_tlb_page - Invalidate TLB of page
 * @vma: The VM context to invalidate the page for.
 * @va: The virtual address of the page to invalidate.
 */
void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	cpumask_t cpu_mask;

	preempt_disable();
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);

	local_flush_tlb_page(mm, va);
	if (!cpumask_empty(&cpu_mask))
		flush_tlb_others(cpu_mask, mm, va);

	preempt_enable();
}

/**
 * do_flush_tlb_all - Callback to completely invalidate a TLB
 * @unused: Callback context (ignored).
 */
static void do_flush_tlb_all(void *unused)
{
	local_flush_tlb_all();
}

/**
 * flush_tlb_all - Completely invalidate TLBs on all CPUs
 */
void flush_tlb_all(void)
{
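	/* Run do_flush_tlb_all() on every online CPU, including this one,
	 * and wait for all of them to finish. */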
	on_each_cpu(do_flush_tlb_all, NULL, 1);
}