/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * set synchronisation state between this boot processor
	 * and the secondary one
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * now the secondary core is starting up let it run its
	 * calibrations, then wait for it to finish
	 */
	spin_unlock(&boot_lock);
}
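/*
 * The secondary side of the handshake above lives in the early startup
 * code, which is assumed to spin until secondary_release matches the
 * core's own id (dozing until IPI_WAKEUP arrives), then pick up
 * secondary_thread_info and branch into secondary_start_kernel().
 */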

void __init smp_prepare_boot_cpu(void)
{
}

void __init smp_init_cpus(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++)
		set_cpu_possible(i, true);
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int i;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for (i = 0; i < max_cpus; i++)
		set_cpu_present(i, true);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}
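/*
 * The wait above pairs with complete(&cpu_running) in
 * secondary_start_kernel(); synchronise_count_master() then runs in
 * lockstep with synchronise_count_slave() on the new CPU to line up
 * the CPU timer counters.
 */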

asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();

	preempt_disable();
	/*
	 * OK, it's off to the idle thread for us
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		/* Nothing to do; waking the core from doze was the point */
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}
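/*
 * The platform's IPI interrupt handler is expected to decode its
 * mailbox/soft-IRQ register and call handle_IPI() with one of the
 * ipi_msg_type values above (the OMPIC driver does this on current
 * OpenRISC SMP platforms).
 */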

void smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze, if the power management unit is present */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

/* not supported, yet */
int setup_profiling_timer(unsigned int multiplier)
{
	return -EINVAL;
}

void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}
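/*
 * Registration sketch (from the irqchip side, not this file): the
 * platform's IPI-capable interrupt controller driver hands its trigger
 * function over during init, e.g.
 *
 *	set_smp_cross_call(ompic_raise_softirq);
 *
 * where the callback writes the message into each target core's
 * mailbox and raises the soft IRQ.
 */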

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}
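/*
 * These two hooks are all the generic cross-call code needs: e.g. a
 * caller of smp_call_function_single(cpu, fn, arg, true) ends up here
 * via IPI_CALL_FUNC_SINGLE, and the receiving CPU runs fn from
 * handle_IPI() above.
 */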

/* TLB flush operations - Performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}
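/*
 * Note the get_cpu()/put_cpu() bracket above: it disables preemption,
 * so the cpumask_any_but() test ("is the local CPU the only target?")
 * cannot be invalidated by this task migrating to another CPU between
 * the check and the local flush.
 */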

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

static void smp_flush_tlb_range(struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);
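/*
 * Broadcasting here keeps every core's instruction cache coherent with
 * a freshly written page of code; the arch cache-sync path is assumed
 * to call this when such a page is dirtied, and EXPORT_SYMBOL makes it
 * available to modules as well.
 */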