xref: /kvm-unit-tests/lib/arm/smp.c (revision b36f35a82ff4cec5f71a68aa782332e2bc3488f7)
1 /*
2  * Secondary cpu support
3  *
4  * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 #include <libcflat.h>
9 #include <auxinfo.h>
10 #include <asm/thread_info.h>
11 #include <asm/spinlock.h>
12 #include <asm/cpumask.h>
13 #include <asm/barrier.h>
14 #include <asm/mmu.h>
15 #include <asm/psci.h>
16 #include <asm/smp.h>
17 
/*
 * Set true by do_idle() when CPU0 enters the idle loop; a test may set
 * it manually beforehand to allow on_cpu*() to target CPU0 (see the
 * assert in on_cpu_async()).
 */
bool cpu0_calls_idle;

cpumask_t cpu_present_mask;	/* CPUs available, per for_each_present_cpu() users */
cpumask_t cpu_online_mask;	/* CPUs that have booted and marked themselves online */
cpumask_t cpu_idle_mask;	/* CPUs currently parked in do_idle() */

/* Single shared staging slot for the next secondary's stack and entry
 * point; writes are serialized by @lock. */
struct secondary_data secondary_data;
static struct spinlock lock;
26 
/* Needed to compile with -Wmissing-prototypes */
secondary_entry_fn secondary_cinit(void);

/*
 * C entry point for a freshly booted secondary CPU, called from the
 * assembly boot stub. Initializes this CPU's thread info, publishes
 * the CPU as online, and returns the test-supplied entry function so
 * the stub can invoke it on an empty stack.
 */
secondary_entry_fn secondary_cinit(void)
{
	struct thread_info *ti = current_thread_info();
	secondary_entry_fn entry;

	thread_info_init(ti, 0);

	/* Unless the test runs with the MMU off, adopt the identity-map
	 * page table and record that this CPU's MMU is enabled. */
	if (!(auxinfo.flags & AUXINFO_MMU_OFF)) {
		ti->pgtable = mmu_idmap;
		mmu_mark_enabled(ti->cpu);
	}

	/*
	 * Save secondary_data.entry locally to avoid opening a race
	 * window between marking ourselves online and calling it.
	 */
	entry = secondary_data.entry;
	set_cpu_online(ti->cpu, true);
	sev();	/* wake the booting CPU spinning in wfe() in __smp_boot_secondary() */

	/*
	 * Return to the assembly stub, allowing entry to be called
	 * from there with an empty stack.
	 */
	return entry;
}
56 
/*
 * Boot @cpu via PSCI and spin until it marks itself online.
 * Caller must hold @lock: secondary_data is a single shared slot, so
 * only one secondary may be staged/booted at a time.
 */
static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	int ret;

	/* Stage the new CPU's stack and entry point where
	 * secondary_cinit() will read them. */
	secondary_data.stack = thread_stack_alloc();
	secondary_data.entry = entry;
	mmu_mark_disabled(cpu);
	ret = cpu_psci_cpu_boot(cpu);
	assert(ret == 0);

	/* secondary_cinit() issues sev() after setting the online bit. */
	while (!cpu_online(cpu))
		wfe();
}
70 
/*
 * Boot @cpu with @entry as its entry function. Each CPU may be booted
 * only once; concurrent boots are serialized by @lock.
 */
void smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	spin_lock(&lock);
	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
	__smp_boot_secondary(cpu, entry);
	spin_unlock(&lock);
}
78 
/*
 * Per-CPU work mailbox for on_cpu*()/do_idle(): the function and
 * argument to run next, plus the set of CPUs currently waiting on
 * this one (used by deadlock_check()).
 */
struct on_cpu_info {
	void (*func)(void *data);
	void *data;
	cpumask_t waiters;
};
static struct on_cpu_info on_cpu_info[NR_CPUS];
85 
86 static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
87 {
88 	int i;
89 
90 	for_each_cpu(i, waiters) {
91 		if (i == cpu) {
92 			printf("CPU%d", cpu);
93 			*found = true;
94 			return;
95 		}
96 		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
97 		if (*found) {
98 			printf(" <=> CPU%d", i);
99 			return;
100 		}
101 	}
102 }
103 
104 static void deadlock_check(int me, int cpu)
105 {
106 	bool found = false;
107 
108 	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
109 	if (found) {
110 		printf(" <=> CPU%d deadlock detectd\n", me);
111 		assert(0);
112 	}
113 }
114 
/*
 * Wait for @cpu to become idle. Records the calling CPU in @cpu's
 * waiters mask so wait cycles can be detected, and clears it once the
 * wait completes. Waiting on yourself is a no-op.
 */
static void cpu_wait(int cpu)
{
	int me = smp_processor_id();

	if (cpu == me)
		return;

	/* Publish the me -> cpu wait edge before checking for a cycle. */
	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
	deadlock_check(me, cpu);
	while (!cpu_idle(cpu))
		wfe();
	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
128 
/*
 * Idle loop used as the secondary entry for on_cpu*(): advertise this
 * CPU as idle, then forever sleep until work is posted in our
 * on_cpu_info slot, run it, and go idle again. Never returns.
 */
void do_idle(void)
{
	int cpu = smp_processor_id();

	if (cpu == 0)
		cpu0_calls_idle = true;

	set_cpu_idle(cpu, true);
	sev();	/* wake CPUs sleeping in cpu_wait() */

	for (;;) {
		/* on_cpu_async() clears our idle bit after filling func/data. */
		while (cpu_idle(cpu))
			wfe();
		smp_rmb();	/* read func/data only after observing !idle */
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;	/* mark the mailbox slot free */
		smp_wmb();	/* order the NULL store before re-setting idle */
		set_cpu_idle(cpu, true);
		sev();
	}
}
150 
/*
 * Run @func(@data) on @cpu without waiting for completion. Boots the
 * target into do_idle() if it is not yet online, waits for the
 * target's mailbox slot to be free, then posts the work and clears
 * the target's idle bit to release it. Runs @func inline when @cpu is
 * the calling CPU.
 */
void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle, "Waiting on CPU0, which is unlikely to idle. "
						"If this is intended set cpu0_calls_idle=1");

	spin_lock(&lock);
	if (!cpu_online(cpu))
		__smp_boot_secondary(cpu, do_idle);
	spin_unlock(&lock);

	/*
	 * Wait until any previous posting has been consumed (func is
	 * NULL again). Note the loop exits with @lock still held, so
	 * the slot cannot be claimed by another poster before we fill it.
	 */
	for (;;) {
		cpu_wait(cpu);
		spin_lock(&lock);
		if ((volatile void *)on_cpu_info[cpu].func == NULL)
			break;
		spin_unlock(&lock);
	}
	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	spin_unlock(&lock);
	/* Clearing the idle bit releases do_idle() to pick up the work. */
	set_cpu_idle(cpu, false);
	sev();
}
179 
/*
 * Run @func(@data) on @cpu and wait until it completes (i.e. until
 * the target CPU is idle again).
 */
void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);
}
185 
/*
 * Run @func(@data) on every present CPU, including the caller, and
 * wait for all the other CPUs to return to idle.
 */
void on_cpus(void (*func)(void *data), void *data)
{
	int cpu, me = smp_processor_id();

	/* Post the work to everyone else first, then run it ourselves. */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, func, data);
	}
	func(data);

	/* Record a wait edge on each other CPU so cycles are caught,
	 * then sleep until all CPUs but us are idle again. */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		wfe();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
208