xref: /kvm-unit-tests/lib/arm/smp.c (revision 018550041b3837264cbaa6c8984ba57a93491a51)
1 /*
2  * Secondary cpu support
3  *
4  * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 #include <libcflat.h>
9 #include <auxinfo.h>
10 #include <cpumask.h>
11 #include <asm/thread_info.h>
12 #include <asm/spinlock.h>
13 #include <asm/barrier.h>
14 #include <asm/mmu.h>
15 #include <asm/psci.h>
16 #include <asm/smp.h>
17 
/* Set by CPU0 when it enters do_idle(); allows on_cpu_async() to target CPU0. */
bool cpu0_calls_idle;

cpumask_t cpu_present_mask;	/* CPUs that exist */
cpumask_t cpu_online_mask;	/* CPUs that have been booted */
cpumask_t cpu_idle_mask;	/* CPUs currently sitting in do_idle() */

/* Boot parameters (stack, entry point) handed to the next secondary CPU. */
struct secondary_data secondary_data;
/* Serializes secondary boot; protects secondary_data. */
static struct spinlock lock;
26 
/* Needed to compile with -Wmissing-prototypes */
secondary_entry_fn secondary_cinit(void);

/*
 * C entry point for a freshly booted secondary CPU, called from the
 * assembly startup stub. Initializes this CPU's thread info and MMU
 * state, marks the CPU online, and returns the test's entry function
 * for the stub to invoke.
 */
secondary_entry_fn secondary_cinit(void)
{
	struct thread_info *ti = current_thread_info();
	secondary_entry_fn entry;

	thread_info_init(ti, 0);

	/* Unless the test runs with the MMU off, adopt the identity map. */
	if (!(auxinfo.flags & AUXINFO_MMU_OFF)) {
		ti->pgtable = mmu_idmap;
		mmu_mark_enabled(ti->cpu);
	}

	/*
	 * Save secondary_data.entry locally to avoid opening a race
	 * window between marking ourselves online and calling it.
	 */
	entry = secondary_data.entry;
	set_cpu_online(ti->cpu, true);
	smp_send_event();

	/*
	 * Return to the assembly stub, allowing entry to be called
	 * from there with an empty stack.
	 */
	return entry;
}
56 
57 static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
58 {
59 	int ret;
60 
61 	secondary_data.stack = thread_stack_alloc();
62 	secondary_data.entry = entry;
63 	mmu_mark_disabled(cpu);
64 	ret = cpu_psci_cpu_boot(cpu);
65 	assert(ret == 0);
66 
67 	while (!cpu_online(cpu))
68 		smp_wait_for_event();
69 }
70 
/*
 * Boot secondary @cpu with @entry as its entry point and wait for it to
 * come online. Asserts that the CPU has not already been booted.
 */
void smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	spin_lock(&lock);
	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
	__smp_boot_secondary(cpu, entry);
	spin_unlock(&lock);
}
78 
79 void smp_boot_secondary_nofail(int cpu, secondary_entry_fn entry)
80 {
81 	spin_lock(&lock);
82 	if (!cpu_online(cpu))
83 		__smp_boot_secondary(cpu, entry);
84 	spin_unlock(&lock);
85 }
86 
/* Per-CPU work slot used by on_cpu_async()/on_cpu()/on_cpus(). */
struct on_cpu_info {
	void (*func)(void *data);	/* function to run; NULL when slot is free */
	void *data;			/* argument passed to func */
	cpumask_t waiters;		/* CPUs waiting for this CPU to go idle */
};
static struct on_cpu_info on_cpu_info[NR_CPUS];
/* One lock bit per CPU, protecting the corresponding on_cpu_info[] entry. */
static cpumask_t on_cpu_info_lock;
94 
95 static bool get_on_cpu_info(int cpu)
96 {
97 	return !cpumask_test_and_set_cpu(cpu, &on_cpu_info_lock);
98 }
99 
100 static void put_on_cpu_info(int cpu)
101 {
102 	int ret = cpumask_test_and_clear_cpu(cpu, &on_cpu_info_lock);
103 	assert(ret);
104 }
105 
/*
 * Recursively walk the wait graph: for every CPU i in @waiters, check
 * whether i is @cpu itself, or whether @cpu is (transitively) among the
 * CPUs i waits on. When a cycle is found, *@found is set and the cycle
 * members are printed as the recursion unwinds.
 */
static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
{
	int i;

	for_each_cpu(i, waiters) {
		if (i == cpu) {
			/* Start of the reported cycle. */
			printf("CPU%d", cpu);
			*found = true;
			return;
		}
		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
		if (*found) {
			/* Unwinding: print each link of the cycle. */
			printf(" <=> CPU%d", i);
			return;
		}
	}
}
123 
124 static void deadlock_check(int me, int cpu)
125 {
126 	bool found = false;
127 
128 	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
129 	if (found) {
130 		printf(" <=> CPU%d deadlock detectd\n", me);
131 		assert(0);
132 	}
133 }
134 
135 static void cpu_wait(int cpu)
136 {
137 	int me = smp_processor_id();
138 
139 	if (cpu == me)
140 		return;
141 
142 	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
143 	deadlock_check(me, cpu);
144 	while (!cpu_idle(cpu))
145 		smp_wait_for_event();
146 	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
147 }
148 
/*
 * Idle loop for CPUs driven by on_cpu_async(): sit idle until a
 * function is installed in our on_cpu_info slot, run it, then go idle
 * again. Never returns.
 */
void do_idle(void)
{
	int cpu = smp_processor_id();

	if (cpu == 0)
		cpu0_calls_idle = true;

	set_cpu_idle(cpu, true);
	smp_send_event();

	for (;;) {
		/* Sleep until somebody clears our idle bit. */
		while (cpu_idle(cpu))
			smp_wait_for_event();
		/* Order the reads of func/data after observing !idle. */
		smp_rmb();
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;
		/* Make the cleared func visible before announcing idle. */
		smp_wmb();
		set_cpu_idle(cpu, true);
		smp_send_event();
	}
}
170 
/*
 * Schedule @func(@data) to run on @cpu without waiting for completion.
 * Boots @cpu into do_idle() if it isn't online yet. When @cpu is the
 * calling CPU, @func runs synchronously instead.
 */
void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle, "Waiting on CPU0, which is unlikely to idle. "
						"If this is intended set cpu0_calls_idle=1");

	smp_boot_secondary_nofail(cpu, do_idle);

	/*
	 * Wait until the target is idle with an empty func slot, while
	 * holding its on_cpu_info lock to exclude concurrent callers.
	 * NOTE(review): the volatile cast applies to the loaded value,
	 * presumably intended to force a fresh read of func — confirm.
	 */
	for (;;) {
		cpu_wait(cpu);
		if (get_on_cpu_info(cpu)) {
			if ((volatile void *)on_cpu_info[cpu].func == NULL)
				break;
			put_on_cpu_info(cpu);
		}
	}

	/* Install the work, then kick the target out of idle. */
	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	set_cpu_idle(cpu, false);
	put_on_cpu_info(cpu);
	smp_send_event();
}
198 
/*
 * Run @func(@data) on @cpu and wait until @cpu is idle again, i.e.
 * until the function has finished.
 */
void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);
}
204 
/*
 * Run @func(@data) on every present CPU, including the calling one, and
 * wait for all other CPUs to become idle again before returning.
 */
void on_cpus(void (*func)(void *data), void *data)
{
	int cpu, me = smp_processor_id();

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, func, data);
	}
	/* Run the work on the calling CPU as well. */
	func(data);

	/*
	 * Register as a waiter on every other CPU (for deadlock
	 * detection) before waiting for all of them to go idle.
	 */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		smp_wait_for_event();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
227