xref: /kvm-unit-tests/lib/arm/smp.c (revision 2c96b77ec9d3b1fcec7525174e23a6240ee05949)
1 /*
2  * Secondary cpu support
3  *
4  * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 #include <libcflat.h>
9 #include <auxinfo.h>
10 #include <asm/thread_info.h>
11 #include <asm/spinlock.h>
12 #include <asm/cpumask.h>
13 #include <asm/barrier.h>
14 #include <asm/mmu.h>
15 #include <asm/psci.h>
16 #include <asm/smp.h>
17 
/* Set by CPU0 when it enters do_idle(); tells on_cpu_async() that waiting
 * on CPU0 to go idle is intentional rather than a likely hang. */
bool cpu0_calls_idle;

cpumask_t cpu_present_mask;	/* CPUs present on this machine */
cpumask_t cpu_online_mask;	/* CPUs that have been booted */
cpumask_t cpu_idle_mask;	/* CPUs currently parked in do_idle() */

/*
 * Boot parameters handed to a freshly started secondary CPU.
 * NOTE(review): 'stack' presumably is loaded by the assembly entry stub at
 * a fixed (zero) offset — hence the "must be first" constraint; confirm
 * against the startup assembly.
 */
struct secondary_data {
	void *stack;            /* must be first member of struct */
	secondary_entry_fn entry;
};
struct secondary_data secondary_data;	/* shared; use serialized by 'lock' */
static struct spinlock lock;	/* guards secondary boot and on_cpu_info handoff */
30 
/* Needed to compile with -Wmissing-prototypes */
secondary_entry_fn secondary_cinit(void);

/*
 * C-level init for a secondary CPU, reached from the assembly entry stub.
 * Initializes this CPU's thread_info, attaches it to the identity page
 * table unless the test runs with the MMU off (AUXINFO_MMU_OFF), marks
 * the CPU online, and returns the test's entry function so the stub can
 * invoke it on an empty stack.
 */
secondary_entry_fn secondary_cinit(void)
{
	struct thread_info *ti = current_thread_info();
	secondary_entry_fn entry;

	thread_info_init(ti, 0);

	if (!(auxinfo.flags & AUXINFO_MMU_OFF)) {
		ti->pgtable = mmu_idmap;
		mmu_mark_enabled(ti->cpu);
	}

	/*
	 * Save secondary_data.entry locally to avoid opening a race
	 * window between marking ourselves online and calling it.
	 */
	entry = secondary_data.entry;
	set_cpu_online(ti->cpu, true);
	sev();	/* wake the booter wfe()-polling cpu_online() in __smp_boot_secondary() */

	/*
	 * Return to the assembly stub, allowing entry to be called
	 * from there with an empty stack.
	 */
	return entry;
}
60 
/*
 * Boot one secondary CPU via PSCI and wait until it marks itself online.
 * Caller must hold 'lock' (both callers do), since the shared
 * secondary_data mailbox is reused for every boot.
 */
static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	int ret;

	/* Hand the new CPU its stack and entry point through secondary_data. */
	secondary_data.stack = thread_stack_alloc();
	secondary_data.entry = entry;
	mmu_mark_disabled(cpu);	/* the new CPU starts executing with the MMU off */
	ret = cpu_psci_cpu_boot(cpu);
	assert(ret == 0);

	/* secondary_cinit() issues sev() right after setting the online bit. */
	while (!cpu_online(cpu))
		wfe();
}
74 
/*
 * Boot secondary CPU 'cpu' and have it run 'entry'.
 * Each CPU may only be booted once; asserts if 'cpu' is already online.
 */
void smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	spin_lock(&lock);
	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
	__smp_boot_secondary(cpu, entry);
	spin_unlock(&lock);
}
82 
/*
 * Per-CPU mailbox for cross-CPU calls: the function to run and its
 * argument (filled by on_cpu_async(), consumed by do_idle()), plus the
 * set of CPUs currently waiting on this CPU, tracked so deadlock_check()
 * can detect wait cycles.
 */
struct on_cpu_info {
	void (*func)(void *data);
	void *data;
	cpumask_t waiters;
};
static struct on_cpu_info on_cpu_info[NR_CPUS];
89 
90 static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
91 {
92 	int i;
93 
94 	for_each_cpu(i, waiters) {
95 		if (i == cpu) {
96 			printf("CPU%d", cpu);
97 			*found = true;
98 			return;
99 		}
100 		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
101 		if (*found) {
102 			printf(" <=> CPU%d", i);
103 			return;
104 		}
105 	}
106 }
107 
108 static void deadlock_check(int me, int cpu)
109 {
110 	bool found = false;
111 
112 	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
113 	if (found) {
114 		printf(" <=> CPU%d deadlock detectd\n", me);
115 		assert(0);
116 	}
117 }
118 
119 static void cpu_wait(int cpu)
120 {
121 	int me = smp_processor_id();
122 
123 	if (cpu == me)
124 		return;
125 
126 	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
127 	deadlock_check(me, cpu);
128 	while (!cpu_idle(cpu))
129 		wfe();
130 	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
131 }
132 
/*
 * Park the calling CPU: mark it idle and loop forever, executing each
 * function that on_cpu_async() places in this CPU's on_cpu_info slot.
 * Never returns.
 */
void do_idle(void)
{
	int cpu = smp_processor_id();

	if (cpu == 0)
		cpu0_calls_idle = true;	/* see the assert in on_cpu_async() */

	set_cpu_idle(cpu, true);
	sev();	/* wake any CPU wfe()-waiting on our idle bit */

	for (;;) {
		/* on_cpu_async() clears our idle bit after storing func/data */
		while (cpu_idle(cpu))
			wfe();
		smp_rmb();	/* read func/data only after observing !idle */
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;
		smp_wmb();	/* publish func = NULL before raising the idle bit */
		set_cpu_idle(cpu, true);
		sev();
	}
}
154 
/*
 * Asynchronously run func(data) on 'cpu'; runs it synchronously when
 * 'cpu' is the calling CPU. Boots the target into do_idle() if it is
 * not yet online, waits for any previously queued function to be
 * consumed, then hands off func/data and kicks the target out of idle.
 * Returns without waiting for func to finish (use on_cpu() for that).
 */
void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle, "Waiting on CPU0, which is unlikely to idle. "
						"If this is intended set cpu0_calls_idle=1");

	spin_lock(&lock);
	if (!cpu_online(cpu))
		__smp_boot_secondary(cpu, do_idle);
	spin_unlock(&lock);

	/*
	 * Wait for the target's mailbox to be free (do_idle() sets func
	 * back to NULL once it has run the previous call). We break out
	 * of the loop still holding the lock, so no other CPU can claim
	 * the slot before we fill it below.
	 */
	for (;;) {
		cpu_wait(cpu);
		spin_lock(&lock);
		if ((volatile void *)on_cpu_info[cpu].func == NULL)
			break;
		spin_unlock(&lock);
	}
	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	/* NOTE(review): ordering of the func/data stores before clearing the
	 * idle bit presumably relies on spin_unlock()'s barrier semantics
	 * (pairs with the smp_rmb() in do_idle()) — confirm. */
	spin_unlock(&lock);
	set_cpu_idle(cpu, false);	/* releases do_idle()'s wfe() loop */
	sev();
}
183 
/*
 * Run func(data) on 'cpu' and wait for it to complete.
 */
void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);	/* blocks until 'cpu' has gone idle again */
}
189 
/*
 * Run func(data) on every present CPU, including the caller, and wait
 * for all of them to finish.
 */
void on_cpus(void (*func)(void *data), void *data)
{
	int cpu, me = smp_processor_id();

	/* Queue the work on everyone else, then do our own share. */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, func, data);
	}
	func(data);

	/* Register as a waiter on each other CPU so wait cycles are caught. */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	/* Wait until every CPU except us is idle again. */
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		wfe();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
212