xref: /kvm-unit-tests/lib/arm/smp.c (revision 4363f1d9a646a5c7ea673bee8fc33ca6f2cddbd8)
1 /*
2  * Secondary cpu support
3  *
4  * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 #include <libcflat.h>
9 #include <asm/thread_info.h>
10 #include <asm/spinlock.h>
11 #include <asm/cpumask.h>
12 #include <asm/barrier.h>
13 #include <asm/mmu.h>
14 #include <asm/psci.h>
15 #include <asm/smp.h>
16 
/* Set by a test that intends to park CPU0 in do_idle(); see on_cpu_async(). */
bool cpu0_calls_idle;

cpumask_t cpu_present_mask;	/* CPUs available to the test */
cpumask_t cpu_online_mask;	/* CPUs that have completed secondary_cinit() */
cpumask_t cpu_idle_mask;	/* CPUs parked in do_idle() awaiting work */

/*
 * Boot parameters handed to a starting secondary CPU: its stack and
 * the entry function it should run once initialized.
 */
struct secondary_data {
	void *stack;            /* must be first member of struct */
	secondary_entry_fn entry;
};
struct secondary_data secondary_data;
/* Serializes secondary boots and the on_cpu_info mailbox updates below. */
static struct spinlock lock;
29 
30 secondary_entry_fn secondary_cinit(void)
31 {
32 	struct thread_info *ti = current_thread_info();
33 	secondary_entry_fn entry;
34 
35 	thread_info_init(ti, 0);
36 	mmu_mark_enabled(ti->cpu);
37 
38 	/*
39 	 * Save secondary_data.entry locally to avoid opening a race
40 	 * window between marking ourselves online and calling it.
41 	 */
42 	entry = secondary_data.entry;
43 	set_cpu_online(ti->cpu, true);
44 	sev();
45 
46 	/*
47 	 * Return to the assembly stub, allowing entry to be called
48 	 * from there with an empty stack.
49 	 */
50 	return entry;
51 }
52 
53 static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
54 {
55 	int ret;
56 
57 	secondary_data.stack = thread_stack_alloc();
58 	secondary_data.entry = entry;
59 	mmu_mark_disabled(cpu);
60 	ret = cpu_psci_cpu_boot(cpu);
61 	assert(ret == 0);
62 
63 	while (!cpu_online(cpu))
64 		wfe();
65 }
66 
67 void smp_boot_secondary(int cpu, secondary_entry_fn entry)
68 {
69 	spin_lock(&lock);
70 	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
71 	__smp_boot_secondary(cpu, entry);
72 	spin_unlock(&lock);
73 }
74 
/*
 * Per-CPU mailbox for on_cpu()/on_cpu_async(): the function and
 * argument the target CPU should run from do_idle(), plus the set
 * of CPUs currently waiting on this CPU (used by deadlock_check()).
 */
struct on_cpu_info {
	void (*func)(void *data);
	void *data;
	cpumask_t waiters;
};
static struct on_cpu_info on_cpu_info[NR_CPUS];
81 
82 static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
83 {
84 	int i;
85 
86 	for_each_cpu(i, waiters) {
87 		if (i == cpu) {
88 			printf("CPU%d", cpu);
89 			*found = true;
90 			return;
91 		}
92 		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
93 		if (*found) {
94 			printf(" <=> CPU%d", i);
95 			return;
96 		}
97 	}
98 }
99 
100 static void deadlock_check(int me, int cpu)
101 {
102 	bool found = false;
103 
104 	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
105 	if (found) {
106 		printf(" <=> CPU%d deadlock detectd\n", me);
107 		assert(0);
108 	}
109 }
110 
111 static void cpu_wait(int cpu)
112 {
113 	int me = smp_processor_id();
114 
115 	if (cpu == me)
116 		return;
117 
118 	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
119 	deadlock_check(me, cpu);
120 	while (!cpu_idle(cpu))
121 		wfe();
122 	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
123 }
124 
/*
 * Idle loop run by CPUs waiting for work. The CPU advertises itself
 * idle, sleeps in wfe(), and whenever it is taken out of the idle
 * mask (by on_cpu_async()) it runs the func/data staged in its
 * on_cpu_info mailbox, then returns to idle. Never returns.
 */
void do_idle(void)
{
	int cpu = smp_processor_id();

	/* Record that CPU0 idles, so on_cpu_async() won't assert on it. */
	if (cpu == 0)
		cpu0_calls_idle = true;

	set_cpu_idle(cpu, true);
	sev();

	for (;;) {
		while (cpu_idle(cpu))
			wfe();
		/* Pairs with the publisher's write ordering in on_cpu_async(). */
		smp_rmb();
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;
		/* Make the NULL'ed func visible before re-advertising idle. */
		smp_wmb();
		set_cpu_idle(cpu, true);
		sev();
	}
}
146 
/*
 * Asynchronously run func(data) on 'cpu', booting it into do_idle()
 * first if necessary. Returns as soon as the work is queued; use
 * on_cpu() or cpu_wait() to wait for completion. Runs func inline
 * when 'cpu' is the calling CPU.
 */
void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle, "Waiting on CPU0, which is unlikely to idle. "
						"If this is intended set cpu0_calls_idle=1");

	spin_lock(&lock);
	if (!cpu_online(cpu))
		__smp_boot_secondary(cpu, do_idle);
	spin_unlock(&lock);

	/*
	 * Wait for any previously queued work to be consumed (func is
	 * NULL'ed by do_idle()), then claim the mailbox while still
	 * holding the lock -- note the loop exits via break with the
	 * lock held.
	 */
	for (;;) {
		cpu_wait(cpu);
		spin_lock(&lock);
		if ((volatile void *)on_cpu_info[cpu].func == NULL)
			break;
		spin_unlock(&lock);
	}
	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	spin_unlock(&lock);
	/* Kick the target out of its idle loop; it reads the mailbox. */
	set_cpu_idle(cpu, false);
	sev();
}
175 
/*
 * Synchronously run func(data) on 'cpu': queue the work, then block
 * until the target CPU has finished it and returned to idle.
 */
void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);
}
181 
/*
 * Run func(data) on every present CPU (including the caller, which
 * runs it inline) and wait for all of them to finish. Registers the
 * caller in each target's waiters mask for deadlock detection while
 * waiting for all other CPUs to return to idle.
 */
void on_cpus(void (*func)(void *data), void *data)
{
	int cpu, me = smp_processor_id();

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, func, data);
	}
	func(data);

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	/* All CPUs but us must be idle before we may return. */
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		wfe();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
204