xref: /kvm-unit-tests/lib/arm/smp.c (revision 71a6a145226927f50d938b0f2befc24363a496bc)
1 /*
2  * Secondary cpu support
3  *
4  * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
5  *
6  * This work is licensed under the terms of the GNU LGPL, version 2.
7  */
8 #include <libcflat.h>
9 #include <asm/thread_info.h>
10 #include <asm/spinlock.h>
11 #include <asm/cpumask.h>
12 #include <asm/barrier.h>
13 #include <asm/mmu.h>
14 #include <asm/psci.h>
15 #include <asm/smp.h>
16 
/*
 * Set by CPU0 when it enters do_idle(); on_cpu_async() asserts on this
 * to catch callers waiting on a CPU0 that never intends to idle.
 */
bool cpu0_calls_idle;

cpumask_t cpu_present_mask;	/* CPUs known to the test (presumably populated at setup — confirm) */
cpumask_t cpu_online_mask;	/* CPUs that have completed secondary boot */
cpumask_t cpu_idle_mask;	/* CPUs currently parked in do_idle() */

/* Handoff area for a booting secondary CPU, consumed by the asm stub. */
struct secondary_data {
	void *stack;            /* must be first member of struct */
	secondary_entry_fn entry;
};
struct secondary_data secondary_data;
/* Serializes secondary boots and the on_cpu_info mailboxes. */
static struct spinlock lock;
29 
/*
 * C entry point for a freshly booted secondary CPU, reached from the
 * assembly boot stub. Initializes this CPU's thread_info, adopts the
 * identity-map page table, records the MMU as enabled for this CPU,
 * then publishes the CPU as online and wakes any waiters via sev().
 *
 * Returns the requested entry function so the asm stub can invoke it
 * on an empty stack.
 */
secondary_entry_fn secondary_cinit(void)
{
	struct thread_info *ti = current_thread_info();
	secondary_entry_fn entry;

	thread_info_init(ti, 0);
	ti->pgtable = mmu_idmap;
	mmu_mark_enabled(ti->cpu);

	/*
	 * Save secondary_data.entry locally to avoid opening a race
	 * window between marking ourselves online and calling it.
	 */
	entry = secondary_data.entry;
	set_cpu_online(ti->cpu, true);
	sev();

	/*
	 * Return to the assembly stub, allowing entry to be called
	 * from there with an empty stack.
	 */
	return entry;
}
53 
/*
 * Boot one secondary CPU via PSCI and wait for it to come online.
 * Caller must hold 'lock': secondary_data is shared with the booting
 * CPU and must not be clobbered by a concurrent boot. The stack and
 * entry point are published before the PSCI call so the new CPU sees
 * them when it starts.
 */
static void __smp_boot_secondary(int cpu, secondary_entry_fn entry)
{
	int ret;

	secondary_data.stack = thread_stack_alloc();
	secondary_data.entry = entry;
	mmu_mark_disabled(cpu);
	ret = cpu_psci_cpu_boot(cpu);
	assert(ret == 0);

	/* The new CPU does sev() after set_cpu_online(), waking us. */
	while (!cpu_online(cpu))
		wfe();
}
67 
68 void smp_boot_secondary(int cpu, secondary_entry_fn entry)
69 {
70 	spin_lock(&lock);
71 	assert_msg(!cpu_online(cpu), "CPU%d already boot once", cpu);
72 	__smp_boot_secondary(cpu, entry);
73 	spin_unlock(&lock);
74 }
75 
/*
 * Per-CPU mailbox for on_cpu_async(): the function and argument the
 * target CPU should run from do_idle(), plus the set of CPUs currently
 * waiting on this CPU (consulted by the deadlock detector).
 */
struct on_cpu_info {
	void (*func)(void *data);
	void *data;
	cpumask_t waiters;
};
static struct on_cpu_info on_cpu_info[NR_CPUS];
82 
/*
 * Depth-first search through the waiter graph for 'cpu'. Starting
 * from 'waiters', follow each waiting CPU's own waiters recursively;
 * if 'cpu' is reached, a cycle exists: set *found and print the cycle
 * one "CPU%d" hop at a time as the recursion unwinds.
 *
 * NOTE(review): recursion depth is bounded by the waiter-chain length;
 * presumably cycles not involving 'cpu' cannot occur because every
 * waiter ran deadlock_check() before blocking — confirm.
 */
static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
{
	int i;

	for_each_cpu(i, waiters) {
		if (i == cpu) {
			printf("CPU%d", cpu);
			*found = true;
			return;
		}
		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
		if (*found) {
			printf(" <=> CPU%d", i);
			return;
		}
	}
}
100 
101 static void deadlock_check(int me, int cpu)
102 {
103 	bool found = false;
104 
105 	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
106 	if (found) {
107 		printf(" <=> CPU%d deadlock detectd\n", me);
108 		assert(0);
109 	}
110 }
111 
/*
 * Block until 'cpu' reports idle (i.e. has finished its current
 * on_cpu_async() work). A no-op when waiting on ourselves. Registers
 * us as a waiter first so deadlock_check() can spot wait cycles, and
 * deregisters on the way out.
 */
static void cpu_wait(int cpu)
{
	int me = smp_processor_id();

	if (cpu == me)
		return;

	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
	deadlock_check(me, cpu);
	/* The target does sev() after set_cpu_idle(), waking us. */
	while (!cpu_idle(cpu))
		wfe();
	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
125 
/*
 * Idle loop for CPUs that service on_cpu_async() requests: park until
 * our idle flag is cleared, run the posted function, then go idle
 * again. Never returns.
 *
 * Ordering: the smp_rmb() after observing !cpu_idle() pairs with the
 * requester publishing func/data before clearing our idle flag; the
 * smp_wmb() ensures our func = NULL store is visible before we
 * re-advertise ourselves as idle.
 */
void do_idle(void)
{
	int cpu = smp_processor_id();

	/* Record that CPU0 idles, so on_cpu_async(0, ...) is legal. */
	if (cpu == 0)
		cpu0_calls_idle = true;

	set_cpu_idle(cpu, true);
	sev();

	for (;;) {
		while (cpu_idle(cpu))
			wfe();
		smp_rmb();
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;
		smp_wmb();
		set_cpu_idle(cpu, true);
		sev();
	}
}
147 
/*
 * Run func(data) on 'cpu' without waiting for it to complete. If the
 * target is ourselves, just call it. Boots the target into do_idle()
 * if it isn't online yet, waits for any previously posted work to
 * drain, then posts the new work and kicks the CPU out of idle.
 */
void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle, "Waiting on CPU0, which is unlikely to idle. "
						"If this is intended set cpu0_calls_idle=1");

	spin_lock(&lock);
	if (!cpu_online(cpu))
		__smp_boot_secondary(cpu, do_idle);
	spin_unlock(&lock);

	/*
	 * Wait for the mailbox to be free. Note the break leaves the
	 * loop with 'lock' HELD, so posting func/data below is atomic
	 * with the emptiness check.
	 */
	for (;;) {
		cpu_wait(cpu);
		spin_lock(&lock);
		if ((volatile void *)on_cpu_info[cpu].func == NULL)
			break;
		spin_unlock(&lock);
	}
	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	spin_unlock(&lock);
	/* Clearing idle releases the target's do_idle() inner loop. */
	set_cpu_idle(cpu, false);
	sev();
}
176 
/*
 * Run func(data) on 'cpu' and wait until it has completed (the target
 * returns to idle).
 */
void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);
}
182 
/*
 * Run func(data) on every present CPU, including the caller, and wait
 * for all of them to finish. Work is posted asynchronously to the
 * other CPUs first, then run locally, then we wait for everyone else
 * to return to idle (nr_cpus - 1 idle CPUs: all but ourselves).
 */
void on_cpus(void (*func)(void *data), void *data)
{
	int cpu, me = smp_processor_id();

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, func, data);
	}
	func(data);

	/* Register as a waiter on every other CPU for cycle detection. */
	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		wfe();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
205