xref: /kvm-unit-tests/lib/on-cpus.c (revision b2d54669c9111da6dfbcacd83ebcda96fd26e413)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * on_cpus() support based on cpumasks.
 *
 * Copyright (C) 2015, Red Hat Inc, Andrew Jones <drjones@redhat.com>
 */
#include <libcflat.h>
#include <cpumask.h>
#include <on-cpus.h>
#include <asm/barrier.h>
#include <asm/smp.h>

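/*
 * Set by do_idle() when CPU0 enters the idle loop. Tests that target CPU0
 * with on_cpu*() must either have CPU0 call do_idle() or set this flag
 * themselves (see the assert in on_cpu_async()).
 */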
bool cpu0_calls_idle;

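/*
 * Per-CPU dispatch state: the function and argument queued for a CPU, plus
 * the mask of CPUs currently waiting on that CPU (used for deadlock
 * detection). on_cpu_info_lock provides one try-lock bit per CPU.
 */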
struct on_cpu_info {
	void (*func)(void *data);
	void *data;
	cpumask_t waiters;
};
static struct on_cpu_info on_cpu_info[NR_CPUS];
static cpumask_t on_cpu_info_lock;

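/* Try to lock (returns true on success) or unlock a CPU's on_cpu_info entry. */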
static bool get_on_cpu_info(int cpu)
{
	return !cpumask_test_and_set_cpu(cpu, &on_cpu_info_lock);
}

static void put_on_cpu_info(int cpu)
{
	int ret = cpumask_test_and_clear_cpu(cpu, &on_cpu_info_lock);
	assert(ret);
}

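/*
 * deadlock_check(me, cpu) is called after 'me' has registered itself as a
 * waiter on 'cpu'. It walks the CPUs waiting, directly or transitively, on
 * 'me'; if 'cpu' is found among them the two CPUs are waiting on each other,
 * so print the cycle and assert.
 */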
static void __deadlock_check(int cpu, const cpumask_t *waiters, bool *found)
{
	int i;

	for_each_cpu(i, waiters) {
		if (i == cpu) {
			printf("CPU%d", cpu);
			*found = true;
			return;
		}
		__deadlock_check(cpu, &on_cpu_info[i].waiters, found);
		if (*found) {
			printf(" <=> CPU%d", i);
			return;
		}
	}
}

static void deadlock_check(int me, int cpu)
{
	bool found = false;

	__deadlock_check(cpu, &on_cpu_info[me].waiters, &found);
	if (found) {
		printf(" <=> CPU%d deadlock detected\n", me);
		assert(0);
	}
}

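/*
 * Wait for 'cpu' to become idle. The caller first registers itself in the
 * target's waiters mask so that circular waits are caught by deadlock_check().
 */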
static void cpu_wait(int cpu)
{
	int me = smp_processor_id();

	if (cpu == me)
		return;

	cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
	deadlock_check(me, cpu);
	while (!cpu_idle(cpu))
		smp_wait_for_event();
	cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}

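/*
 * Idle loop run by secondary CPUs (and by CPU0 when the test arranges it):
 * advertise idle, wait for work queued by on_cpu_async(), run it, then go
 * idle again. The smp_rmb() orders the read of the idle flag before the
 * reads of func and data; the smp_wmb() orders the func = NULL store before
 * idle is re-advertised.
 */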
void do_idle(void)
{
	int cpu = smp_processor_id();

	if (cpu == 0)
		cpu0_calls_idle = true;

	set_cpu_idle(cpu, true);
	smp_send_event();

	for (;;) {
		while (cpu_idle(cpu))
			smp_wait_for_event();
		smp_rmb();
		on_cpu_info[cpu].func(on_cpu_info[cpu].data);
		on_cpu_info[cpu].func = NULL;
		smp_wmb();
		set_cpu_idle(cpu, true);
		smp_send_event();
	}
}

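/*
 * Queue func(data) to run on 'cpu' without waiting for it to finish (unless
 * 'cpu' is the calling CPU, in which case func runs synchronously). The
 * target is booted into do_idle() if necessary, and any previously queued
 * request is allowed to complete before the new one is installed.
 */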
void on_cpu_async(int cpu, void (*func)(void *data), void *data)
{
	if (cpu == smp_processor_id()) {
		func(data);
		return;
	}

	assert_msg(cpu != 0 || cpu0_calls_idle, "Waiting on CPU0, which is unlikely to idle. "
						"If this is intended, set cpu0_calls_idle=1");

	smp_boot_secondary_nofail(cpu, do_idle);

	for (;;) {
		cpu_wait(cpu);
		if (get_on_cpu_info(cpu)) {
			if ((volatile void *)on_cpu_info[cpu].func == NULL)
				break;
			put_on_cpu_info(cpu);
		}
	}

	on_cpu_info[cpu].func = func;
	on_cpu_info[cpu].data = data;
	set_cpu_idle(cpu, false);
	put_on_cpu_info(cpu);
	smp_send_event();
}

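/* Run func(data) on 'cpu' and wait for it to complete. */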
void on_cpu(int cpu, void (*func)(void *data), void *data)
{
	on_cpu_async(cpu, func, data);
	cpu_wait(cpu);
}

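/*
 * Run func(data) on every present CPU, including the calling one, and wait
 * for all of them to finish before returning.
 */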
void on_cpus(void (*func)(void *data), void *data)
{
	int cpu, me = smp_processor_id();

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		on_cpu_async(cpu, func, data);
	}
	func(data);

	for_each_present_cpu(cpu) {
		if (cpu == me)
			continue;
		cpumask_set_cpu(me, &on_cpu_info[cpu].waiters);
		deadlock_check(me, cpu);
	}
	while (cpumask_weight(&cpu_idle_mask) < nr_cpus - 1)
		smp_wait_for_event();
	for_each_present_cpu(cpu)
		cpumask_clear_cpu(me, &on_cpu_info[cpu].waiters);
}
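/*
 * Usage sketch (hypothetical test code, not part of this file): a test
 * running on the boot CPU could dispatch work like this.
 *
 *	static int counter[NR_CPUS];
 *
 *	static void bump(void *data)
 *	{
 *		counter[smp_processor_id()]++;
 *	}
 *
 *	on_cpu(1, bump, NULL);		// run bump() on CPU1 and wait
 *	on_cpu_async(2, bump, NULL);	// queue bump() on CPU2, don't wait
 *	on_cpus(bump, NULL);		// run bump() on every present CPU
 */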
155