/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "hw/core/cpu.h"
#include "sysemu/cpus.h"
#include "qemu/lockable.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

/* >= 1 if a thread is inside start_exclusive/end_exclusive.  Written
 * under qemu_cpu_list_lock, read with atomic operations.
 */
static int pending_cpus;

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int max_cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        if (some_cpu->cpu_index >= max_cpu_index) {
            max_cpu_index = some_cpu->cpu_index + 1;
        }
    }
    return max_cpu_index;
}

CPUTailQ cpus = QTAILQ_HEAD_INITIALIZER(cpus);

void cpu_list_add(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL_RCU(&cpus, cpu, node);
}

void cpu_list_remove(CPUState *cpu)
{
    QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        return;
    }

    QTAILQ_REMOVE_RCU(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
}

CPUState *qemu_get_cpu(int index)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu->cpu_index == index) {
            return cpu;
        }
    }

    return NULL;
}

/* current CPU in the current thread. It is only valid inside cpu_exec() */
__thread CPUState *current_cpu;

struct qemu_work_item {
    QSIMPLEQ_ENTRY(qemu_work_item) node;
    run_on_cpu_func func;
    run_on_cpu_data data;
    bool free, exclusive, done;
};

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    QSIMPLEQ_INSERT_TAIL(&cpu->work_list, wi, node);
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!qatomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}
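
/*
 * Illustrative note (editor's sketch, not part of the original file):
 * do_run_on_cpu() is the synchronous path behind the run_on_cpu() helpers;
 * the calling thread blocks on qemu_work_cond until the target vCPU has
 * executed the work item.  A caller might look roughly like this, assuming
 * it already holds the mutex it passes in (the BQL in system emulation):
 *
 *     static void set_halted_work(CPUState *cs, run_on_cpu_data data)
 *     {
 *         cs->halted = data.host_int;
 *     }
 *
 *     // hypothetical call site
 *     do_run_on_cpu(cs, set_halted_work, RUN_ON_CPU_HOST_INT(1), mutex);
 */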

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}
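
/*
 * Illustrative note (editor's sketch, not part of the original file): unlike
 * do_run_on_cpu(), async_run_on_cpu() returns immediately; the heap-allocated
 * work item is freed by process_queued_cpu_work() after it runs (wi->free is
 * true), so any pointer handed over via RUN_ON_CPU_HOST_PTR() must remain
 * valid until the target vCPU has drained its work queue.  Sketch of a call
 * with a hypothetical callback name:
 *
 *     async_run_on_cpu(cs, do_reset_work, RUN_ON_CPU_NULL);
 */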

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;
    int running_cpus;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    qatomic_set(&pending_cpus, 1);

    /* Write pending_cpus before reading other_cpu->running.  */
    smp_mb();
    running_cpus = 0;
    CPU_FOREACH(other_cpu) {
        if (qatomic_read(&other_cpu->running)) {
            other_cpu->has_waiter = true;
            running_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }

    qatomic_set(&pending_cpus, running_cpus + 1);
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* We can release the mutex; no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    current_cpu->in_exclusive_context = true;
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    current_cpu->in_exclusive_context = false;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    qatomic_set(&pending_cpus, 0);
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
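
/*
 * Illustrative note (editor's sketch, not part of the original file): a
 * thread that must quiesce every vCPU brackets its critical section with
 * the two calls above, for example:
 *
 *     start_exclusive();
 *     // ... mutate state that no vCPU may observe half-updated ...
 *     end_exclusive();
 *
 * While the section runs, pending_cpus is non-zero, so cpu_exec_start()
 * makes newly arriving vCPUs wait in exclusive_idle() until end_exclusive()
 * broadcasts exclusive_resume.
 */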

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    qatomic_set(&cpu->running, true);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true and pending_cpus >= 1.
     * After taking the lock we'll see cpu->has_waiter == true and run---not
     * for long because start_exclusive kicked us.  cpu_exec_end will
     * decrement pending_cpus and signal the waiter.
     *
     * 2. start_exclusive saw cpu->running == false but pending_cpus >= 1.
     * This includes the case when an exclusive item is running now.
     * Then we'll see cpu->has_waiter == false and wait for the item to
     * complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == true, and it will kick the CPU.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (!cpu->has_waiter) {
            /* Not counted in pending_cpus, so let the exclusive item
             * run.  Clear cpu->running, wait for the exclusive section
             * to finish, then set cpu->running back to true while still
             * holding the lock; no need to check pending_cpus again.
             */
            qatomic_set(&cpu->running, false);
            exclusive_idle();
            /* Now pending_cpus is zero.  */
            qatomic_set(&cpu->running, true);
        } else {
            /* Counted in pending_cpus, go ahead and release the
             * waiter at cpu_exec_end.
             */
        }
    }
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    qatomic_set(&cpu->running, false);

    /* Write cpu->running before reading pending_cpus.  */
    smp_mb();

    /* 1. start_exclusive saw cpu->running == true.  Then it will increment
     * pending_cpus and wait for exclusive_cond.  After taking the lock
     * we'll see cpu->has_waiter == true.
     *
     * 2. start_exclusive saw cpu->running == false but here pending_cpus >= 1.
     * This includes the case when an exclusive item started after setting
     * cpu->running to false and before we read pending_cpus.  Then we'll see
     * cpu->has_waiter == false and not touch pending_cpus.  The next call to
     * cpu_exec_start will run exclusive_idle if still necessary, thus waiting
     * for the item to complete.
     *
     * 3. pending_cpus == 0.  Then start_exclusive is definitely going to
     * see cpu->running == false, and it can ignore this CPU until the
     * next cpu_exec_start.
     */
    if (unlikely(qatomic_read(&pending_cpus))) {
        QEMU_LOCK_GUARD(&qemu_cpu_list_lock);
        if (cpu->has_waiter) {
            cpu->has_waiter = false;
            qatomic_set(&pending_cpus, pending_cpus - 1);
            if (pending_cpus == 1) {
                qemu_cond_signal(&exclusive_cond);
            }
        }
    }
}
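
/*
 * Illustrative note (editor's sketch, not part of the original file): each
 * vCPU thread is expected to wrap guest execution in the pair above, roughly
 * as the execution loops do (details vary between user and system emulation):
 *
 *     cpu_exec_start(cs);
 *     ret = cpu_exec(cs);
 *     cpu_exec_end(cs);
 *     process_queued_cpu_work(cs);
 *
 * This bracketing is what lets start_exclusive() know which vCPUs are
 * currently executing guest code and must be waited for.
 */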

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func,
                           run_on_cpu_data data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}
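
/*
 * Illustrative note (editor's sketch, not part of the original file): "safe"
 * work runs inside start_exclusive()/end_exclusive(), so the callback
 * executes while every other vCPU is stopped; TCG uses this for operations
 * such as flushing the translation buffer.  Sketch of a call with a
 * hypothetical callback name:
 *
 *     async_safe_run_on_cpu(cs, do_flush_work, RUN_ON_CPU_NULL);
 */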

void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    qemu_mutex_lock(&cpu->work_mutex);
    if (QSIMPLEQ_EMPTY(&cpu->work_list)) {
        qemu_mutex_unlock(&cpu->work_mutex);
        return;
    }
    while (!QSIMPLEQ_EMPTY(&cpu->work_list)) {
        wi = QSIMPLEQ_FIRST(&cpu->work_list);
        QSIMPLEQ_REMOVE_HEAD(&cpu->work_list, node);
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            qatomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}