/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 *  Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "exec/cpu-common.h"
#include "qom/cpu.h"
#include "sysemu/cpus.h"

static QemuMutex qemu_cpu_list_lock;
static QemuCond exclusive_cond;
static QemuCond exclusive_resume;
static QemuCond qemu_work_cond;

static int pending_cpus;
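
/* Summary of how pending_cpus encodes the exclusive-section state.  This
 * is documentation derived from the code below (start_exclusive,
 * end_exclusive and cpu_exec_end), not part of the original file:
 *
 *   0       no exclusive operation in progress
 *   1       an exclusive operation is in progress and no other vCPU
 *           remains inside its execution loop
 *   1 + n   an exclusive operation has been requested and n vCPUs still
 *           have to leave cpu_exec before it can start
 */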

void qemu_init_cpu_list(void)
{
    /* This is needed because qemu_init_cpu_list is also called by the
     * child process in a fork.  */
    pending_cpus = 0;

    qemu_mutex_init(&qemu_cpu_list_lock);
    qemu_cond_init(&exclusive_cond);
    qemu_cond_init(&exclusive_resume);
    qemu_cond_init(&qemu_work_cond);
}

void cpu_list_lock(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
}

void cpu_list_unlock(void)
{
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

static bool cpu_index_auto_assigned;

static int cpu_get_free_index(void)
{
    CPUState *some_cpu;
    int cpu_index = 0;

    cpu_index_auto_assigned = true;
    CPU_FOREACH(some_cpu) {
        cpu_index++;
    }
    return cpu_index;
}

static void finish_safe_work(CPUState *cpu)
{
    cpu_exec_start(cpu);
    cpu_exec_end(cpu);
}

void cpu_list_add(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) {
        cpu->cpu_index = cpu_get_free_index();
        assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX);
    } else {
        assert(!cpu_index_auto_assigned);
    }
    QTAILQ_INSERT_TAIL(&cpus, cpu, node);
    qemu_mutex_unlock(&qemu_cpu_list_lock);

    finish_safe_work(cpu);
}

void cpu_list_remove(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    if (!QTAILQ_IN_USE(cpu, node)) {
        /* there is nothing to undo since cpu_exec_init() hasn't been called */
        qemu_mutex_unlock(&qemu_cpu_list_lock);
        return;
    }

    assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ)));

    QTAILQ_REMOVE(&cpus, cpu, node);
    cpu->cpu_index = UNASSIGNED_CPU_INDEX;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

struct qemu_work_item {
    struct qemu_work_item *next;
    run_on_cpu_func func;
    void *data;
    bool free, exclusive, done;
};
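
/* Notes on the flags above, summarized from queue_work_on_cpu,
 * do_run_on_cpu and process_queued_cpu_work (documentation added here):
 * @free means the item was heap-allocated and is freed by the vCPU thread
 * that runs it; @exclusive wraps the callback in start_exclusive/
 * end_exclusive; @done signals completion back to a synchronous
 * do_run_on_cpu caller waiting on qemu_work_cond.
 */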

static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi)
{
    qemu_mutex_lock(&cpu->work_mutex);
    if (cpu->queued_work_first == NULL) {
        cpu->queued_work_first = wi;
    } else {
        cpu->queued_work_last->next = wi;
    }
    cpu->queued_work_last = wi;
    wi->next = NULL;
    wi->done = false;
    qemu_mutex_unlock(&cpu->work_mutex);

    qemu_cpu_kick(cpu);
}

void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data,
                   QemuMutex *mutex)
{
    struct qemu_work_item wi;

    if (qemu_cpu_is_self(cpu)) {
        func(cpu, data);
        return;
    }

    wi.func = func;
    wi.data = data;
    wi.done = false;
    wi.free = false;
    wi.exclusive = false;

    queue_work_on_cpu(cpu, &wi);
    while (!atomic_mb_read(&wi.done)) {
        CPUState *self_cpu = current_cpu;

        qemu_cond_wait(&qemu_work_cond, mutex);
        current_cpu = self_cpu;
    }
}

void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;

    queue_work_on_cpu(cpu, wi);
}
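
/* Usage sketch (illustrative only; example_work_fn and its argument are
 * hypothetical and not defined in this file).  The callback runs on
 * @cpu's own thread once it is kicked out of its execution loop;
 * async_run_on_cpu returns immediately without waiting for it:
 *
 *     static void example_work_fn(CPUState *cpu, void *data)
 *     {
 *         cpu->halted = (uintptr_t)data;   // executes on cpu's thread
 *     }
 *
 *     async_run_on_cpu(cpu, example_work_fn, (void *)1);
 */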

/* Wait for pending exclusive operations to complete.  The CPU list lock
   must be held.  */
static inline void exclusive_idle(void)
{
    while (pending_cpus) {
        qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock);
    }
}

/* Start an exclusive operation.
   Must only be called from outside cpu_exec.  */
void start_exclusive(void)
{
    CPUState *other_cpu;

    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();

    /* Make all other cpus stop executing.  */
    pending_cpus = 1;
    CPU_FOREACH(other_cpu) {
        if (other_cpu->running) {
            pending_cpus++;
            qemu_cpu_kick(other_cpu);
        }
    }
    while (pending_cpus > 1) {
        qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock);
    }

    /* Can release mutex, no one will enter another exclusive
     * section until end_exclusive resets pending_cpus to 0.
     */
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Finish an exclusive operation.  */
void end_exclusive(void)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    pending_cpus = 0;
    qemu_cond_broadcast(&exclusive_resume);
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}
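
/* Usage sketch for an exclusive operation (illustrative; the body of the
 * critical section is a placeholder).  As the deadlock comment in
 * process_queued_cpu_work explains, the caller should not hold the BQL
 * while waiting in start_exclusive if other vCPUs may be running:
 *
 *     start_exclusive();
 *     // every other vCPU is now outside its execution loop
 *     // ... modify state shared by all vCPUs ...
 *     end_exclusive();
 */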

/* Wait for exclusive ops to finish, and begin cpu execution.  */
void cpu_exec_start(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    exclusive_idle();
    cpu->running = true;
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

/* Mark cpu as not executing, and release pending exclusive ops.  */
void cpu_exec_end(CPUState *cpu)
{
    qemu_mutex_lock(&qemu_cpu_list_lock);
    cpu->running = false;
    if (pending_cpus > 1) {
        pending_cpus--;
        if (pending_cpus == 1) {
            qemu_cond_signal(&exclusive_cond);
        }
    }
    qemu_mutex_unlock(&qemu_cpu_list_lock);
}

void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data)
{
    struct qemu_work_item *wi;

    wi = g_malloc0(sizeof(struct qemu_work_item));
    wi->func = func;
    wi->data = data;
    wi->free = true;
    wi->exclusive = true;

    queue_work_on_cpu(cpu, wi);
}

void process_queued_cpu_work(CPUState *cpu)
{
    struct qemu_work_item *wi;

    if (cpu->queued_work_first == NULL) {
        return;
    }

    qemu_mutex_lock(&cpu->work_mutex);
    while (cpu->queued_work_first != NULL) {
        wi = cpu->queued_work_first;
        cpu->queued_work_first = wi->next;
        if (!cpu->queued_work_first) {
            cpu->queued_work_last = NULL;
        }
        qemu_mutex_unlock(&cpu->work_mutex);
        if (wi->exclusive) {
            /* Running work items outside the BQL avoids the following deadlock:
             * 1) start_exclusive() is called with the BQL taken while another
             * CPU is running; 2) cpu_exec in the other CPU tries to take the
             * BQL, so it goes to sleep; start_exclusive() is sleeping too, so
             * neither CPU can proceed.
             */
            qemu_mutex_unlock_iothread();
            start_exclusive();
            wi->func(cpu, wi->data);
            end_exclusive();
            qemu_mutex_lock_iothread();
        } else {
            wi->func(cpu, wi->data);
        }
        qemu_mutex_lock(&cpu->work_mutex);
        if (wi->free) {
            g_free(wi);
        } else {
            atomic_mb_set(&wi->done, true);
        }
    }
    qemu_mutex_unlock(&cpu->work_mutex);
    qemu_cond_broadcast(&qemu_work_cond);
}
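
/* Usage sketch for async_safe_run_on_cpu (illustrative; the callback name
 * is hypothetical).  The callback is invoked by process_queued_cpu_work
 * between start_exclusive() and end_exclusive(), i.e. with every other
 * vCPU stopped and the BQL dropped, so it may safely modify structures
 * shared by all vCPUs:
 *
 *     static void flush_shared_state(CPUState *cpu, void *data)
 *     {
 *         // all other vCPUs are outside their execution loop here
 *     }
 *
 *     async_safe_run_on_cpu(first_cpu, flush_shared_state, NULL);
 */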