/*
 * CPU thread main loop - common bits for user and system mode emulation
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18267f685bSPaolo Bonzini */ 19267f685bSPaolo Bonzini 20267f685bSPaolo Bonzini #include "qemu/osdep.h" 21267f685bSPaolo Bonzini #include "exec/cpu-common.h" 22267f685bSPaolo Bonzini #include "qom/cpu.h" 23267f685bSPaolo Bonzini #include "sysemu/cpus.h" 24267f685bSPaolo Bonzini 25267f685bSPaolo Bonzini static QemuMutex qemu_cpu_list_lock; 26*ab129972SPaolo Bonzini static QemuCond exclusive_cond; 27*ab129972SPaolo Bonzini static QemuCond exclusive_resume; 28d148d90eSSergey Fedorov static QemuCond qemu_work_cond; 29267f685bSPaolo Bonzini 30*ab129972SPaolo Bonzini static int pending_cpus; 31*ab129972SPaolo Bonzini 32267f685bSPaolo Bonzini void qemu_init_cpu_list(void) 33267f685bSPaolo Bonzini { 34*ab129972SPaolo Bonzini /* This is needed because qemu_init_cpu_list is also called by the 35*ab129972SPaolo Bonzini * child process in a fork. */ 36*ab129972SPaolo Bonzini pending_cpus = 0; 37*ab129972SPaolo Bonzini 38267f685bSPaolo Bonzini qemu_mutex_init(&qemu_cpu_list_lock); 39*ab129972SPaolo Bonzini qemu_cond_init(&exclusive_cond); 40*ab129972SPaolo Bonzini qemu_cond_init(&exclusive_resume); 41d148d90eSSergey Fedorov qemu_cond_init(&qemu_work_cond); 42267f685bSPaolo Bonzini } 43267f685bSPaolo Bonzini 44267f685bSPaolo Bonzini void cpu_list_lock(void) 45267f685bSPaolo Bonzini { 46267f685bSPaolo Bonzini qemu_mutex_lock(&qemu_cpu_list_lock); 47267f685bSPaolo Bonzini } 48267f685bSPaolo Bonzini 49267f685bSPaolo Bonzini void cpu_list_unlock(void) 50267f685bSPaolo Bonzini { 51267f685bSPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 52267f685bSPaolo Bonzini } 53267f685bSPaolo Bonzini 54267f685bSPaolo Bonzini static bool cpu_index_auto_assigned; 55267f685bSPaolo Bonzini 56267f685bSPaolo Bonzini static int cpu_get_free_index(void) 57267f685bSPaolo Bonzini { 58267f685bSPaolo Bonzini CPUState *some_cpu; 59267f685bSPaolo Bonzini int cpu_index = 0; 60267f685bSPaolo Bonzini 61267f685bSPaolo Bonzini cpu_index_auto_assigned = true; 62267f685bSPaolo Bonzini CPU_FOREACH(some_cpu) { 
63267f685bSPaolo Bonzini cpu_index++; 64267f685bSPaolo Bonzini } 65267f685bSPaolo Bonzini return cpu_index; 66267f685bSPaolo Bonzini } 67267f685bSPaolo Bonzini 68*ab129972SPaolo Bonzini static void finish_safe_work(CPUState *cpu) 69*ab129972SPaolo Bonzini { 70*ab129972SPaolo Bonzini cpu_exec_start(cpu); 71*ab129972SPaolo Bonzini cpu_exec_end(cpu); 72*ab129972SPaolo Bonzini } 73*ab129972SPaolo Bonzini 74267f685bSPaolo Bonzini void cpu_list_add(CPUState *cpu) 75267f685bSPaolo Bonzini { 76267f685bSPaolo Bonzini qemu_mutex_lock(&qemu_cpu_list_lock); 77267f685bSPaolo Bonzini if (cpu->cpu_index == UNASSIGNED_CPU_INDEX) { 78267f685bSPaolo Bonzini cpu->cpu_index = cpu_get_free_index(); 79267f685bSPaolo Bonzini assert(cpu->cpu_index != UNASSIGNED_CPU_INDEX); 80267f685bSPaolo Bonzini } else { 81267f685bSPaolo Bonzini assert(!cpu_index_auto_assigned); 82267f685bSPaolo Bonzini } 83267f685bSPaolo Bonzini QTAILQ_INSERT_TAIL(&cpus, cpu, node); 84267f685bSPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 85*ab129972SPaolo Bonzini 86*ab129972SPaolo Bonzini finish_safe_work(cpu); 87267f685bSPaolo Bonzini } 88267f685bSPaolo Bonzini 89267f685bSPaolo Bonzini void cpu_list_remove(CPUState *cpu) 90267f685bSPaolo Bonzini { 91267f685bSPaolo Bonzini qemu_mutex_lock(&qemu_cpu_list_lock); 92267f685bSPaolo Bonzini if (!QTAILQ_IN_USE(cpu, node)) { 93267f685bSPaolo Bonzini /* there is nothing to undo since cpu_exec_init() hasn't been called */ 94267f685bSPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 95267f685bSPaolo Bonzini return; 96267f685bSPaolo Bonzini } 97267f685bSPaolo Bonzini 98267f685bSPaolo Bonzini assert(!(cpu_index_auto_assigned && cpu != QTAILQ_LAST(&cpus, CPUTailQ))); 99267f685bSPaolo Bonzini 100267f685bSPaolo Bonzini QTAILQ_REMOVE(&cpus, cpu, node); 101267f685bSPaolo Bonzini cpu->cpu_index = UNASSIGNED_CPU_INDEX; 102267f685bSPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 103267f685bSPaolo Bonzini } 104d148d90eSSergey Fedorov 105d148d90eSSergey Fedorov struct 
qemu_work_item { 106d148d90eSSergey Fedorov struct qemu_work_item *next; 107d148d90eSSergey Fedorov run_on_cpu_func func; 108d148d90eSSergey Fedorov void *data; 1090e55539cSPaolo Bonzini bool free, done; 110d148d90eSSergey Fedorov }; 111d148d90eSSergey Fedorov 112d148d90eSSergey Fedorov static void queue_work_on_cpu(CPUState *cpu, struct qemu_work_item *wi) 113d148d90eSSergey Fedorov { 114d148d90eSSergey Fedorov qemu_mutex_lock(&cpu->work_mutex); 115d148d90eSSergey Fedorov if (cpu->queued_work_first == NULL) { 116d148d90eSSergey Fedorov cpu->queued_work_first = wi; 117d148d90eSSergey Fedorov } else { 118d148d90eSSergey Fedorov cpu->queued_work_last->next = wi; 119d148d90eSSergey Fedorov } 120d148d90eSSergey Fedorov cpu->queued_work_last = wi; 121d148d90eSSergey Fedorov wi->next = NULL; 122d148d90eSSergey Fedorov wi->done = false; 123d148d90eSSergey Fedorov qemu_mutex_unlock(&cpu->work_mutex); 124d148d90eSSergey Fedorov 125d148d90eSSergey Fedorov qemu_cpu_kick(cpu); 126d148d90eSSergey Fedorov } 127d148d90eSSergey Fedorov 128d148d90eSSergey Fedorov void do_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data, 129d148d90eSSergey Fedorov QemuMutex *mutex) 130d148d90eSSergey Fedorov { 131d148d90eSSergey Fedorov struct qemu_work_item wi; 132d148d90eSSergey Fedorov 133d148d90eSSergey Fedorov if (qemu_cpu_is_self(cpu)) { 134d148d90eSSergey Fedorov func(cpu, data); 135d148d90eSSergey Fedorov return; 136d148d90eSSergey Fedorov } 137d148d90eSSergey Fedorov 138d148d90eSSergey Fedorov wi.func = func; 139d148d90eSSergey Fedorov wi.data = data; 1400e55539cSPaolo Bonzini wi.done = false; 141d148d90eSSergey Fedorov wi.free = false; 142d148d90eSSergey Fedorov 143d148d90eSSergey Fedorov queue_work_on_cpu(cpu, &wi); 144d148d90eSSergey Fedorov while (!atomic_mb_read(&wi.done)) { 145d148d90eSSergey Fedorov CPUState *self_cpu = current_cpu; 146d148d90eSSergey Fedorov 147d148d90eSSergey Fedorov qemu_cond_wait(&qemu_work_cond, mutex); 148d148d90eSSergey Fedorov current_cpu = 
self_cpu; 149d148d90eSSergey Fedorov } 150d148d90eSSergey Fedorov } 151d148d90eSSergey Fedorov 152d148d90eSSergey Fedorov void async_run_on_cpu(CPUState *cpu, run_on_cpu_func func, void *data) 153d148d90eSSergey Fedorov { 154d148d90eSSergey Fedorov struct qemu_work_item *wi; 155d148d90eSSergey Fedorov 156d148d90eSSergey Fedorov if (qemu_cpu_is_self(cpu)) { 157d148d90eSSergey Fedorov func(cpu, data); 158d148d90eSSergey Fedorov return; 159d148d90eSSergey Fedorov } 160d148d90eSSergey Fedorov 161d148d90eSSergey Fedorov wi = g_malloc0(sizeof(struct qemu_work_item)); 162d148d90eSSergey Fedorov wi->func = func; 163d148d90eSSergey Fedorov wi->data = data; 164d148d90eSSergey Fedorov wi->free = true; 165d148d90eSSergey Fedorov 166d148d90eSSergey Fedorov queue_work_on_cpu(cpu, wi); 167d148d90eSSergey Fedorov } 168d148d90eSSergey Fedorov 169*ab129972SPaolo Bonzini /* Wait for pending exclusive operations to complete. The CPU list lock 170*ab129972SPaolo Bonzini must be held. */ 171*ab129972SPaolo Bonzini static inline void exclusive_idle(void) 172*ab129972SPaolo Bonzini { 173*ab129972SPaolo Bonzini while (pending_cpus) { 174*ab129972SPaolo Bonzini qemu_cond_wait(&exclusive_resume, &qemu_cpu_list_lock); 175*ab129972SPaolo Bonzini } 176*ab129972SPaolo Bonzini } 177*ab129972SPaolo Bonzini 178*ab129972SPaolo Bonzini /* Start an exclusive operation. 179*ab129972SPaolo Bonzini Must only be called from outside cpu_exec, takes 180*ab129972SPaolo Bonzini qemu_cpu_list_lock. */ 181*ab129972SPaolo Bonzini void start_exclusive(void) 182*ab129972SPaolo Bonzini { 183*ab129972SPaolo Bonzini CPUState *other_cpu; 184*ab129972SPaolo Bonzini 185*ab129972SPaolo Bonzini qemu_mutex_lock(&qemu_cpu_list_lock); 186*ab129972SPaolo Bonzini exclusive_idle(); 187*ab129972SPaolo Bonzini 188*ab129972SPaolo Bonzini /* Make all other cpus stop executing. 
*/ 189*ab129972SPaolo Bonzini pending_cpus = 1; 190*ab129972SPaolo Bonzini CPU_FOREACH(other_cpu) { 191*ab129972SPaolo Bonzini if (other_cpu->running) { 192*ab129972SPaolo Bonzini pending_cpus++; 193*ab129972SPaolo Bonzini qemu_cpu_kick(other_cpu); 194*ab129972SPaolo Bonzini } 195*ab129972SPaolo Bonzini } 196*ab129972SPaolo Bonzini while (pending_cpus > 1) { 197*ab129972SPaolo Bonzini qemu_cond_wait(&exclusive_cond, &qemu_cpu_list_lock); 198*ab129972SPaolo Bonzini } 199*ab129972SPaolo Bonzini } 200*ab129972SPaolo Bonzini 201*ab129972SPaolo Bonzini /* Finish an exclusive operation. Releases qemu_cpu_list_lock. */ 202*ab129972SPaolo Bonzini void end_exclusive(void) 203*ab129972SPaolo Bonzini { 204*ab129972SPaolo Bonzini pending_cpus = 0; 205*ab129972SPaolo Bonzini qemu_cond_broadcast(&exclusive_resume); 206*ab129972SPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 207*ab129972SPaolo Bonzini } 208*ab129972SPaolo Bonzini 209*ab129972SPaolo Bonzini /* Wait for exclusive ops to finish, and begin cpu execution. */ 210*ab129972SPaolo Bonzini void cpu_exec_start(CPUState *cpu) 211*ab129972SPaolo Bonzini { 212*ab129972SPaolo Bonzini qemu_mutex_lock(&qemu_cpu_list_lock); 213*ab129972SPaolo Bonzini exclusive_idle(); 214*ab129972SPaolo Bonzini cpu->running = true; 215*ab129972SPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 216*ab129972SPaolo Bonzini } 217*ab129972SPaolo Bonzini 218*ab129972SPaolo Bonzini /* Mark cpu as not executing, and release pending exclusive ops. 
*/ 219*ab129972SPaolo Bonzini void cpu_exec_end(CPUState *cpu) 220*ab129972SPaolo Bonzini { 221*ab129972SPaolo Bonzini qemu_mutex_lock(&qemu_cpu_list_lock); 222*ab129972SPaolo Bonzini cpu->running = false; 223*ab129972SPaolo Bonzini if (pending_cpus > 1) { 224*ab129972SPaolo Bonzini pending_cpus--; 225*ab129972SPaolo Bonzini if (pending_cpus == 1) { 226*ab129972SPaolo Bonzini qemu_cond_signal(&exclusive_cond); 227*ab129972SPaolo Bonzini } 228*ab129972SPaolo Bonzini } 229*ab129972SPaolo Bonzini exclusive_idle(); 230*ab129972SPaolo Bonzini qemu_mutex_unlock(&qemu_cpu_list_lock); 231*ab129972SPaolo Bonzini } 232*ab129972SPaolo Bonzini 233d148d90eSSergey Fedorov void process_queued_cpu_work(CPUState *cpu) 234d148d90eSSergey Fedorov { 235d148d90eSSergey Fedorov struct qemu_work_item *wi; 236d148d90eSSergey Fedorov 237d148d90eSSergey Fedorov if (cpu->queued_work_first == NULL) { 238d148d90eSSergey Fedorov return; 239d148d90eSSergey Fedorov } 240d148d90eSSergey Fedorov 241d148d90eSSergey Fedorov qemu_mutex_lock(&cpu->work_mutex); 242d148d90eSSergey Fedorov while (cpu->queued_work_first != NULL) { 243d148d90eSSergey Fedorov wi = cpu->queued_work_first; 244d148d90eSSergey Fedorov cpu->queued_work_first = wi->next; 245d148d90eSSergey Fedorov if (!cpu->queued_work_first) { 246d148d90eSSergey Fedorov cpu->queued_work_last = NULL; 247d148d90eSSergey Fedorov } 248d148d90eSSergey Fedorov qemu_mutex_unlock(&cpu->work_mutex); 249d148d90eSSergey Fedorov wi->func(cpu, wi->data); 250d148d90eSSergey Fedorov qemu_mutex_lock(&cpu->work_mutex); 251d148d90eSSergey Fedorov if (wi->free) { 252d148d90eSSergey Fedorov g_free(wi); 253d148d90eSSergey Fedorov } else { 254d148d90eSSergey Fedorov atomic_mb_set(&wi->done, true); 255d148d90eSSergey Fedorov } 256d148d90eSSergey Fedorov } 257d148d90eSSergey Fedorov qemu_mutex_unlock(&cpu->work_mutex); 258d148d90eSSergey Fedorov qemu_cond_broadcast(&qemu_work_cond); 259d148d90eSSergey Fedorov } 260