/qemu/io/

task.c
     2  * QEMU I/O task
    22  #include "io/task.h"
    56  QIOTask *task;    (in qio_task_new(), local)
    58  task = g_new0(QIOTask, 1);    (in qio_task_new())
    60  task->source = source;    (in qio_task_new())
    62  task->func = func;    (in qio_task_new())
    63  task->opaque = opaque;    (in qio_task_new())
    64  task->destroy = destroy;    (in qio_task_new())
    65  qemu_mutex_init(&task->thread_lock);    (in qio_task_new())
    66  qemu_cond_init(&task->thread_cond);    (in qio_task_new())
        [all …]

trace-events
     3  # task.c
     4  qio_task_new(void *task, void *source, void *func, void *opaque) "Task new task=%p source=%p func=%…
     5  qio_task_complete(void *task) "Task complete task=%p"
     6  qio_task_thread_start(void *task, void *worker, void *opaque) "Task thread start task=%p worker=%p …
     7  qio_task_thread_run(void *task) "Task thread run task=%p"
     8  qio_task_thread_exit(void *task) "Task thread exit task=%p"
     9  qio_task_thread_result(void *task) "Task thread result task=%p"
    10  qio_task_thread_source_attach(void *task, void *source) "Task thread source attach task=%p source=%…
    11  qio_task_thread_source_cancel(void *task, void *source) "Task thread source cancel task=%p source=%…

channel-tls.c
   151  QIOTask *task;    (member)
   161  QIOTask *task,    (argument in qio_channel_tls_handshake_task())
   171  qio_task_set_error(task, err);    (in qio_channel_tls_handshake_task())
   172  qio_task_complete(task);    (in qio_channel_tls_handshake_task())
   181  qio_task_set_error(task, err);    (in qio_channel_tls_handshake_task())
   185  qio_task_complete(task);    (in qio_channel_tls_handshake_task())
   190  data->task = task;    (in qio_channel_tls_handshake_task())
   220  QIOTask *task = data->task;    (in qio_channel_tls_handshake_io(), local)
   223  qio_task_get_source(task));    (in qio_channel_tls_handshake_io())
   227  qio_channel_tls_handshake_task(tioc, task, context);    (in qio_channel_tls_handshake_io())
        [all …]

dns-resolver.c
   192  static void qio_dns_resolver_lookup_worker(QIOTask *task,    (argument in qio_dns_resolver_lookup_worker())
   195  QIODNSResolver *resolver = QIO_DNS_RESOLVER(qio_task_get_source(task));    (in qio_dns_resolver_lookup_worker())
   205  qio_task_set_error(task, err);    (in qio_dns_resolver_lookup_worker())
   207  qio_task_set_result_pointer(task, opaque, NULL);    (in qio_dns_resolver_lookup_worker())
   220  QIOTask *task;    (in qio_dns_resolver_lookup_async(), local)
   226  task = qio_task_new(OBJECT(resolver), func, opaque, notify);    (in qio_dns_resolver_lookup_async())
   228  qio_task_run_in_thread(task,    (in qio_dns_resolver_lookup_async())
   237  QIOTask *task,    (argument in qio_dns_resolver_lookup_result())
   242  qio_task_get_result_pointer(task);    (in qio_dns_resolver_lookup_result())

channel-socket.c
   195  static void qio_channel_socket_connect_worker(QIOTask *task,    (argument in qio_channel_socket_connect_worker())
   198  QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));    (in qio_channel_socket_connect_worker())
   204  qio_task_set_error(task, err);    (in qio_channel_socket_connect_worker())
   215  QIOTask *task = qio_task_new(    (in qio_channel_socket_connect_async(), local)
   224  qio_task_run_in_thread(task,    (in qio_channel_socket_connect_async())
   270  static void qio_channel_socket_listen_worker(QIOTask *task,    (argument in qio_channel_socket_listen_worker())
   273  QIOChannelSocket *ioc = QIO_CHANNEL_SOCKET(qio_task_get_source(task));    (in qio_channel_socket_listen_worker())
   279  qio_task_set_error(task, err);    (in qio_channel_socket_listen_worker())
   291  QIOTask *task = qio_task_new(    (in qio_channel_socket_listen_async(), local)
   301  qio_task_run_in_thread(task,    (in qio_channel_socket_listen_async())
        [all …]
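
The channel-socket.c and dns-resolver.c matches above show the usual pattern for blocking operations: create a QIOTask with qio_task_new(), then hand a QIOTaskWorker to qio_task_run_in_thread() so the blocking call runs off the main loop, with the task completed back in the caller's context once the worker returns (the qio_task_thread_* trace points above mark that lifecycle). Below is a minimal sketch of the pattern; MyConnectData, my_connect_worker() and the trailing qio_task_run_in_thread() parameters (a destroy notifier for the worker data plus a GMainContext to complete in) are assumptions for illustration, not code taken from the files listed here.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "io/task.h"

    /* Hypothetical state handed to the worker thread. */
    typedef struct MyConnectData {
        char *hostname;
    } MyConnectData;

    static void my_connect_data_free(gpointer opaque)
    {
        MyConnectData *data = opaque;

        g_free(data->hostname);
        g_free(data);
    }

    /* Runs in a worker thread: do the blocking work and report failure
     * via qio_task_set_error(); completion is signalled automatically
     * once the worker returns. */
    static void my_connect_worker(QIOTask *task, gpointer opaque)
    {
        MyConnectData *data = opaque;

        /* Stand-in for a blocking connect()/getaddrinfo() call. */
        if (data->hostname[0] == '\0') {
            Error *err = NULL;

            error_setg(&err, "no hostname given");
            qio_task_set_error(task, err);
        }
    }

    void my_connect_async(Object *src, const char *hostname,
                          QIOTaskFunc func, gpointer opaque)
    {
        MyConnectData *data = g_new0(MyConnectData, 1);
        QIOTask *task = qio_task_new(src, func, opaque, NULL);

        data->hostname = g_strdup(hostname);

        /* Assumed parameter order: worker, worker data, destroy notifier
         * for that data, and the GMainContext to complete in (NULL for
         * the default main context). */
        qio_task_run_in_thread(task, my_connect_worker,
                               data, my_connect_data_free, NULL);
    }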

channel-websock.c
   533  QIOTask *task = user_data;    (in qio_channel_websock_handshake_send(), local)
   535  qio_task_get_source(task));    (in qio_channel_websock_handshake_send())
   546  qio_task_set_error(task, err);    (in qio_channel_websock_handshake_send())
   547  qio_task_complete(task);    (in qio_channel_websock_handshake_send())
   556  qio_task_set_error(task, wioc->io_err);    (in qio_channel_websock_handshake_send())
   558  qio_task_complete(task);    (in qio_channel_websock_handshake_send())
   561  qio_task_complete(task);    (in qio_channel_websock_handshake_send())
   573  QIOTask *task = user_data;    (in qio_channel_websock_handshake_io(), local)
   575  qio_task_get_source(task));    (in qio_channel_websock_handshake_io())
   587  qio_task_set_error(task, err);    (in qio_channel_websock_handshake_io())
        [all …]

/qemu/include/io/

task.h
     2  * QEMU I/O task
    26  typedef void (*QIOTaskFunc)(QIOTask *task,
    29  typedef void (*QIOTaskWorker)(QIOTask *task,
    39  * a public API which accepts a task callback:
    42  * <title>Task function signature</title>
    61  * <title>Task callback implementation</title>
    63  * static void myobject_operation_notify(QIOTask *task,
    67  * if (qio_task_propagate_error(task, &err)) {
    71  * QMyObject *src = QMY_OBJECT(qio_task_get_source(task));
    79  * task wants to set a timer to run once a second checking
        [all …]
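
The task.h documentation excerpted above splits the contract in two: a provider creates a QIOTask, does its work, and completes it; a consumer supplies a QIOTaskFunc that propagates any error and fetches state from the source object. A minimal sketch of both halves, modelled on the myobject_operation_notify() example in those docs; my_operation_notify() and my_object_operation_async() are illustrative names, and QIOTaskFunc is assumed here to take (QIOTask *, gpointer).

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "qemu/error-report.h"
    #include "io/task.h"

    /* Consumer side: completion callback matching QIOTaskFunc. */
    static void my_operation_notify(QIOTask *task, gpointer opaque)
    {
        bool *done = opaque;
        Error *err = NULL;

        if (qio_task_propagate_error(task, &err)) {
            /* Ownership of the error transfers to us on propagation. */
            error_report_err(err);
        } else {
            /* On success, any state lives in the task's source object. */
            Object *src = qio_task_get_source(task);
            (void)src;
        }
        *done = true;
    }

    /* Provider side: create the task, do the work (synchronously here
     * for brevity), then complete it.  qio_task_complete() runs the
     * callback and releases the task. */
    void my_object_operation_async(Object *obj, QIOTaskFunc func,
                                   gpointer opaque)
    {
        QIOTask *task = qio_task_new(obj, func, opaque, NULL);

        /* ... real work goes here; on failure call
         * qio_task_set_error(task, err) before completing ... */

        qio_task_complete(task);
    }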

dns-resolver.h
    26  #include "io/task.h"
    88  * void mylistenresult(QIOTask *task, void *opaque) {
    91  * QIO_DNS_RESOLVER(qio_task_get_source(task);
    96  * if (qio_task_propagate_error(task, &data->err)) {
   100  * qio_dns_resolver_lookup_result(resolver, task,
   203  * @task: the task object to get results for
   215  QIOTask *task,
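
dns-resolver.h documents the same callback shape for address lookups: the resolver is recovered from the task's source object, errors are propagated, and qio_dns_resolver_lookup_result() hands back the resolved addresses. A sketch of such a callback, following the mylistenresult() fragment above; MyLookupData is illustrative, and the count/array output parameters of qio_dns_resolver_lookup_result() are assumptions, not copied from the header.

    #include "qemu/osdep.h"
    #include "qapi/error.h"
    #include "io/dns-resolver.h"

    /* Hypothetical per-lookup state passed as the task's opaque pointer. */
    typedef struct MyLookupData {
        Error *err;
    } MyLookupData;

    /* Completion callback for qio_dns_resolver_lookup_async(). */
    static void my_lookup_done(QIOTask *task, gpointer opaque)
    {
        MyLookupData *data = opaque;
        QIODNSResolver *resolver =
            QIO_DNS_RESOLVER(qio_task_get_source(task));
        size_t naddrs = 0;
        SocketAddress **addrs = NULL;
        size_t i;

        if (qio_task_propagate_error(task, &data->err)) {
            return;  /* lookup failed; data->err now owns the Error */
        }

        /* Assumed output parameters: number of results and an array of
         * SocketAddress copies that the caller then owns and frees. */
        qio_dns_resolver_lookup_result(resolver, task, &naddrs, &addrs);

        for (i = 0; i < naddrs; i++) {
            /* ... use addrs[i], e.g. to open one listening socket each ... */
            qapi_free_SocketAddress(addrs[i]);
        }
        g_free(addrs);
    }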

/qemu/tests/unit/

test-io-task.c
     2  * QEMU I/O task tests
    24  #include "io/task.h"
    55  static void task_callback(QIOTask *task,    (argument in task_callback())
    60  data->source = qio_task_get_source(task);    (in task_callback())
    61  qio_task_propagate_error(task, &data->err);    (in task_callback())
    67  QIOTask *task;    (in test_task_complete(), local)
    72  task = qio_task_new(obj, task_callback, &data, NULL);    (in test_task_complete())
    73  src = qio_task_get_source(task);    (in test_task_complete())
    75  qio_task_complete(task);    (in test_task_complete())
    97  QIOTask *task;    (in test_task_data_free(), local)
        [all …]

/qemu/block/

block-copy.c
    47  static coroutine_fn int block_copy_task_entry(AioTask *task);
    76  * failed task (and untouched if no task failed).
    84  AioTask task;    (member)
   103  * is only set on task creation, so may be read concurrently after creation.
   110  static int64_t task_end(BlockCopyTask *task)    (argument in task_end())
   112  return task->req.offset + task->req.bytes;    (in task_end())
   185  * Search for the first dirty area in offset/bytes range and create task at
   192  BlockCopyTask *task;    (in block_copy_task_create(), local)
   213  task = g_new(BlockCopyTask, 1);    (in block_copy_task_create())
   214  *task = (BlockCopyTask) {    (in block_copy_task_create())
        [all …]

iscsi.c
   113  struct scsi_task *task;    (member)
   125  struct scsi_task *task;    (member)
   163  if (acb->task != NULL) {    (in iscsi_bh_cb())
   164  scsi_free_scsi_task(acb->task);    (in iscsi_bh_cb())
   165  acb->task = NULL;    (in iscsi_bh_cb())
   241  struct scsi_task *task = command_data;    (in iscsi_co_generic_cb(), local)
   246  iTask->task = task;    (in iscsi_co_generic_cb())
   274  int error = iscsi_translate_sense(&task->sense);    (in iscsi_co_generic_cb())
   313  /* If the command callback hasn't been called yet, drop the task */    (in iscsi_abort_task_cb())
   316  iscsi_scsi_cancel_task(iscsi, acb->task);    (in iscsi_abort_task_cb())
        [all …]

nfs.c
   224  static void coroutine_fn nfs_co_init_task(BlockDriverState *bs, NFSRPC *task)    (argument in nfs_co_init_task())
   226  *task = (NFSRPC) {    (in nfs_co_init_task())
   235  NFSRPC *task = opaque;    (in nfs_co_generic_bh_cb(), local)
   237  task->complete = 1;    (in nfs_co_generic_bh_cb())
   238  aio_co_wake(task->co);    (in nfs_co_generic_bh_cb())
   246  NFSRPC *task = private_data;    (in nfs_co_generic_cb(), local)
   247  task->ret = ret;    (in nfs_co_generic_cb())
   248  assert(!task->st);    (in nfs_co_generic_cb())
   249  if (task->ret > 0 && task->iov) {    (in nfs_co_generic_cb())
   250  if (task->ret <= task->iov->size) {    (in nfs_co_generic_cb())
        [all …]

aio_task.c
    39  AioTask *task = opaque;    (in aio_task_co(), local)
    40  AioTaskPool *pool = task->pool;    (in aio_task_co())
    45  task->ret = task->func(task);    (in aio_task_co())
    49  if (task->ret < 0 && pool->status == 0) {    (in aio_task_co())
    50  pool->status = task->ret;    (in aio_task_co())
    53  g_free(task);    (in aio_task_co())
    89  void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task)    (argument in aio_task_pool_start_task())
    93  task->pool = pool;    (in aio_task_pool_start_task())
    94  qemu_coroutine_enter(qemu_coroutine_create(aio_task_co, task));    (in aio_task_pool_start_task())

rbd.c
  1253  RBDTask *task = opaque;    (in qemu_rbd_finish_bh(), local)
  1254  task->complete = true;    (in qemu_rbd_finish_bh())
  1255  aio_co_wake(task->co);    (in qemu_rbd_finish_bh())
  1267  static void qemu_rbd_completion_cb(rbd_completion_t c, RBDTask *task)    (argument in qemu_rbd_completion_cb())
  1269  task->ret = rbd_aio_get_return_value(c);    (in qemu_rbd_completion_cb())
  1271  aio_bh_schedule_oneshot(bdrv_get_aio_context(task->bs),    (in qemu_rbd_completion_cb())
  1272  qemu_rbd_finish_bh, task);    (in qemu_rbd_completion_cb())
  1283  RBDTask task = { .bs = bs, .co = qemu_coroutine_self() };    (in qemu_rbd_start_co(), local)
  1303  r = rbd_aio_create_completion(&task,    (in qemu_rbd_start_co())
  1346  while (!task.complete) {    (in qemu_rbd_start_co())
        [all …]

/qemu/backends/

cryptodev-lkcf.c
   112  CryptoDevLKCFTask *task, *next;    (in cryptodev_lkcf_handle_response(), local)
   122  QSIMPLEQ_FOREACH_SAFE(task, &responses, queue, next) {    (in cryptodev_lkcf_handle_response())
   123  if (task->cb) {    (in cryptodev_lkcf_handle_response())
   124  task->cb(task->opaque, task->status);    (in cryptodev_lkcf_handle_response())
   126  g_free(task);    (in cryptodev_lkcf_handle_response())
   257  CryptoDevLKCFTask *task, *next;    (in cryptodev_lkcf_cleanup(), local)
   269  QSIMPLEQ_FOREACH_SAFE(task, &lkcf->requests, queue, next) {    (in cryptodev_lkcf_cleanup())
   270  if (task->cb) {    (in cryptodev_lkcf_cleanup())
   271  task->cb(task->opaque, task->status);    (in cryptodev_lkcf_cleanup())
   273  g_free(task);    (in cryptodev_lkcf_cleanup())
        [all …]

/qemu/contrib/vhost-user-scsi/

vhost-user-scsi.c
    95  struct scsi_task *task;    (in scsi_task_new(), local)
   100  task = g_new0(struct scsi_task, 1);    (in scsi_task_new())
   101  memcpy(task->cdb, cdb, cdb_len);    (in scsi_task_new())
   102  task->cdb_size = cdb_len;    (in scsi_task_new())
   103  task->xfer_dir = dir;    (in scsi_task_new())
   104  task->expxferlen = xfer_len;    (in scsi_task_new())
   106  return task;    (in scsi_task_new())
   130  struct scsi_task *task;    (in handle_cmd_sync(), local)
   175  task = scsi_task_new(cdb_len, req->cdb, dir, len);    (in handle_cmd_sync())
   178  task->iovector_out.iov = (struct scsi_iovec *)out;    (in handle_cmd_sync())
        [all …]

/qemu/hw/display/

apple-gfx.m
    48  /* ------ PGTask and task operations: new/destroy/map/unmap ------ */
    56  * A "task" in PVG terminology represents a host-virtual contiguous address
    57  * range which is reserved in a large chunk on task creation. The mapMemory
    64  * task memory.
    73  * task, and on which we have thus called memory_region_ref(). There are
    85  PGTask_t *task;
    93  task = g_new0(PGTask_t, 1);
    94  task->s = s;
    95  task->address = task_mem;
    96  task->len = len;
        [all …]

/qemu/python/tests/

protocol.py
    87  await on disconnect(), which awaits the disconnect task and
   115  Run a given coroutine as a task.
   261  This launches a task alongside (most) tests below to confirm that
   275  # Kick the loop and force the task to block on the event.
   340  # Note that accept() cannot be cancelled outright, as it isn't a task.
   341  # However, we can wrap it in a task and cancel *that*.
   343  task = run_as_task(self._hanging_connection(), allow_cancellation=True)
   353  task.cancel()
   354  await task
   362  task = run_as_task(self._hanging_connection())
        [all …]

/qemu/include/block/

aio_task.h
    30  typedef int coroutine_fn (*AioTaskFunc)(AioTask *task);
    40  /* error code of failed task or 0 if all is OK */
    43  /* User provides filled @task, however task->pool will be set automatically */
    44  void coroutine_fn aio_task_pool_start_task(AioTaskPool *pool, AioTask *task);
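
aio_task.h defines a much smaller contract than QIOTask: the caller embeds an AioTask in its own per-request struct, fills in the func pointer, and hands it to aio_task_pool_start_task(), which runs it in a coroutine, records the first negative return value as the pool status, and frees the wrapper when the coroutine ends (see the aio_task.c matches above). A sketch of that embedding pattern, modelled on block-copy.c; MyCopyTask and my_copy_task_entry() are illustrative names, not real QEMU code.

    #include "qemu/osdep.h"
    #include "block/aio_task.h"

    /* Per-request task, embedding AioTask as its first member so the
     * AioTaskFunc can recover the wrapper, as block-copy.c does. */
    typedef struct MyCopyTask {
        AioTask task;           /* keeps the container_of() cast trivial */
        int64_t offset;
        int64_t bytes;
    } MyCopyTask;

    /* Runs in its own coroutine; the return value becomes task->ret and,
     * if negative, the pool's sticky status. */
    static int coroutine_fn my_copy_task_entry(AioTask *task)
    {
        MyCopyTask *t = container_of(task, MyCopyTask, task);

        /* ... do the actual I/O for [t->offset, t->offset + t->bytes) ... */
        (void)t;
        return 0;
    }

    /* Queue one chunk on the pool.  The pool g_free()s the task when its
     * coroutine finishes, so the caller only allocates and fills it in;
     * task->pool is set by aio_task_pool_start_task() itself. */
    static void coroutine_fn my_start_chunk(AioTaskPool *pool,
                                            int64_t offset, int64_t bytes)
    {
        MyCopyTask *t = g_new(MyCopyTask, 1);

        *t = (MyCopyTask) {
            .task.func = my_copy_task_entry,
            .offset = offset,
            .bytes = bytes,
        };

        aio_task_pool_start_task(pool, &t->task);
    }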

thread-pool.h
    56  * Submit a new work (task) for the pool.
    65  * Submit a new work (task) for the pool, making sure it starts getting
    88  * Adjust the maximum number of threads in the pool to give each task its
    89  * own thread (exactly one thread per task).

/qemu/migration/

tls.c
    58  static void migration_tls_incoming_handshake(QIOTask *task,    (argument in migration_tls_incoming_handshake())
    61  QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));    (in migration_tls_incoming_handshake())
    64  if (qio_task_propagate_error(task, &err)) {    (in migration_tls_incoming_handshake())
   101  static void migration_tls_outgoing_handshake(QIOTask *task,    (argument in migration_tls_outgoing_handshake())
   105  QIOChannel *ioc = QIO_CHANNEL(qio_task_get_source(task));    (in migration_tls_outgoing_handshake())
   108  if (qio_task_propagate_error(task, &err)) {    (in migration_tls_outgoing_handshake())

socket.c
    60  static void socket_outgoing_migration(QIOTask *task,    (argument in socket_outgoing_migration())
    64  QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));    (in socket_outgoing_migration())
    67  if (qio_task_propagate_error(task, &err)) {    (in socket_outgoing_migration())

/qemu/python/qemu/qmp/

protocol.py
   236  #: Disconnect task. The disconnect implementation runs in a task
   729  def _paranoid_task_erase(task: Optional['asyncio.Future[_U]']
   731  # Help to erase a task, ENSURING it is fully quiesced first.
   732  assert (task is None) or task.done()
   733  return None if (task and task.done()) else task
   757  It is designed to be called from its task context,
   758  :py:obj:`~AsyncProtocol._dc_task`. By running in its own task,
   764  def _done(task: Optional['asyncio.Future[Any]']) -> bool:
   765  return task is not None and task.done()
   774  # NB: We can't use _bh_tasks to check for premature task
        [all …]

events.py
    93  Using asyncio.Task to concurrently retrieve events
    98  create event handlers by using `asyncio.Task` to create concurrent
   111  task = asyncio.Task(print_events(listener))
   114  task.cancel()
   115  await task
   147  self.task = asyncio.Task(self.print_events)
   153  self.task.cancel()
   154  await self.task
   178  task = asyncio.Task(print_events(qmp.events))
   183  task.cancel()
        [all …]

/qemu/ui/

vnc-ws.c
    28  static void vncws_tls_handshake_done(QIOTask *task,    (argument in vncws_tls_handshake_done())
    34  if (qio_task_propagate_error(task, &err)) {    (in vncws_tls_handshake_done())
    97  static void vncws_handshake_done(QIOTask *task,    (argument in vncws_handshake_done())
   103  if (qio_task_propagate_error(task, &err)) {    (in vncws_handshake_done())