Lines matching "dma requests"

/* DMA helper functions */

#include "system/block-backend.h"
#include "system/dma.h"
#include "qemu/main-loop.h"

/* qemu_sglist_init(): prepare an empty QEMUSGList with room for alloc_hint entries */
    qsg->sg = g_new(ScatterGatherEntry, alloc_hint);
    qsg->nsg = 0;
    qsg->nalloc = alloc_hint;
    qsg->size = 0;
    qsg->as = as;
    qsg->dev = dev;
    object_ref(OBJECT(dev));

/* qemu_sglist_add(): append one (base, len) entry, growing the array when it is full */
    if (qsg->nsg == qsg->nalloc) {
        qsg->nalloc = 2 * qsg->nalloc + 1;
        qsg->sg = g_renew(ScatterGatherEntry, qsg->sg, qsg->nalloc);
    }
    qsg->sg[qsg->nsg].base = base;
    qsg->sg[qsg->nsg].len = len;
    qsg->size += len;
    ++qsg->nsg;

/* qemu_sglist_destroy(): drop the device reference and free the entry array */
    object_unref(OBJECT(qsg->dev));
    g_free(qsg->sg);
    memset(qsg, 0, sizeof(*qsg));
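
/*
 * A minimal usage sketch (not part of this file), assuming the call order
 * qemu_sglist_init(qsg, dev, alloc_hint, as) and qemu_sglist_add(qsg, base, len)
 * implied by the bodies above.  ExampleDesc, desc and n_desc are hypothetical
 * stand-ins for a device's guest-provided descriptors.
 */
typedef struct ExampleDesc {
    dma_addr_t addr;
    dma_addr_t len;
} ExampleDesc;

static void example_build_sglist(QEMUSGList *qsg, DeviceState *dev,
                                 AddressSpace *as,
                                 const ExampleDesc *desc, int n_desc)
{
    int i;

    /* Takes a reference on dev; dropped again by qemu_sglist_destroy() */
    qemu_sglist_init(qsg, dev, n_desc, as);
    for (i = 0; i < n_desc; i++) {
        qemu_sglist_add(qsg, desc[i].addr, desc[i].len);
    }
    /* ... hand qsg to the dma_blk_*() or dma_buf_*() helpers; when the I/O completes: */
    /* qemu_sglist_destroy(qsg); */
}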

/* reschedule_dma(): bottom half that retries the transfer once a mapping becomes available */
    assert(!dbs->acb && dbs->bh);
    qemu_bh_delete(dbs->bh);
    dbs->bh = NULL;
    dma_blk_cb(dbs, 0);

/* dma_blk_unmap(): undo every dma_memory_map() recorded in the request's iovec */
    for (i = 0; i < dbs->iov.niov; ++i) {
        dma_memory_unmap(dbs->sg->as, dbs->iov.iov[i].iov_base,
                         dbs->iov.iov[i].iov_len, dbs->dir,
                         dbs->iov.iov[i].iov_len);
    }
    qemu_iovec_reset(&dbs->iov);

/* dma_complete(): deliver the final status to the caller's callback and release the AIOCB */
    trace_dma_complete(dbs, ret, dbs->common.cb);

    assert(!dbs->acb && !dbs->bh);
    dma_blk_unmap(dbs);
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, ret);
    }
    qemu_iovec_destroy(&dbs->iov);
    qemu_aio_unref(dbs);

/* dma_blk_cb(): map the next chunk of the scatter-gather list and resubmit the block I/O */
    AioContext *ctx = dbs->ctx;

    /* DMAAIOCB is not thread-safe and must be accessed only from dbs->ctx */
    dbs->acb = NULL;
    dbs->offset += dbs->iov.size;

    if (dbs->sg_cur_index == dbs->sg->nsg || ret < 0) {
        dma_complete(dbs, ret);
        return;
    }

    while (dbs->sg_cur_index < dbs->sg->nsg) {
        cur_addr = dbs->sg->sg[dbs->sg_cur_index].base + dbs->sg_cur_byte;
        cur_len = dbs->sg->sg[dbs->sg_cur_index].len - dbs->sg_cur_byte;
        mem = dma_memory_map(dbs->sg->as, cur_addr, &cur_len, dbs->dir,
                             MEMTXATTRS_UNSPECIFIED);
        /*
         * Make reads deterministic in icount mode: Windows sometimes issues
         * disk read requests with overlapping SGs, which leads to
         * non-determinism because the resulting buffer contents may be mixed.
         */
        if (mem && icount_enabled() && dbs->dir == DMA_DIRECTION_FROM_DEVICE) {
            for (i = 0; i < dbs->iov.niov; ++i) {
                if (ranges_overlap((intptr_t)dbs->iov.iov[i].iov_base,
                                   dbs->iov.iov[i].iov_len, (intptr_t)mem,
                                   cur_len)) {
                    dma_memory_unmap(dbs->sg->as, mem, cur_len,
                                     dbs->dir, cur_len);
                    mem = NULL;
                    break;
                }
            }
        }
        if (!mem) {
            break;
        }
        qemu_iovec_add(&dbs->iov, mem, cur_len);
        dbs->sg_cur_byte += cur_len;
        if (dbs->sg_cur_byte == dbs->sg->sg[dbs->sg_cur_index].len) {
            dbs->sg_cur_byte = 0;
            ++dbs->sg_cur_index;
        }
    }

    if (dbs->iov.size == 0) {
        /* Nothing could be mapped yet: retry from a bottom half once a mapping frees up */
        dbs->bh = aio_bh_new(ctx, reschedule_dma, dbs);
        address_space_register_map_client(dbs->sg->as, dbs->bh);
        return;
    }

    if (!QEMU_IS_ALIGNED(dbs->iov.size, dbs->align)) {
        qemu_iovec_discard_back(&dbs->iov,
                                QEMU_ALIGN_DOWN(dbs->iov.size, dbs->align));
    }

    dbs->acb = dbs->io_func(dbs->offset, &dbs->iov,
                            dma_blk_cb, dbs, dbs->io_func_opaque);
    assert(dbs->acb);
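
/*
 * For context: the io_func invoked at the end of dma_blk_cb() has the DMAIOFunc
 * shape declared in include/system/dma.h.  The prototype below is paraphrased
 * from memory, so treat it as an illustration of the call made above (offset,
 * iovec, completion callback, callback opaque, opaque) rather than the
 * authoritative declaration:
 *
 *   typedef BlockAIOCB *DMAIOFunc(int64_t offset, QEMUIOVector *iov,
 *                                 BlockCompletionFunc *cb, void *cb_opaque,
 *                                 void *opaque);
 */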

/* dma_aio_cancel(): cancel an in-flight request or a pending reschedule */
    assert(!(dbs->acb && dbs->bh));
    if (dbs->acb) {
        /* This will invoke dma_blk_cb.  */
        blk_aio_cancel_async(dbs->acb);
        return;
    }

    if (dbs->bh) {
        address_space_unregister_map_client(dbs->sg->as, dbs->bh);
        qemu_bh_delete(dbs->bh);
        dbs->bh = NULL;
    }
    if (dbs->common.cb) {
        dbs->common.cb(dbs->common.opaque, -ECANCELED);
    }

/* dma_blk_io(): set up a DMAAIOCB for the request and kick off the first transfer */
    dbs->acb = NULL;
    dbs->sg = sg;
    dbs->ctx = qemu_get_current_aio_context();
    dbs->offset = offset;
    dbs->align = align;
    dbs->sg_cur_index = 0;
    dbs->sg_cur_byte = 0;
    dbs->dir = dir;
    dbs->io_func = io_func;
    dbs->io_func_opaque = io_func_opaque;
    dbs->bh = NULL;
    qemu_iovec_init(&dbs->iov, sg->nsg);
    dma_blk_cb(dbs, 0);
    return &dbs->common;
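
/*
 * Usage sketch (not from this file), assuming the dma_blk_read() wrapper around
 * dma_blk_io() with parameters (blk, sg, offset, align, cb, opaque); check
 * include/system/dma.h for the exact prototype in your tree.  ExampleRequest,
 * example_read_done() and example_submit_read() are hypothetical.
 */
typedef struct ExampleRequest {
    QEMUSGList sg;
    BlockAIOCB *aiocb;
} ExampleRequest;

static void example_read_done(void *opaque, int ret)
{
    ExampleRequest *req = opaque;

    /* ret < 0 is a -errno value reported by the block layer */
    qemu_sglist_destroy(&req->sg);
    req->aiocb = NULL;
}

static void example_submit_read(BlockBackend *blk, ExampleRequest *req,
                                uint64_t offset)
{
    /* req->sg was filled with qemu_sglist_init()/qemu_sglist_add() */
    req->aiocb = dma_blk_read(blk, &req->sg, offset, BDRV_SECTOR_SIZE,
                              example_read_done, req);
}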

/* dma_buf_rw(): copy between a linear buffer and the sglist, one entry at a time */
    xresidual = sg->size;
    while (len > 0) {
        ScatterGatherEntry entry = sg->sg[sg_cur_index++];
        dma_addr_t xfer = MIN(len, entry.len);
        res |= dma_memory_rw(sg->as, entry.base, ptr, xfer, dir, attrs);
        ptr += xfer;
        len -= xfer;
        xresidual -= xfer;
    }
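
/*
 * Worked example of the copy loop above, with made-up values: for
 * sg = { {base 0x1000, len 0x200}, {base 0x3000, len 0x600} } (so sg->size ==
 * 0x800) and a request of len == 0x700, the first iteration transfers
 * xfer = MIN(0x700, 0x200) = 0x200 bytes at bus address 0x1000, the second
 * transfers MIN(0x500, 0x600) = 0x500 bytes at 0x3000; len then reaches 0 and
 * xresidual is left at 0x800 - 0x700 = 0x100 untransferred bytes.
 */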

/* dma_acct_start(): account the whole sglist size with the block layer's statistics */
    block_acct_start(blk_get_stats(blk), cookie, sg->size, type);

/* dma_aligned_pow2_mask(): biggest power-of-2-aligned mask that fits within [start, end] */
    uint64_t max_mask = UINT64_MAX, addr_mask = end - start;

    if (max_addr_bits != 64) {
        max_mask = (1ULL << max_addr_bits) - 1;
    }
    alignment_mask = start ? (start & -start) - 1 : max_mask;
    /* size-limited case: */
    return (1ULL << (63 - clz64(addr_mask + 1))) - 1;
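
/*
 * Worked example for the size-limited branch above, with made-up values:
 * start = 0x4000, end = 0x4fff, max_addr_bits = 64 gives addr_mask = 0xfff,
 * so addr_mask + 1 = 0x1000, clz64(0x1000) = 51 and the return value is
 * (1ULL << 12) - 1 = 0xfff, i.e. a naturally aligned 4 KiB chunk.  When the
 * alignment of start is the smaller constraint instead, the function returns
 * the alignment_mask computed from start's lowest set bit.
 */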