Lines Matching +full:pull +full:- +full:requests

17  * Contributions after 2012-01-13 are licensed under the terms of the
22 #include "qemu/defer-call.h"
23 #include "qemu/error-report.h"
24 #include "qemu/main-loop.h"
30 #include "system/block-backend.h"
32 #include "xen-block.h"
74 memset(&request->req, 0, sizeof(request->req)); in reset_request()
75 request->status = 0; in reset_request()
76 request->start = 0; in reset_request()
77 request->size = 0; in reset_request()
78 request->presync = 0; in reset_request()
80 request->aio_inflight = 0; in reset_request()
81 request->aio_errors = 0; in reset_request()
83 request->dataplane = NULL; in reset_request()
84 memset(&request->list, 0, sizeof(request->list)); in reset_request()
85 memset(&request->acct, 0, sizeof(request->acct)); in reset_request()
87 qemu_iovec_reset(&request->v); in reset_request()
94 if (QLIST_EMPTY(&dataplane->freelist)) { in xen_block_start_request()
95 if (dataplane->requests_total >= dataplane->max_requests) { in xen_block_start_request()
100 request->dataplane = dataplane; in xen_block_start_request()
102  * We can never need more pages per request than this, and since we in xen_block_start_request()
103 * re-use requests, allocate the memory once here. It will be freed in xen_block_start_request()
106 request->buf = qemu_memalign(XEN_PAGE_SIZE, in xen_block_start_request()
109 dataplane->requests_total++; in xen_block_start_request()
110 qemu_iovec_init(&request->v, 1); in xen_block_start_request()
113 request = QLIST_FIRST(&dataplane->freelist); in xen_block_start_request()
116 QLIST_INSERT_HEAD(&dataplane->inflight, request, list); in xen_block_start_request()
117 dataplane->requests_inflight++; in xen_block_start_request()
125 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_complete_request()
130 xen_device_notify_event_channel(dataplane->xendev, in xen_block_complete_request()
131 dataplane->event_channel, in xen_block_complete_request()
139 dataplane->requests_inflight--; in xen_block_complete_request()
141 request->dataplane = dataplane; in xen_block_complete_request()
142 QLIST_INSERT_HEAD(&dataplane->freelist, request, list); in xen_block_complete_request()
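
The two fragments above show the request pool: xen_block_start_request() reuses a request from the freelist when one is available, otherwise allocates a new one (its page-aligned bounce buffer sized once for the worst case) up to max_requests, and xen_block_complete_request() puts the finished request back on the freelist. A minimal standalone sketch of that reuse pattern, using plain libc allocation and a hand-rolled list instead of QEMU's qemu_memalign()/QLIST helpers; the constants are placeholders, not QEMU's values:

    #include <stdlib.h>

    #define PAGE_SIZE    4096
    #define MAX_SEGS     11      /* at most one page per segment */
    #define MAX_REQUESTS 32

    struct request {
        void *buf;               /* allocated once, reused for the pool's lifetime */
        struct request *next;
    };

    struct pool {
        struct request *freelist;
        unsigned int total;      /* how many requests were ever allocated */
    };

    /* Take a request from the freelist, or allocate a new one up to the limit. */
    static struct request *start_request(struct pool *p)
    {
        struct request *r = p->freelist;

        if (!r) {
            if (p->total >= MAX_REQUESTS) {
                return NULL;     /* the ring cannot have more requests in flight */
            }
            r = calloc(1, sizeof(*r));
            /* A request can never reference more than MAX_SEGS pages, so the
             * buffer is sized for the worst case once and never reallocated. */
            if (!r || posix_memalign(&r->buf, PAGE_SIZE, MAX_SEGS * PAGE_SIZE)) {
                free(r);
                return NULL;
            }
            p->total++;
            return r;
        }
        p->freelist = r->next;
        return r;
    }

    /* Return a finished request to the freelist for reuse. */
    static void complete_request(struct pool *p, struct request *r)
    {
        r->next = p->freelist;
        p->freelist = r;
    }
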
151 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_parse_request()
155 switch (request->req.operation) { in xen_block_parse_request()
159 request->presync = 1; in xen_block_parse_request()
160 if (!request->req.nr_segments) { in xen_block_parse_request()
169 error_report("error: unknown operation (%d)", request->req.operation); in xen_block_parse_request()
173 if (request->req.operation != BLKIF_OP_READ && in xen_block_parse_request()
174 !blk_is_writable(dataplane->blk)) { in xen_block_parse_request()
179 request->start = request->req.sector_number * dataplane->sector_size; in xen_block_parse_request()
180 for (i = 0; i < request->req.nr_segments; i++) { in xen_block_parse_request()
185 if (request->req.seg[i].first_sect > request->req.seg[i].last_sect) { in xen_block_parse_request()
189 if (request->req.seg[i].last_sect * dataplane->sector_size >= in xen_block_parse_request()
195 len = (request->req.seg[i].last_sect - in xen_block_parse_request()
196 request->req.seg[i].first_sect + 1) * dataplane->sector_size; in xen_block_parse_request()
197 request->size += len; in xen_block_parse_request()
199 if (request->start + request->size > blk_getlength(dataplane->blk)) { in xen_block_parse_request()
206 request->status = BLKIF_RSP_ERROR; in xen_block_parse_request()
207 return -1; in xen_block_parse_request()
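
xen_block_parse_request() derives the request's byte offset and total length from its sector-based segments, rejecting backwards segments, segments that spill out of their page, and ranges that run past the backend. A rough standalone sketch of that arithmetic; the structs below only mirror the blkif fields used here and are not the real layouts:

    #include <stdint.h>

    #define PAGE_SIZE 4096
    #define MAX_SEGS  11

    struct seg { uint32_t gref; uint8_t first_sect, last_sect; };

    struct req {
        uint64_t sector_number;
        unsigned int nr_segments;
        struct seg seg[MAX_SEGS];
    };

    /*
     * Compute the byte range a request covers. Returns -1 on a malformed
     * segment or a range past the device; otherwise fills *start / *size.
     */
    static int parse_range(const struct req *r, uint64_t sector_size,
                           int64_t dev_bytes, uint64_t *start, uint64_t *size)
    {
        unsigned int i;

        *start = r->sector_number * sector_size;
        *size = 0;

        for (i = 0; i < r->nr_segments; i++) {
            if (r->seg[i].first_sect > r->seg[i].last_sect) {
                return -1;                       /* backwards segment */
            }
            if (r->seg[i].last_sect * sector_size >= PAGE_SIZE) {
                return -1;                       /* segment spills out of its page */
            }
            *size += (r->seg[i].last_sect - r->seg[i].first_sect + 1) * sector_size;
        }

        if (dev_bytes < 0 || *start + *size > (uint64_t)dev_bytes) {
            return -1;                           /* past the end of the backend */
        }
        return 0;
    }
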
212 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_copy_request()
213 XenDevice *xendev = dataplane->xendev; in xen_block_copy_request()
216 bool to_domain = (request->req.operation == BLKIF_OP_READ); in xen_block_copy_request()
217 void *virt = request->buf; in xen_block_copy_request()
220 if (request->req.nr_segments == 0) { in xen_block_copy_request()
224 count = request->req.nr_segments; in xen_block_copy_request()
228 segs[i].dest.foreign.ref = request->req.seg[i].gref; in xen_block_copy_request()
229 segs[i].dest.foreign.offset = request->req.seg[i].first_sect * in xen_block_copy_request()
230 dataplane->sector_size; in xen_block_copy_request()
233 segs[i].source.foreign.ref = request->req.seg[i].gref; in xen_block_copy_request()
234 segs[i].source.foreign.offset = request->req.seg[i].first_sect * in xen_block_copy_request()
235 dataplane->sector_size; in xen_block_copy_request()
238 segs[i].len = (request->req.seg[i].last_sect - in xen_block_copy_request()
239 request->req.seg[i].first_sect + 1) * in xen_block_copy_request()
240 dataplane->sector_size; in xen_block_copy_request()
249 request->aio_errors++; in xen_block_copy_request()
250 return -1; in xen_block_copy_request()
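
For data transfer, xen_block_copy_request() builds one copy descriptor per segment: a read (to_domain) copies from the local bounce buffer into the guest's grant references, a write copies the other way. The sketch below uses a cut-down descriptor struct as a stand-in for the grant-copy segment type used by QEMU's xen-bus helpers, so the field layout is an assumption; only the fill logic mirrors the fragment above:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    #define SECTOR_SIZE 512

    struct blk_seg { uint32_t gref; uint8_t first_sect, last_sect; };

    /* Cut-down copy descriptor: each end is either local memory or a grant. */
    struct copy_seg {
        struct { void *virt; uint32_t ref; size_t offset; } source, dest;
        size_t len;
    };

    /* Fill one descriptor per segment; to_domain is true for a guest read. */
    static void fill_copy_segs(struct copy_seg *segs, const struct blk_seg *seg,
                               unsigned int nr, void *buf, bool to_domain)
    {
        char *virt = buf;
        unsigned int i;

        for (i = 0; i < nr; i++) {
            size_t len = (size_t)(seg[i].last_sect - seg[i].first_sect + 1) *
                         SECTOR_SIZE;

            if (to_domain) {
                segs[i].dest.ref = seg[i].gref;
                segs[i].dest.offset = seg[i].first_sect * SECTOR_SIZE;
                segs[i].source.virt = virt;      /* data leaves the bounce buffer */
            } else {
                segs[i].source.ref = seg[i].gref;
                segs[i].source.offset = seg[i].first_sect * SECTOR_SIZE;
                segs[i].dest.virt = virt;        /* data lands in the bounce buffer */
            }
            segs[i].len = len;
            virt += len;
        }
    }
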
261 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_complete_aio()
265 request->req.operation == BLKIF_OP_READ ? in xen_block_complete_aio()
267 request->aio_errors++; in xen_block_complete_aio()
270 request->aio_inflight--; in xen_block_complete_aio()
271 if (request->presync) { in xen_block_complete_aio()
272 request->presync = 0; in xen_block_complete_aio()
276 if (request->aio_inflight > 0) { in xen_block_complete_aio()
280 switch (request->req.operation) { in xen_block_complete_aio()
282 /* in case of failure request->aio_errors is increased */ in xen_block_complete_aio()
293 request->status = request->aio_errors ? BLKIF_RSP_ERROR : BLKIF_RSP_OKAY; in xen_block_complete_aio()
295 switch (request->req.operation) { in xen_block_complete_aio()
298 if (!request->req.nr_segments) { in xen_block_complete_aio()
303 if (request->status == BLKIF_RSP_OKAY) { in xen_block_complete_aio()
304 block_acct_done(blk_get_stats(dataplane->blk), &request->acct); in xen_block_complete_aio()
306 block_acct_failed(blk_get_stats(dataplane->blk), &request->acct); in xen_block_complete_aio()
316 if (dataplane->more_work) { in xen_block_complete_aio()
317 qemu_bh_schedule(dataplane->bh); in xen_block_complete_aio()
325 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_split_discard()
334 sec_start + sec_count > INT64_MAX / dataplane->sector_size) { in xen_block_split_discard()
338 byte_offset = sec_start * dataplane->sector_size; in xen_block_split_discard()
339 byte_remaining = sec_count * dataplane->sector_size; in xen_block_split_discard()
344 request->aio_inflight++; in xen_block_split_discard()
345 blk_aio_pdiscard(dataplane->blk, byte_offset, byte_chunk, in xen_block_split_discard()
347 byte_remaining -= byte_chunk; in xen_block_split_discard()
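
xen_block_split_discard() turns one guest discard into a series of blk_aio_pdiscard() calls, each capped at the largest request the block layer accepts, after checking that the sector-to-byte conversion cannot overflow. A minimal sketch of the chunking loop; the submit callback and the cap value are placeholders (the real code uses QEMU's request-size limit, not this constant):

    #include <stdbool.h>
    #include <stdint.h>

    #define MAX_DISCARD_BYTES (1 << 30)   /* placeholder cap */

    /* Split [sec_start, sec_start + sec_count) into bounded byte-sized chunks. */
    static bool split_discard(uint64_t sec_start, uint64_t sec_count,
                              uint64_t sector_size,
                              void (*submit)(int64_t offset, int64_t bytes))
    {
        int64_t byte_offset, byte_chunk;
        uint64_t byte_remaining;

        /* Refuse ranges whose byte size would overflow a signed 64-bit offset. */
        if (sec_start + sec_count < sec_count ||
            sec_start + sec_count > INT64_MAX / sector_size) {
            return false;
        }

        byte_offset = sec_start * sector_size;
        byte_remaining = sec_count * sector_size;

        do {
            byte_chunk = byte_remaining > MAX_DISCARD_BYTES ? MAX_DISCARD_BYTES
                                                            : byte_remaining;
            submit(byte_offset, byte_chunk);
            byte_offset += byte_chunk;
            byte_remaining -= byte_chunk;
        } while (byte_remaining > 0);

        return true;
    }
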
356 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_do_aio()
358 if (request->req.nr_segments && in xen_block_do_aio()
359 (request->req.operation == BLKIF_OP_WRITE || in xen_block_do_aio()
360 request->req.operation == BLKIF_OP_FLUSH_DISKCACHE) && in xen_block_do_aio()
365 request->aio_inflight++; in xen_block_do_aio()
366 if (request->presync) { in xen_block_do_aio()
367 blk_aio_flush(request->dataplane->blk, xen_block_complete_aio, in xen_block_do_aio()
372 switch (request->req.operation) { in xen_block_do_aio()
374 qemu_iovec_add(&request->v, request->buf, request->size); in xen_block_do_aio()
375 block_acct_start(blk_get_stats(dataplane->blk), &request->acct, in xen_block_do_aio()
376 request->v.size, BLOCK_ACCT_READ); in xen_block_do_aio()
377 request->aio_inflight++; in xen_block_do_aio()
378 blk_aio_preadv(dataplane->blk, request->start, &request->v, 0, in xen_block_do_aio()
383 if (!request->req.nr_segments) { in xen_block_do_aio()
387 qemu_iovec_add(&request->v, request->buf, request->size); in xen_block_do_aio()
388 block_acct_start(blk_get_stats(dataplane->blk), &request->acct, in xen_block_do_aio()
389 request->v.size, in xen_block_do_aio()
390 request->req.operation == BLKIF_OP_WRITE ? in xen_block_do_aio()
392 request->aio_inflight++; in xen_block_do_aio()
393 blk_aio_pwritev(dataplane->blk, request->start, &request->v, 0, in xen_block_do_aio()
398 struct blkif_request_discard *req = (void *)&request->req; in xen_block_do_aio()
399 if (!xen_block_split_discard(request, req->sector_number, in xen_block_do_aio()
400 req->nr_sectors)) { in xen_block_do_aio()
406 /* unknown operation (shouldn't happen -- parse catches this) */ in xen_block_do_aio()
415 request->status = BLKIF_RSP_ERROR; in xen_block_do_aio()
417 return -1; in xen_block_do_aio()
422 XenBlockDataPlane *dataplane = request->dataplane; in xen_block_send_response()
428 switch (dataplane->protocol) { in xen_block_send_response()
431 &dataplane->rings.native, in xen_block_send_response()
432 dataplane->rings.native.rsp_prod_pvt); in xen_block_send_response()
436 &dataplane->rings.x86_32_part, in xen_block_send_response()
437 dataplane->rings.x86_32_part.rsp_prod_pvt); in xen_block_send_response()
441 &dataplane->rings.x86_64_part, in xen_block_send_response()
442 dataplane->rings.x86_64_part.rsp_prod_pvt); in xen_block_send_response()
448 resp->id = request->req.id; in xen_block_send_response()
449 resp->operation = request->req.operation; in xen_block_send_response()
450 resp->status = request->status; in xen_block_send_response()
452 dataplane->rings.common.rsp_prod_pvt++; in xen_block_send_response()
454 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&dataplane->rings.common, in xen_block_send_response()
456 if (dataplane->rings.common.rsp_prod_pvt == in xen_block_send_response()
457 dataplane->rings.common.req_cons) { in xen_block_send_response()
459 * Tail check for pending requests. Allows frontend to avoid in xen_block_send_response()
460 * notifications if requests are already in flight (lower in xen_block_send_response()
463 RING_FINAL_CHECK_FOR_REQUESTS(&dataplane->rings.common, in xen_block_send_response()
465 } else if (RING_HAS_UNCONSUMED_REQUESTS(&dataplane->rings.common)) { in xen_block_send_response()
470 dataplane->more_work++; in xen_block_send_response()
478 switch (dataplane->protocol) { in xen_block_get_request()
481 RING_GET_REQUEST(&dataplane->rings.native, rc); in xen_block_get_request()
483 memcpy(&request->req, req, sizeof(request->req)); in xen_block_get_request()
488 RING_GET_REQUEST(&dataplane->rings.x86_32_part, rc); in xen_block_get_request()
490 blkif_get_x86_32_req(&request->req, req); in xen_block_get_request()
495 RING_GET_REQUEST(&dataplane->rings.x86_64_part, rc); in xen_block_get_request()
497 blkif_get_x86_64_req(&request->req, req); in xen_block_get_request()
501 /* Prevent the compiler from accessing the on-ring fields instead. */ in xen_block_get_request()
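
Whatever the ring layout, the request is copied (or converted, for the 32/64-bit compat layouts) out of the shared ring into private memory, and a compiler barrier then keeps later code from being optimised into re-reading the ring slot, which the frontend can rewrite at any time. A hedged sketch of the copy-then-barrier pattern; the struct size is a placeholder and the inline asm is the usual GCC-style compiler barrier rather than QEMU's barrier() macro:

    #include <string.h>

    /* Placeholder layout; the real blkif_request_t is defined by the Xen ABI. */
    struct blkif_req { unsigned char raw[112]; };

    /* Copy a descriptor out of the shared ring before validating or using it. */
    static void fetch_request(const struct blkif_req *ring_slot,
                              struct blkif_req *priv)
    {
        memcpy(priv, ring_slot, sizeof(*priv));

        /* Compiler barrier: everything after this must use 'priv', never the
         * on-ring fields, even if the compiler thinks they are still equal. */
        __asm__ __volatile__("" ::: "memory");
    }
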
507 * Threshold of in-flight requests above which we will start using
508 * defer_call_begin()/defer_call_end() to batch requests.
516 int inflight_atstart = dataplane->requests_inflight; in xen_block_handle_requests()
520 dataplane->more_work = 0; in xen_block_handle_requests()
522 rc = dataplane->rings.common.req_cons; in xen_block_handle_requests()
523 rp = dataplane->rings.common.sring->req_prod; in xen_block_handle_requests()
524 xen_rmb(); /* Ensure we see queued requests up to 'rp'. */ in xen_block_handle_requests()
527  * If there were more than IO_PLUG_THRESHOLD requests in flight in xen_block_handle_requests()
529 * is below us, so it's worth beginning to batch up I/O requests in xen_block_handle_requests()
531 * of requests we're willing to batch is the number already in in xen_block_handle_requests()
539 /* pull request from ring */ in xen_block_handle_requests()
540 if (RING_REQUEST_CONS_OVERFLOW(&dataplane->rings.common, rc)) { in xen_block_handle_requests()
545 dataplane->more_work++; in xen_block_handle_requests()
549 dataplane->rings.common.req_cons = ++rc; in xen_block_handle_requests()
554 switch (request->req.operation) { in xen_block_handle_requests()
556 block_acct_invalid(blk_get_stats(dataplane->blk), in xen_block_handle_requests()
560 block_acct_invalid(blk_get_stats(dataplane->blk), in xen_block_handle_requests()
564 block_acct_invalid(blk_get_stats(dataplane->blk), in xen_block_handle_requests()
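
The handle_requests fragments only batch when the ring was already busy on entry: with more than IO_PLUG_THRESHOLD requests in flight, submissions go between defer_call_begin() and defer_call_end() so the block layer can coalesce them, and the batch is flushed once roughly as many new requests have been queued as were in flight at the start. A sketch of that shape only; the defer_call_*() names come from qemu/defer-call.h as cited above, everything else is a stub:

    void defer_call_begin(void);
    void defer_call_end(void);

    static int requests_inflight;            /* stand-in for dataplane state */
    static int more_requests_pending(void);  /* stand-in for the ring check */
    static void submit_one_request(void);    /* stand-in for parse/copy/do_aio */

    #define IO_PLUG_THRESHOLD 1

    static void handle_requests(void)
    {
        int inflight_atstart = requests_inflight;
        int batched = 0;

        /* Only batch when the ring was already busy on entry. */
        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            defer_call_begin();
        }

        while (more_requests_pending()) {
            submit_one_request();

            if (inflight_atstart > IO_PLUG_THRESHOLD) {
                if (batched >= inflight_atstart) {
                    defer_call_end();     /* flush the current batch ... */
                    defer_call_begin();   /* ... and start a new one */
                    batched = 0;
                } else {
                    batched++;
                }
            }
        }

        if (inflight_atstart > IO_PLUG_THRESHOLD) {
            defer_call_end();             /* submit anything still deferred */
        }
    }
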
616 dataplane->xendev = xendev; in xen_block_dataplane_create()
617 dataplane->blk = blk; in xen_block_dataplane_create()
618 dataplane->sector_size = sector_size; in xen_block_dataplane_create()
620 QLIST_INIT(&dataplane->inflight); in xen_block_dataplane_create()
621 QLIST_INIT(&dataplane->freelist); in xen_block_dataplane_create()
624 dataplane->iothread = iothread; in xen_block_dataplane_create()
625 object_ref(OBJECT(dataplane->iothread)); in xen_block_dataplane_create()
626 dataplane->ctx = iothread_get_aio_context(dataplane->iothread); in xen_block_dataplane_create()
628 dataplane->ctx = qemu_get_aio_context(); in xen_block_dataplane_create()
630 dataplane->bh = aio_bh_new_guarded(dataplane->ctx, xen_block_dataplane_bh, in xen_block_dataplane_create()
632 &DEVICE(xendev)->mem_reentrancy_guard); in xen_block_dataplane_create()
645 while (!QLIST_EMPTY(&dataplane->freelist)) { in xen_block_dataplane_destroy()
646 request = QLIST_FIRST(&dataplane->freelist); in xen_block_dataplane_destroy()
648 qemu_iovec_destroy(&request->v); in xen_block_dataplane_destroy()
649 qemu_vfree(request->buf); in xen_block_dataplane_destroy()
653 qemu_bh_delete(dataplane->bh); in xen_block_dataplane_destroy()
654 if (dataplane->iothread) { in xen_block_dataplane_destroy()
655 object_unref(OBJECT(dataplane->iothread)); in xen_block_dataplane_destroy()
663 if (!dataplane || !dataplane->event_channel) { in xen_block_dataplane_detach()
668 xen_device_set_event_channel_context(dataplane->xendev, in xen_block_dataplane_detach()
669 dataplane->event_channel, in xen_block_dataplane_detach()
675 if (!dataplane || !dataplane->event_channel) { in xen_block_dataplane_attach()
680 xen_device_set_event_channel_context(dataplane->xendev, in xen_block_dataplane_attach()
681 dataplane->event_channel, in xen_block_dataplane_attach()
682 dataplane->ctx, &error_abort); in xen_block_dataplane_attach()
693 xendev = dataplane->xendev; in xen_block_dataplane_stop()
695 if (!blk_in_drain(dataplane->blk)) { in xen_block_dataplane_stop()
700 blk_set_aio_context(dataplane->blk, qemu_get_aio_context(), &error_abort); in xen_block_dataplane_stop()
706 qemu_bh_cancel(dataplane->bh); in xen_block_dataplane_stop()
708 if (dataplane->event_channel) { in xen_block_dataplane_stop()
711 xen_device_unbind_event_channel(xendev, dataplane->event_channel, in xen_block_dataplane_stop()
713 dataplane->event_channel = NULL; in xen_block_dataplane_stop()
720 if (dataplane->sring) { in xen_block_dataplane_stop()
723 xen_device_unmap_grant_refs(xendev, dataplane->sring, in xen_block_dataplane_stop()
724 dataplane->ring_ref, in xen_block_dataplane_stop()
725 dataplane->nr_ring_ref, &local_err); in xen_block_dataplane_stop()
726 dataplane->sring = NULL; in xen_block_dataplane_stop()
733 g_free(dataplane->ring_ref); in xen_block_dataplane_stop()
734 dataplane->ring_ref = NULL; in xen_block_dataplane_stop()
745 XenDevice *xendev = dataplane->xendev; in xen_block_dataplane_start()
749 dataplane->nr_ring_ref = nr_ring_ref; in xen_block_dataplane_start()
750 dataplane->ring_ref = g_new(unsigned int, nr_ring_ref); in xen_block_dataplane_start()
753 dataplane->ring_ref[i] = ring_ref[i]; in xen_block_dataplane_start()
756 dataplane->protocol = protocol; in xen_block_dataplane_start()
758 ring_size = XEN_PAGE_SIZE * dataplane->nr_ring_ref; in xen_block_dataplane_start()
759 switch (dataplane->protocol) { in xen_block_dataplane_start()
762 dataplane->max_requests = __CONST_RING_SIZE(blkif, ring_size); in xen_block_dataplane_start()
767 dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_32, ring_size); in xen_block_dataplane_start()
772 dataplane->max_requests = __CONST_RING_SIZE(blkif_x86_64, ring_size); in xen_block_dataplane_start()
776 error_setg(errp, "unknown protocol %u", dataplane->protocol); in xen_block_dataplane_start()
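
dataplane->max_requests comes from __CONST_RING_SIZE(), which in effect divides the mapped ring area (XEN_PAGE_SIZE * nr_ring_ref bytes, minus the shared-ring header) by the slot size and rounds down to a power of two. A small sketch of that arithmetic with assumed header and slot sizes, purely to illustrate what the macro computes:

    #include <stddef.h>
    #include <stdint.h>

    /* Largest power of two not exceeding x (x > 0). */
    static uint32_t rd_pow2(uint32_t x)
    {
        uint32_t r = 1;

        while (r <= x / 2) {
            r *= 2;
        }
        return r;
    }

    /* Roughly what __CONST_RING_SIZE() yields: how many request/response
     * slots fit in the mapped ring pages. Header and slot sizes are assumed
     * arguments here; the real values come from the blkif sring structs. */
    static uint32_t ring_entries(size_t ring_bytes, size_t header_bytes,
                                 size_t slot_bytes)
    {
        return rd_pow2((uint32_t)((ring_bytes - header_bytes) / slot_bytes));
    }
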
780 xen_device_set_max_grant_refs(xendev, dataplane->nr_ring_ref, in xen_block_dataplane_start()
786 dataplane->sring = xen_device_map_grant_refs(xendev, in xen_block_dataplane_start()
787 dataplane->ring_ref, in xen_block_dataplane_start()
788 dataplane->nr_ring_ref, in xen_block_dataplane_start()
795 switch (dataplane->protocol) { in xen_block_dataplane_start()
798 blkif_sring_t *sring_native = dataplane->sring; in xen_block_dataplane_start()
800 BACK_RING_INIT(&dataplane->rings.native, sring_native, ring_size); in xen_block_dataplane_start()
805 blkif_x86_32_sring_t *sring_x86_32 = dataplane->sring; in xen_block_dataplane_start()
807 BACK_RING_INIT(&dataplane->rings.x86_32_part, sring_x86_32, in xen_block_dataplane_start()
813 blkif_x86_64_sring_t *sring_x86_64 = dataplane->sring; in xen_block_dataplane_start()
815 BACK_RING_INIT(&dataplane->rings.x86_64_part, sring_x86_64, in xen_block_dataplane_start()
821 dataplane->event_channel = in xen_block_dataplane_start()
830 blk_set_aio_context(dataplane->blk, dataplane->ctx, NULL); in xen_block_dataplane_start()
832 if (!blk_in_drain(dataplane->blk)) { in xen_block_dataplane_start()