Lines matching full:state — uses of the identifier "state" in QEMU's Xen ioreq-server code (the functions below appear to come from hw/xen/xen-hvm-common.c). Each entry gives the source line number, the matching line, the enclosing function, and whether "state" is a local or an argument there.

75 XenIOState *state = container_of(listener, XenIOState, memory_listener); in xen_set_memory() local
81 xen_map_memory_section(xen_domid, state->ioservid, in xen_set_memory()
84 xen_unmap_memory_section(xen_domid, state->ioservid, in xen_set_memory()
89 arch_xen_set_memory(state, section, add); in xen_set_memory()
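
The matches above show QEMU's embedded-listener idiom: XenIOState embeds its MemoryListener, and the callback recovers the enclosing state with container_of. A minimal standalone sketch of the same idiom (DemoListener, DemoState, and demo_region_add are hypothetical names, not QEMU's):

/*
 * Standalone sketch of the container_of idiom: a callback receives a
 * pointer to an embedded member and recovers the enclosing structure.
 * All names here are illustrative, not QEMU's.
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

typedef struct DemoListener {
    void (*region_add)(struct DemoListener *listener, const char *name);
} DemoListener;

typedef struct DemoState {
    int ioservid;                 /* analogous to XenIOState->ioservid */
    DemoListener memory_listener; /* embedded, like XenIOState's listeners */
} DemoState;

static void demo_region_add(DemoListener *listener, const char *name)
{
    /* Recover the enclosing DemoState from the embedded member. */
    DemoState *state = container_of(listener, DemoState, memory_listener);
    printf("region %s added for ioreq server %d\n", name, state->ioservid);
}

int main(void)
{
    DemoState state = { .ioservid = 7,
                        .memory_listener = { .region_add = demo_region_add } };
    state.memory_listener.region_add(&state.memory_listener, "ram0");
    return 0;
}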
109 XenIOState *state = container_of(listener, XenIOState, io_listener); in xen_io_add() local
118 xen_map_io_section(xen_domid, state->ioservid, section); in xen_io_add()
124 XenIOState *state = container_of(listener, XenIOState, io_listener); in xen_io_del() local
131 xen_unmap_io_section(xen_domid, state->ioservid, section); in xen_io_del()
139 XenIOState *state = container_of(listener, XenIOState, device_listener); in xen_device_realize() local
148 QLIST_INSERT_HEAD(&state->dev_list, xendev, entry); in xen_device_realize()
150 xen_map_pcidev(xen_domid, state->ioservid, pci_dev); in xen_device_realize()
157 XenIOState *state = container_of(listener, XenIOState, device_listener); in xen_device_unrealize() local
163 xen_unmap_pcidev(xen_domid, state->ioservid, pci_dev); in xen_device_unrealize()
165 QLIST_FOREACH_SAFE(xendev, &state->dev_list, entry, next) { in xen_device_unrealize()
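
xen_device_realize() pushes each realized PCI device onto state->dev_list with QLIST_INSERT_HEAD, and xen_device_unrealize() walks the list with QLIST_FOREACH_SAFE so the matching node can be unlinked mid-iteration. QEMU's QLIST_* macros follow the BSD <sys/queue.h> LIST_* family, so the pattern can be sketched portably (XenDevDemo and its devfn field are illustrative, not QEMU's actual structure):

/*
 * Portable sketch of the dev_list bookkeeping using <sys/queue.h>, whose
 * LIST_* macros QEMU's QLIST_* mirror. Names are illustrative.
 */
#include <sys/queue.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct XenDevDemo {
    int devfn;
    LIST_ENTRY(XenDevDemo) entry;      /* like QLIST_ENTRY(...) entry */
} XenDevDemo;

LIST_HEAD(DevList, XenDevDemo);

int main(void)
{
    struct DevList dev_list;
    LIST_INIT(&dev_list);              /* like QLIST_INIT(&state->dev_list) */

    for (int i = 0; i < 3; i++) {      /* "realize": insert at head */
        XenDevDemo *d = malloc(sizeof(*d));
        d->devfn = i;
        LIST_INSERT_HEAD(&dev_list, d, entry);
    }

    /* "unrealize": QLIST_FOREACH_SAFE caches the next node so the current
     * one can be unlinked and freed mid-walk; spelled out here explicitly. */
    XenDevDemo *d = LIST_FIRST(&dev_list), *next;
    while (d != NULL) {
        next = LIST_NEXT(d, entry);
        printf("removing devfn %d\n", d->devfn);
        LIST_REMOVE(d, entry);
        free(d);
        d = next;
    }
    return 0;
}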
188 static ioreq_t *cpu_get_ioreq_from_shared_memory(XenIOState *state, int vcpu) in cpu_get_ioreq_from_shared_memory() argument
190 ioreq_t *req = xen_vcpu_ioreq(state->shared_page, vcpu); in cpu_get_ioreq_from_shared_memory()
192 if (req->state != STATE_IOREQ_READY) { in cpu_get_ioreq_from_shared_memory()
193 trace_cpu_get_ioreq_from_shared_memory_req_not_ready(req->state, in cpu_get_ioreq_from_shared_memory()
204 req->state = STATE_IOREQ_INPROCESS; in cpu_get_ioreq_from_shared_memory()
211 static ioreq_t *cpu_get_ioreq(XenIOState *state) in cpu_get_ioreq() argument
218 port = qemu_xen_evtchn_pending(state->xce_handle); in cpu_get_ioreq()
219 if (port == state->bufioreq_local_port) { in cpu_get_ioreq()
220 timer_mod(state->buffered_io_timer, in cpu_get_ioreq()
227 if (state->ioreq_local_port[i] == port) { in cpu_get_ioreq()
237 qemu_xen_evtchn_unmask(state->xce_handle, port); in cpu_get_ioreq()
240 state->send_vcpu = i; in cpu_get_ioreq()
241 return cpu_get_ioreq_from_shared_memory(state, i); in cpu_get_ioreq()
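
cpu_get_ioreq() maps a pending event-channel port back to a vCPU index (or to the buffered-ioreq timer), unmasks the port, records send_vcpu, and fetches that vCPU's slot from the shared page; cpu_get_ioreq_from_shared_memory() then claims the request by moving it from STATE_IOREQ_READY to STATE_IOREQ_INPROCESS. A standalone sketch of that claim step, with a C11 acquire load standing in for the read barrier QEMU issues before touching the payload (the struct layout is illustrative, not the real ioreq_t ABI):

/*
 * Sketch of the synchronous ioreq handshake: the guest side publishes a
 * request with state == STATE_IOREQ_READY, and the device model claims it
 * by moving it to STATE_IOREQ_INPROCESS. The acquire load stands in for
 * xen_rmb(); the struct is illustrative, not the Xen ABI.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { STATE_IOREQ_NONE, STATE_IOREQ_READY, STATE_IOREQ_INPROCESS,
       STATE_IORESP_READY };

typedef struct {
    uint64_t addr;
    uint64_t data;
    _Atomic uint8_t state;
} demo_ioreq_t;

static demo_ioreq_t *demo_get_ioreq(demo_ioreq_t *req)
{
    /* Acquire: payload loads below cannot move before this check. */
    if (atomic_load_explicit(&req->state, memory_order_acquire)
        != STATE_IOREQ_READY) {
        return NULL;                   /* spurious wakeup: nothing to do */
    }
    atomic_store_explicit(&req->state, STATE_IOREQ_INPROCESS,
                          memory_order_relaxed);
    return req;
}

int main(void)
{
    demo_ioreq_t req = { .addr = 0xcf8, .data = 0,
                         .state = STATE_IOREQ_READY };
    demo_ioreq_t *r = demo_get_ioreq(&req);
    printf("claimed: %s, addr=0x%llx\n", r ? "yes" : "no",
           r ? (unsigned long long)r->addr : 0ULL);
    return 0;
}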
391 static void cpu_ioreq_config(XenIOState *state, ioreq_t *req) in cpu_ioreq_config() argument
406 QLIST_FOREACH(xendev, &state->dev_list, entry) { in cpu_ioreq_config()
447 static void handle_ioreq(XenIOState *state, ioreq_t *req) in handle_ioreq() argument
474 cpu_ioreq_config(state, req); in handle_ioreq()
477 arch_handle_ioreq(state, req); in handle_ioreq()
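
handle_ioreq() is a type dispatcher: PCI config cycles are routed to cpu_ioreq_config(), which matches the request against state->dev_list, and anything the common code does not recognize falls through to arch_handle_ioreq(). The shape of that dispatch, with placeholder type constants rather than the real IOREQ_TYPE_* values:

/*
 * Hypothetical dispatcher shaped like handle_ioreq(): route by request
 * type, with an arch hook as the fallback. Constants are made up.
 */
#include <stdio.h>

enum demo_type { DEMO_TYPE_PIO, DEMO_TYPE_COPY, DEMO_TYPE_PCI_CONFIG,
                 DEMO_TYPE_OTHER };

typedef struct { enum demo_type type; } demo_req_t;

static void demo_handle_req(demo_req_t *req)
{
    switch (req->type) {
    case DEMO_TYPE_PCI_CONFIG:
        printf("config cycle -> cpu_ioreq_config()\n");
        break;
    case DEMO_TYPE_PIO:
    case DEMO_TYPE_COPY:
        printf("port or memory access\n");
        break;
    default:
        printf("unrecognized -> arch_handle_ioreq()\n");
        break;
    }
}

int main(void)
{
    demo_req_t req = { DEMO_TYPE_PCI_CONFIG };
    demo_handle_req(&req);
    return 0;
}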
485 static unsigned int handle_buffered_iopage(XenIOState *state) in handle_buffered_iopage() argument
487 buffered_iopage_t *buf_page = state->buffered_io_page; in handle_buffered_iopage()
498 req.state = STATE_IOREQ_READY; in handle_buffered_iopage()
531 handle_ioreq(state, &req); in handle_buffered_iopage()
537 assert(req.state == STATE_IOREQ_READY); in handle_buffered_iopage()
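
handle_buffered_iopage() drains a single-producer/single-consumer ring: the read pointer is consumer-owned, the write pointer producer-owned, and each slot is synthesized into an ioreq_t marked STATE_IOREQ_READY before being fed to handle_ioreq() (the assert above checks the request was not completed out from under us). A standalone consumer sketch with explicit acquire/release ordering (field names and sizes are illustrative, not the Xen ring ABI):

/*
 * Sketch of the buffered-ioreq ring consumer: an acquire load of the
 * producer's write pointer, then a release store of the consumer's read
 * pointer once the slots have been handled. Layout is illustrative.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define SLOTS 8                      /* stands in for IOREQ_BUFFER_SLOT_NUM */

typedef struct {
    _Atomic uint32_t read_pointer;   /* consumer-owned */
    _Atomic uint32_t write_pointer;  /* producer-owned */
    uint64_t slot[SLOTS];
} demo_buf_page_t;

static unsigned demo_drain(demo_buf_page_t *page)
{
    unsigned handled = 0;
    uint32_t rp = atomic_load_explicit(&page->read_pointer,
                                       memory_order_relaxed);
    /* Acquire pairs with the producer's release store of write_pointer. */
    uint32_t wp = atomic_load_explicit(&page->write_pointer,
                                       memory_order_acquire);
    while (rp != wp) {
        uint64_t req = page->slot[rp % SLOTS];
        printf("handling buffered req %llu\n", (unsigned long long)req);
        rp++;
        handled++;
    }
    /* Release: slot reads above complete before the slots are recycled. */
    atomic_store_explicit(&page->read_pointer, rp, memory_order_release);
    return handled;
}

int main(void)
{
    demo_buf_page_t page = { .read_pointer = 0, .write_pointer = 3,
                             .slot = { 10, 11, 12 } };
    printf("drained %u requests\n", demo_drain(&page));
    return 0;
}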
552 XenIOState *state = opaque; in handle_buffered_io() local
554 handled = handle_buffered_iopage(state); in handle_buffered_io()
559 timer_mod(state->buffered_io_timer, in handle_buffered_io()
562 timer_del(state->buffered_io_timer); in handle_buffered_io()
563 qemu_xen_evtchn_unmask(state->xce_handle, state->bufioreq_local_port); in handle_buffered_io()
565 timer_mod(state->buffered_io_timer, in handle_buffered_io()
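
handle_buffered_io() then decides whether to keep polling: a full page of handled slots appears to mean more work is queued (re-arm the timer immediately), a partial drain re-arms with a delay, and an empty pass deletes the timer and unmasks the buffered-ioreq event channel so the next guest notification restarts the cycle. That decision as a pure function (FULL_PAGE is an assumed threshold standing in for IOREQ_BUFFER_SLOT_NUM):

/*
 * Decision logic of handle_buffered_io() as a pure function: keep polling
 * while the ring is non-empty, fall back to event-channel notification
 * once a drain pass handles nothing. FULL_PAGE is an assumption.
 */
#include <stdio.h>

#define FULL_PAGE 8

enum rearm_action { REARM_NOW, REARM_DELAYED, STOP_AND_UNMASK };

static enum rearm_action next_action(unsigned handled)
{
    if (handled >= FULL_PAGE) {
        return REARM_NOW;        /* more work is almost certainly queued */
    } else if (handled == 0) {
        return STOP_AND_UNMASK;  /* timer_del() + evtchn unmask in QEMU */
    }
    return REARM_DELAYED;        /* timer_mod(now + delay) in QEMU */
}

int main(void)
{
    printf("%d %d %d\n", next_action(8), next_action(0), next_action(3));
    return 0;
}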
572 XenIOState *state = opaque; in cpu_handle_ioreq() local
573 ioreq_t *req = cpu_get_ioreq(state); in cpu_handle_ioreq()
575 handle_buffered_iopage(state); in cpu_handle_ioreq()
580 handle_ioreq(state, &copy); in cpu_handle_ioreq()
583 if (req->state != STATE_IOREQ_INPROCESS) { in cpu_handle_ioreq()
587 req->state, req->data_is_ptr, req->addr, in cpu_handle_ioreq()
593 xen_wmb(); /* Update ioreq contents /then/ update state. */ in cpu_handle_ioreq()
614 req->state = STATE_IORESP_READY; in cpu_handle_ioreq()
615 qemu_xen_evtchn_notify(state->xce_handle, in cpu_handle_ioreq()
616 state->ioreq_local_port[state->send_vcpu]); in cpu_handle_ioreq()
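
cpu_handle_ioreq() works on a local copy of the request, writes the result back to the shared slot, and only then publishes completion: the xen_wmb() above orders the payload stores before the STATE_IORESP_READY store, after which the issuing vCPU's event channel is notified. The same publication protocol with a C11 release store (demo_notify() stands in for qemu_xen_evtchn_notify(); the struct is illustrative):

/*
 * Response publication: 1) payload, 2) release-store the state change
 * (the xen_wmb() in QEMU), 3) kick the guest. Names are illustrative.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

enum { STATE_IOREQ_INPROCESS = 2, STATE_IORESP_READY = 3 };

typedef struct {
    uint64_t data;
    _Atomic uint8_t state;
} demo_ioreq_t;

static void demo_notify(int port) { printf("notify port %d\n", port); }

static void publish_response(demo_ioreq_t *shared, uint64_t result, int port)
{
    shared->data = result;               /* 1. response payload */
    /* 2. Release: the payload store above is visible before the state
     * change -- this is what the barrier in QEMU enforces. */
    atomic_store_explicit(&shared->state, STATE_IORESP_READY,
                          memory_order_release);
    demo_notify(port);                   /* 3. wake the waiting vCPU */
}

int main(void)
{
    demo_ioreq_t shared = { .data = 0, .state = STATE_IOREQ_INPROCESS };
    publish_response(&shared, 0xabcd, 5);
    return 0;
}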
620 static void xen_main_loop_prepare(XenIOState *state) in xen_main_loop_prepare() argument
624 if (state->xce_handle != NULL) { in xen_main_loop_prepare()
625 evtchn_fd = qemu_xen_evtchn_fd(state->xce_handle); in xen_main_loop_prepare()
628 state->buffered_io_timer = timer_new_ms(QEMU_CLOCK_REALTIME, handle_buffered_io, in xen_main_loop_prepare()
629 state); in xen_main_loop_prepare()
637 state->cpu_by_vcpu_id[cpu_state->cpu_index] = cpu_state; in xen_main_loop_prepare()
639 qemu_set_fd_handler(evtchn_fd, cpu_handle_ioreq, NULL, state); in xen_main_loop_prepare()
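
xen_main_loop_prepare() wires everything into QEMU's main loop: it builds the cpu_by_vcpu_id table, creates the buffered-io timer, and registers cpu_handle_ioreq() as the read handler for the event-channel fd via qemu_set_fd_handler(). A minimal standalone equivalent of that fd-handler wiring, using poll(2) and a pipe as a stand-in for the event channel:

/*
 * Stand-in for qemu_set_fd_handler(fd, cpu_handle_ioreq, NULL, state):
 * wait for the fd to become readable, then invoke a handler with an
 * opaque pointer. The pipe fakes an event-channel notification.
 */
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

static void on_readable(void *opaque)
{
    int *fd = opaque;
    char c;
    (void)read(*fd, &c, 1);              /* consume the event */
    printf("event on fd %d: %c\n", *fd, c);
}

int main(void)
{
    int pipefd[2];
    if (pipe(pipefd)) {
        return 1;
    }
    (void)write(pipefd[1], "x", 1);      /* pretend the guest raised an event */

    struct pollfd pfd = { .fd = pipefd[0], .events = POLLIN };
    if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
        on_readable(&pipefd[0]);         /* like cpu_handle_ioreq(state) */
    }
    return 0;
}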
647 XenIOState *state = opaque; in xen_hvm_change_state_handler() local
650 xen_main_loop_prepare(state); in xen_hvm_change_state_handler()
654 state->ioservid, in xen_hvm_change_state_handler()
660 XenIOState *state = container_of(n, XenIOState, exit); in xen_exit_notifier() local
662 xen_destroy_ioreq_server(xen_domid, state->ioservid); in xen_exit_notifier()
663 if (state->fres != NULL) { in xen_exit_notifier()
664 xenforeignmemory_unmap_resource(xen_fmem, state->fres); in xen_exit_notifier()
667 qemu_xen_evtchn_close(state->xce_handle); in xen_exit_notifier()
668 xs_daemon_close(state->xenstore); in xen_exit_notifier()
671 static int xen_map_ioreq_server(XenIOState *state) in xen_map_ioreq_server() argument
688 if (state->has_bufioreq) { in xen_map_ioreq_server()
692 state->fres = xenforeignmemory_map_resource(xen_fmem, xen_domid, in xen_map_ioreq_server()
694 state->ioservid, in xen_map_ioreq_server()
698 if (state->fres != NULL) { in xen_map_ioreq_server()
699 trace_xen_map_resource_ioreq(state->ioservid, addr); in xen_map_ioreq_server()
700 state->shared_page = addr; in xen_map_ioreq_server()
701 if (state->has_bufioreq) { in xen_map_ioreq_server()
702 state->buffered_io_page = addr; in xen_map_ioreq_server()
703 state->shared_page = addr + XC_PAGE_SIZE; in xen_map_ioreq_server()
717 if (state->shared_page == NULL || state->has_bufioreq) { in xen_map_ioreq_server()
718 rc = xen_get_ioreq_server_info(xen_domid, state->ioservid, in xen_map_ioreq_server()
719 (state->shared_page == NULL) ? in xen_map_ioreq_server()
721 (state->has_bufioreq && in xen_map_ioreq_server()
722 state->buffered_io_page == NULL) ? in xen_map_ioreq_server()
731 if (state->shared_page == NULL) { in xen_map_ioreq_server()
734 state->shared_page = xenforeignmemory_map(xen_fmem, xen_domid, in xen_map_ioreq_server()
738 if (state->shared_page == NULL) { in xen_map_ioreq_server()
743 if (state->has_bufioreq && state->buffered_io_page == NULL) { in xen_map_ioreq_server()
746 state->buffered_io_page = xenforeignmemory_map(xen_fmem, xen_domid, in xen_map_ioreq_server()
750 if (state->buffered_io_page == NULL) { in xen_map_ioreq_server()
757 if (state->shared_page == NULL || in xen_map_ioreq_server()
758 (state->has_bufioreq && state->buffered_io_page == NULL)) { in xen_map_ioreq_server()
762 if (state->has_bufioreq) { in xen_map_ioreq_server()
764 state->bufioreq_remote_port = bufioreq_evtchn; in xen_map_ioreq_server()
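
xen_map_ioreq_server() first tries the single-call mapping, xenforeignmemory_map_resource(); when that succeeds with buffered ioreqs enabled, the mapping holds the buffered ring page first and the per-vCPU synchronous page one page later, at addr + XC_PAGE_SIZE. Only if the resource mapping is unavailable does it fall back to xen_get_ioreq_server_info() plus per-page xenforeignmemory_map() calls. The layout assignment as a standalone function (PAGE_SIZE stands in for XC_PAGE_SIZE):

/*
 * Page layout implied by the matches above: with buffered ioreqs, page 0
 * of the mapped resource is the buffered ring and page 1 the synchronous
 * shared page; without them, page 0 is the shared page. Illustrative.
 */
#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096

static void assign_pages(char *addr, bool has_bufioreq,
                         void **shared_page, void **buffered_io_page)
{
    *shared_page = addr;
    *buffered_io_page = NULL;
    if (has_bufioreq) {
        *buffered_io_page = addr;            /* page 0: buffered ring */
        *shared_page = addr + PAGE_SIZE;     /* page 1: per-vCPU ioreqs */
    }
}

int main(void)
{
    static char mapping[2 * PAGE_SIZE];
    void *shared, *buffered;
    assign_pages(mapping, true, &shared, &buffered);
    printf("buffered at +%td, shared at +%td\n",
           (char *)buffered - mapping, (char *)shared - mapping);
    return 0;
}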
819 static void xen_do_ioreq_register(XenIOState *state, in xen_do_ioreq_register() argument
825 state->exit.notify = xen_exit_notifier; in xen_do_ioreq_register()
826 qemu_add_exit_notifier(&state->exit); in xen_do_ioreq_register()
833 rc = xen_map_ioreq_server(state); in xen_do_ioreq_register()
839 state->cpu_by_vcpu_id = g_new0(CPUState *, max_cpus); in xen_do_ioreq_register()
841 rc = xen_set_ioreq_server_state(xen_domid, state->ioservid, true); in xen_do_ioreq_register()
848 state->ioreq_local_port = g_new0(evtchn_port_t, max_cpus); in xen_do_ioreq_register()
852 rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, in xen_do_ioreq_register()
853 xen_vcpu_eport(state->shared_page, in xen_do_ioreq_register()
859 state->ioreq_local_port[i] = rc; in xen_do_ioreq_register()
862 if (state->has_bufioreq) { in xen_do_ioreq_register()
863 rc = qemu_xen_evtchn_bind_interdomain(state->xce_handle, xen_domid, in xen_do_ioreq_register()
864 state->bufioreq_remote_port); in xen_do_ioreq_register()
869 state->bufioreq_local_port = rc; in xen_do_ioreq_register()
873 xen_map_cache_init(xen_phys_offset_to_gaddr, state); in xen_do_ioreq_register()
875 xen_map_cache_init(NULL, state); in xen_do_ioreq_register()
878 qemu_add_vm_change_state_handler(xen_hvm_change_state_handler, state); in xen_do_ioreq_register()
880 state->memory_listener = *xen_memory_listener; in xen_do_ioreq_register()
881 memory_listener_register(&state->memory_listener, &address_space_memory); in xen_do_ioreq_register()
883 state->io_listener = xen_io_listener; in xen_do_ioreq_register()
884 memory_listener_register(&state->io_listener, &address_space_io); in xen_do_ioreq_register()
886 state->device_listener = xen_device_listener; in xen_do_ioreq_register()
887 QLIST_INIT(&state->dev_list); in xen_do_ioreq_register()
888 device_listener_register(&state->device_listener); in xen_do_ioreq_register()
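
xen_do_ioreq_register() is the bring-up sequence: register an exit notifier, map the ioreq-server pages, allocate the per-vCPU bookkeeping, activate the server with xen_set_ioreq_server_state(), bind one interdomain event channel per vCPU (plus the buffered port when present), initialize the map cache, and finally register the memory, I/O, and device listeners. The per-vCPU binding loop, with a stub standing in for qemu_xen_evtchn_bind_interdomain():

/*
 * Per-vCPU event-channel bookkeeping, as in xen_do_ioreq_register(): one
 * local port per vCPU, indexed by vcpu id so cpu_get_ioreq() can map a
 * pending port back to the vCPU that issued the request. The bind call
 * is a stub; in QEMU the remote port comes from xen_vcpu_eport().
 */
#include <stdio.h>
#include <stdlib.h>

static int stub_bind_interdomain(int domid, int remote_port)
{
    (void)domid;
    return 100 + remote_port;   /* fake local port; the real call may fail */
}

int main(void)
{
    int max_cpus = 4, xen_domid = 1;
    int *ioreq_local_port = calloc(max_cpus, sizeof(*ioreq_local_port));

    for (int i = 0; i < max_cpus; i++) {
        int rc = stub_bind_interdomain(xen_domid, /* remote port */ i);
        if (rc == -1) {
            fprintf(stderr, "bind failed for vcpu %d\n", i);
            return 1;
        }
        ioreq_local_port[i] = rc;
    }

    for (int i = 0; i < max_cpus; i++) {
        printf("vcpu %d -> local port %d\n", i, ioreq_local_port[i]);
    }
    free(ioreq_local_port);
    return 0;
}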
897 void xen_register_ioreq(XenIOState *state, unsigned int max_cpus, in xen_register_ioreq() argument
905 state->xce_handle = qemu_xen_evtchn_open(); in xen_register_ioreq()
906 if (state->xce_handle == NULL) { in xen_register_ioreq()
911 state->xenstore = xs_daemon_open(); in xen_register_ioreq()
912 if (state->xenstore == NULL) { in xen_register_ioreq()
917 state->has_bufioreq = handle_bufioreq != HVM_IOREQSRV_BUFIOREQ_OFF; in xen_register_ioreq()
918 rc = xen_create_ioreq_server(xen_domid, handle_bufioreq, &state->ioservid); in xen_register_ioreq()
920 xen_do_ioreq_register(state, max_cpus, xen_memory_listener); in xen_register_ioreq()