Lines Matching defs:req

48 struct vmmdev_hypervisorinfo *req;
55 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
57 if (!req)
60 req->hypervisor_start = 0;
61 req->hypervisor_size = 0;
62 rc = vbg_req_perform(gdev, req);
70 if (req->hypervisor_size == 0)
73 hypervisor_size = req->hypervisor_size;
75 size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
98 req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
99 req->header.rc = VERR_INTERNAL_ERROR;
100 req->hypervisor_size = hypervisor_size;
101 req->hypervisor_start =
104 rc = vbg_req_perform(gdev, req);
122 vbg_req_free(req, sizeof(*req));
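
Source lines 48-122 above come from one function, the guest-mappings setup path, and show the driver's standard request lifecycle: vbg_req_alloc() builds a VMMDev request of a given type, vbg_req_perform() hands it to the host, and vbg_req_free() releases it. A minimal sketch of how these fragments likely fit together; the address-space reservation between lines 75 and 98 is elided, and the VBG_KERNEL_REQUEST requestor argument is an assumption based on the truncated vbg_req_alloc() call on line 55:

	struct vmmdev_hypervisorinfo *req;
	u32 hypervisor_size;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO,
			    VBG_KERNEL_REQUEST);	/* requestor arg assumed */
	if (!req)
		return;

	req->hypervisor_start = 0;
	req->hypervisor_size = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc >= 0 && req->hypervisor_size != 0) {
		hypervisor_size = req->hypervisor_size;
		/* ... reserve PAGE_ALIGN(req->hypervisor_size) + SZ_4M bytes
		 * of virtual address space (source lines 75-98) ... */

		/* The GET buffer is reused for the SET request, so the header
		 * fields vbg_req_alloc() filled in are rewritten by hand. */
		req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
		req->header.rc = VERR_INTERNAL_ERROR;
		req->hypervisor_size = hypervisor_size;
		/* req->hypervisor_start = chosen address in the reservation */
		rc = vbg_req_perform(gdev, req);
	}
	vbg_req_free(req, sizeof(*req));
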
133 struct vmmdev_hypervisorinfo *req;
143 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO,
145 if (!req)
148 req->hypervisor_start = 0;
149 req->hypervisor_size = 0;
151 rc = vbg_req_perform(gdev, req);
153 vbg_req_free(req, sizeof(*req));
240 struct vmmdev_guest_status *req;
243 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
245 if (!req)
248 req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
250 req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
252 req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
253 req->flags = 0;
255 rc = vbg_req_perform(gdev, req);
259 vbg_req_free(req, sizeof(*req));
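
Lines 240-259 reduce to a single report of the driver facility's state. A sketch of the whole routine; the function name, signature, and the rc-to-errno conversion via vbg_status_code_to_errno() are reconstructions, with an assumed 'active' boolean selecting between the two status constants:

	static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
	{
		struct vmmdev_guest_status *req;
		int rc;

		req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS,
				    VBG_KERNEL_REQUEST);
		if (!req)
			return -ENOMEM;

		req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
		if (active)
			req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
		else
			req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
		req->flags = 0;

		rc = vbg_req_perform(gdev, req);
		vbg_req_free(req, sizeof(*req));

		return vbg_status_code_to_errno(rc);
	}
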
274 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
284 req->header.size = sizeof(*req);
285 req->inflate = true;
286 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
295 req->phys_page[i] = page_to_phys(pages[i]);
298 rc = vbg_req_perform(gdev, req);
327 struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
331 req->header.size = sizeof(*req);
332 req->inflate = false;
333 req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
336 req->phys_page[i] = page_to_phys(pages[i]);
338 rc = vbg_req_perform(gdev, req);
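
The balloon change paths (lines 274-338) break the pattern above: inflate and deflate share a change request preallocated in gdev->mem_balloon rather than allocating per call, which is why the header size is refreshed on every use and no vbg_req_free() appears. A sketch of the shared shape, with 'inflate' and 'pages' standing in for the direction flag and the chunk's page array:

	struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
	int i, rc;

	req->header.size = sizeof(*req);	/* re-init the reused header */
	req->inflate = inflate;			/* true: hand pages to the host */
	req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;

	/* Tell the host the physical address of every page in the chunk. */
	for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
		req->phys_page[i] = page_to_phys(pages[i]);

	rc = vbg_req_perform(gdev, req);
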
360 struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
368 req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
369 rc = vbg_req_perform(gdev, req);
381 devm_kcalloc(gdev->dev, req->phys_mem_chunks,
386 gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
389 chunks = req->balloon_chunks;
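
Lines 360-389 are the balloon worker asking the host for its target size. Setting event_ack folds the acknowledgement of the balloon-change event into the same round trip, and the page-tracking array is sized once from the host's phys_mem_chunks answer. A sketch, assuming the first-response case is detected by max_chunks still being zero:

	struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
	u32 chunks;
	int rc;

	/* Acknowledge the balloon-change event as part of the query. */
	req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		return;

	/* First answer from the host: size the chunk-tracking array. */
	if (!gdev->mem_balloon.max_chunks) {
		gdev->mem_balloon.pages =
			devm_kcalloc(gdev->dev, req->phys_mem_chunks,
				     sizeof(struct page **), GFP_KERNEL);
		if (!gdev->mem_balloon.pages)
			return;
		gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
	}

	chunks = req->balloon_chunks;	/* the host's requested balloon size */
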
439 struct vmmdev_heartbeat *req;
442 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
444 if (!req)
447 req->enabled = enabled;
448 req->interval_ns = 0;
449 rc = vbg_req_perform(gdev, req);
450 do_div(req->interval_ns, 1000000); /* ns -> ms */
451 gdev->heartbeat_interval_ms = req->interval_ns;
452 vbg_req_free(req, sizeof(*req));
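
Lines 439-452 assemble into one short configuration call. interval_ns doubles as an output parameter the host fills in, and do_div() handles the ns-to-ms conversion because the field is 64-bit. A sketch of the whole routine (name and return convention reconstructed):

	static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
	{
		struct vmmdev_heartbeat *req;
		int rc;

		req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE,
				    VBG_KERNEL_REQUEST);
		if (!req)
			return -ENOMEM;

		req->enabled = enabled;
		req->interval_ns = 0;	/* filled in by the host */
		rc = vbg_req_perform(gdev, req);
		do_div(req->interval_ns, 1000000); /* ns -> ms */
		gdev->heartbeat_interval_ms = req->interval_ns;
		vbg_req_free(req, sizeof(*req));

		return vbg_status_code_to_errno(rc);
	}
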
552 struct vmmdev_mask *req;
555 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
557 if (!req)
560 req->not_mask = U32_MAX & ~fixed_events;
561 req->or_mask = fixed_events;
562 rc = vbg_req_perform(gdev, req);
566 vbg_req_free(req, sizeof(*req));
592 struct vmmdev_mask *req;
601 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
604 if (!req) {
625 if (gdev->event_filter_host == or_mask || !req)
629 req->or_mask = or_mask;
630 req->not_mask = ~or_mask;
631 rc = vbg_req_perform(gdev, req);
647 vbg_req_free(req, sizeof(*req));
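
Both event-filter call sites (lines 552-566 and 592-647) program the host with an or_mask/not_mask pair: bits set in or_mask are switched on, bits in not_mask are switched off, and bits in neither are left alone. The reset path therefore passes not_mask = U32_MAX & ~fixed_events to clear everything except the always-on events, while the set path first checks the cached event_filter_host (line 625) to skip redundant round trips. The capabilities requests at lines 661-716 use the same pair with VMMDEVREQ_SET_GUEST_CAPABILITIES. A sketch of the core exchange:

	struct vmmdev_mask *req;
	int rc;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	/* Turn exactly the wanted bits on and every other bit off. */
	req->or_mask = or_mask;
	req->not_mask = ~or_mask;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("%s error, rc: %d\n", __func__, rc);

	vbg_req_free(req, sizeof(*req));
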
661 struct vmmdev_mask *req;
664 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
666 if (!req)
669 req->not_mask = U32_MAX;
670 req->or_mask = 0;
671 rc = vbg_req_perform(gdev, req);
675 vbg_req_free(req, sizeof(*req));
693 struct vmmdev_mask *req;
705 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES,
708 if (!req) {
713 req->or_mask = caps;
714 req->not_mask = ~caps;
715 rc = vbg_req_perform(gdev, req);
716 vbg_req_free(req, sizeof(*req));
898 struct vmmdev_host_version *req;
901 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
903 if (!req)
906 rc = vbg_req_perform(gdev, req);
914 req->major, req->minor, req->build, req->revision);
915 gdev->host_features = req->features;
920 if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
926 vbg_req_free(req, sizeof(*req));
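
Lines 898-926 query the host version and gate initialization on a feature bit: without VMMDEV_HVF_HGCM_PHYS_PAGE_LIST the host is too old for this driver. A condensed sketch, assuming the version string is formatted into a buffer on gdev:

	struct vmmdev_host_version *req;
	int rc, ret;

	req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION,
			    VBG_KERNEL_REQUEST);
	if (!req)
		return -ENOMEM;

	rc = vbg_req_perform(gdev, req);
	ret = vbg_status_code_to_errno(rc);
	if (ret == 0) {
		snprintf(gdev->host_version, sizeof(gdev->host_version),
			 "%u.%u.%ur%u",
			 req->major, req->minor, req->build, req->revision);
		gdev->host_features = req->features;

		/* Page-list support is mandatory for the HGCM transport. */
		if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST))
			ret = -ENODEV;
	}
	vbg_req_free(req, sizeof(*req));
	return ret;
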
1281 * @req: The request.
1286 const struct vmmdev_request_header *req)
1291 switch (req->request_type) {
1340 guest_status = (const struct vmmdev_guest_status *)req;
1362 req->request_type);
1369 req->request_type);
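
Lines 1281-1369 come from the permission check run on user-submitted VMMDev requests: a switch on request_type sorts each type into allowed, trusted-users-only, or denied, with guest-status reports (line 1340) inspected per facility and unknown types rejected with a log message. A heavily abbreviated sketch of the shape; the actual case lists are elided and the denial message is an assumption:

	const struct vmmdev_guest_status *guest_status;

	switch (req->request_type) {
	/* ... harmless query types: allowed for anyone ... */
	/* ... privileged types: trusted apps only ... */
	case VMMDEVREQ_REPORT_GUEST_STATUS:
		/* Allowed or denied depending on the facility it reports. */
		guest_status = (const struct vmmdev_guest_status *)req;
		/* ... switch (guest_status->facility) ... */
		break;
	default:
		vbg_err("Denying userspace vmm call type %#08x\n",
			req->request_type);
		return -EPERM;
	}
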
1681 struct vmmdev_write_core_dump *req;
1686 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP,
1688 if (!req)
1691 req->flags = dump->u.in.flags;
1692 dump->hdr.rc = vbg_req_perform(gdev, req);
1694 vbg_req_free(req, sizeof(*req));
1701 * @req: The requested function.
1706 int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
1708 unsigned int req_no_size = req & ~IOCSIZE_MASK;
1724 req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
1725 req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
1732 switch (req) {
1769 vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
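
The dispatcher fragments (lines 1701-1769) show how variable-size ioctls are matched: the size bits are masked out of the command number once, so VMMDev passthrough requests can be recognized regardless of the length encoded in them, while the _BIG variants carry their size in the payload and are compared verbatim. A condensed sketch; vbg_ioctl_vmmrequest() and its argument order are assumptions:

	int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
	{
		unsigned int req_no_size = req & ~IOCSIZE_MASK;
		struct vbg_dev *gdev = session->gdev;

		/* VMMDev passthrough requests have a variable size. */
		if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
		    req == VBG_IOCTL_VMMDEV_REQUEST_BIG ||
		    req == VBG_IOCTL_VMMDEV_REQUEST_BIG_ALT)
			return vbg_ioctl_vmmrequest(gdev, session, data);

		switch (req) {
		/* ... fixed-size ioctls dispatch to their handlers here ... */
		}

		vbg_err_ratelimited("Userspace made an unknown ioctl req %#08x\n", req);
		return -ENOTTY;
	}
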
1783 struct vmmdev_mouse_status *req;
1786 req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS,
1788 if (!req)
1791 req->mouse_features = features;
1792 req->pointer_pos_x = 0;
1793 req->pointer_pos_y = 0;
1795 rc = vbg_req_perform(gdev, req);
1799 vbg_req_free(req, sizeof(*req));
1807 struct vmmdev_events *req = gdev->ack_events_req;
1817 req->header.rc = VERR_INTERNAL_ERROR;
1818 req->events = 0;
1819 rc = vbg_req_perform(gdev, req);
1821 vbg_err("Error performing events req, rc: %d\n", rc);
1825 events = req->events;
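
The final fragments (lines 1807-1825) acknowledge pending events from interrupt context, so the request is preallocated as gdev->ack_events_req and its result field is reset to VERR_INTERNAL_ERROR before each reuse; the host writes the pending-event mask back into the same buffer. A sketch, assuming the caller holds whatever locking the reused buffer needs:

	struct vmmdev_events *req = gdev->ack_events_req;
	u32 events = 0;
	int rc;

	/* Reused buffer: reset result and payload before the round trip. */
	req->header.rc = VERR_INTERNAL_ERROR;
	req->events = 0;
	rc = vbg_req_perform(gdev, req);
	if (rc < 0)
		vbg_err("Error performing events req, rc: %d\n", rc);
	else
		events = req->events;	/* pending-event mask from the host */
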