/* xref: /qemu/hw/virtio/vhost-vdpa.c (revision c1a1008685af0327d9d03f03d43bdb77e7af5bea) */
/*
 * vhost-vdpa
 *
 *  Copyright(c) 2017-2018 Intel Corporation.
 *  Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include <linux/vhost.h>
#include <linux/vfio.h>
#include <sys/eventfd.h>
#include <sys/ioctl.h>
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/virtio-net.h"
#include "hw/virtio/vhost-shadow-virtqueue.h"
#include "hw/virtio/vhost-vdpa.h"
#include "exec/address-spaces.h"
#include "migration/blocker.h"
#include "qemu/cutils.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "trace.h"
#include "qapi/error.h"

/*
 * Return one past the end of the section. Be careful with uint64_t
 * conversions!
 */
static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
{
    Int128 llend = int128_make64(section->offset_within_address_space);
    llend = int128_add(llend, section->size);
    llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));

    return llend;
}
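
/*
 * Worked example (not part of the original file): with a 4 KiB
 * TARGET_PAGE_SIZE, a section at offset_within_address_space 0x3000 with
 * size 0x1800 gives 0x3000 + 0x1800 = 0x4800, which the int128_and() with
 * TARGET_PAGE_MASK truncates to the page-aligned end 0x4000. Int128
 * arithmetic is used so that a section ending at the top of the 64-bit
 * address space does not overflow a uint64_t.
 */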

static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
                                                uint64_t iova_min,
                                                uint64_t iova_max)
{
    Int128 llend;

    if ((!memory_region_is_ram(section->mr) &&
         !memory_region_is_iommu(section->mr)) ||
        memory_region_is_protected(section->mr) ||
        /* vhost-vDPA doesn't allow MMIO to be mapped */
        memory_region_is_ram_device(section->mr)) {
        return true;
    }

    if (section->offset_within_address_space < iova_min) {
        error_report("RAM section out of device range (min=0x%" PRIx64
                     ", addr=0x%" HWADDR_PRIx ")",
                     iova_min, section->offset_within_address_space);
        return true;
    }

    llend = vhost_vdpa_section_end(section);
    if (int128_gt(llend, int128_make64(iova_max))) {
        error_report("RAM section out of device range (max=0x%" PRIx64
                     ", end addr=0x%" PRIx64 ")",
                     iova_max, int128_get64(llend));
        return true;
    }

    return false;
}
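
/*
 * Illustrative example (not part of the original file): if the device
 * reports an IOVA range of [0x1000, 0xffffffff], a RAM section placed at
 * guest physical address 0x0 fails the iova_min check above, and a section
 * whose vhost_vdpa_section_end() exceeds 0xffffffff fails the iova_max
 * check; in both cases the memory listener simply skips the section
 * instead of mapping it.
 */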

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_map(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                       hwaddr size, void *vaddr, bool readonly)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
    msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
    msg.iotlb.type = VHOST_IOTLB_UPDATE;

    trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                             msg.iotlb.size, msg.iotlb.uaddr, msg.iotlb.perm,
                             msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}
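
/*
 * Illustrative sketch (not part of the original file): a caller on a device
 * without address space ID support passes asid 0, which is also what the
 * memory listener below does via VHOST_VDPA_GUEST_PA_ASID:
 *
 *     int r = vhost_vdpa_dma_map(v, 0, iova, size, vaddr, false);
 *     if (unlikely(r < 0)) {
 *         error_report("map failed: %s", strerror(-r));
 *     }
 *
 * Each call is a single VHOST_IOTLB_UPDATE vhost_msg_v2 written to the
 * vhost-vdpa device fd; vhost_vdpa_dma_unmap() below sends the matching
 * VHOST_IOTLB_INVALIDATE.
 */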

/*
 * The caller must set asid = 0 if the device does not support asid.
 * This is not an ABI break since it is set to 0 by the initializer anyway.
 */
int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, uint32_t asid, hwaddr iova,
                         hwaddr size)
{
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;
    int ret = 0;

    msg.type = v->msg_type;
    msg.asid = asid;
    msg.iotlb.iova = iova;
    msg.iotlb.size = size;
    msg.iotlb.type = VHOST_IOTLB_INVALIDATE;

    trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.asid, msg.iotlb.iova,
                               msg.iotlb.size, msg.iotlb.type);

    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
            fd, errno, strerror(errno));
        return -EIO;
    }

    return ret;
}

static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
{
    int fd = v->device_fd;
    struct vhost_msg_v2 msg = {
        .type = v->msg_type,
        .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
    };

    trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }
}

static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
{
    if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
        !v->iotlb_batch_begin_sent) {
        vhost_vdpa_listener_begin_batch(v);
    }

    v->iotlb_batch_begin_sent = true;
}

static void vhost_vdpa_listener_commit(MemoryListener *listener)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    struct vhost_dev *dev = v->dev;
    struct vhost_msg_v2 msg = {};
    int fd = v->device_fd;

    if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
        return;
    }

    if (!v->iotlb_batch_begin_sent) {
        return;
    }

    msg.type = v->msg_type;
    msg.iotlb.type = VHOST_IOTLB_BATCH_END;

    trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
    if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
        error_report("failed to write, fd=%d, errno=%d (%s)",
                     fd, errno, strerror(errno));
    }

    v->iotlb_batch_begin_sent = false;
}
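
/*
 * Illustrative message flow (not part of the original file): when the
 * backend advertises VHOST_BACKEND_F_IOTLB_BATCH, one memory listener
 * transaction produces a single batch on the device fd, roughly:
 *
 *     VHOST_IOTLB_BATCH_BEGIN      (sent lazily, at most once, by
 *                                   vhost_vdpa_iotlb_batch_begin_once())
 *     VHOST_IOTLB_UPDATE / VHOST_IOTLB_INVALIDATE   (one per section)
 *     VHOST_IOTLB_BATCH_END        (sent by vhost_vdpa_listener_commit())
 *
 * Without the batching capability each update or invalidate is applied on
 * its own and the commit callback returns early.
 */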

static void vhost_vdpa_listener_region_add(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    DMAMap mem_region = {};
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    void *vaddr;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);
    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    memory_region_ref(section->mr);

    /* Here we assume that memory_region_is_ram(section->mr)==true */

    vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);

    trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
                                         vaddr, section->readonly);

    llsize = int128_sub(llend, int128_make64(iova));
    if (v->shadow_data) {
        int r;

        mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr,
        mem_region.size = int128_get64(llsize) - 1,
        mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly),

        r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
        if (unlikely(r != IOVA_OK)) {
            error_report("Can't allocate a mapping (%d)", r);
            goto fail;
        }

        iova = mem_region.iova;
    }

    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_map(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                             int128_get64(llsize), vaddr, section->readonly);
    if (ret) {
        error_report("vhost vdpa map fail!");
        goto fail_map;
    }

    return;

fail_map:
    if (v->shadow_data) {
        vhost_iova_tree_remove(v->iova_tree, mem_region);
    }

fail:
    /*
     * On the initfn path, store the first error in the container so we
     * can gracefully fail.  Runtime, there's not much we can do other
     * than throw a hardware error.
     */
    error_report("vhost-vdpa: DMA mapping failed, unable to continue");
    return;

}

static void vhost_vdpa_listener_region_del(MemoryListener *listener,
                                           MemoryRegionSection *section)
{
    struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
    hwaddr iova;
    Int128 llend, llsize;
    int ret;

    if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
                                            v->iova_range.last)) {
        return;
    }

    if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
                 (section->offset_within_region & ~TARGET_PAGE_MASK))) {
        error_report("%s received unaligned region", __func__);
        return;
    }

    iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
    llend = vhost_vdpa_section_end(section);

    trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));

    if (int128_ge(int128_make64(iova), llend)) {
        return;
    }

    llsize = int128_sub(llend, int128_make64(iova));

    if (v->shadow_data) {
        const DMAMap *result;
        const void *vaddr = memory_region_get_ram_ptr(section->mr) +
            section->offset_within_region +
            (iova - section->offset_within_address_space);
        DMAMap mem_region = {
            .translated_addr = (hwaddr)(uintptr_t)vaddr,
            .size = int128_get64(llsize) - 1,
        };

        result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
        if (!result) {
            /* The memory listener map wasn't mapped */
            return;
        }
        iova = result->iova;
        vhost_iova_tree_remove(v->iova_tree, *result);
    }
    vhost_vdpa_iotlb_batch_begin_once(v);
    ret = vhost_vdpa_dma_unmap(v, VHOST_VDPA_GUEST_PA_ASID, iova,
                               int128_get64(llsize));
    if (ret) {
        error_report("vhost_vdpa dma unmap error!");
    }

    memory_region_unref(section->mr);
}
/*
 * The IOTLB API is used by vhost-vdpa, which requires incremental updating
 * of the mapping. So we cannot use the generic vhost memory listener, which
 * depends on the addnop().
 */
static const MemoryListener vhost_vdpa_memory_listener = {
    .name = "vhost-vdpa",
    .commit = vhost_vdpa_listener_commit,
    .region_add = vhost_vdpa_listener_region_add,
    .region_del = vhost_vdpa_listener_region_del,
};
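
/*
 * Illustrative note (not part of the original file): the listener is copied
 * into each vhost_vdpa in vhost_vdpa_init() and registered against the guest
 * memory address space from vhost_vdpa_dev_start(), roughly:
 *
 *     v->listener = vhost_vdpa_memory_listener;
 *     ...
 *     memory_listener_register(&v->listener, &address_space_memory);
 *     ...
 *     memory_listener_unregister(&v->listener);
 *
 * Registration replays the existing RAM sections through region_add(), so
 * the device IOTLB is populated before VIRTIO_CONFIG_S_DRIVER_OK is set.
 */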

static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
                             void *arg)
{
    struct vhost_vdpa *v = dev->opaque;
    int fd = v->device_fd;
    int ret;

    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);

    ret = ioctl(fd, request, arg);
    return ret < 0 ? -errno : ret;
}

static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
{
    uint8_t s;
    int ret;

    trace_vhost_vdpa_add_status(dev, status);
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    s |= status;

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
    if (ret < 0) {
        return ret;
    }

    if (!(s & status)) {
        return -EIO;
    }

    return 0;
}

/*
 * This function is for requests that only need to be applied once.
 * Typically such a request occurs at the beginning of operation, before
 * the queues are set up. It should not be used for requests that must only
 * run once all queues are set; those need to check dev->vq_index_end
 * instead.
 */
static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;

    return v->index == 0;
}
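
/*
 * Illustrative usage (not part of the original file): the "apply once"
 * backend callbacks in this file guard themselves with:
 *
 *     if (!vhost_vdpa_first_dev(dev)) {
 *         return 0;
 *     }
 *
 * so only the vhost_vdpa instance with index 0 forwards the request to the
 * device and the other instances of a multiqueue device return early.
 */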

static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
                                       uint64_t *features)
{
    int ret;

    ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
    trace_vhost_vdpa_get_features(dev, *features);
    return ret;
}

static void vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v)
{
    g_autoptr(GPtrArray) shadow_vqs = NULL;

    shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
    for (unsigned n = 0; n < hdev->nvqs; ++n) {
        VhostShadowVirtqueue *svq;

        svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
        g_ptr_array_add(shadow_vqs, svq);
    }

    v->shadow_vqs = g_steal_pointer(&shadow_vqs);
}

static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    trace_vhost_vdpa_init(dev, opaque);
    int ret;

    /*
     * Similar to VFIO, we end up pinning all guest memory and have to
     * disable discarding of RAM.
     */
    ret = ram_block_discard_disable(true);
    if (ret) {
        error_report("Cannot set discarding of RAM broken");
        return ret;
    }

    v = opaque;
    v->dev = dev;
    dev->opaque = opaque;
    v->listener = vhost_vdpa_memory_listener;
    v->msg_type = VHOST_IOTLB_MSG_V2;
    vhost_vdpa_init_svq(dev, v);

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
                               VIRTIO_CONFIG_S_DRIVER);

    return 0;
}

static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
                                            int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;

    n = &v->notifier[queue_index];

    if (n->addr) {
        virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
        object_unparent(OBJECT(&n->mr));
        munmap(n->addr, page_size);
        n->addr = NULL;
    }
}

static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
{
    size_t page_size = qemu_real_host_page_size();
    struct vhost_vdpa *v = dev->opaque;
    VirtIODevice *vdev = dev->vdev;
    VhostVDPAHostNotifier *n;
    int fd = v->device_fd;
    void *addr;
    char *name;

    vhost_vdpa_host_notifier_uninit(dev, queue_index);

    n = &v->notifier[queue_index];

    addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
                queue_index * page_size);
    if (addr == MAP_FAILED) {
        goto err;
    }

    name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
                           v, queue_index);
    memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
                                      page_size, addr);
    g_free(name);

    if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
        object_unparent(OBJECT(&n->mr));
        munmap(addr, page_size);
        goto err;
    }
    n->addr = addr;

    return 0;

err:
    return -1;
}

static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
{
    int i;

    for (i = dev->vq_index; i < dev->vq_index + n; i++) {
        vhost_vdpa_host_notifier_uninit(dev, i);
    }
}

static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int i;

    if (v->shadow_vqs_enabled) {
        /* FIXME SVQ is not compatible with host notifiers mr */
        return;
    }

    for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
        if (vhost_vdpa_host_notifier_init(dev, i)) {
            goto err;
        }
    }

    return;

err:
    vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
    return;
}

static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    size_t idx;

    for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
        vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
    }
    g_ptr_array_free(v->shadow_vqs, true);
}

static int vhost_vdpa_cleanup(struct vhost_dev *dev)
{
    struct vhost_vdpa *v;
    assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
    v = dev->opaque;
    trace_vhost_vdpa_cleanup(dev, v);
    vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
    memory_listener_unregister(&v->listener);
    vhost_vdpa_svq_cleanup(dev);

    dev->opaque = NULL;
    ram_block_discard_disable(false);

    return 0;
}

static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
{
    trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
    return INT_MAX;
}

static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
                                    struct vhost_memory *mem)
{
    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
        int i;
        for (i = 0; i < mem->nregions; i++) {
            trace_vhost_vdpa_dump_regions(dev, i,
                                          mem->regions[i].guest_phys_addr,
                                          mem->regions[i].memory_size,
                                          mem->regions[i].userspace_addr,
                                          mem->regions[i].flags_padding);
        }
    }
    if (mem->padding) {
        return -EINVAL;
    }

    return 0;
}

static int vhost_vdpa_set_features(struct vhost_dev *dev,
                                   uint64_t features)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;

    if (!vhost_vdpa_first_dev(dev)) {
        return 0;
    }

    if (v->shadow_vqs_enabled) {
        if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
            /*
             * QEMU is just trying to enable or disable logging. SVQ handles
             * this separately, so there is no need to forward it.
             */
            v->acked_features = features;
            return 0;
        }

        v->acked_features = features;

        /* We must not ack _F_LOG if SVQ is enabled */
        features &= ~BIT_ULL(VHOST_F_LOG_ALL);
    }

    trace_vhost_vdpa_set_features(dev, features);
    ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
    if (ret) {
        return ret;
    }

    return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
}
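
/*
 * Illustrative example (not part of the original file): the XOR test above
 * recognizes a feature set that differs from the previously acked one only
 * in VHOST_F_LOG_ALL. If acked_features is F and the new request is
 * F | BIT_ULL(VHOST_F_LOG_ALL) (or vice versa), the XOR equals exactly
 * BIT_ULL(VHOST_F_LOG_ALL); the change is then recorded locally and not
 * forwarded, because SVQ handles dirty logging separately.
 */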

static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
{
    uint64_t features;
    uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
        0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH |
        0x1ULL << VHOST_BACKEND_F_IOTLB_ASID;
    int r;

    if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
        return -EFAULT;
    }

    features &= f;

    if (vhost_vdpa_first_dev(dev)) {
        r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
        if (r) {
            return -EFAULT;
        }
    }

    dev->backend_cap = features;

    return 0;
}

static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
                                    uint32_t *device_id)
{
    int ret;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
    trace_vhost_vdpa_get_device_id(dev, *device_id);
    return ret;
}

static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
{
    if (!v->shadow_vqs_enabled) {
        return;
    }

    for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
        VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
        vhost_svq_stop(svq);
    }
}

static int vhost_vdpa_reset_device(struct vhost_dev *dev)
{
    struct vhost_vdpa *v = dev->opaque;
    int ret;
    uint8_t status = 0;

    vhost_vdpa_reset_svq(v);

    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
    trace_vhost_vdpa_reset_device(dev, status);
    return ret;
}

static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
{
    assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);

    trace_vhost_vdpa_get_vq_index(dev, idx, idx);
    return idx;
}

static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
{
    int i;
    trace_vhost_vdpa_set_vring_ready(dev);
    for (i = 0; i < dev->nvqs; ++i) {
        struct vhost_vring_state state = {
            .index = dev->vq_index + i,
            .num = 1,
        };
        vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
    }
    return 0;
}

static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
                                   uint32_t config_len)
{
    int b, len;
    char line[QEMU_HEXDUMP_LINE_LEN];

    for (b = 0; b < config_len; b += 16) {
        len = config_len - b;
        qemu_hexdump_line(line, b, config, len, false);
        trace_vhost_vdpa_dump_config(dev, line);
    }
}

static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
                                   uint32_t offset, uint32_t size,
                                   uint32_t flags)
{
    struct vhost_vdpa_config *config;
    int ret;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);

    trace_vhost_vdpa_set_config(dev, offset, size, flags);
    config = g_malloc(size + config_size);
    config->off = offset;
    config->len = size;
    memcpy(config->buf, data, size);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, data, size);
    }
    ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
    g_free(config);
    return ret;
}

static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
                                   uint32_t config_len, Error **errp)
{
    struct vhost_vdpa_config *v_config;
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    int ret;

    trace_vhost_vdpa_get_config(dev, config, config_len);
    v_config = g_malloc(config_len + config_size);
    v_config->len = config_len;
    v_config->off = 0;
    ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
    memcpy(config, v_config->buf, config_len);
    g_free(v_config);
    if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
        trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
        vhost_vdpa_dump_config(dev, config, config_len);
    }
    return ret;
}

static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
                                         struct vhost_vring_state *ring)
{
    trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
}

static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
}

static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
                                         struct vhost_vring_file *file)
{
    trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
    return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
}

static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
                                         struct vhost_vring_addr *addr)
{
    trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
                                addr->desc_user_addr, addr->used_user_addr,
                                addr->avail_user_addr,
                                addr->log_guest_addr);

    return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);

}

/**
 * Set the shadow virtqueue descriptors to the device
 *
 * @dev: The vhost device model
 * @svq: The shadow virtqueue
 * @idx: The index of the virtqueue in the vhost device
 * @errp: Error
 *
 * Note that this function does not rewind the kick file descriptor if it
 * cannot set the call one.
 */
static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
                                  VhostShadowVirtqueue *svq, unsigned idx,
                                  Error **errp)
{
    struct vhost_vring_file file = {
        .index = dev->vq_index + idx,
    };
    const EventNotifier *event_notifier = &svq->hdev_kick;
    int r;

    r = event_notifier_init(&svq->hdev_kick, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create kick event notifier");
        goto err_init_hdev_kick;
    }

    r = event_notifier_init(&svq->hdev_call, 0);
    if (r != 0) {
        error_setg_errno(errp, -r, "Couldn't create call event notifier");
        goto err_init_hdev_call;
    }

    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_kick(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device kick fd");
        goto err_init_set_dev_fd;
    }

    event_notifier = &svq->hdev_call;
    file.fd = event_notifier_get_fd(event_notifier);
    r = vhost_vdpa_set_vring_dev_call(dev, &file);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Can't set device call fd");
        goto err_init_set_dev_fd;
    }

    return 0;

err_init_set_dev_fd:
    event_notifier_set_handler(&svq->hdev_call, NULL);

err_init_hdev_call:
    event_notifier_cleanup(&svq->hdev_kick);

err_init_hdev_kick:
    return r;
}

/**
 * Unmap a SVQ area in the device
 */
static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
{
    const DMAMap needle = {
        .translated_addr = addr,
    };
    const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
    hwaddr size;
    int r;

    if (unlikely(!result)) {
        error_report("Unable to find SVQ address to unmap");
        return;
    }

    size = ROUND_UP(result->size, qemu_real_host_page_size());
    r = vhost_vdpa_dma_unmap(v, v->address_space_id, result->iova, size);
    if (unlikely(r < 0)) {
        error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
        return;
    }

    vhost_iova_tree_remove(v->iova_tree, *result);
}

static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
                                       const VhostShadowVirtqueue *svq)
{
    struct vhost_vdpa *v = dev->opaque;
    struct vhost_vring_addr svq_addr;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);

    vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
}

/**
 * Map the SVQ area in the device
 *
 * @v: Vhost-vdpa device
 * @needle: The area to search an IOVA for
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
                                    Error **errp)
{
    int r;

    r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
    if (unlikely(r != IOVA_OK)) {
        error_setg(errp, "Cannot allocate iova (%d)", r);
        return false;
    }

    r = vhost_vdpa_dma_map(v, v->address_space_id, needle->iova,
                           needle->size + 1,
                           (void *)(uintptr_t)needle->translated_addr,
                           needle->perm == IOMMU_RO);
    if (unlikely(r != 0)) {
        error_setg_errno(errp, -r, "Cannot map region to device");
        vhost_iova_tree_remove(v->iova_tree, *needle);
    }

    return r == 0;
}

/**
 * Map the shadow virtqueue rings in the device
 *
 * @dev: The vhost device
 * @svq: The shadow virtqueue
 * @addr: Assigned IOVA addresses
 * @errp: Error pointer
 */
static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
                                     const VhostShadowVirtqueue *svq,
                                     struct vhost_vring_addr *addr,
                                     Error **errp)
{
    ERRP_GUARD();
    DMAMap device_region, driver_region;
    struct vhost_vring_addr svq_addr;
    struct vhost_vdpa *v = dev->opaque;
    size_t device_size = vhost_svq_device_area_size(svq);
    size_t driver_size = vhost_svq_driver_area_size(svq);
    size_t avail_offset;
    bool ok;

    vhost_svq_get_vring_addr(svq, &svq_addr);

    driver_region = (DMAMap) {
        .translated_addr = svq_addr.desc_user_addr,
        .size = driver_size - 1,
        .perm = IOMMU_RO,
    };
    ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq driver region: ");
        return false;
    }
    addr->desc_user_addr = driver_region.iova;
    avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
    addr->avail_user_addr = driver_region.iova + avail_offset;

    device_region = (DMAMap) {
        .translated_addr = svq_addr.used_user_addr,
        .size = device_size - 1,
        .perm = IOMMU_RW,
    };
    ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
    if (unlikely(!ok)) {
        error_prepend(errp, "Cannot create vq device region: ");
        vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
    }
    addr->used_user_addr = device_region.iova;

    return ok;
}
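
/*
 * Illustrative layout (not part of the original file): the driver region
 * holds the descriptor table followed by the avail ring, so a single IOVA
 * allocation covers both and the avail address is derived by offset:
 *
 *     addr->desc_user_addr  = driver_region.iova;
 *     addr->avail_user_addr = driver_region.iova +
 *                             (svq_addr.avail_user_addr -
 *                              svq_addr.desc_user_addr);
 *
 * The used ring lives in a separate device region mapped IOMMU_RW, while
 * the driver region is mapped read-only for the device (IOMMU_RO).
 */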
991100890f7SEugenio Pérez static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
992100890f7SEugenio Pérez                                  VhostShadowVirtqueue *svq, unsigned idx,
993100890f7SEugenio Pérez                                  Error **errp)
994100890f7SEugenio Pérez {
995100890f7SEugenio Pérez     uint16_t vq_index = dev->vq_index + idx;
996100890f7SEugenio Pérez     struct vhost_vring_state s = {
997100890f7SEugenio Pérez         .index = vq_index,
998100890f7SEugenio Pérez     };
999100890f7SEugenio Pérez     int r;
1000100890f7SEugenio Pérez 
1001100890f7SEugenio Pérez     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1002100890f7SEugenio Pérez     if (unlikely(r)) {
1003100890f7SEugenio Pérez         error_setg_errno(errp, -r, "Cannot set vring base");
1004100890f7SEugenio Pérez         return false;
1005100890f7SEugenio Pérez     }
1006100890f7SEugenio Pérez 
1007100890f7SEugenio Pérez     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1008dff4426fSEugenio Pérez     return r == 0;
1009dff4426fSEugenio Pérez }
1010dff4426fSEugenio Pérez 
1011dff4426fSEugenio Pérez static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1012dff4426fSEugenio Pérez {
1013dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1014dff4426fSEugenio Pérez     Error *err = NULL;
1015dff4426fSEugenio Pérez     unsigned i;
1016dff4426fSEugenio Pérez 
1017712c1a31SEugenio Pérez     if (!v->shadow_vqs_enabled) {
1018dff4426fSEugenio Pérez         return true;
1019dff4426fSEugenio Pérez     }
1020dff4426fSEugenio Pérez 
1021dff4426fSEugenio Pérez     for (i = 0; i < v->shadow_vqs->len; ++i) {
1022100890f7SEugenio Pérez         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1023dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1024100890f7SEugenio Pérez         struct vhost_vring_addr addr = {
10251c82fdfeSEugenio Pérez             .index = dev->vq_index + i,
1026100890f7SEugenio Pérez         };
1027100890f7SEugenio Pérez         int r;
1028dff4426fSEugenio Pérez         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1029dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1030100890f7SEugenio Pérez             goto err;
1031100890f7SEugenio Pérez         }
1032100890f7SEugenio Pérez 
10335fde952bSEugenio Pérez         vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
1034100890f7SEugenio Pérez         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1035100890f7SEugenio Pérez         if (unlikely(!ok)) {
1036100890f7SEugenio Pérez             goto err_map;
1037100890f7SEugenio Pérez         }
1038100890f7SEugenio Pérez 
1039100890f7SEugenio Pérez         /* Override vring GPA set by vhost subsystem */
1040100890f7SEugenio Pérez         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1041100890f7SEugenio Pérez         if (unlikely(r != 0)) {
1042100890f7SEugenio Pérez             error_setg_errno(&err, -r, "Cannot set device address");
1043100890f7SEugenio Pérez             goto err_set_addr;
1044100890f7SEugenio Pérez         }
1045100890f7SEugenio Pérez     }
1046100890f7SEugenio Pérez 
1047100890f7SEugenio Pérez     return true;
1048100890f7SEugenio Pérez 
1049100890f7SEugenio Pérez err_set_addr:
1050100890f7SEugenio Pérez     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1051100890f7SEugenio Pérez 
1052100890f7SEugenio Pérez err_map:
1053100890f7SEugenio Pérez     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1054100890f7SEugenio Pérez 
1055100890f7SEugenio Pérez err:
1056dff4426fSEugenio Pérez     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1057100890f7SEugenio Pérez     for (unsigned j = 0; j < i; ++j) {
1058100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1059100890f7SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
1060100890f7SEugenio Pérez         vhost_svq_stop(svq);
1061100890f7SEugenio Pérez     }
1062100890f7SEugenio Pérez 
1063100890f7SEugenio Pérez     return false;
1064100890f7SEugenio Pérez }
1065100890f7SEugenio Pérez 
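/*
 * Tear down all shadow virtqueues of this vhost device: unmap the SVQ vrings
 * from the device IOVA space and release the host kick/call event notifiers.
 */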
10665b590f51SEugenio Pérez static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1067100890f7SEugenio Pérez {
1068100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1069100890f7SEugenio Pérez 
1070712c1a31SEugenio Pérez     if (!v->shadow_vqs_enabled) {
10715b590f51SEugenio Pérez         return;
1072100890f7SEugenio Pérez     }
1073100890f7SEugenio Pérez 
1074100890f7SEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1075100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
10765b590f51SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
10773cfb4d06SEugenio Pérez 
10783cfb4d06SEugenio Pérez         event_notifier_cleanup(&svq->hdev_kick);
10793cfb4d06SEugenio Pérez         event_notifier_cleanup(&svq->hdev_call);
1080dff4426fSEugenio Pérez     }
1081dff4426fSEugenio Pérez }
1082dff4426fSEugenio Pérez 
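/*
 * Host notifiers and shadow virtqueues are started or stopped for every
 * vhost_dev, but the memory listener registration and DRIVER_OK (and, on
 * stop, the device reset and listener unregistration) only happen for the
 * vhost_dev that covers the last virtqueues of the vDPA device.
 */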
1083108a6481SCindy Lu static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1084108a6481SCindy Lu {
1085108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
1086dff4426fSEugenio Pérez     bool ok;
1087778e67deSLaurent Vivier     trace_vhost_vdpa_dev_start(dev, started);
10884d191cfdSJason Wang 
10894d191cfdSJason Wang     if (started) {
10904d191cfdSJason Wang         vhost_vdpa_host_notifiers_init(dev);
1091dff4426fSEugenio Pérez         ok = vhost_vdpa_svqs_start(dev);
1092dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1093dff4426fSEugenio Pérez             return -1;
1094dff4426fSEugenio Pérez         }
10954d191cfdSJason Wang         vhost_vdpa_set_vring_ready(dev);
10964d191cfdSJason Wang     } else {
10975b590f51SEugenio Pérez         vhost_vdpa_svqs_stop(dev);
10984d191cfdSJason Wang         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
10994d191cfdSJason Wang     }
11004d191cfdSJason Wang 
1101245cf2c2SEugenio Pérez     if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
11024d191cfdSJason Wang         return 0;
11034d191cfdSJason Wang     }
11044d191cfdSJason Wang 
1105108a6481SCindy Lu     if (started) {
1106108a6481SCindy Lu         memory_listener_register(&v->listener, &address_space_memory);
11073631151bSRoman Kagan         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1108108a6481SCindy Lu     } else {
1109108a6481SCindy Lu         vhost_vdpa_reset_device(dev);
1110108a6481SCindy Lu         vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1111108a6481SCindy Lu                                    VIRTIO_CONFIG_S_DRIVER);
1112108a6481SCindy Lu         memory_listener_unregister(&v->listener);
1113108a6481SCindy Lu 
1114108a6481SCindy Lu         return 0;
1115108a6481SCindy Lu     }
1116108a6481SCindy Lu }
1117108a6481SCindy Lu 
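/*
 * The log base is only forwarded to the device when SVQ is disabled (with
 * SVQ, dirty pages are tracked by QEMU itself) and only by the first
 * vhost_dev of the vDPA device, as the call applies to the whole device.
 */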
1118108a6481SCindy Lu static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1119108a6481SCindy Lu                                      struct vhost_log *log)
1120108a6481SCindy Lu {
1121773ebc95SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1122d71b0609SSi-Wei Liu     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
11234d191cfdSJason Wang         return 0;
11244d191cfdSJason Wang     }
11254d191cfdSJason Wang 
1126778e67deSLaurent Vivier     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1127778e67deSLaurent Vivier                                   log->log);
1128108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1129108a6481SCindy Lu }
1130108a6481SCindy Lu 
1131108a6481SCindy Lu static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1132108a6481SCindy Lu                                        struct vhost_vring_addr *addr)
1133108a6481SCindy Lu {
1134d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1135d96be4c8SEugenio Pérez 
1136d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1137d96be4c8SEugenio Pérez         /*
1138d96be4c8SEugenio Pérez          * Device vring addr was set at device start. SVQ addresses are
1139d96be4c8SEugenio Pérez          * handled by VirtQueue code.
1140d96be4c8SEugenio Pérez          */
1141d96be4c8SEugenio Pérez         return 0;
1142d96be4c8SEugenio Pérez     }
1143d96be4c8SEugenio Pérez 
1144d96be4c8SEugenio Pérez     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1145108a6481SCindy Lu }
1146108a6481SCindy Lu 
1147108a6481SCindy Lu static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1148108a6481SCindy Lu                                       struct vhost_vring_state *ring)
1149108a6481SCindy Lu {
1150778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1151108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1152108a6481SCindy Lu }
1153108a6481SCindy Lu 
1154108a6481SCindy Lu static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1155108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1156108a6481SCindy Lu {
1157d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
11582fdac348SEugenio Pérez     VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index);
1159d96be4c8SEugenio Pérez 
11602fdac348SEugenio Pérez     /*
11612fdac348SEugenio Pérez      * vhost-vdpa devices do not support in-flight requests. Set all of them
11622fdac348SEugenio Pérez      * as available.
11632fdac348SEugenio Pérez      *
11642fdac348SEugenio Pérez      * TODO: This is ok for networking, but other kinds of devices might
11652fdac348SEugenio Pérez      * have problems with these retransmissions.
11662fdac348SEugenio Pérez      */
11672fdac348SEugenio Pérez     while (virtqueue_rewind(vq, 1)) {
11682fdac348SEugenio Pérez         continue;
11692fdac348SEugenio Pérez     }
1170d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1171d96be4c8SEugenio Pérez         /*
1172d96be4c8SEugenio Pérez          * Device vring base was set at device start. SVQ base is handled by
1173d96be4c8SEugenio Pérez          * VirtQueue code.
1174d96be4c8SEugenio Pérez          */
1175d96be4c8SEugenio Pérez         return 0;
1176d96be4c8SEugenio Pérez     }
1177d96be4c8SEugenio Pérez 
1178d96be4c8SEugenio Pérez     return vhost_vdpa_set_dev_vring_base(dev, ring);
1179108a6481SCindy Lu }
1180108a6481SCindy Lu 
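/*
 * With SVQ enabled, the vring base the device reports refers to the shadow
 * vring, so return the guest's last avail index tracked by the VirtQueue
 * instead of querying the device.
 */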
1181108a6481SCindy Lu static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1182108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1183108a6481SCindy Lu {
11846d0b2226SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1185778e67deSLaurent Vivier     int ret;
1186778e67deSLaurent Vivier 
11876d0b2226SEugenio Pérez     if (v->shadow_vqs_enabled) {
11882fdac348SEugenio Pérez         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
11896d0b2226SEugenio Pérez         return 0;
11906d0b2226SEugenio Pérez     }
11916d0b2226SEugenio Pérez 
1192778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1193778e67deSLaurent Vivier     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
1194778e67deSLaurent Vivier     return ret;
1195108a6481SCindy Lu }
1196108a6481SCindy Lu 
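/*
 * With SVQ enabled, the guest kick eventfd is handed to the shadow
 * virtqueue so SVQ is notified of guest kicks; otherwise it is passed
 * straight to the vDPA device.
 */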
1197108a6481SCindy Lu static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1198108a6481SCindy Lu                                        struct vhost_vring_file *file)
1199108a6481SCindy Lu {
1200dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1201dff4426fSEugenio Pérez     int vdpa_idx = file->index - dev->vq_index;
1202dff4426fSEugenio Pérez 
1203dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
1204dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1205dff4426fSEugenio Pérez         vhost_svq_set_svq_kick_fd(svq, file->fd);
1206dff4426fSEugenio Pérez         return 0;
1207dff4426fSEugenio Pérez     } else {
1208dff4426fSEugenio Pérez         return vhost_vdpa_set_vring_dev_kick(dev, file);
1209dff4426fSEugenio Pérez     }
1210108a6481SCindy Lu }
1211108a6481SCindy Lu 
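/*
 * With SVQ enabled, the guest call eventfd is kept by the shadow virtqueue,
 * which uses it to notify the guest; otherwise it is installed directly in
 * the vDPA device.
 */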
1212108a6481SCindy Lu static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1213108a6481SCindy Lu                                        struct vhost_vring_file *file)
1214108a6481SCindy Lu {
1215a8ac8858SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1216a8ac8858SEugenio Pérez 
1217a8ac8858SEugenio Pérez     if (v->shadow_vqs_enabled) {
1218a8ac8858SEugenio Pérez         int vdpa_idx = file->index - dev->vq_index;
1219a8ac8858SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1220a8ac8858SEugenio Pérez 
1221a8ac8858SEugenio Pérez         vhost_svq_set_svq_call_fd(svq, file->fd);
1222a8ac8858SEugenio Pérez         return 0;
1223a8ac8858SEugenio Pérez     } else {
1224a8ac8858SEugenio Pérez         return vhost_vdpa_set_vring_dev_call(dev, file);
1225a8ac8858SEugenio Pérez     }
1226108a6481SCindy Lu }
1227108a6481SCindy Lu 
1228108a6481SCindy Lu static int vhost_vdpa_get_features(struct vhost_dev *dev,
1229108a6481SCindy Lu                                      uint64_t *features)
1230108a6481SCindy Lu {
123112a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
123212a195faSEugenio Pérez     int ret = vhost_vdpa_get_dev_features(dev, features);
1233778e67deSLaurent Vivier 
123412a195faSEugenio Pérez     if (ret == 0 && v->shadow_vqs_enabled) {
123512a195faSEugenio Pérez         /* Add SVQ logging capabilities */
123612a195faSEugenio Pérez         *features |= BIT_ULL(VHOST_F_LOG_ALL);
123712a195faSEugenio Pérez     }
123812a195faSEugenio Pérez 
1239778e67deSLaurent Vivier     return ret;
1240108a6481SCindy Lu }
1241108a6481SCindy Lu 
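/*
 * VHOST_SET_OWNER applies to the whole vDPA device, so only the first
 * vhost_dev issues it.
 */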
1242108a6481SCindy Lu static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1243108a6481SCindy Lu {
1244d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
12454d191cfdSJason Wang         return 0;
12464d191cfdSJason Wang     }
12474d191cfdSJason Wang 
1248778e67deSLaurent Vivier     trace_vhost_vdpa_set_owner(dev);
1249108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1250108a6481SCindy Lu }
1251108a6481SCindy Lu 
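/*
 * vDPA devices use guest physical (IOVA) addresses rather than QEMU virtual
 * addresses, so report the guest physical vring addresses recorded in the
 * virtqueue.
 */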
1252108a6481SCindy Lu static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1253108a6481SCindy Lu                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1254108a6481SCindy Lu {
1255108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1256108a6481SCindy Lu     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1257108a6481SCindy Lu     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1258108a6481SCindy Lu     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1259778e67deSLaurent Vivier     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1260778e67deSLaurent Vivier                                  addr->avail_user_addr, addr->used_user_addr);
1261108a6481SCindy Lu     return 0;
1262108a6481SCindy Lu }
1263108a6481SCindy Lu 
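/*
 * vhost-vDPA maps guest memory through the IOTLB API (VHOST_IOTLB_UPDATE),
 * so always report that an IOMMU translation is in use.
 */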
1264108a6481SCindy Lu static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1265108a6481SCindy Lu {
1266108a6481SCindy Lu     return true;
1267108a6481SCindy Lu }
1268108a6481SCindy Lu 
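/* vhost backend callbacks for the vhost-vDPA kernel interface */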
1269108a6481SCindy Lu const VhostOps vdpa_ops = {
1270108a6481SCindy Lu         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1271108a6481SCindy Lu         .vhost_backend_init = vhost_vdpa_init,
1272108a6481SCindy Lu         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1273108a6481SCindy Lu         .vhost_set_log_base = vhost_vdpa_set_log_base,
1274108a6481SCindy Lu         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1275108a6481SCindy Lu         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1276108a6481SCindy Lu         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1277108a6481SCindy Lu         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1278108a6481SCindy Lu         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1279108a6481SCindy Lu         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1280108a6481SCindy Lu         .vhost_get_features = vhost_vdpa_get_features,
1281a5bd0580SJason Wang         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1282108a6481SCindy Lu         .vhost_set_owner = vhost_vdpa_set_owner,
1283108a6481SCindy Lu         .vhost_set_vring_endian = NULL,
1284108a6481SCindy Lu         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1285108a6481SCindy Lu         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1286108a6481SCindy Lu         .vhost_set_features = vhost_vdpa_set_features,
1287108a6481SCindy Lu         .vhost_reset_device = vhost_vdpa_reset_device,
1288108a6481SCindy Lu         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1289108a6481SCindy Lu         .vhost_get_config = vhost_vdpa_get_config,
1290108a6481SCindy Lu         .vhost_set_config = vhost_vdpa_set_config,
1291108a6481SCindy Lu         .vhost_requires_shm_log = NULL,
1292108a6481SCindy Lu         .vhost_migration_done = NULL,
1293108a6481SCindy Lu         .vhost_backend_can_merge = NULL,
1294108a6481SCindy Lu         .vhost_net_set_mtu = NULL,
1295108a6481SCindy Lu         .vhost_set_iotlb_callback = NULL,
1296108a6481SCindy Lu         .vhost_send_device_iotlb_msg = NULL,
1297108a6481SCindy Lu         .vhost_dev_start = vhost_vdpa_dev_start,
1298108a6481SCindy Lu         .vhost_get_device_id = vhost_vdpa_get_device_id,
1299108a6481SCindy Lu         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1300108a6481SCindy Lu         .vhost_force_iommu = vhost_vdpa_force_iommu,
1301108a6481SCindy Lu };
1302