xref: /qemu/hw/virtio/vhost-vdpa.c (revision 5fde952bbdd521c10fc018ee04f922a7dca5f663)
1108a6481SCindy Lu /*
2108a6481SCindy Lu  * vhost-vdpa
3108a6481SCindy Lu  *
4108a6481SCindy Lu  *  Copyright(c) 2017-2018 Intel Corporation.
5108a6481SCindy Lu  *  Copyright(c) 2020 Red Hat, Inc.
6108a6481SCindy Lu  *
7108a6481SCindy Lu  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8108a6481SCindy Lu  * See the COPYING file in the top-level directory.
9108a6481SCindy Lu  *
10108a6481SCindy Lu  */
11108a6481SCindy Lu 
12108a6481SCindy Lu #include "qemu/osdep.h"
13108a6481SCindy Lu #include <linux/vhost.h>
14108a6481SCindy Lu #include <linux/vfio.h>
15108a6481SCindy Lu #include <sys/eventfd.h>
16108a6481SCindy Lu #include <sys/ioctl.h>
17108a6481SCindy Lu #include "hw/virtio/vhost.h"
18108a6481SCindy Lu #include "hw/virtio/vhost-backend.h"
19108a6481SCindy Lu #include "hw/virtio/virtio-net.h"
20dff4426fSEugenio Pérez #include "hw/virtio/vhost-shadow-virtqueue.h"
21108a6481SCindy Lu #include "hw/virtio/vhost-vdpa.h"
22df77d45aSXie Yongji #include "exec/address-spaces.h"
23c156d5bfSEugenio Pérez #include "migration/blocker.h"
24415b7327SMarc-André Lureau #include "qemu/cutils.h"
25108a6481SCindy Lu #include "qemu/main-loop.h"
264dc5acc0SCindy Lu #include "cpu.h"
27778e67deSLaurent Vivier #include "trace.h"
28dff4426fSEugenio Pérez #include "qapi/error.h"
29108a6481SCindy Lu 
30032e4d68SEugenio Pérez /*
31032e4d68SEugenio Pérez  * Return one past the end of the section. Be careful with uint64_t
32032e4d68SEugenio Pérez  * conversions!
33032e4d68SEugenio Pérez  */
34032e4d68SEugenio Pérez static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
35032e4d68SEugenio Pérez {
36032e4d68SEugenio Pérez     Int128 llend = int128_make64(section->offset_within_address_space);
37032e4d68SEugenio Pérez     llend = int128_add(llend, section->size);
38032e4d68SEugenio Pérez     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
39032e4d68SEugenio Pérez 
40032e4d68SEugenio Pérez     return llend;
41032e4d68SEugenio Pérez }
42032e4d68SEugenio Pérez 
43013108b6SEugenio Pérez static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
44013108b6SEugenio Pérez                                                 uint64_t iova_min,
45013108b6SEugenio Pérez                                                 uint64_t iova_max)
46108a6481SCindy Lu {
47013108b6SEugenio Pérez     Int128 llend;
48013108b6SEugenio Pérez 
49013108b6SEugenio Pérez     if ((!memory_region_is_ram(section->mr) &&
50108a6481SCindy Lu          !memory_region_is_iommu(section->mr)) ||
51c64038c9SEugenio Pérez         memory_region_is_protected(section->mr) ||
52d60c75d2SJason Wang         /* vhost-vDPA doesn't allow MMIO to be mapped */
53013108b6SEugenio Pérez         memory_region_is_ram_device(section->mr)) {
54013108b6SEugenio Pérez         return true;
55013108b6SEugenio Pérez     }
56013108b6SEugenio Pérez 
57013108b6SEugenio Pérez     if (section->offset_within_address_space < iova_min) {
58013108b6SEugenio Pérez         error_report("RAM section out of device range (min=0x%" PRIx64
59013108b6SEugenio Pérez                      ", addr=0x%" HWADDR_PRIx ")",
60013108b6SEugenio Pérez                      iova_min, section->offset_within_address_space);
61013108b6SEugenio Pérez         return true;
62013108b6SEugenio Pérez     }
63013108b6SEugenio Pérez 
64013108b6SEugenio Pérez     llend = vhost_vdpa_section_end(section);
65013108b6SEugenio Pérez     if (int128_gt(llend, int128_make64(iova_max))) {
66013108b6SEugenio Pérez         error_report("RAM section out of device range (max=0x%" PRIx64
67013108b6SEugenio Pérez                      ", end addr=0x%" PRIx64 ")",
68013108b6SEugenio Pérez                      iova_max, int128_get64(llend));
69013108b6SEugenio Pérez         return true;
70013108b6SEugenio Pérez     }
71013108b6SEugenio Pérez 
72013108b6SEugenio Pérez     return false;
73108a6481SCindy Lu }
74108a6481SCindy Lu 
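/*
 * Map [iova, iova + size) in the device IOTLB to the host buffer at @vaddr
 * by writing a VHOST_IOTLB_UPDATE message (vhost_msg_v2) to the vhost-vdpa
 * device fd. @readonly selects VHOST_ACCESS_RO vs. VHOST_ACCESS_RW.
 */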
75463ba1e3SEugenio Pérez int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
76108a6481SCindy Lu                        void *vaddr, bool readonly)
77108a6481SCindy Lu {
78386494f2SCindy Lu     struct vhost_msg_v2 msg = {};
79108a6481SCindy Lu     int fd = v->device_fd;
80108a6481SCindy Lu     int ret = 0;
81108a6481SCindy Lu 
82108a6481SCindy Lu     msg.type = v->msg_type;
83108a6481SCindy Lu     msg.iotlb.iova = iova;
84108a6481SCindy Lu     msg.iotlb.size = size;
85108a6481SCindy Lu     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
86108a6481SCindy Lu     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
87108a6481SCindy Lu     msg.iotlb.type = VHOST_IOTLB_UPDATE;
88108a6481SCindy Lu 
89778e67deSLaurent Vivier     trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
90778e67deSLaurent Vivier                              msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
91778e67deSLaurent Vivier 
92108a6481SCindy Lu     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
93108a6481SCindy Lu         error_report("failed to write, fd=%d, errno=%d (%s)",
94108a6481SCindy Lu             fd, errno, strerror(errno));
95108a6481SCindy Lu         return -EIO;
96108a6481SCindy Lu     }
97108a6481SCindy Lu 
98108a6481SCindy Lu     return ret;
99108a6481SCindy Lu }
100108a6481SCindy Lu 
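/*
 * Remove the IOTLB mapping for [iova, iova + size) by writing a
 * VHOST_IOTLB_INVALIDATE message to the vhost-vdpa device fd.
 */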
101463ba1e3SEugenio Pérez int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
102108a6481SCindy Lu {
103386494f2SCindy Lu     struct vhost_msg_v2 msg = {};
104108a6481SCindy Lu     int fd = v->device_fd;
105108a6481SCindy Lu     int ret = 0;
106108a6481SCindy Lu 
107108a6481SCindy Lu     msg.type = v->msg_type;
108108a6481SCindy Lu     msg.iotlb.iova = iova;
109108a6481SCindy Lu     msg.iotlb.size = size;
110108a6481SCindy Lu     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
111108a6481SCindy Lu 
112778e67deSLaurent Vivier     trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
113778e67deSLaurent Vivier                                msg.iotlb.size, msg.iotlb.type);
114778e67deSLaurent Vivier 
115108a6481SCindy Lu     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
116108a6481SCindy Lu         error_report("failed to write, fd=%d, errno=%d (%s)",
117108a6481SCindy Lu             fd, errno, strerror(errno));
118108a6481SCindy Lu         return -EIO;
119108a6481SCindy Lu     }
120108a6481SCindy Lu 
121108a6481SCindy Lu     return ret;
122108a6481SCindy Lu }
123108a6481SCindy Lu 
124e6db5df7SEugenio Pérez static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
125a5bd0580SJason Wang {
126a5bd0580SJason Wang     int fd = v->device_fd;
127e6db5df7SEugenio Pérez     struct vhost_msg_v2 msg = {
128e6db5df7SEugenio Pérez         .type = v->msg_type,
129e6db5df7SEugenio Pérez         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
130e6db5df7SEugenio Pérez     };
131a5bd0580SJason Wang 
1325580b9f0SEugenio Pérez     trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
133a5bd0580SJason Wang     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
134a5bd0580SJason Wang         error_report("failed to write, fd=%d, errno=%d (%s)",
135a5bd0580SJason Wang                      fd, errno, strerror(errno));
136a5bd0580SJason Wang     }
137a5bd0580SJason Wang }
138a5bd0580SJason Wang 
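/*
 * Send VHOST_IOTLB_BATCH_BEGIN at most once per batch, and only when the
 * backend advertises VHOST_BACKEND_F_IOTLB_BATCH. The flag is cleared again
 * in vhost_vdpa_listener_commit().
 */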
139e6db5df7SEugenio Pérez static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
140e6db5df7SEugenio Pérez {
141e6db5df7SEugenio Pérez     if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
142e6db5df7SEugenio Pérez         !v->iotlb_batch_begin_sent) {
143e6db5df7SEugenio Pérez         vhost_vdpa_listener_begin_batch(v);
144e6db5df7SEugenio Pérez     }
145e6db5df7SEugenio Pérez 
146e6db5df7SEugenio Pérez     v->iotlb_batch_begin_sent = true;
147e6db5df7SEugenio Pérez }
148e6db5df7SEugenio Pérez 
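/*
 * Memory listener commit hook: if an IOTLB batch was started, close it by
 * sending a VHOST_IOTLB_BATCH_END message to the device fd.
 */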
149a5bd0580SJason Wang static void vhost_vdpa_listener_commit(MemoryListener *listener)
150a5bd0580SJason Wang {
151a5bd0580SJason Wang     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
152a5bd0580SJason Wang     struct vhost_dev *dev = v->dev;
1538acb3218SPhilippe Mathieu-Daudé     struct vhost_msg_v2 msg = {};
154a5bd0580SJason Wang     int fd = v->device_fd;
155a5bd0580SJason Wang 
156a5bd0580SJason Wang     if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
157a5bd0580SJason Wang         return;
158a5bd0580SJason Wang     }
159a5bd0580SJason Wang 
160e6db5df7SEugenio Pérez     if (!v->iotlb_batch_begin_sent) {
161e6db5df7SEugenio Pérez         return;
162e6db5df7SEugenio Pérez     }
163e6db5df7SEugenio Pérez 
164a5bd0580SJason Wang     msg.type = v->msg_type;
165a5bd0580SJason Wang     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
166a5bd0580SJason Wang 
1675580b9f0SEugenio Pérez     trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
168a5bd0580SJason Wang     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
169a5bd0580SJason Wang         error_report("failed to write, fd=%d, errno=%d (%s)",
170a5bd0580SJason Wang                      fd, errno, strerror(errno));
171a5bd0580SJason Wang     }
172e6db5df7SEugenio Pérez 
173e6db5df7SEugenio Pérez     v->iotlb_batch_begin_sent = false;
174a5bd0580SJason Wang }
175a5bd0580SJason Wang 
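/*
 * Memory listener region_add hook: map a guest RAM section into the device
 * IOTLB. With shadow virtqueues enabled, an IOVA is allocated from the IOVA
 * tree first instead of reusing the guest physical address.
 */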
176108a6481SCindy Lu static void vhost_vdpa_listener_region_add(MemoryListener *listener,
177108a6481SCindy Lu                                            MemoryRegionSection *section)
178108a6481SCindy Lu {
1797dab70beSEugenio Pérez     DMAMap mem_region = {};
180108a6481SCindy Lu     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
181108a6481SCindy Lu     hwaddr iova;
182108a6481SCindy Lu     Int128 llend, llsize;
183108a6481SCindy Lu     void *vaddr;
184108a6481SCindy Lu     int ret;
185108a6481SCindy Lu 
186013108b6SEugenio Pérez     if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
187013108b6SEugenio Pérez                                             v->iova_range.last)) {
188108a6481SCindy Lu         return;
189108a6481SCindy Lu     }
190108a6481SCindy Lu 
191108a6481SCindy Lu     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
192108a6481SCindy Lu                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
193108a6481SCindy Lu         error_report("%s received unaligned region", __func__);
194108a6481SCindy Lu         return;
195108a6481SCindy Lu     }
196108a6481SCindy Lu 
197108a6481SCindy Lu     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
198032e4d68SEugenio Pérez     llend = vhost_vdpa_section_end(section);
199108a6481SCindy Lu     if (int128_ge(int128_make64(iova), llend)) {
200108a6481SCindy Lu         return;
201108a6481SCindy Lu     }
202108a6481SCindy Lu 
203108a6481SCindy Lu     memory_region_ref(section->mr);
204108a6481SCindy Lu 
205108a6481SCindy Lu     /* Here we assume that memory_region_is_ram(section->mr)==true */
206108a6481SCindy Lu 
207108a6481SCindy Lu     vaddr = memory_region_get_ram_ptr(section->mr) +
208108a6481SCindy Lu             section->offset_within_region +
209108a6481SCindy Lu             (iova - section->offset_within_address_space);
210108a6481SCindy Lu 
211778e67deSLaurent Vivier     trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
212778e67deSLaurent Vivier                                          vaddr, section->readonly);
213778e67deSLaurent Vivier 
214108a6481SCindy Lu     llsize = int128_sub(llend, int128_make64(iova));
21534e3c94eSEugenio Pérez     if (v->shadow_vqs_enabled) {
2167dab70beSEugenio Pérez         int r;
21734e3c94eSEugenio Pérez 
2187dab70beSEugenio Pérez         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
2197dab70beSEugenio Pérez         mem_region.size = int128_get64(llsize) - 1;
2207dab70beSEugenio Pérez         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
2217dab70beSEugenio Pérez 
2227dab70beSEugenio Pérez         r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
22334e3c94eSEugenio Pérez         if (unlikely(r != IOVA_OK)) {
22434e3c94eSEugenio Pérez             error_report("Can't allocate a mapping (%d)", r);
22534e3c94eSEugenio Pérez             goto fail;
22634e3c94eSEugenio Pérez         }
22734e3c94eSEugenio Pérez 
22834e3c94eSEugenio Pérez         iova = mem_region.iova;
22934e3c94eSEugenio Pérez     }
230108a6481SCindy Lu 
231e6db5df7SEugenio Pérez     vhost_vdpa_iotlb_batch_begin_once(v);
232108a6481SCindy Lu     ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
233108a6481SCindy Lu                              vaddr, section->readonly);
234108a6481SCindy Lu     if (ret) {
235108a6481SCindy Lu         error_report("vhost vdpa map fail!");
2367dab70beSEugenio Pérez         goto fail_map;
237108a6481SCindy Lu     }
238108a6481SCindy Lu 
239108a6481SCindy Lu     return;
240108a6481SCindy Lu 
2417dab70beSEugenio Pérez fail_map:
2427dab70beSEugenio Pérez     if (v->shadow_vqs_enabled) {
24369292a8eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, mem_region);
2447dab70beSEugenio Pérez     }
2457dab70beSEugenio Pérez 
246108a6481SCindy Lu fail:
247108a6481SCindy Lu     /*
248108a6481SCindy Lu      * On the initfn path, store the first error in the container so we
249108a6481SCindy Lu      * can gracefully fail.  At runtime, there's not much we can do other
250108a6481SCindy Lu      * than throw a hardware error.
251108a6481SCindy Lu      */
252108a6481SCindy Lu     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
253108a6481SCindy Lu     return;
254108a6481SCindy Lu 
255108a6481SCindy Lu }
256108a6481SCindy Lu 
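/*
 * Memory listener region_del hook: find the mapping that region_add
 * installed for this section and remove it from the device IOTLB.
 */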
257108a6481SCindy Lu static void vhost_vdpa_listener_region_del(MemoryListener *listener,
258108a6481SCindy Lu                                            MemoryRegionSection *section)
259108a6481SCindy Lu {
260108a6481SCindy Lu     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
261108a6481SCindy Lu     hwaddr iova;
262108a6481SCindy Lu     Int128 llend, llsize;
263108a6481SCindy Lu     int ret;
264108a6481SCindy Lu 
265013108b6SEugenio Pérez     if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
266013108b6SEugenio Pérez                                             v->iova_range.last)) {
267108a6481SCindy Lu         return;
268108a6481SCindy Lu     }
269108a6481SCindy Lu 
270108a6481SCindy Lu     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
271108a6481SCindy Lu                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
272108a6481SCindy Lu         error_report("%s received unaligned region", __func__);
273108a6481SCindy Lu         return;
274108a6481SCindy Lu     }
275108a6481SCindy Lu 
276108a6481SCindy Lu     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
277032e4d68SEugenio Pérez     llend = vhost_vdpa_section_end(section);
278108a6481SCindy Lu 
279778e67deSLaurent Vivier     trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
280778e67deSLaurent Vivier 
281108a6481SCindy Lu     if (int128_ge(int128_make64(iova), llend)) {
282108a6481SCindy Lu         return;
283108a6481SCindy Lu     }
284108a6481SCindy Lu 
285108a6481SCindy Lu     llsize = int128_sub(llend, int128_make64(iova));
286108a6481SCindy Lu 
28734e3c94eSEugenio Pérez     if (v->shadow_vqs_enabled) {
28834e3c94eSEugenio Pérez         const DMAMap *result;
28934e3c94eSEugenio Pérez         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
29034e3c94eSEugenio Pérez             section->offset_within_region +
29134e3c94eSEugenio Pérez             (iova - section->offset_within_address_space);
29234e3c94eSEugenio Pérez         DMAMap mem_region = {
29334e3c94eSEugenio Pérez             .translated_addr = (hwaddr)(uintptr_t)vaddr,
29434e3c94eSEugenio Pérez             .size = int128_get64(llsize) - 1,
29534e3c94eSEugenio Pérez         };
29634e3c94eSEugenio Pérez 
29734e3c94eSEugenio Pérez         result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
29810dab9f2SEugenio Pérez         if (!result) {
29910dab9f2SEugenio Pérez             /* The region was never mapped by the memory listener */
30010dab9f2SEugenio Pérez             return;
30110dab9f2SEugenio Pérez         }
30234e3c94eSEugenio Pérez         iova = result->iova;
30369292a8eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, *result);
30434e3c94eSEugenio Pérez     }
305e6db5df7SEugenio Pérez     vhost_vdpa_iotlb_batch_begin_once(v);
306108a6481SCindy Lu     ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
307108a6481SCindy Lu     if (ret) {
308108a6481SCindy Lu         error_report("vhost_vdpa dma unmap error!");
309108a6481SCindy Lu     }
310108a6481SCindy Lu 
311108a6481SCindy Lu     memory_region_unref(section->mr);
312108a6481SCindy Lu }
313108a6481SCindy Lu /*
314ef4ff56cSStefano Garzarella  * The IOTLB API used by vhost-vdpa requires incremental updates of the
315108a6481SCindy Lu  * mapping, so we cannot use the generic vhost memory listener, which
316108a6481SCindy Lu  * depends on addnop().
317108a6481SCindy Lu  */
318108a6481SCindy Lu static const MemoryListener vhost_vdpa_memory_listener = {
319142518bdSPeter Xu     .name = "vhost-vdpa",
320a5bd0580SJason Wang     .commit = vhost_vdpa_listener_commit,
321108a6481SCindy Lu     .region_add = vhost_vdpa_listener_region_add,
322108a6481SCindy Lu     .region_del = vhost_vdpa_listener_region_del,
323108a6481SCindy Lu };
324108a6481SCindy Lu 
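/*
 * Thin wrapper around ioctl() on the vhost-vdpa device fd; returns -errno
 * on failure so callers get the usual negative error convention.
 */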
325108a6481SCindy Lu static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
326108a6481SCindy Lu                              void *arg)
327108a6481SCindy Lu {
328108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
329108a6481SCindy Lu     int fd = v->device_fd;
330f2a6e6c4SKevin Wolf     int ret;
331108a6481SCindy Lu 
332108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
333108a6481SCindy Lu 
334f2a6e6c4SKevin Wolf     ret = ioctl(fd, request, arg);
335f2a6e6c4SKevin Wolf     return ret < 0 ? -errno : ret;
336108a6481SCindy Lu }
337108a6481SCindy Lu 
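/*
 * OR @status into the device status: read it with VHOST_VDPA_GET_STATUS,
 * write it back with VHOST_VDPA_SET_STATUS, then re-read it to verify that
 * the device accepted the new bits.
 */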
3383631151bSRoman Kagan static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
339108a6481SCindy Lu {
340108a6481SCindy Lu     uint8_t s;
3413631151bSRoman Kagan     int ret;
342108a6481SCindy Lu 
343778e67deSLaurent Vivier     trace_vhost_vdpa_add_status(dev, status);
3443631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
3453631151bSRoman Kagan     if (ret < 0) {
3463631151bSRoman Kagan         return ret;
347108a6481SCindy Lu     }
348108a6481SCindy Lu 
349108a6481SCindy Lu     s |= status;
350108a6481SCindy Lu 
3513631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
3523631151bSRoman Kagan     if (ret < 0) {
3533631151bSRoman Kagan         return ret;
3543631151bSRoman Kagan     }
3553631151bSRoman Kagan 
3563631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
3573631151bSRoman Kagan     if (ret < 0) {
3583631151bSRoman Kagan         return ret;
3593631151bSRoman Kagan     }
3603631151bSRoman Kagan 
3613631151bSRoman Kagan     if (!(s & status)) {
3623631151bSRoman Kagan         return -EIO;
3633631151bSRoman Kagan     }
3643631151bSRoman Kagan 
3653631151bSRoman Kagan     return 0;
366108a6481SCindy Lu }
367108a6481SCindy Lu 
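/*
 * Query the usable IOVA range from the device; if the ioctl is not
 * supported, fall back to the full 64-bit range.
 */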
368013108b6SEugenio Pérez static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
369013108b6SEugenio Pérez {
370013108b6SEugenio Pérez     int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
371013108b6SEugenio Pérez                               &v->iova_range);
372013108b6SEugenio Pérez     if (ret != 0) {
373013108b6SEugenio Pérez         v->iova_range.first = 0;
374013108b6SEugenio Pérez         v->iova_range.last = UINT64_MAX;
375013108b6SEugenio Pérez     }
376013108b6SEugenio Pérez 
377013108b6SEugenio Pérez     trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
378013108b6SEugenio Pérez                                     v->iova_range.last);
379013108b6SEugenio Pérez }
380013108b6SEugenio Pérez 
381d71b0609SSi-Wei Liu /*
382d71b0609SSi-Wei Liu  * The use of this function is for requests that only need to be
383d71b0609SSi-Wei Liu  * applied once. Typically such a request occurs at the beginning
384d71b0609SSi-Wei Liu  * of operation, before setting up the queues. It should not be
385d71b0609SSi-Wei Liu  * used for requests that perform operations until all queues are
386d71b0609SSi-Wei Liu  * set, which would need to check dev->vq_index_end instead.
387d71b0609SSi-Wei Liu  */
388d71b0609SSi-Wei Liu static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
3894d191cfdSJason Wang {
3904d191cfdSJason Wang     struct vhost_vdpa *v = dev->opaque;
3914d191cfdSJason Wang 
392d71b0609SSi-Wei Liu     return v->index == 0;
3934d191cfdSJason Wang }
3944d191cfdSJason Wang 
39512a195faSEugenio Pérez static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
39612a195faSEugenio Pérez                                        uint64_t *features)
39712a195faSEugenio Pérez {
39812a195faSEugenio Pérez     int ret;
39912a195faSEugenio Pérez 
40012a195faSEugenio Pérez     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
40112a195faSEugenio Pérez     trace_vhost_vdpa_get_features(dev, *features);
40212a195faSEugenio Pérez     return ret;
40312a195faSEugenio Pérez }
40412a195faSEugenio Pérez 
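/*
 * Allocate the shadow virtqueues when SVQ is enabled for this device, after
 * checking that the device features are compatible with SVQ.
 */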
405dff4426fSEugenio Pérez static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
406dff4426fSEugenio Pérez                                Error **errp)
407dff4426fSEugenio Pérez {
408dff4426fSEugenio Pérez     g_autoptr(GPtrArray) shadow_vqs = NULL;
4094725a418SEugenio Pérez     uint64_t dev_features, svq_features;
4104725a418SEugenio Pérez     int r;
4114725a418SEugenio Pérez     bool ok;
412dff4426fSEugenio Pérez 
413dff4426fSEugenio Pérez     if (!v->shadow_vqs_enabled) {
414dff4426fSEugenio Pérez         return 0;
415dff4426fSEugenio Pérez     }
416dff4426fSEugenio Pérez 
41712a195faSEugenio Pérez     r = vhost_vdpa_get_dev_features(hdev, &dev_features);
4184725a418SEugenio Pérez     if (r != 0) {
4194725a418SEugenio Pérez         error_setg_errno(errp, -r, "Can't get vdpa device features");
4204725a418SEugenio Pérez         return r;
4214725a418SEugenio Pérez     }
4224725a418SEugenio Pérez 
4234725a418SEugenio Pérez     svq_features = dev_features;
4244725a418SEugenio Pérez     ok = vhost_svq_valid_features(svq_features, errp);
4254725a418SEugenio Pérez     if (unlikely(!ok)) {
4264725a418SEugenio Pérez         return -1;
4274725a418SEugenio Pérez     }
4284725a418SEugenio Pérez 
429dff4426fSEugenio Pérez     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
430dff4426fSEugenio Pérez     for (unsigned n = 0; n < hdev->nvqs; ++n) {
4313cfb4d06SEugenio Pérez         VhostShadowVirtqueue *svq;
432dff4426fSEugenio Pérez 
433*5fde952bSEugenio Pérez         svq = vhost_svq_new(v->shadow_vq_ops, v->shadow_vq_ops_opaque);
4343cfb4d06SEugenio Pérez         g_ptr_array_add(shadow_vqs, svq);
435dff4426fSEugenio Pérez     }
436dff4426fSEugenio Pérez 
437dff4426fSEugenio Pérez     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
438dff4426fSEugenio Pérez     return 0;
439dff4426fSEugenio Pérez }
440dff4426fSEugenio Pérez 
44128770ff9SKevin Wolf static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
442108a6481SCindy Lu {
443108a6481SCindy Lu     struct vhost_vdpa *v;
444108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
445778e67deSLaurent Vivier     trace_vhost_vdpa_init(dev, opaque);
446e1c1915bSDavid Hildenbrand     int ret;
447e1c1915bSDavid Hildenbrand 
448e1c1915bSDavid Hildenbrand     /*
449e1c1915bSDavid Hildenbrand      * Similar to VFIO, we end up pinning all guest memory and have to
450e1c1915bSDavid Hildenbrand      * disable discarding of RAM.
451e1c1915bSDavid Hildenbrand      */
452e1c1915bSDavid Hildenbrand     ret = ram_block_discard_disable(true);
453e1c1915bSDavid Hildenbrand     if (ret) {
454e1c1915bSDavid Hildenbrand         error_report("Cannot disable discarding of RAM");
455e1c1915bSDavid Hildenbrand         return ret;
456e1c1915bSDavid Hildenbrand     }
457108a6481SCindy Lu 
458108a6481SCindy Lu     v = opaque;
459a5bd0580SJason Wang     v->dev = dev;
460108a6481SCindy Lu     dev->opaque = opaque;
461108a6481SCindy Lu     v->listener = vhost_vdpa_memory_listener;
462108a6481SCindy Lu     v->msg_type = VHOST_IOTLB_MSG_V2;
463dff4426fSEugenio Pérez     ret = vhost_vdpa_init_svq(dev, v, errp);
464dff4426fSEugenio Pérez     if (ret) {
465dff4426fSEugenio Pérez         goto err;
466dff4426fSEugenio Pérez     }
467108a6481SCindy Lu 
468013108b6SEugenio Pérez     vhost_vdpa_get_iova_range(v);
4694d191cfdSJason Wang 
470d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
4714d191cfdSJason Wang         return 0;
4724d191cfdSJason Wang     }
4734d191cfdSJason Wang 
474108a6481SCindy Lu     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
475108a6481SCindy Lu                                VIRTIO_CONFIG_S_DRIVER);
476108a6481SCindy Lu 
477108a6481SCindy Lu     return 0;
478dff4426fSEugenio Pérez 
479dff4426fSEugenio Pérez err:
480dff4426fSEugenio Pérez     ram_block_discard_disable(false);
481dff4426fSEugenio Pérez     return ret;
482108a6481SCindy Lu }
483108a6481SCindy Lu 
484d0416d48SJason Wang static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
485d0416d48SJason Wang                                             int queue_index)
486d0416d48SJason Wang {
4878e3b0cbbSMarc-André Lureau     size_t page_size = qemu_real_host_page_size();
488d0416d48SJason Wang     struct vhost_vdpa *v = dev->opaque;
489d0416d48SJason Wang     VirtIODevice *vdev = dev->vdev;
490d0416d48SJason Wang     VhostVDPAHostNotifier *n;
491d0416d48SJason Wang 
492d0416d48SJason Wang     n = &v->notifier[queue_index];
493d0416d48SJason Wang 
494d0416d48SJason Wang     if (n->addr) {
495d0416d48SJason Wang         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
496d0416d48SJason Wang         object_unparent(OBJECT(&n->mr));
497d0416d48SJason Wang         munmap(n->addr, page_size);
498d0416d48SJason Wang         n->addr = NULL;
499d0416d48SJason Wang     }
500d0416d48SJason Wang }
501d0416d48SJason Wang 
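/*
 * mmap the per-virtqueue notification area exposed by the vhost-vdpa fd and
 * wire it up as a host notifier memory region, so guest doorbell writes
 * land directly on the device page.
 */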
502d0416d48SJason Wang static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
503d0416d48SJason Wang {
5048e3b0cbbSMarc-André Lureau     size_t page_size = qemu_real_host_page_size();
505d0416d48SJason Wang     struct vhost_vdpa *v = dev->opaque;
506d0416d48SJason Wang     VirtIODevice *vdev = dev->vdev;
507d0416d48SJason Wang     VhostVDPAHostNotifier *n;
508d0416d48SJason Wang     int fd = v->device_fd;
509d0416d48SJason Wang     void *addr;
510d0416d48SJason Wang     char *name;
511d0416d48SJason Wang 
512d0416d48SJason Wang     vhost_vdpa_host_notifier_uninit(dev, queue_index);
513d0416d48SJason Wang 
514d0416d48SJason Wang     n = &v->notifier[queue_index];
515d0416d48SJason Wang 
516d0416d48SJason Wang     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
517d0416d48SJason Wang                 queue_index * page_size);
518d0416d48SJason Wang     if (addr == MAP_FAILED) {
519d0416d48SJason Wang         goto err;
520d0416d48SJason Wang     }
521d0416d48SJason Wang 
522d0416d48SJason Wang     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
523d0416d48SJason Wang                            v, queue_index);
524d0416d48SJason Wang     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
525d0416d48SJason Wang                                       page_size, addr);
526d0416d48SJason Wang     g_free(name);
527d0416d48SJason Wang 
528d0416d48SJason Wang     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
52998f7607eSLaurent Vivier         object_unparent(OBJECT(&n->mr));
530d0416d48SJason Wang         munmap(addr, page_size);
531d0416d48SJason Wang         goto err;
532d0416d48SJason Wang     }
533d0416d48SJason Wang     n->addr = addr;
534d0416d48SJason Wang 
535d0416d48SJason Wang     return 0;
536d0416d48SJason Wang 
537d0416d48SJason Wang err:
538d0416d48SJason Wang     return -1;
539d0416d48SJason Wang }
540d0416d48SJason Wang 
541b1f030a0SLaurent Vivier static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
542b1f030a0SLaurent Vivier {
543b1f030a0SLaurent Vivier     int i;
544b1f030a0SLaurent Vivier 
545b1f030a0SLaurent Vivier     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
546b1f030a0SLaurent Vivier         vhost_vdpa_host_notifier_uninit(dev, i);
547b1f030a0SLaurent Vivier     }
548b1f030a0SLaurent Vivier }
549b1f030a0SLaurent Vivier 
550d0416d48SJason Wang static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
551d0416d48SJason Wang {
552dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
553d0416d48SJason Wang     int i;
554d0416d48SJason Wang 
555dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
556dff4426fSEugenio Pérez         /* FIXME: SVQ is not compatible with the host notifier MRs */
557dff4426fSEugenio Pérez         return;
558dff4426fSEugenio Pérez     }
559dff4426fSEugenio Pérez 
560d0416d48SJason Wang     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
561d0416d48SJason Wang         if (vhost_vdpa_host_notifier_init(dev, i)) {
562d0416d48SJason Wang             goto err;
563d0416d48SJason Wang         }
564d0416d48SJason Wang     }
565d0416d48SJason Wang 
566d0416d48SJason Wang     return;
567d0416d48SJason Wang 
568d0416d48SJason Wang err:
569b1f030a0SLaurent Vivier     vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
570d0416d48SJason Wang     return;
571d0416d48SJason Wang }
572d0416d48SJason Wang 
573dff4426fSEugenio Pérez static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
574dff4426fSEugenio Pérez {
575dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
576dff4426fSEugenio Pérez     size_t idx;
577dff4426fSEugenio Pérez 
578dff4426fSEugenio Pérez     if (!v->shadow_vqs) {
579dff4426fSEugenio Pérez         return;
580dff4426fSEugenio Pérez     }
581dff4426fSEugenio Pérez 
582dff4426fSEugenio Pérez     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
583dff4426fSEugenio Pérez         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
584dff4426fSEugenio Pérez     }
585dff4426fSEugenio Pérez     g_ptr_array_free(v->shadow_vqs, true);
586dff4426fSEugenio Pérez }
587dff4426fSEugenio Pérez 
588108a6481SCindy Lu static int vhost_vdpa_cleanup(struct vhost_dev *dev)
589108a6481SCindy Lu {
590108a6481SCindy Lu     struct vhost_vdpa *v;
591108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
592108a6481SCindy Lu     v = dev->opaque;
593778e67deSLaurent Vivier     trace_vhost_vdpa_cleanup(dev, v);
594d0416d48SJason Wang     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
595108a6481SCindy Lu     memory_listener_unregister(&v->listener);
596dff4426fSEugenio Pérez     vhost_vdpa_svq_cleanup(dev);
597108a6481SCindy Lu 
598108a6481SCindy Lu     dev->opaque = NULL;
599e1c1915bSDavid Hildenbrand     ram_block_discard_disable(false);
600e1c1915bSDavid Hildenbrand 
601108a6481SCindy Lu     return 0;
602108a6481SCindy Lu }
603108a6481SCindy Lu 
604108a6481SCindy Lu static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
605108a6481SCindy Lu {
606778e67deSLaurent Vivier     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
607108a6481SCindy Lu     return INT_MAX;
608108a6481SCindy Lu }
609108a6481SCindy Lu 
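/*
 * The memory table itself is not sent to the device: mappings are installed
 * incrementally by the memory listener, so this only traces and validates
 * the table.
 */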
610108a6481SCindy Lu static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
611108a6481SCindy Lu                                     struct vhost_memory *mem)
612108a6481SCindy Lu {
613d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
6144d191cfdSJason Wang         return 0;
6154d191cfdSJason Wang     }
6164d191cfdSJason Wang 
617778e67deSLaurent Vivier     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
618778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
619778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
620778e67deSLaurent Vivier         int i;
621778e67deSLaurent Vivier         for (i = 0; i < mem->nregions; i++) {
622778e67deSLaurent Vivier             trace_vhost_vdpa_dump_regions(dev, i,
623778e67deSLaurent Vivier                                           mem->regions[i].guest_phys_addr,
624778e67deSLaurent Vivier                                           mem->regions[i].memory_size,
625778e67deSLaurent Vivier                                           mem->regions[i].userspace_addr,
626778e67deSLaurent Vivier                                           mem->regions[i].flags_padding);
627778e67deSLaurent Vivier         }
628778e67deSLaurent Vivier     }
629108a6481SCindy Lu     if (mem->padding) {
6303631151bSRoman Kagan         return -EINVAL;
631108a6481SCindy Lu     }
632108a6481SCindy Lu 
633108a6481SCindy Lu     return 0;
634108a6481SCindy Lu }
635108a6481SCindy Lu 
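/*
 * Set the guest acked features. With SVQ enabled the features are cached in
 * v->acked_features and VHOST_F_LOG_ALL is never forwarded to the device,
 * since SVQ handles dirty logging separately.
 */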
636108a6481SCindy Lu static int vhost_vdpa_set_features(struct vhost_dev *dev,
637108a6481SCindy Lu                                    uint64_t features)
638108a6481SCindy Lu {
63912a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
640108a6481SCindy Lu     int ret;
6414d191cfdSJason Wang 
642d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
6434d191cfdSJason Wang         return 0;
6444d191cfdSJason Wang     }
6454d191cfdSJason Wang 
64612a195faSEugenio Pérez     if (v->shadow_vqs_enabled) {
64712a195faSEugenio Pérez         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
64812a195faSEugenio Pérez             /*
64912a195faSEugenio Pérez              * QEMU is just trying to enable or disable logging. SVQ handles
65012a195faSEugenio Pérez              * this separately, so there is no need to forward it.
65112a195faSEugenio Pérez              */
65212a195faSEugenio Pérez             v->acked_features = features;
65312a195faSEugenio Pérez             return 0;
65412a195faSEugenio Pérez         }
65512a195faSEugenio Pérez 
65612a195faSEugenio Pérez         v->acked_features = features;
65712a195faSEugenio Pérez 
65812a195faSEugenio Pérez         /* We must not ack _F_LOG if SVQ is enabled */
65912a195faSEugenio Pérez         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
66012a195faSEugenio Pérez     }
66112a195faSEugenio Pérez 
662778e67deSLaurent Vivier     trace_vhost_vdpa_set_features(dev, features);
663108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
664108a6481SCindy Lu     if (ret) {
665108a6481SCindy Lu         return ret;
666108a6481SCindy Lu     }
667108a6481SCindy Lu 
6683631151bSRoman Kagan     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
669108a6481SCindy Lu }
670108a6481SCindy Lu 
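/*
 * Negotiate backend features: only IOTLB message v2 and IOTLB batching are
 * requested, the result is cached in dev->backend_cap, and only the first
 * vhost_dev actually sets the features on the device.
 */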
671a5bd0580SJason Wang static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
672a5bd0580SJason Wang {
673a5bd0580SJason Wang     uint64_t features;
674a5bd0580SJason Wang     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
675a5bd0580SJason Wang         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
676a5bd0580SJason Wang     int r;
677a5bd0580SJason Wang 
678a5bd0580SJason Wang     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
6792a83e97eSJason Wang         return -EFAULT;
680a5bd0580SJason Wang     }
681a5bd0580SJason Wang 
682a5bd0580SJason Wang     features &= f;
6834d191cfdSJason Wang 
684d71b0609SSi-Wei Liu     if (vhost_vdpa_first_dev(dev)) {
685a5bd0580SJason Wang         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
686a5bd0580SJason Wang         if (r) {
6872a83e97eSJason Wang             return -EFAULT;
688a5bd0580SJason Wang         }
6894d191cfdSJason Wang     }
690a5bd0580SJason Wang 
691a5bd0580SJason Wang     dev->backend_cap = features;
692a5bd0580SJason Wang 
693a5bd0580SJason Wang     return 0;
694a5bd0580SJason Wang }
695a5bd0580SJason Wang 
696c232b8f4SZenghui Yu static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
697108a6481SCindy Lu                                     uint32_t *device_id)
698108a6481SCindy Lu {
699778e67deSLaurent Vivier     int ret;
700778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
701778e67deSLaurent Vivier     trace_vhost_vdpa_get_device_id(dev, *device_id);
702778e67deSLaurent Vivier     return ret;
703108a6481SCindy Lu }
704108a6481SCindy Lu 
705dff4426fSEugenio Pérez static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
706dff4426fSEugenio Pérez {
707dff4426fSEugenio Pérez     if (!v->shadow_vqs_enabled) {
708dff4426fSEugenio Pérez         return;
709dff4426fSEugenio Pérez     }
710dff4426fSEugenio Pérez 
711dff4426fSEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
712dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
713dff4426fSEugenio Pérez         vhost_svq_stop(svq);
714dff4426fSEugenio Pérez     }
715dff4426fSEugenio Pérez }
716dff4426fSEugenio Pérez 
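/* Stop any shadow virtqueues and reset the device by writing a zero status. */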
717108a6481SCindy Lu static int vhost_vdpa_reset_device(struct vhost_dev *dev)
718108a6481SCindy Lu {
719dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
720778e67deSLaurent Vivier     int ret;
721108a6481SCindy Lu     uint8_t status = 0;
722108a6481SCindy Lu 
723dff4426fSEugenio Pérez     vhost_vdpa_reset_svq(v);
724dff4426fSEugenio Pérez 
725778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
726778e67deSLaurent Vivier     trace_vhost_vdpa_reset_device(dev, status);
727778e67deSLaurent Vivier     return ret;
728108a6481SCindy Lu }
729108a6481SCindy Lu 
730108a6481SCindy Lu static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
731108a6481SCindy Lu {
732108a6481SCindy Lu     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
733108a6481SCindy Lu 
734353244d8SJason Wang     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
735353244d8SJason Wang     return idx;
736108a6481SCindy Lu }
737108a6481SCindy Lu 
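/* Enable every vring of this vhost device via VHOST_VDPA_SET_VRING_ENABLE. */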
738108a6481SCindy Lu static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
739108a6481SCindy Lu {
740108a6481SCindy Lu     int i;
741778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_ready(dev);
742108a6481SCindy Lu     for (i = 0; i < dev->nvqs; ++i) {
743108a6481SCindy Lu         struct vhost_vring_state state = {
744108a6481SCindy Lu             .index = dev->vq_index + i,
745108a6481SCindy Lu             .num = 1,
746108a6481SCindy Lu         };
747108a6481SCindy Lu         vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
748108a6481SCindy Lu     }
749108a6481SCindy Lu     return 0;
750108a6481SCindy Lu }
751108a6481SCindy Lu 
752778e67deSLaurent Vivier static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
753778e67deSLaurent Vivier                                    uint32_t config_len)
754778e67deSLaurent Vivier {
755778e67deSLaurent Vivier     int b, len;
756778e67deSLaurent Vivier     char line[QEMU_HEXDUMP_LINE_LEN];
757778e67deSLaurent Vivier 
758778e67deSLaurent Vivier     for (b = 0; b < config_len; b += 16) {
759778e67deSLaurent Vivier         len = config_len - b;
760778e67deSLaurent Vivier         qemu_hexdump_line(line, b, config, len, false);
761778e67deSLaurent Vivier         trace_vhost_vdpa_dump_config(dev, line);
762778e67deSLaurent Vivier     }
763778e67deSLaurent Vivier }
764778e67deSLaurent Vivier 
765108a6481SCindy Lu static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
766108a6481SCindy Lu                                    uint32_t offset, uint32_t size,
767108a6481SCindy Lu                                    uint32_t flags)
768108a6481SCindy Lu {
769108a6481SCindy Lu     struct vhost_vdpa_config *config;
770108a6481SCindy Lu     int ret;
771108a6481SCindy Lu     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
772986d4f78SLi Qiang 
773778e67deSLaurent Vivier     trace_vhost_vdpa_set_config(dev, offset, size, flags);
774108a6481SCindy Lu     config = g_malloc(size + config_size);
775108a6481SCindy Lu     config->off = offset;
776108a6481SCindy Lu     config->len = size;
777108a6481SCindy Lu     memcpy(config->buf, data, size);
778778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
779778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
780778e67deSLaurent Vivier         vhost_vdpa_dump_config(dev, data, size);
781778e67deSLaurent Vivier     }
782108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
783108a6481SCindy Lu     g_free(config);
784108a6481SCindy Lu     return ret;
785108a6481SCindy Lu }
786108a6481SCindy Lu 
787108a6481SCindy Lu static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
78850de5138SKevin Wolf                                    uint32_t config_len, Error **errp)
789108a6481SCindy Lu {
790108a6481SCindy Lu     struct vhost_vdpa_config *v_config;
791108a6481SCindy Lu     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
792108a6481SCindy Lu     int ret;
793108a6481SCindy Lu 
794778e67deSLaurent Vivier     trace_vhost_vdpa_get_config(dev, config, config_len);
795108a6481SCindy Lu     v_config = g_malloc(config_len + config_size);
796108a6481SCindy Lu     v_config->len = config_len;
797108a6481SCindy Lu     v_config->off = 0;
798108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
799108a6481SCindy Lu     memcpy(config, v_config->buf, config_len);
800108a6481SCindy Lu     g_free(v_config);
801778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
802778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
803778e67deSLaurent Vivier         vhost_vdpa_dump_config(dev, config, config_len);
804778e67deSLaurent Vivier     }
805108a6481SCindy Lu     return ret;
806108a6481SCindy Lu }
807108a6481SCindy Lu 
808d96be4c8SEugenio Pérez static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
809d96be4c8SEugenio Pérez                                          struct vhost_vring_state *ring)
810d96be4c8SEugenio Pérez {
811d96be4c8SEugenio Pérez     trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
812d96be4c8SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
813d96be4c8SEugenio Pérez }
814d96be4c8SEugenio Pérez 
815dff4426fSEugenio Pérez static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
816dff4426fSEugenio Pérez                                          struct vhost_vring_file *file)
817dff4426fSEugenio Pérez {
818dff4426fSEugenio Pérez     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
819dff4426fSEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
820dff4426fSEugenio Pérez }
821dff4426fSEugenio Pérez 
822a8ac8858SEugenio Pérez static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
823a8ac8858SEugenio Pérez                                          struct vhost_vring_file *file)
824a8ac8858SEugenio Pérez {
825a8ac8858SEugenio Pérez     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
826a8ac8858SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
827a8ac8858SEugenio Pérez }
828a8ac8858SEugenio Pérez 
829d96be4c8SEugenio Pérez static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
830d96be4c8SEugenio Pérez                                          struct vhost_vring_addr *addr)
831d96be4c8SEugenio Pérez {
832d96be4c8SEugenio Pérez     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
833d96be4c8SEugenio Pérez                                 addr->desc_user_addr, addr->used_user_addr,
834d96be4c8SEugenio Pérez                                 addr->avail_user_addr,
835d96be4c8SEugenio Pérez                                 addr->log_guest_addr);
836d96be4c8SEugenio Pérez 
837d96be4c8SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
838d96be4c8SEugenio Pérez 
839d96be4c8SEugenio Pérez }
840d96be4c8SEugenio Pérez 
841dff4426fSEugenio Pérez /**
842dff4426fSEugenio Pérez  * Set the shadow virtqueue descriptors to the device
843dff4426fSEugenio Pérez  *
844dff4426fSEugenio Pérez  * @dev: The vhost device model
845dff4426fSEugenio Pérez  * @svq: The shadow virtqueue
846dff4426fSEugenio Pérez  * @idx: The index of the virtqueue in the vhost device
847dff4426fSEugenio Pérez  * @errp: Error
848a8ac8858SEugenio Pérez  *
849a8ac8858SEugenio Pérez  * Note that this function does not rewind the kick file descriptor if it
850a8ac8858SEugenio Pérez  * cannot set the call one.
851dff4426fSEugenio Pérez  */
852100890f7SEugenio Pérez static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
853dff4426fSEugenio Pérez                                   VhostShadowVirtqueue *svq, unsigned idx,
854dff4426fSEugenio Pérez                                   Error **errp)
855dff4426fSEugenio Pérez {
856dff4426fSEugenio Pérez     struct vhost_vring_file file = {
857dff4426fSEugenio Pérez         .index = dev->vq_index + idx,
858dff4426fSEugenio Pérez     };
859dff4426fSEugenio Pérez     const EventNotifier *event_notifier = &svq->hdev_kick;
860dff4426fSEugenio Pérez     int r;
861dff4426fSEugenio Pérez 
8623cfb4d06SEugenio Pérez     r = event_notifier_init(&svq->hdev_kick, 0);
8633cfb4d06SEugenio Pérez     if (r != 0) {
8643cfb4d06SEugenio Pérez         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
8653cfb4d06SEugenio Pérez         goto err_init_hdev_kick;
8663cfb4d06SEugenio Pérez     }
8673cfb4d06SEugenio Pérez 
8683cfb4d06SEugenio Pérez     r = event_notifier_init(&svq->hdev_call, 0);
8693cfb4d06SEugenio Pérez     if (r != 0) {
8703cfb4d06SEugenio Pérez         error_setg_errno(errp, -r, "Couldn't create call event notifier");
8713cfb4d06SEugenio Pérez         goto err_init_hdev_call;
8723cfb4d06SEugenio Pérez     }
8733cfb4d06SEugenio Pérez 
874dff4426fSEugenio Pérez     file.fd = event_notifier_get_fd(event_notifier);
875dff4426fSEugenio Pérez     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
876dff4426fSEugenio Pérez     if (unlikely(r != 0)) {
877dff4426fSEugenio Pérez         error_setg_errno(errp, -r, "Can't set device kick fd");
8783cfb4d06SEugenio Pérez         goto err_init_set_dev_fd;
879a8ac8858SEugenio Pérez     }
880a8ac8858SEugenio Pérez 
881a8ac8858SEugenio Pérez     event_notifier = &svq->hdev_call;
882a8ac8858SEugenio Pérez     file.fd = event_notifier_get_fd(event_notifier);
883a8ac8858SEugenio Pérez     r = vhost_vdpa_set_vring_dev_call(dev, &file);
884a8ac8858SEugenio Pérez     if (unlikely(r != 0)) {
885a8ac8858SEugenio Pérez         error_setg_errno(errp, -r, "Can't set device call fd");
8863cfb4d06SEugenio Pérez         goto err_init_set_dev_fd;
887dff4426fSEugenio Pérez     }
888dff4426fSEugenio Pérez 
8893cfb4d06SEugenio Pérez     return 0;
8903cfb4d06SEugenio Pérez 
8913cfb4d06SEugenio Pérez err_init_set_dev_fd:
8923cfb4d06SEugenio Pérez     event_notifier_set_handler(&svq->hdev_call, NULL);
8933cfb4d06SEugenio Pérez 
8943cfb4d06SEugenio Pérez err_init_hdev_call:
8953cfb4d06SEugenio Pérez     event_notifier_cleanup(&svq->hdev_kick);
8963cfb4d06SEugenio Pérez 
8973cfb4d06SEugenio Pérez err_init_hdev_kick:
898100890f7SEugenio Pérez     return r;
899100890f7SEugenio Pérez }
900100890f7SEugenio Pérez 
901100890f7SEugenio Pérez /**
902100890f7SEugenio Pérez  * Unmap a SVQ area in the device
903100890f7SEugenio Pérez  */
9048b6d6119SEugenio Pérez static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
905100890f7SEugenio Pérez {
9068b6d6119SEugenio Pérez     const DMAMap needle = {
9078b6d6119SEugenio Pérez         .translated_addr = addr,
9088b6d6119SEugenio Pérez     };
9098b6d6119SEugenio Pérez     const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
91034e3c94eSEugenio Pérez     hwaddr size;
911100890f7SEugenio Pérez     int r;
912100890f7SEugenio Pérez 
91334e3c94eSEugenio Pérez     if (unlikely(!result)) {
91434e3c94eSEugenio Pérez         error_report("Unable to find SVQ address to unmap");
9155b590f51SEugenio Pérez         return;
91634e3c94eSEugenio Pérez     }
91734e3c94eSEugenio Pérez 
9188e3b0cbbSMarc-André Lureau     size = ROUND_UP(result->size, qemu_real_host_page_size());
91934e3c94eSEugenio Pérez     r = vhost_vdpa_dma_unmap(v, result->iova, size);
920b37c12beSEugenio Pérez     if (unlikely(r < 0)) {
921b37c12beSEugenio Pérez         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
9225b590f51SEugenio Pérez         return;
923b37c12beSEugenio Pérez     }
924b37c12beSEugenio Pérez 
925b37c12beSEugenio Pérez     vhost_iova_tree_remove(v->iova_tree, *result);
926100890f7SEugenio Pérez }
927100890f7SEugenio Pérez 
9285b590f51SEugenio Pérez static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
929100890f7SEugenio Pérez                                        const VhostShadowVirtqueue *svq)
930100890f7SEugenio Pérez {
931100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
932100890f7SEugenio Pérez     struct vhost_vring_addr svq_addr;
933100890f7SEugenio Pérez 
934100890f7SEugenio Pérez     vhost_svq_get_vring_addr(svq, &svq_addr);
935100890f7SEugenio Pérez 
9368b6d6119SEugenio Pérez     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
937100890f7SEugenio Pérez 
9388b6d6119SEugenio Pérez     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
93934e3c94eSEugenio Pérez }
94034e3c94eSEugenio Pérez 
94134e3c94eSEugenio Pérez /**
94234e3c94eSEugenio Pérez  * Map the SVQ area in the device
94334e3c94eSEugenio Pérez  *
94434e3c94eSEugenio Pérez  * @v: Vhost-vdpa device
94534e3c94eSEugenio Pérez  * @needle: The area for which to allocate an IOVA and map into the device
94634e3c94eSEugenio Pérez  * @errp: Error pointer
94734e3c94eSEugenio Pérez  */
94834e3c94eSEugenio Pérez static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
94934e3c94eSEugenio Pérez                                     Error **errp)
95034e3c94eSEugenio Pérez {
95134e3c94eSEugenio Pérez     int r;
95234e3c94eSEugenio Pérez 
95334e3c94eSEugenio Pérez     r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
95434e3c94eSEugenio Pérez     if (unlikely(r != IOVA_OK)) {
95534e3c94eSEugenio Pérez         error_setg(errp, "Cannot allocate iova (%d)", r);
95634e3c94eSEugenio Pérez         return false;
95734e3c94eSEugenio Pérez     }
95834e3c94eSEugenio Pérez 
95934e3c94eSEugenio Pérez     r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
96034e3c94eSEugenio Pérez                            (void *)(uintptr_t)needle->translated_addr,
96134e3c94eSEugenio Pérez                            needle->perm == IOMMU_RO);
96234e3c94eSEugenio Pérez     if (unlikely(r != 0)) {
96334e3c94eSEugenio Pérez         error_setg_errno(errp, -r, "Cannot map region to device");
96469292a8eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, *needle);
96534e3c94eSEugenio Pérez     }
96634e3c94eSEugenio Pérez 
96734e3c94eSEugenio Pérez     return r == 0;
968100890f7SEugenio Pérez }
969100890f7SEugenio Pérez 
970100890f7SEugenio Pérez /**
971100890f7SEugenio Pérez  * Map the shadow virtqueue rings in the device
972100890f7SEugenio Pérez  *
973100890f7SEugenio Pérez  * @dev: The vhost device
974100890f7SEugenio Pérez  * @svq: The shadow virtqueue
975100890f7SEugenio Pérez  * @addr: Assigned IOVA addresses
976100890f7SEugenio Pérez  * @errp: Error pointer
977100890f7SEugenio Pérez  */
978100890f7SEugenio Pérez static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
979100890f7SEugenio Pérez                                      const VhostShadowVirtqueue *svq,
980100890f7SEugenio Pérez                                      struct vhost_vring_addr *addr,
981100890f7SEugenio Pérez                                      Error **errp)
982100890f7SEugenio Pérez {
98305e385d2SMarkus Armbruster     ERRP_GUARD();
98434e3c94eSEugenio Pérez     DMAMap device_region, driver_region;
98534e3c94eSEugenio Pérez     struct vhost_vring_addr svq_addr;
986100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
987100890f7SEugenio Pérez     size_t device_size = vhost_svq_device_area_size(svq);
988100890f7SEugenio Pérez     size_t driver_size = vhost_svq_driver_area_size(svq);
98934e3c94eSEugenio Pérez     size_t avail_offset;
99034e3c94eSEugenio Pérez     bool ok;
991100890f7SEugenio Pérez 
99234e3c94eSEugenio Pérez     vhost_svq_get_vring_addr(svq, &svq_addr);
993100890f7SEugenio Pérez 
99434e3c94eSEugenio Pérez     driver_region = (DMAMap) {
99534e3c94eSEugenio Pérez         .translated_addr = svq_addr.desc_user_addr,
99634e3c94eSEugenio Pérez         .size = driver_size - 1,
99734e3c94eSEugenio Pérez         .perm = IOMMU_RO,
99834e3c94eSEugenio Pérez     };
99934e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
100034e3c94eSEugenio Pérez     if (unlikely(!ok)) {
100134e3c94eSEugenio Pérez         error_prepend(errp, "Cannot create vq driver region: ");
1002100890f7SEugenio Pérez         return false;
1003100890f7SEugenio Pérez     }
100434e3c94eSEugenio Pérez     addr->desc_user_addr = driver_region.iova;
100534e3c94eSEugenio Pérez     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
100634e3c94eSEugenio Pérez     addr->avail_user_addr = driver_region.iova + avail_offset;
1007100890f7SEugenio Pérez 
100834e3c94eSEugenio Pérez     device_region = (DMAMap) {
100934e3c94eSEugenio Pérez         .translated_addr = svq_addr.used_user_addr,
101034e3c94eSEugenio Pérez         .size = device_size - 1,
101134e3c94eSEugenio Pérez         .perm = IOMMU_RW,
101234e3c94eSEugenio Pérez     };
101334e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
101434e3c94eSEugenio Pérez     if (unlikely(!ok)) {
101534e3c94eSEugenio Pérez         error_prepend(errp, "Cannot create vq device region: ");
10168b6d6119SEugenio Pérez         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1017100890f7SEugenio Pérez     }
101834e3c94eSEugenio Pérez     addr->used_user_addr = device_region.iova;
1019100890f7SEugenio Pérez 
102034e3c94eSEugenio Pérez     return ok;
1021100890f7SEugenio Pérez }
1022100890f7SEugenio Pérez 
1023100890f7SEugenio Pérez static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1024100890f7SEugenio Pérez                                  VhostShadowVirtqueue *svq, unsigned idx,
1025100890f7SEugenio Pérez                                  Error **errp)
1026100890f7SEugenio Pérez {
1027100890f7SEugenio Pérez     uint16_t vq_index = dev->vq_index + idx;
1028100890f7SEugenio Pérez     struct vhost_vring_state s = {
1029100890f7SEugenio Pérez         .index = vq_index,
1030100890f7SEugenio Pérez     };
1031100890f7SEugenio Pérez     int r;
1032100890f7SEugenio Pérez 
1033100890f7SEugenio Pérez     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1034100890f7SEugenio Pérez     if (unlikely(r)) {
1035100890f7SEugenio Pérez         error_setg_errno(errp, -r, "Cannot set vring base");
1036100890f7SEugenio Pérez         return false;
1037100890f7SEugenio Pérez     }
1038100890f7SEugenio Pérez 
1039100890f7SEugenio Pérez     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1040dff4426fSEugenio Pérez     return r == 0;
1041dff4426fSEugenio Pérez }
1042dff4426fSEugenio Pérez 
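/*
 * Start all shadow virtqueues: set the device vring base, wire up the kick
 * and call eventfds, map the SVQ rings through the IOVA tree and override
 * the vring addresses seen by the device with the SVQ ones.
 */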
1043dff4426fSEugenio Pérez static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1044dff4426fSEugenio Pérez {
1045dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1046dff4426fSEugenio Pérez     Error *err = NULL;
1047dff4426fSEugenio Pérez     unsigned i;
1048dff4426fSEugenio Pérez 
1049712c1a31SEugenio Pérez     if (!v->shadow_vqs_enabled) {
1050dff4426fSEugenio Pérez         return true;
1051dff4426fSEugenio Pérez     }
1052dff4426fSEugenio Pérez 
1053dff4426fSEugenio Pérez     for (i = 0; i < v->shadow_vqs->len; ++i) {
1054100890f7SEugenio Pérez         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1055dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1056100890f7SEugenio Pérez         struct vhost_vring_addr addr = {
10571c82fdfeSEugenio Pérez             .index = dev->vq_index + i,
1058100890f7SEugenio Pérez         };
1059100890f7SEugenio Pérez         int r;
1060dff4426fSEugenio Pérez         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1061dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1062100890f7SEugenio Pérez             goto err;
1063100890f7SEugenio Pérez         }
1064100890f7SEugenio Pérez 
1065*5fde952bSEugenio Pérez         vhost_svq_start(svq, dev->vdev, vq, v->iova_tree);
1066100890f7SEugenio Pérez         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1067100890f7SEugenio Pérez         if (unlikely(!ok)) {
1068100890f7SEugenio Pérez             goto err_map;
1069100890f7SEugenio Pérez         }
1070100890f7SEugenio Pérez 
1071100890f7SEugenio Pérez         /* Override vring GPA set by vhost subsystem */
1072100890f7SEugenio Pérez         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1073100890f7SEugenio Pérez         if (unlikely(r != 0)) {
1074100890f7SEugenio Pérez             error_setg_errno(&err, -r, "Cannot set device address");
1075100890f7SEugenio Pérez             goto err_set_addr;
1076100890f7SEugenio Pérez         }
1077100890f7SEugenio Pérez     }
1078100890f7SEugenio Pérez 
1079100890f7SEugenio Pérez     return true;
1080100890f7SEugenio Pérez 
1081100890f7SEugenio Pérez err_set_addr:
1082100890f7SEugenio Pérez     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1083100890f7SEugenio Pérez 
1084100890f7SEugenio Pérez err_map:
1085100890f7SEugenio Pérez     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1086100890f7SEugenio Pérez 
1087100890f7SEugenio Pérez err:
1088dff4426fSEugenio Pérez     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1089100890f7SEugenio Pérez     for (unsigned j = 0; j < i; ++j) {
1090100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1091100890f7SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
1092100890f7SEugenio Pérez         vhost_svq_stop(svq);
1093100890f7SEugenio Pérez     }
1094100890f7SEugenio Pérez 
1095100890f7SEugenio Pérez     return false;
1096100890f7SEugenio Pérez }
1097100890f7SEugenio Pérez 
10985b590f51SEugenio Pérez static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1099100890f7SEugenio Pérez {
1100100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1101100890f7SEugenio Pérez 
1102712c1a31SEugenio Pérez     if (!v->shadow_vqs_enabled) {
11035b590f51SEugenio Pérez         return;
1104100890f7SEugenio Pérez     }
1105100890f7SEugenio Pérez 
1106100890f7SEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1107100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
11085b590f51SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
11093cfb4d06SEugenio Pérez 
11103cfb4d06SEugenio Pérez         event_notifier_cleanup(&svq->hdev_kick);
11113cfb4d06SEugenio Pérez         event_notifier_cleanup(&svq->hdev_call);
1112dff4426fSEugenio Pérez     }
1113dff4426fSEugenio Pérez }
1114dff4426fSEugenio Pérez 
1115108a6481SCindy Lu static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1116108a6481SCindy Lu {
1117108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
1118dff4426fSEugenio Pérez     bool ok;
1119778e67deSLaurent Vivier     trace_vhost_vdpa_dev_start(dev, started);
11204d191cfdSJason Wang 
11214d191cfdSJason Wang     if (started) {
11224d191cfdSJason Wang         vhost_vdpa_host_notifiers_init(dev);
1123dff4426fSEugenio Pérez         ok = vhost_vdpa_svqs_start(dev);
1124dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1125dff4426fSEugenio Pérez             return -1;
1126dff4426fSEugenio Pérez         }
11274d191cfdSJason Wang         vhost_vdpa_set_vring_ready(dev);
11284d191cfdSJason Wang     } else {
11295b590f51SEugenio Pérez         vhost_vdpa_svqs_stop(dev);
11304d191cfdSJason Wang         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
11314d191cfdSJason Wang     }
11324d191cfdSJason Wang 
1133245cf2c2SEugenio Pérez     if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
11344d191cfdSJason Wang         return 0;
11354d191cfdSJason Wang     }
11364d191cfdSJason Wang 
1137108a6481SCindy Lu     if (started) {
1138108a6481SCindy Lu         memory_listener_register(&v->listener, &address_space_memory);
11393631151bSRoman Kagan         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1140108a6481SCindy Lu     } else {
1141108a6481SCindy Lu         vhost_vdpa_reset_device(dev);
1142108a6481SCindy Lu         vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1143108a6481SCindy Lu                                    VIRTIO_CONFIG_S_DRIVER);
1144108a6481SCindy Lu         memory_listener_unregister(&v->listener);
1145108a6481SCindy Lu 
1146108a6481SCindy Lu         return 0;
1147108a6481SCindy Lu     }
1148108a6481SCindy Lu }
1149108a6481SCindy Lu 
1150108a6481SCindy Lu static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1151108a6481SCindy Lu                                      struct vhost_log *log)
1152108a6481SCindy Lu {
1153773ebc95SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1154d71b0609SSi-Wei Liu     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
11554d191cfdSJason Wang         return 0;
11564d191cfdSJason Wang     }
11574d191cfdSJason Wang 
1158778e67deSLaurent Vivier     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1159778e67deSLaurent Vivier                                   log->log);
1160108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1161108a6481SCindy Lu }
1162108a6481SCindy Lu 
1163108a6481SCindy Lu static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1164108a6481SCindy Lu                                        struct vhost_vring_addr *addr)
1165108a6481SCindy Lu {
1166d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1167d96be4c8SEugenio Pérez 
1168d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1169d96be4c8SEugenio Pérez         /*
1170d96be4c8SEugenio Pérez          * Device vring addr was set at device start. SVQ base is handled by
1171d96be4c8SEugenio Pérez          * VirtQueue code.
1172d96be4c8SEugenio Pérez          */
1173d96be4c8SEugenio Pérez         return 0;
1174d96be4c8SEugenio Pérez     }
1175d96be4c8SEugenio Pérez 
1176d96be4c8SEugenio Pérez     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1177108a6481SCindy Lu }
1178108a6481SCindy Lu 
1179108a6481SCindy Lu static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1180108a6481SCindy Lu                                       struct vhost_vring_state *ring)
1181108a6481SCindy Lu {
1182778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1183108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1184108a6481SCindy Lu }
1185108a6481SCindy Lu 
1186108a6481SCindy Lu static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1187108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1188108a6481SCindy Lu {
1189d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
11902fdac348SEugenio Pérez     VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index);
1191d96be4c8SEugenio Pérez 
11922fdac348SEugenio Pérez     /*
11932fdac348SEugenio Pérez      * vhost-vdpa devices does not support in-flight requests. Set all of them
11942fdac348SEugenio Pérez      * as available.
11952fdac348SEugenio Pérez      *
11962fdac348SEugenio Pérez      * TODO: This is ok for networking, but other kinds of devices might
11972fdac348SEugenio Pérez      * have problems with these retransmissions.
11982fdac348SEugenio Pérez      */
11992fdac348SEugenio Pérez     while (virtqueue_rewind(vq, 1)) {
12002fdac348SEugenio Pérez         continue;
12012fdac348SEugenio Pérez     }
1202d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1203d96be4c8SEugenio Pérez         /*
1204d96be4c8SEugenio Pérez          * Device vring base was set at device start. SVQ base is handled by
1205d96be4c8SEugenio Pérez          * VirtQueue code.
1206d96be4c8SEugenio Pérez          */
1207d96be4c8SEugenio Pérez         return 0;
1208d96be4c8SEugenio Pérez     }
1209d96be4c8SEugenio Pérez 
1210d96be4c8SEugenio Pérez     return vhost_vdpa_set_dev_vring_base(dev, ring);
1211108a6481SCindy Lu }
1212108a6481SCindy Lu 
1213108a6481SCindy Lu static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1214108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1215108a6481SCindy Lu {
12166d0b2226SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1217778e67deSLaurent Vivier     int ret;
1218778e67deSLaurent Vivier 
12196d0b2226SEugenio Pérez     if (v->shadow_vqs_enabled) {
12202fdac348SEugenio Pérez         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
12216d0b2226SEugenio Pérez         return 0;
12226d0b2226SEugenio Pérez     }
12236d0b2226SEugenio Pérez 
1224778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1225778e67deSLaurent Vivier     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
1226778e67deSLaurent Vivier     return ret;
1227108a6481SCindy Lu }
1228108a6481SCindy Lu 
1229108a6481SCindy Lu static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1230108a6481SCindy Lu                                        struct vhost_vring_file *file)
1231108a6481SCindy Lu {
1232dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1233dff4426fSEugenio Pérez     int vdpa_idx = file->index - dev->vq_index;
1234dff4426fSEugenio Pérez 
1235dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
1236dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1237dff4426fSEugenio Pérez         vhost_svq_set_svq_kick_fd(svq, file->fd);
1238dff4426fSEugenio Pérez         return 0;
1239dff4426fSEugenio Pérez     } else {
1240dff4426fSEugenio Pérez         return vhost_vdpa_set_vring_dev_kick(dev, file);
1241dff4426fSEugenio Pérez     }
1242108a6481SCindy Lu }
1243108a6481SCindy Lu 
1244108a6481SCindy Lu static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1245108a6481SCindy Lu                                        struct vhost_vring_file *file)
1246108a6481SCindy Lu {
1247a8ac8858SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1248a8ac8858SEugenio Pérez 
1249a8ac8858SEugenio Pérez     if (v->shadow_vqs_enabled) {
1250a8ac8858SEugenio Pérez         int vdpa_idx = file->index - dev->vq_index;
1251a8ac8858SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1252a8ac8858SEugenio Pérez 
1253a8ac8858SEugenio Pérez         vhost_svq_set_svq_call_fd(svq, file->fd);
1254a8ac8858SEugenio Pérez         return 0;
1255a8ac8858SEugenio Pérez     } else {
1256a8ac8858SEugenio Pérez         return vhost_vdpa_set_vring_dev_call(dev, file);
1257a8ac8858SEugenio Pérez     }
1258108a6481SCindy Lu }
1259108a6481SCindy Lu 
1260108a6481SCindy Lu static int vhost_vdpa_get_features(struct vhost_dev *dev,
1261108a6481SCindy Lu                                      uint64_t *features)
1262108a6481SCindy Lu {
126312a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
126412a195faSEugenio Pérez     int ret = vhost_vdpa_get_dev_features(dev, features);
1265778e67deSLaurent Vivier 
126612a195faSEugenio Pérez     if (ret == 0 && v->shadow_vqs_enabled) {
126712a195faSEugenio Pérez         /* Add SVQ logging capabilities */
126812a195faSEugenio Pérez         *features |= BIT_ULL(VHOST_F_LOG_ALL);
126912a195faSEugenio Pérez     }
127012a195faSEugenio Pérez 
1271778e67deSLaurent Vivier     return ret;
1272108a6481SCindy Lu }
1273108a6481SCindy Lu 
1274108a6481SCindy Lu static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1275108a6481SCindy Lu {
1276d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
12774d191cfdSJason Wang         return 0;
12784d191cfdSJason Wang     }
12794d191cfdSJason Wang 
1280778e67deSLaurent Vivier     trace_vhost_vdpa_set_owner(dev);
1281108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1282108a6481SCindy Lu }
1283108a6481SCindy Lu 
1284108a6481SCindy Lu static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1285108a6481SCindy Lu                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1286108a6481SCindy Lu {
1287108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1288108a6481SCindy Lu     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1289108a6481SCindy Lu     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1290108a6481SCindy Lu     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1291778e67deSLaurent Vivier     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1292778e67deSLaurent Vivier                                  addr->avail_user_addr, addr->used_user_addr);
1293108a6481SCindy Lu     return 0;
1294108a6481SCindy Lu }
1295108a6481SCindy Lu 
1296108a6481SCindy Lu static bool  vhost_vdpa_force_iommu(struct vhost_dev *dev)
1297108a6481SCindy Lu {
1298108a6481SCindy Lu     return true;
1299108a6481SCindy Lu }
1300108a6481SCindy Lu 
1301108a6481SCindy Lu const VhostOps vdpa_ops = {
1302108a6481SCindy Lu         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1303108a6481SCindy Lu         .vhost_backend_init = vhost_vdpa_init,
1304108a6481SCindy Lu         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1305108a6481SCindy Lu         .vhost_set_log_base = vhost_vdpa_set_log_base,
1306108a6481SCindy Lu         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1307108a6481SCindy Lu         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1308108a6481SCindy Lu         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1309108a6481SCindy Lu         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1310108a6481SCindy Lu         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1311108a6481SCindy Lu         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1312108a6481SCindy Lu         .vhost_get_features = vhost_vdpa_get_features,
1313a5bd0580SJason Wang         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1314108a6481SCindy Lu         .vhost_set_owner = vhost_vdpa_set_owner,
1315108a6481SCindy Lu         .vhost_set_vring_endian = NULL,
1316108a6481SCindy Lu         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1317108a6481SCindy Lu         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1318108a6481SCindy Lu         .vhost_set_features = vhost_vdpa_set_features,
1319108a6481SCindy Lu         .vhost_reset_device = vhost_vdpa_reset_device,
1320108a6481SCindy Lu         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1321108a6481SCindy Lu         .vhost_get_config  = vhost_vdpa_get_config,
1322108a6481SCindy Lu         .vhost_set_config = vhost_vdpa_set_config,
1323108a6481SCindy Lu         .vhost_requires_shm_log = NULL,
1324108a6481SCindy Lu         .vhost_migration_done = NULL,
1325108a6481SCindy Lu         .vhost_backend_can_merge = NULL,
1326108a6481SCindy Lu         .vhost_net_set_mtu = NULL,
1327108a6481SCindy Lu         .vhost_set_iotlb_callback = NULL,
1328108a6481SCindy Lu         .vhost_send_device_iotlb_msg = NULL,
1329108a6481SCindy Lu         .vhost_dev_start = vhost_vdpa_dev_start,
1330108a6481SCindy Lu         .vhost_get_device_id = vhost_vdpa_get_device_id,
1331108a6481SCindy Lu         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1332108a6481SCindy Lu         .vhost_force_iommu = vhost_vdpa_force_iommu,
1333108a6481SCindy Lu };
1334