xref: /qemu/hw/virtio/vhost-vdpa.c (revision 3cfb4d069cd2977b707fb519c455d7d416e1f4b0)
1108a6481SCindy Lu /*
2108a6481SCindy Lu  * vhost-vdpa
3108a6481SCindy Lu  *
4108a6481SCindy Lu  *  Copyright(c) 2017-2018 Intel Corporation.
5108a6481SCindy Lu  *  Copyright(c) 2020 Red Hat, Inc.
6108a6481SCindy Lu  *
7108a6481SCindy Lu  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8108a6481SCindy Lu  * See the COPYING file in the top-level directory.
9108a6481SCindy Lu  *
10108a6481SCindy Lu  */
11108a6481SCindy Lu 
12108a6481SCindy Lu #include "qemu/osdep.h"
13108a6481SCindy Lu #include <linux/vhost.h>
14108a6481SCindy Lu #include <linux/vfio.h>
15108a6481SCindy Lu #include <sys/eventfd.h>
16108a6481SCindy Lu #include <sys/ioctl.h>
17108a6481SCindy Lu #include "hw/virtio/vhost.h"
18108a6481SCindy Lu #include "hw/virtio/vhost-backend.h"
19108a6481SCindy Lu #include "hw/virtio/virtio-net.h"
20dff4426fSEugenio Pérez #include "hw/virtio/vhost-shadow-virtqueue.h"
21108a6481SCindy Lu #include "hw/virtio/vhost-vdpa.h"
22df77d45aSXie Yongji #include "exec/address-spaces.h"
23c156d5bfSEugenio Pérez #include "migration/blocker.h"
24415b7327SMarc-André Lureau #include "qemu/cutils.h"
25108a6481SCindy Lu #include "qemu/main-loop.h"
264dc5acc0SCindy Lu #include "cpu.h"
27778e67deSLaurent Vivier #include "trace.h"
28dff4426fSEugenio Pérez #include "qapi/error.h"
29108a6481SCindy Lu 
30032e4d68SEugenio Pérez /*
31032e4d68SEugenio Pérez  * Return one past the end of the section. Be careful with uint64_t
32032e4d68SEugenio Pérez  * conversions!
33032e4d68SEugenio Pérez  */
34032e4d68SEugenio Pérez static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
35032e4d68SEugenio Pérez {
36032e4d68SEugenio Pérez     Int128 llend = int128_make64(section->offset_within_address_space);
37032e4d68SEugenio Pérez     llend = int128_add(llend, section->size);
38032e4d68SEugenio Pérez     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
39032e4d68SEugenio Pérez 
40032e4d68SEugenio Pérez     return llend;
41032e4d68SEugenio Pérez }
42032e4d68SEugenio Pérez 
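/*
 * Worked example (illustrative, page-aligned values assumed): for a section
 * with offset_within_address_space = 0x1000 and size = 0x2000 under a 4 KiB
 * TARGET_PAGE_MASK, vhost_vdpa_section_end() returns 0x3000, i.e. one past
 * the last byte of the section, so callers test emptiness with int128_ge():
 *
 *     Int128 llend = vhost_vdpa_section_end(section);
 *     if (int128_ge(int128_make64(iova), llend)) {
 *         return;    // empty or exhausted range, nothing to map
 *     }
 */
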
43013108b6SEugenio Pérez static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
44013108b6SEugenio Pérez                                                 uint64_t iova_min,
45013108b6SEugenio Pérez                                                 uint64_t iova_max)
46108a6481SCindy Lu {
47013108b6SEugenio Pérez     Int128 llend;
48013108b6SEugenio Pérez 
49013108b6SEugenio Pérez     if ((!memory_region_is_ram(section->mr) &&
50108a6481SCindy Lu          !memory_region_is_iommu(section->mr)) ||
51c64038c9SEugenio Pérez         memory_region_is_protected(section->mr) ||
52d60c75d2SJason Wang         /* vhost-vDPA doesn't allow MMIO to be mapped  */
53013108b6SEugenio Pérez         memory_region_is_ram_device(section->mr)) {
54013108b6SEugenio Pérez         return true;
55013108b6SEugenio Pérez     }
56013108b6SEugenio Pérez 
57013108b6SEugenio Pérez     if (section->offset_within_address_space < iova_min) {
58013108b6SEugenio Pérez         error_report("RAM section out of device range (min=0x%" PRIx64
59013108b6SEugenio Pérez                      ", addr=0x%" HWADDR_PRIx ")",
60013108b6SEugenio Pérez                      iova_min, section->offset_within_address_space);
61013108b6SEugenio Pérez         return true;
62013108b6SEugenio Pérez     }
63013108b6SEugenio Pérez 
64013108b6SEugenio Pérez     llend = vhost_vdpa_section_end(section);
65013108b6SEugenio Pérez     if (int128_gt(llend, int128_make64(iova_max))) {
66013108b6SEugenio Pérez         error_report("RAM section out of device range (max=0x%" PRIx64
67013108b6SEugenio Pérez                      ", end addr=0x%" PRIx64 ")",
68013108b6SEugenio Pérez                      iova_max, int128_get64(llend));
69013108b6SEugenio Pérez         return true;
70013108b6SEugenio Pérez     }
71013108b6SEugenio Pérez 
72013108b6SEugenio Pérez     return false;
73108a6481SCindy Lu }
74108a6481SCindy Lu 
75463ba1e3SEugenio Pérez int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
76108a6481SCindy Lu                        void *vaddr, bool readonly)
77108a6481SCindy Lu {
78386494f2SCindy Lu     struct vhost_msg_v2 msg = {};
79108a6481SCindy Lu     int fd = v->device_fd;
80108a6481SCindy Lu     int ret = 0;
81108a6481SCindy Lu 
82108a6481SCindy Lu     msg.type = v->msg_type;
83108a6481SCindy Lu     msg.iotlb.iova = iova;
84108a6481SCindy Lu     msg.iotlb.size = size;
85108a6481SCindy Lu     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
86108a6481SCindy Lu     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
87108a6481SCindy Lu     msg.iotlb.type = VHOST_IOTLB_UPDATE;
88108a6481SCindy Lu 
89778e67deSLaurent Vivier     trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
90778e67deSLaurent Vivier                              msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
91778e67deSLaurent Vivier 
92108a6481SCindy Lu     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
93108a6481SCindy Lu         error_report("failed to write, fd=%d, errno=%d (%s)",
94108a6481SCindy Lu             fd, errno, strerror(errno));
95108a6481SCindy Lu         return -EIO;
96108a6481SCindy Lu     }
97108a6481SCindy Lu 
98108a6481SCindy Lu     return ret;
99108a6481SCindy Lu }
100108a6481SCindy Lu 
101463ba1e3SEugenio Pérez int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova, hwaddr size)
102108a6481SCindy Lu {
103386494f2SCindy Lu     struct vhost_msg_v2 msg = {};
104108a6481SCindy Lu     int fd = v->device_fd;
105108a6481SCindy Lu     int ret = 0;
106108a6481SCindy Lu 
107108a6481SCindy Lu     msg.type = v->msg_type;
108108a6481SCindy Lu     msg.iotlb.iova = iova;
109108a6481SCindy Lu     msg.iotlb.size = size;
110108a6481SCindy Lu     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
111108a6481SCindy Lu 
112778e67deSLaurent Vivier     trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
113778e67deSLaurent Vivier                                msg.iotlb.size, msg.iotlb.type);
114778e67deSLaurent Vivier 
115108a6481SCindy Lu     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
116108a6481SCindy Lu         error_report("failed to write, fd=%d, errno=%d (%s)",
117108a6481SCindy Lu             fd, errno, strerror(errno));
118108a6481SCindy Lu         return -EIO;
119108a6481SCindy Lu     }
120108a6481SCindy Lu 
121108a6481SCindy Lu     return ret;
122108a6481SCindy Lu }
123108a6481SCindy Lu 
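/*
 * Usage sketch (hypothetical caller; 'v' is an initialized struct vhost_vdpa
 * and 'buf' a page-aligned host buffer): map one page read/write at IOVA
 * 0x100000 and remove the mapping again.  Both helpers return 0 on success
 * and -EIO if writing the vhost_msg_v2 to the device fd fails.
 *
 *     if (vhost_vdpa_dma_map(v, 0x100000, 0x1000, buf, false) == 0) {
 *         // ... the device may now DMA to/from buf via IOVA 0x100000 ...
 *         vhost_vdpa_dma_unmap(v, 0x100000, 0x1000);
 *     }
 */
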
124e6db5df7SEugenio Pérez static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
125a5bd0580SJason Wang {
126a5bd0580SJason Wang     int fd = v->device_fd;
127e6db5df7SEugenio Pérez     struct vhost_msg_v2 msg = {
128e6db5df7SEugenio Pérez         .type = v->msg_type,
129e6db5df7SEugenio Pérez         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
130e6db5df7SEugenio Pérez     };
131a5bd0580SJason Wang 
1325580b9f0SEugenio Pérez     trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
133a5bd0580SJason Wang     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
134a5bd0580SJason Wang         error_report("failed to write, fd=%d, errno=%d (%s)",
135a5bd0580SJason Wang                      fd, errno, strerror(errno));
136a5bd0580SJason Wang     }
137a5bd0580SJason Wang }
138a5bd0580SJason Wang 
139e6db5df7SEugenio Pérez static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
140e6db5df7SEugenio Pérez {
141e6db5df7SEugenio Pérez     if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
142e6db5df7SEugenio Pérez         !v->iotlb_batch_begin_sent) {
143e6db5df7SEugenio Pérez         vhost_vdpa_listener_begin_batch(v);
144e6db5df7SEugenio Pérez     }
145e6db5df7SEugenio Pérez 
146e6db5df7SEugenio Pérez     v->iotlb_batch_begin_sent = true;
147e6db5df7SEugenio Pérez }
148e6db5df7SEugenio Pérez 
149a5bd0580SJason Wang static void vhost_vdpa_listener_commit(MemoryListener *listener)
150a5bd0580SJason Wang {
151a5bd0580SJason Wang     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
152a5bd0580SJason Wang     struct vhost_dev *dev = v->dev;
1538acb3218SPhilippe Mathieu-Daudé     struct vhost_msg_v2 msg = {};
154a5bd0580SJason Wang     int fd = v->device_fd;
155a5bd0580SJason Wang 
156a5bd0580SJason Wang     if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
157a5bd0580SJason Wang         return;
158a5bd0580SJason Wang     }
159a5bd0580SJason Wang 
160e6db5df7SEugenio Pérez     if (!v->iotlb_batch_begin_sent) {
161e6db5df7SEugenio Pérez         return;
162e6db5df7SEugenio Pérez     }
163e6db5df7SEugenio Pérez 
164a5bd0580SJason Wang     msg.type = v->msg_type;
165a5bd0580SJason Wang     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
166a5bd0580SJason Wang 
1675580b9f0SEugenio Pérez     trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
168a5bd0580SJason Wang     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
169a5bd0580SJason Wang         error_report("failed to write, fd=%d, errno=%d (%s)",
170a5bd0580SJason Wang                      fd, errno, strerror(errno));
171a5bd0580SJason Wang     }
172e6db5df7SEugenio Pérez 
173e6db5df7SEugenio Pérez     v->iotlb_batch_begin_sent = false;
174a5bd0580SJason Wang }
175a5bd0580SJason Wang 
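/*
 * Sketch of how batching plays out when the device advertises
 * VHOST_BACKEND_F_IOTLB_BATCH: the first mapping update of a listener
 * transaction sends a single VHOST_IOTLB_BATCH_BEGIN, and the .commit
 * callback closes it with VHOST_IOTLB_BATCH_END, so the backend can treat
 * the whole series of updates as one unit:
 *
 *     vhost_vdpa_iotlb_batch_begin_once(v);             // BATCH_BEGIN, once
 *     vhost_vdpa_dma_map(v, iova0, size0, ptr0, false);
 *     vhost_vdpa_dma_map(v, iova1, size1, ptr1, true);
 *     ...
 *     // memory listener commit -> vhost_vdpa_listener_commit(): BATCH_END
 */
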
176108a6481SCindy Lu static void vhost_vdpa_listener_region_add(MemoryListener *listener,
177108a6481SCindy Lu                                            MemoryRegionSection *section)
178108a6481SCindy Lu {
1797dab70beSEugenio Pérez     DMAMap mem_region = {};
180108a6481SCindy Lu     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
181108a6481SCindy Lu     hwaddr iova;
182108a6481SCindy Lu     Int128 llend, llsize;
183108a6481SCindy Lu     void *vaddr;
184108a6481SCindy Lu     int ret;
185108a6481SCindy Lu 
186013108b6SEugenio Pérez     if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
187013108b6SEugenio Pérez                                             v->iova_range.last)) {
188108a6481SCindy Lu         return;
189108a6481SCindy Lu     }
190108a6481SCindy Lu 
191108a6481SCindy Lu     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
192108a6481SCindy Lu                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
193108a6481SCindy Lu         error_report("%s received unaligned region", __func__);
194108a6481SCindy Lu         return;
195108a6481SCindy Lu     }
196108a6481SCindy Lu 
197108a6481SCindy Lu     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
198032e4d68SEugenio Pérez     llend = vhost_vdpa_section_end(section);
199108a6481SCindy Lu     if (int128_ge(int128_make64(iova), llend)) {
200108a6481SCindy Lu         return;
201108a6481SCindy Lu     }
202108a6481SCindy Lu 
203108a6481SCindy Lu     memory_region_ref(section->mr);
204108a6481SCindy Lu 
205108a6481SCindy Lu     /* Here we assume that memory_region_is_ram(section->mr)==true */
206108a6481SCindy Lu 
207108a6481SCindy Lu     vaddr = memory_region_get_ram_ptr(section->mr) +
208108a6481SCindy Lu             section->offset_within_region +
209108a6481SCindy Lu             (iova - section->offset_within_address_space);
210108a6481SCindy Lu 
211778e67deSLaurent Vivier     trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
212778e67deSLaurent Vivier                                          vaddr, section->readonly);
213778e67deSLaurent Vivier 
214108a6481SCindy Lu     llsize = int128_sub(llend, int128_make64(iova));
21534e3c94eSEugenio Pérez     if (v->shadow_vqs_enabled) {
2167dab70beSEugenio Pérez         int r;
21734e3c94eSEugenio Pérez 
2187dab70beSEugenio Pérez         mem_region.translated_addr = (hwaddr)(uintptr_t)vaddr;
2197dab70beSEugenio Pérez         mem_region.size = int128_get64(llsize) - 1;
2207dab70beSEugenio Pérez         mem_region.perm = IOMMU_ACCESS_FLAG(true, section->readonly);
2217dab70beSEugenio Pérez 
2227dab70beSEugenio Pérez         r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
22334e3c94eSEugenio Pérez         if (unlikely(r != IOVA_OK)) {
22434e3c94eSEugenio Pérez             error_report("Can't allocate a mapping (%d)", r);
22534e3c94eSEugenio Pérez             goto fail;
22634e3c94eSEugenio Pérez         }
22734e3c94eSEugenio Pérez 
22834e3c94eSEugenio Pérez         iova = mem_region.iova;
22934e3c94eSEugenio Pérez     }
230108a6481SCindy Lu 
231e6db5df7SEugenio Pérez     vhost_vdpa_iotlb_batch_begin_once(v);
232108a6481SCindy Lu     ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
233108a6481SCindy Lu                              vaddr, section->readonly);
234108a6481SCindy Lu     if (ret) {
235108a6481SCindy Lu         error_report("vhost vdpa map fail!");
2367dab70beSEugenio Pérez         goto fail_map;
237108a6481SCindy Lu     }
238108a6481SCindy Lu 
239108a6481SCindy Lu     return;
240108a6481SCindy Lu 
2417dab70beSEugenio Pérez fail_map:
2427dab70beSEugenio Pérez     if (v->shadow_vqs_enabled) {
24369292a8eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, mem_region);
2447dab70beSEugenio Pérez     }
2457dab70beSEugenio Pérez 
246108a6481SCindy Lu fail:
247108a6481SCindy Lu     /*
248108a6481SCindy Lu      * On the initfn path, store the first error in the container so we
249108a6481SCindy Lu      * can gracefully fail.  At runtime, there's not much we can do other
250108a6481SCindy Lu      * than throw a hardware error.
251108a6481SCindy Lu      */
252108a6481SCindy Lu     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
253108a6481SCindy Lu     return;
254108a6481SCindy Lu 
255108a6481SCindy Lu }
256108a6481SCindy Lu 
257108a6481SCindy Lu static void vhost_vdpa_listener_region_del(MemoryListener *listener,
258108a6481SCindy Lu                                            MemoryRegionSection *section)
259108a6481SCindy Lu {
260108a6481SCindy Lu     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
261108a6481SCindy Lu     hwaddr iova;
262108a6481SCindy Lu     Int128 llend, llsize;
263108a6481SCindy Lu     int ret;
264108a6481SCindy Lu 
265013108b6SEugenio Pérez     if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
266013108b6SEugenio Pérez                                             v->iova_range.last)) {
267108a6481SCindy Lu         return;
268108a6481SCindy Lu     }
269108a6481SCindy Lu 
270108a6481SCindy Lu     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
271108a6481SCindy Lu                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
272108a6481SCindy Lu         error_report("%s received unaligned region", __func__);
273108a6481SCindy Lu         return;
274108a6481SCindy Lu     }
275108a6481SCindy Lu 
276108a6481SCindy Lu     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
277032e4d68SEugenio Pérez     llend = vhost_vdpa_section_end(section);
278108a6481SCindy Lu 
279778e67deSLaurent Vivier     trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
280778e67deSLaurent Vivier 
281108a6481SCindy Lu     if (int128_ge(int128_make64(iova), llend)) {
282108a6481SCindy Lu         return;
283108a6481SCindy Lu     }
284108a6481SCindy Lu 
285108a6481SCindy Lu     llsize = int128_sub(llend, int128_make64(iova));
286108a6481SCindy Lu 
28734e3c94eSEugenio Pérez     if (v->shadow_vqs_enabled) {
28834e3c94eSEugenio Pérez         const DMAMap *result;
28934e3c94eSEugenio Pérez         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
29034e3c94eSEugenio Pérez             section->offset_within_region +
29134e3c94eSEugenio Pérez             (iova - section->offset_within_address_space);
29234e3c94eSEugenio Pérez         DMAMap mem_region = {
29334e3c94eSEugenio Pérez             .translated_addr = (hwaddr)(uintptr_t)vaddr,
29434e3c94eSEugenio Pérez             .size = int128_get64(llsize) - 1,
29534e3c94eSEugenio Pérez         };
29634e3c94eSEugenio Pérez 
29734e3c94eSEugenio Pérez         result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
29810dab9f2SEugenio Pérez         if (!result) {
29910dab9f2SEugenio Pérez             /* The memory listener map wasn't mapped */
30010dab9f2SEugenio Pérez             return;
30110dab9f2SEugenio Pérez         }
30234e3c94eSEugenio Pérez         iova = result->iova;
30369292a8eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, *result);
30434e3c94eSEugenio Pérez     }
305e6db5df7SEugenio Pérez     vhost_vdpa_iotlb_batch_begin_once(v);
306108a6481SCindy Lu     ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
307108a6481SCindy Lu     if (ret) {
308108a6481SCindy Lu         error_report("vhost_vdpa dma unmap error!");
309108a6481SCindy Lu     }
310108a6481SCindy Lu 
311108a6481SCindy Lu     memory_region_unref(section->mr);
312108a6481SCindy Lu }
313108a6481SCindy Lu /*
314ef4ff56cSStefano Garzarella  * The IOTLB API is used by vhost-vdpa, which requires incremental updating
315108a6481SCindy Lu  * of the mapping. So we cannot use the generic vhost memory listener, which
316108a6481SCindy Lu  * depends on addnop().
317108a6481SCindy Lu  */
318108a6481SCindy Lu static const MemoryListener vhost_vdpa_memory_listener = {
319142518bdSPeter Xu     .name = "vhost-vdpa",
320a5bd0580SJason Wang     .commit = vhost_vdpa_listener_commit,
321108a6481SCindy Lu     .region_add = vhost_vdpa_listener_region_add,
322108a6481SCindy Lu     .region_del = vhost_vdpa_listener_region_del,
323108a6481SCindy Lu };
324108a6481SCindy Lu 
325108a6481SCindy Lu static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
326108a6481SCindy Lu                              void *arg)
327108a6481SCindy Lu {
328108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
329108a6481SCindy Lu     int fd = v->device_fd;
330f2a6e6c4SKevin Wolf     int ret;
331108a6481SCindy Lu 
332108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
333108a6481SCindy Lu 
334f2a6e6c4SKevin Wolf     ret = ioctl(fd, request, arg);
335f2a6e6c4SKevin Wolf     return ret < 0 ? -errno : ret;
336108a6481SCindy Lu }
337108a6481SCindy Lu 
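/*
 * vhost_vdpa_call() is a thin ioctl(2) wrapper that returns -errno on
 * failure.  A minimal usage sketch, mirroring vhost_vdpa_get_device_id()
 * below:
 *
 *     uint32_t device_id;
 *     int r = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, &device_id);
 *     if (r < 0) {
 *         // r holds -errno of the failed ioctl
 *     }
 */
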
3383631151bSRoman Kagan static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
339108a6481SCindy Lu {
340108a6481SCindy Lu     uint8_t s;
3413631151bSRoman Kagan     int ret;
342108a6481SCindy Lu 
343778e67deSLaurent Vivier     trace_vhost_vdpa_add_status(dev, status);
3443631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
3453631151bSRoman Kagan     if (ret < 0) {
3463631151bSRoman Kagan         return ret;
347108a6481SCindy Lu     }
348108a6481SCindy Lu 
349108a6481SCindy Lu     s |= status;
350108a6481SCindy Lu 
3513631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
3523631151bSRoman Kagan     if (ret < 0) {
3533631151bSRoman Kagan         return ret;
3543631151bSRoman Kagan     }
3553631151bSRoman Kagan 
3563631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
3573631151bSRoman Kagan     if (ret < 0) {
3583631151bSRoman Kagan         return ret;
3593631151bSRoman Kagan     }
3603631151bSRoman Kagan 
3613631151bSRoman Kagan     if (!(s & status)) {
3623631151bSRoman Kagan         return -EIO;
3633631151bSRoman Kagan     }
3643631151bSRoman Kagan 
3653631151bSRoman Kagan     return 0;
366108a6481SCindy Lu }
367108a6481SCindy Lu 
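/*
 * Status bits are only ever OR-ed in, and every call reads the status back,
 * returning -EIO if the device refused to latch the requested bit.  The
 * usual virtio bring-up sequence through this helper looks like (sketch):
 *
 *     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
 *                                VIRTIO_CONFIG_S_DRIVER);       // at init
 *     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);  // features set
 *     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);    // device start
 */
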
368013108b6SEugenio Pérez static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
369013108b6SEugenio Pérez {
370013108b6SEugenio Pérez     int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
371013108b6SEugenio Pérez                               &v->iova_range);
372013108b6SEugenio Pérez     if (ret != 0) {
373013108b6SEugenio Pérez         v->iova_range.first = 0;
374013108b6SEugenio Pérez         v->iova_range.last = UINT64_MAX;
375013108b6SEugenio Pérez     }
376013108b6SEugenio Pérez 
377013108b6SEugenio Pérez     trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
378013108b6SEugenio Pérez                                     v->iova_range.last);
379013108b6SEugenio Pérez }
380013108b6SEugenio Pérez 
381d71b0609SSi-Wei Liu /*
382d71b0609SSi-Wei Liu  * The use of this function is for requests that only need to be
383d71b0609SSi-Wei Liu  * applied once. Typically such a request occurs at the beginning
384d71b0609SSi-Wei Liu  * of operation, before the queues are set up. It should not be
385d71b0609SSi-Wei Liu  * used for requests that must wait until all queues are set,
386d71b0609SSi-Wei Liu  * which would need to check dev->vq_index_end instead.
387d71b0609SSi-Wei Liu  */
388d71b0609SSi-Wei Liu static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
3894d191cfdSJason Wang {
3904d191cfdSJason Wang     struct vhost_vdpa *v = dev->opaque;
3914d191cfdSJason Wang 
392d71b0609SSi-Wei Liu     return v->index == 0;
3934d191cfdSJason Wang }
3944d191cfdSJason Wang 
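/*
 * Typical guard pattern (sketch) for such one-shot requests when several
 * vhost_dev instances of a multiqueue device share one vhost-vdpa backend:
 *
 *     if (!vhost_vdpa_first_dev(dev)) {
 *         return 0;    // already handled by the vhost_dev with index 0
 *     }
 *     // ... issue the once-per-device request ...
 */
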
39512a195faSEugenio Pérez static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
39612a195faSEugenio Pérez                                        uint64_t *features)
39712a195faSEugenio Pérez {
39812a195faSEugenio Pérez     int ret;
39912a195faSEugenio Pérez 
40012a195faSEugenio Pérez     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
40112a195faSEugenio Pérez     trace_vhost_vdpa_get_features(dev, *features);
40212a195faSEugenio Pérez     return ret;
40312a195faSEugenio Pérez }
40412a195faSEugenio Pérez 
405dff4426fSEugenio Pérez static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
406dff4426fSEugenio Pérez                                Error **errp)
407dff4426fSEugenio Pérez {
408dff4426fSEugenio Pérez     g_autoptr(GPtrArray) shadow_vqs = NULL;
4094725a418SEugenio Pérez     uint64_t dev_features, svq_features;
4104725a418SEugenio Pérez     int r;
4114725a418SEugenio Pérez     bool ok;
412dff4426fSEugenio Pérez 
413dff4426fSEugenio Pérez     if (!v->shadow_vqs_enabled) {
414dff4426fSEugenio Pérez         return 0;
415dff4426fSEugenio Pérez     }
416dff4426fSEugenio Pérez 
41712a195faSEugenio Pérez     r = vhost_vdpa_get_dev_features(hdev, &dev_features);
4184725a418SEugenio Pérez     if (r != 0) {
4194725a418SEugenio Pérez         error_setg_errno(errp, -r, "Can't get vdpa device features");
4204725a418SEugenio Pérez         return r;
4214725a418SEugenio Pérez     }
4224725a418SEugenio Pérez 
4234725a418SEugenio Pérez     svq_features = dev_features;
4244725a418SEugenio Pérez     ok = vhost_svq_valid_features(svq_features, errp);
4254725a418SEugenio Pérez     if (unlikely(!ok)) {
4264725a418SEugenio Pérez         return -1;
4274725a418SEugenio Pérez     }
4284725a418SEugenio Pérez 
429dff4426fSEugenio Pérez     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
430dff4426fSEugenio Pérez     for (unsigned n = 0; n < hdev->nvqs; ++n) {
431*3cfb4d06SEugenio Pérez         VhostShadowVirtqueue *svq;
432dff4426fSEugenio Pérez 
433bd907ae4SEugenio Pérez         svq = vhost_svq_new(v->iova_tree, v->shadow_vq_ops,
434bd907ae4SEugenio Pérez                             v->shadow_vq_ops_opaque);
435*3cfb4d06SEugenio Pérez         g_ptr_array_add(shadow_vqs, svq);
436dff4426fSEugenio Pérez     }
437dff4426fSEugenio Pérez 
438dff4426fSEugenio Pérez     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
439dff4426fSEugenio Pérez     return 0;
440dff4426fSEugenio Pérez }
441dff4426fSEugenio Pérez 
44228770ff9SKevin Wolf static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
443108a6481SCindy Lu {
444108a6481SCindy Lu     struct vhost_vdpa *v;
445108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
446778e67deSLaurent Vivier     trace_vhost_vdpa_init(dev, opaque);
447e1c1915bSDavid Hildenbrand     int ret;
448e1c1915bSDavid Hildenbrand 
449e1c1915bSDavid Hildenbrand     /*
450e1c1915bSDavid Hildenbrand      * Similar to VFIO, we end up pinning all guest memory and have to
451e1c1915bSDavid Hildenbrand      * disable discarding of RAM.
452e1c1915bSDavid Hildenbrand      */
453e1c1915bSDavid Hildenbrand     ret = ram_block_discard_disable(true);
454e1c1915bSDavid Hildenbrand     if (ret) {
455e1c1915bSDavid Hildenbrand         error_report("Cannot disable discarding of RAM");
456e1c1915bSDavid Hildenbrand         return ret;
457e1c1915bSDavid Hildenbrand     }
458108a6481SCindy Lu 
459108a6481SCindy Lu     v = opaque;
460a5bd0580SJason Wang     v->dev = dev;
461108a6481SCindy Lu     dev->opaque = opaque;
462108a6481SCindy Lu     v->listener = vhost_vdpa_memory_listener;
463108a6481SCindy Lu     v->msg_type = VHOST_IOTLB_MSG_V2;
464dff4426fSEugenio Pérez     ret = vhost_vdpa_init_svq(dev, v, errp);
465dff4426fSEugenio Pérez     if (ret) {
466dff4426fSEugenio Pérez         goto err;
467dff4426fSEugenio Pérez     }
468108a6481SCindy Lu 
469013108b6SEugenio Pérez     vhost_vdpa_get_iova_range(v);
4704d191cfdSJason Wang 
471d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
4724d191cfdSJason Wang         return 0;
4734d191cfdSJason Wang     }
4744d191cfdSJason Wang 
475108a6481SCindy Lu     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
476108a6481SCindy Lu                                VIRTIO_CONFIG_S_DRIVER);
477108a6481SCindy Lu 
478108a6481SCindy Lu     return 0;
479dff4426fSEugenio Pérez 
480dff4426fSEugenio Pérez err:
481dff4426fSEugenio Pérez     ram_block_discard_disable(false);
482dff4426fSEugenio Pérez     return ret;
483108a6481SCindy Lu }
484108a6481SCindy Lu 
485d0416d48SJason Wang static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
486d0416d48SJason Wang                                             int queue_index)
487d0416d48SJason Wang {
4888e3b0cbbSMarc-André Lureau     size_t page_size = qemu_real_host_page_size();
489d0416d48SJason Wang     struct vhost_vdpa *v = dev->opaque;
490d0416d48SJason Wang     VirtIODevice *vdev = dev->vdev;
491d0416d48SJason Wang     VhostVDPAHostNotifier *n;
492d0416d48SJason Wang 
493d0416d48SJason Wang     n = &v->notifier[queue_index];
494d0416d48SJason Wang 
495d0416d48SJason Wang     if (n->addr) {
496d0416d48SJason Wang         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
497d0416d48SJason Wang         object_unparent(OBJECT(&n->mr));
498d0416d48SJason Wang         munmap(n->addr, page_size);
499d0416d48SJason Wang         n->addr = NULL;
500d0416d48SJason Wang     }
501d0416d48SJason Wang }
502d0416d48SJason Wang 
503d0416d48SJason Wang static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
504d0416d48SJason Wang {
5058e3b0cbbSMarc-André Lureau     size_t page_size = qemu_real_host_page_size();
506d0416d48SJason Wang     struct vhost_vdpa *v = dev->opaque;
507d0416d48SJason Wang     VirtIODevice *vdev = dev->vdev;
508d0416d48SJason Wang     VhostVDPAHostNotifier *n;
509d0416d48SJason Wang     int fd = v->device_fd;
510d0416d48SJason Wang     void *addr;
511d0416d48SJason Wang     char *name;
512d0416d48SJason Wang 
513d0416d48SJason Wang     vhost_vdpa_host_notifier_uninit(dev, queue_index);
514d0416d48SJason Wang 
515d0416d48SJason Wang     n = &v->notifier[queue_index];
516d0416d48SJason Wang 
517d0416d48SJason Wang     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
518d0416d48SJason Wang                 queue_index * page_size);
519d0416d48SJason Wang     if (addr == MAP_FAILED) {
520d0416d48SJason Wang         goto err;
521d0416d48SJason Wang     }
522d0416d48SJason Wang 
523d0416d48SJason Wang     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
524d0416d48SJason Wang                            v, queue_index);
525d0416d48SJason Wang     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
526d0416d48SJason Wang                                       page_size, addr);
527d0416d48SJason Wang     g_free(name);
528d0416d48SJason Wang 
529d0416d48SJason Wang     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
53098f7607eSLaurent Vivier         object_unparent(OBJECT(&n->mr));
531d0416d48SJason Wang         munmap(addr, page_size);
532d0416d48SJason Wang         goto err;
533d0416d48SJason Wang     }
534d0416d48SJason Wang     n->addr = addr;
535d0416d48SJason Wang 
536d0416d48SJason Wang     return 0;
537d0416d48SJason Wang 
538d0416d48SJason Wang err:
539d0416d48SJason Wang     return -1;
540d0416d48SJason Wang }
541d0416d48SJason Wang 
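/*
 * Note on the mmap offset above: the vhost-vdpa kernel driver exposes the
 * doorbell page of virtqueue N at offset N * page_size of the device fd,
 * which is why the mapping uses "queue_index * page_size".  Once wired up
 * as a host notifier memory region, the guest kicks the device by writing
 * to that page directly instead of trapping into QEMU.
 */
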
542b1f030a0SLaurent Vivier static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
543b1f030a0SLaurent Vivier {
544b1f030a0SLaurent Vivier     int i;
545b1f030a0SLaurent Vivier 
546b1f030a0SLaurent Vivier     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
547b1f030a0SLaurent Vivier         vhost_vdpa_host_notifier_uninit(dev, i);
548b1f030a0SLaurent Vivier     }
549b1f030a0SLaurent Vivier }
550b1f030a0SLaurent Vivier 
551d0416d48SJason Wang static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
552d0416d48SJason Wang {
553dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
554d0416d48SJason Wang     int i;
555d0416d48SJason Wang 
556dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
557dff4426fSEugenio Pérez         /* FIXME SVQ is not compatible with the host notifier memory regions */
558dff4426fSEugenio Pérez         return;
559dff4426fSEugenio Pérez     }
560dff4426fSEugenio Pérez 
561d0416d48SJason Wang     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
562d0416d48SJason Wang         if (vhost_vdpa_host_notifier_init(dev, i)) {
563d0416d48SJason Wang             goto err;
564d0416d48SJason Wang         }
565d0416d48SJason Wang     }
566d0416d48SJason Wang 
567d0416d48SJason Wang     return;
568d0416d48SJason Wang 
569d0416d48SJason Wang err:
570b1f030a0SLaurent Vivier     vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
571d0416d48SJason Wang     return;
572d0416d48SJason Wang }
573d0416d48SJason Wang 
574dff4426fSEugenio Pérez static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
575dff4426fSEugenio Pérez {
576dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
577dff4426fSEugenio Pérez     size_t idx;
578dff4426fSEugenio Pérez 
579dff4426fSEugenio Pérez     if (!v->shadow_vqs) {
580dff4426fSEugenio Pérez         return;
581dff4426fSEugenio Pérez     }
582dff4426fSEugenio Pérez 
583dff4426fSEugenio Pérez     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
584dff4426fSEugenio Pérez         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
585dff4426fSEugenio Pérez     }
586dff4426fSEugenio Pérez     g_ptr_array_free(v->shadow_vqs, true);
587dff4426fSEugenio Pérez }
588dff4426fSEugenio Pérez 
589108a6481SCindy Lu static int vhost_vdpa_cleanup(struct vhost_dev *dev)
590108a6481SCindy Lu {
591108a6481SCindy Lu     struct vhost_vdpa *v;
592108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
593108a6481SCindy Lu     v = dev->opaque;
594778e67deSLaurent Vivier     trace_vhost_vdpa_cleanup(dev, v);
595d0416d48SJason Wang     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
596108a6481SCindy Lu     memory_listener_unregister(&v->listener);
597dff4426fSEugenio Pérez     vhost_vdpa_svq_cleanup(dev);
598108a6481SCindy Lu 
599108a6481SCindy Lu     dev->opaque = NULL;
600e1c1915bSDavid Hildenbrand     ram_block_discard_disable(false);
601e1c1915bSDavid Hildenbrand 
602108a6481SCindy Lu     return 0;
603108a6481SCindy Lu }
604108a6481SCindy Lu 
605108a6481SCindy Lu static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
606108a6481SCindy Lu {
607778e67deSLaurent Vivier     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
608108a6481SCindy Lu     return INT_MAX;
609108a6481SCindy Lu }
610108a6481SCindy Lu 
611108a6481SCindy Lu static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
612108a6481SCindy Lu                                     struct vhost_memory *mem)
613108a6481SCindy Lu {
614d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
6154d191cfdSJason Wang         return 0;
6164d191cfdSJason Wang     }
6174d191cfdSJason Wang 
618778e67deSLaurent Vivier     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
619778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
620778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
621778e67deSLaurent Vivier         int i;
622778e67deSLaurent Vivier         for (i = 0; i < mem->nregions; i++) {
623778e67deSLaurent Vivier             trace_vhost_vdpa_dump_regions(dev, i,
624778e67deSLaurent Vivier                                           mem->regions[i].guest_phys_addr,
625778e67deSLaurent Vivier                                           mem->regions[i].memory_size,
626778e67deSLaurent Vivier                                           mem->regions[i].userspace_addr,
627778e67deSLaurent Vivier                                           mem->regions[i].flags_padding);
628778e67deSLaurent Vivier         }
629778e67deSLaurent Vivier     }
630108a6481SCindy Lu     if (mem->padding) {
6313631151bSRoman Kagan         return -EINVAL;
632108a6481SCindy Lu     }
633108a6481SCindy Lu 
634108a6481SCindy Lu     return 0;
635108a6481SCindy Lu }
636108a6481SCindy Lu 
637108a6481SCindy Lu static int vhost_vdpa_set_features(struct vhost_dev *dev,
638108a6481SCindy Lu                                    uint64_t features)
639108a6481SCindy Lu {
64012a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
641108a6481SCindy Lu     int ret;
6424d191cfdSJason Wang 
643d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
6444d191cfdSJason Wang         return 0;
6454d191cfdSJason Wang     }
6464d191cfdSJason Wang 
64712a195faSEugenio Pérez     if (v->shadow_vqs_enabled) {
64812a195faSEugenio Pérez         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
64912a195faSEugenio Pérez             /*
65012a195faSEugenio Pérez              * QEMU is just trying to enable or disable logging. SVQ handles
65112a195faSEugenio Pérez              * this separately, so no need to forward this.
65212a195faSEugenio Pérez              */
65312a195faSEugenio Pérez             v->acked_features = features;
65412a195faSEugenio Pérez             return 0;
65512a195faSEugenio Pérez         }
65612a195faSEugenio Pérez 
65712a195faSEugenio Pérez         v->acked_features = features;
65812a195faSEugenio Pérez 
65912a195faSEugenio Pérez         /* We must not ack _F_LOG if SVQ is enabled */
66012a195faSEugenio Pérez         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
66112a195faSEugenio Pérez     }
66212a195faSEugenio Pérez 
663778e67deSLaurent Vivier     trace_vhost_vdpa_set_features(dev, features);
664108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
665108a6481SCindy Lu     if (ret) {
666108a6481SCindy Lu         return ret;
667108a6481SCindy Lu     }
668108a6481SCindy Lu 
6693631151bSRoman Kagan     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
670108a6481SCindy Lu }
671108a6481SCindy Lu 
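/*
 * With shadow virtqueues enabled, VHOST_F_LOG_ALL is emulated by SVQ and
 * never reaches the device: a request that merely toggles the log bit is
 * absorbed above (recorded in v->acked_features and returned early), and
 * any feature set that does go down to VHOST_SET_FEATURES has the log bit
 * masked out first.
 */
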
672a5bd0580SJason Wang static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
673a5bd0580SJason Wang {
674a5bd0580SJason Wang     uint64_t features;
675a5bd0580SJason Wang     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
676a5bd0580SJason Wang         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
677a5bd0580SJason Wang     int r;
678a5bd0580SJason Wang 
679a5bd0580SJason Wang     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
6802a83e97eSJason Wang         return -EFAULT;
681a5bd0580SJason Wang     }
682a5bd0580SJason Wang 
683a5bd0580SJason Wang     features &= f;
6844d191cfdSJason Wang 
685d71b0609SSi-Wei Liu     if (vhost_vdpa_first_dev(dev)) {
686a5bd0580SJason Wang         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
687a5bd0580SJason Wang         if (r) {
6882a83e97eSJason Wang             return -EFAULT;
689a5bd0580SJason Wang         }
6904d191cfdSJason Wang     }
691a5bd0580SJason Wang 
692a5bd0580SJason Wang     dev->backend_cap = features;
693a5bd0580SJason Wang 
694a5bd0580SJason Wang     return 0;
695a5bd0580SJason Wang }
696a5bd0580SJason Wang 
697c232b8f4SZenghui Yu static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
698108a6481SCindy Lu                                     uint32_t *device_id)
699108a6481SCindy Lu {
700778e67deSLaurent Vivier     int ret;
701778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
702778e67deSLaurent Vivier     trace_vhost_vdpa_get_device_id(dev, *device_id);
703778e67deSLaurent Vivier     return ret;
704108a6481SCindy Lu }
705108a6481SCindy Lu 
706dff4426fSEugenio Pérez static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
707dff4426fSEugenio Pérez {
708dff4426fSEugenio Pérez     if (!v->shadow_vqs_enabled) {
709dff4426fSEugenio Pérez         return;
710dff4426fSEugenio Pérez     }
711dff4426fSEugenio Pérez 
712dff4426fSEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
713dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
714dff4426fSEugenio Pérez         vhost_svq_stop(svq);
715dff4426fSEugenio Pérez     }
716dff4426fSEugenio Pérez }
717dff4426fSEugenio Pérez 
718108a6481SCindy Lu static int vhost_vdpa_reset_device(struct vhost_dev *dev)
719108a6481SCindy Lu {
720dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
721778e67deSLaurent Vivier     int ret;
722108a6481SCindy Lu     uint8_t status = 0;
723108a6481SCindy Lu 
724dff4426fSEugenio Pérez     vhost_vdpa_reset_svq(v);
725dff4426fSEugenio Pérez 
726778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
727778e67deSLaurent Vivier     trace_vhost_vdpa_reset_device(dev, status);
728778e67deSLaurent Vivier     return ret;
729108a6481SCindy Lu }
730108a6481SCindy Lu 
731108a6481SCindy Lu static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
732108a6481SCindy Lu {
733108a6481SCindy Lu     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
734108a6481SCindy Lu 
735353244d8SJason Wang     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
736353244d8SJason Wang     return idx;
737108a6481SCindy Lu }
738108a6481SCindy Lu 
739108a6481SCindy Lu static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
740108a6481SCindy Lu {
741108a6481SCindy Lu     int i;
742778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_ready(dev);
743108a6481SCindy Lu     for (i = 0; i < dev->nvqs; ++i) {
744108a6481SCindy Lu         struct vhost_vring_state state = {
745108a6481SCindy Lu             .index = dev->vq_index + i,
746108a6481SCindy Lu             .num = 1,
747108a6481SCindy Lu         };
748108a6481SCindy Lu         vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
749108a6481SCindy Lu     }
750108a6481SCindy Lu     return 0;
751108a6481SCindy Lu }
752108a6481SCindy Lu 
753778e67deSLaurent Vivier static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
754778e67deSLaurent Vivier                                    uint32_t config_len)
755778e67deSLaurent Vivier {
756778e67deSLaurent Vivier     int b, len;
757778e67deSLaurent Vivier     char line[QEMU_HEXDUMP_LINE_LEN];
758778e67deSLaurent Vivier 
759778e67deSLaurent Vivier     for (b = 0; b < config_len; b += 16) {
760778e67deSLaurent Vivier         len = config_len - b;
761778e67deSLaurent Vivier         qemu_hexdump_line(line, b, config, len, false);
762778e67deSLaurent Vivier         trace_vhost_vdpa_dump_config(dev, line);
763778e67deSLaurent Vivier     }
764778e67deSLaurent Vivier }
765778e67deSLaurent Vivier 
766108a6481SCindy Lu static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
767108a6481SCindy Lu                                    uint32_t offset, uint32_t size,
768108a6481SCindy Lu                                    uint32_t flags)
769108a6481SCindy Lu {
770108a6481SCindy Lu     struct vhost_vdpa_config *config;
771108a6481SCindy Lu     int ret;
772108a6481SCindy Lu     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
773986d4f78SLi Qiang 
774778e67deSLaurent Vivier     trace_vhost_vdpa_set_config(dev, offset, size, flags);
775108a6481SCindy Lu     config = g_malloc(size + config_size);
776108a6481SCindy Lu     config->off = offset;
777108a6481SCindy Lu     config->len = size;
778108a6481SCindy Lu     memcpy(config->buf, data, size);
779778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
780778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
781778e67deSLaurent Vivier         vhost_vdpa_dump_config(dev, data, size);
782778e67deSLaurent Vivier     }
783108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
784108a6481SCindy Lu     g_free(config);
785108a6481SCindy Lu     return ret;
786108a6481SCindy Lu }
787108a6481SCindy Lu 
788108a6481SCindy Lu static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
78950de5138SKevin Wolf                                    uint32_t config_len, Error **errp)
790108a6481SCindy Lu {
791108a6481SCindy Lu     struct vhost_vdpa_config *v_config;
792108a6481SCindy Lu     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
793108a6481SCindy Lu     int ret;
794108a6481SCindy Lu 
795778e67deSLaurent Vivier     trace_vhost_vdpa_get_config(dev, config, config_len);
796108a6481SCindy Lu     v_config = g_malloc(config_len + config_size);
797108a6481SCindy Lu     v_config->len = config_len;
798108a6481SCindy Lu     v_config->off = 0;
799108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
800108a6481SCindy Lu     memcpy(config, v_config->buf, config_len);
801108a6481SCindy Lu     g_free(v_config);
802778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
803778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
804778e67deSLaurent Vivier         vhost_vdpa_dump_config(dev, config, config_len);
805778e67deSLaurent Vivier     }
806108a6481SCindy Lu     return ret;
807108a6481SCindy Lu }
808108a6481SCindy Lu 
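/*
 * Usage sketch (hypothetical caller, vhost-vdpa net backend assumed): read
 * the MAC address out of the virtio-net config space with this helper.
 *
 *     struct virtio_net_config netcfg;
 *     Error *local_err = NULL;
 *     int r = vhost_vdpa_get_config(dev, (uint8_t *)&netcfg, sizeof(netcfg),
 *                                   &local_err);
 *     if (r == 0) {
 *         // netcfg.mac now holds the device MAC address
 *     }
 */
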
809d96be4c8SEugenio Pérez static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
810d96be4c8SEugenio Pérez                                          struct vhost_vring_state *ring)
811d96be4c8SEugenio Pérez {
812d96be4c8SEugenio Pérez     trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
813d96be4c8SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
814d96be4c8SEugenio Pérez }
815d96be4c8SEugenio Pérez 
816dff4426fSEugenio Pérez static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
817dff4426fSEugenio Pérez                                          struct vhost_vring_file *file)
818dff4426fSEugenio Pérez {
819dff4426fSEugenio Pérez     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
820dff4426fSEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
821dff4426fSEugenio Pérez }
822dff4426fSEugenio Pérez 
823a8ac8858SEugenio Pérez static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
824a8ac8858SEugenio Pérez                                          struct vhost_vring_file *file)
825a8ac8858SEugenio Pérez {
826a8ac8858SEugenio Pérez     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
827a8ac8858SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
828a8ac8858SEugenio Pérez }
829a8ac8858SEugenio Pérez 
830d96be4c8SEugenio Pérez static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
831d96be4c8SEugenio Pérez                                          struct vhost_vring_addr *addr)
832d96be4c8SEugenio Pérez {
833d96be4c8SEugenio Pérez     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
834d96be4c8SEugenio Pérez                                 addr->desc_user_addr, addr->used_user_addr,
835d96be4c8SEugenio Pérez                                 addr->avail_user_addr,
836d96be4c8SEugenio Pérez                                 addr->log_guest_addr);
837d96be4c8SEugenio Pérez 
838d96be4c8SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
839d96be4c8SEugenio Pérez 
840d96be4c8SEugenio Pérez }
841d96be4c8SEugenio Pérez 
842dff4426fSEugenio Pérez /**
843dff4426fSEugenio Pérez  * Set the shadow virtqueue descriptors to the device
844dff4426fSEugenio Pérez  *
845dff4426fSEugenio Pérez  * @dev: The vhost device model
846dff4426fSEugenio Pérez  * @svq: The shadow virtqueue
847dff4426fSEugenio Pérez  * @idx: The index of the virtqueue in the vhost device
848dff4426fSEugenio Pérez  * @errp: Error
849a8ac8858SEugenio Pérez  *
850a8ac8858SEugenio Pérez  * Note that this function does not rewind the kick file descriptor if it
851a8ac8858SEugenio Pérez  * cannot set the call one.
852dff4426fSEugenio Pérez  */
853100890f7SEugenio Pérez static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
854dff4426fSEugenio Pérez                                   VhostShadowVirtqueue *svq, unsigned idx,
855dff4426fSEugenio Pérez                                   Error **errp)
856dff4426fSEugenio Pérez {
857dff4426fSEugenio Pérez     struct vhost_vring_file file = {
858dff4426fSEugenio Pérez         .index = dev->vq_index + idx,
859dff4426fSEugenio Pérez     };
860dff4426fSEugenio Pérez     const EventNotifier *event_notifier = &svq->hdev_kick;
861dff4426fSEugenio Pérez     int r;
862dff4426fSEugenio Pérez 
863*3cfb4d06SEugenio Pérez     r = event_notifier_init(&svq->hdev_kick, 0);
864*3cfb4d06SEugenio Pérez     if (r != 0) {
865*3cfb4d06SEugenio Pérez         error_setg_errno(errp, -r, "Couldn't create kick event notifier");
866*3cfb4d06SEugenio Pérez         goto err_init_hdev_kick;
867*3cfb4d06SEugenio Pérez     }
868*3cfb4d06SEugenio Pérez 
869*3cfb4d06SEugenio Pérez     r = event_notifier_init(&svq->hdev_call, 0);
870*3cfb4d06SEugenio Pérez     if (r != 0) {
871*3cfb4d06SEugenio Pérez         error_setg_errno(errp, -r, "Couldn't create call event notifier");
872*3cfb4d06SEugenio Pérez         goto err_init_hdev_call;
873*3cfb4d06SEugenio Pérez     }
874*3cfb4d06SEugenio Pérez 
875dff4426fSEugenio Pérez     file.fd = event_notifier_get_fd(event_notifier);
876dff4426fSEugenio Pérez     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
877dff4426fSEugenio Pérez     if (unlikely(r != 0)) {
878dff4426fSEugenio Pérez         error_setg_errno(errp, -r, "Can't set device kick fd");
879*3cfb4d06SEugenio Pérez         goto err_init_set_dev_fd;
880a8ac8858SEugenio Pérez     }
881a8ac8858SEugenio Pérez 
882a8ac8858SEugenio Pérez     event_notifier = &svq->hdev_call;
883a8ac8858SEugenio Pérez     file.fd = event_notifier_get_fd(event_notifier);
884a8ac8858SEugenio Pérez     r = vhost_vdpa_set_vring_dev_call(dev, &file);
885a8ac8858SEugenio Pérez     if (unlikely(r != 0)) {
886a8ac8858SEugenio Pérez         error_setg_errno(errp, -r, "Can't set device call fd");
887*3cfb4d06SEugenio Pérez         goto err_init_set_dev_fd;
888dff4426fSEugenio Pérez     }
889dff4426fSEugenio Pérez 
890*3cfb4d06SEugenio Pérez     return 0;
891*3cfb4d06SEugenio Pérez 
892*3cfb4d06SEugenio Pérez err_init_set_dev_fd:
893*3cfb4d06SEugenio Pérez     event_notifier_set_handler(&svq->hdev_call, NULL);
894*3cfb4d06SEugenio Pérez 
895*3cfb4d06SEugenio Pérez err_init_hdev_call:
896*3cfb4d06SEugenio Pérez     event_notifier_cleanup(&svq->hdev_kick);
897*3cfb4d06SEugenio Pérez 
898*3cfb4d06SEugenio Pérez err_init_hdev_kick:
899100890f7SEugenio Pérez     return r;
900100890f7SEugenio Pérez }
901100890f7SEugenio Pérez 
902100890f7SEugenio Pérez /**
903100890f7SEugenio Pérez  * Unmap a SVQ area in the device
904100890f7SEugenio Pérez  */
9058b6d6119SEugenio Pérez static void vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v, hwaddr addr)
906100890f7SEugenio Pérez {
9078b6d6119SEugenio Pérez     const DMAMap needle = {
9088b6d6119SEugenio Pérez         .translated_addr = addr,
9098b6d6119SEugenio Pérez     };
9108b6d6119SEugenio Pérez     const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, &needle);
91134e3c94eSEugenio Pérez     hwaddr size;
912100890f7SEugenio Pérez     int r;
913100890f7SEugenio Pérez 
91434e3c94eSEugenio Pérez     if (unlikely(!result)) {
91534e3c94eSEugenio Pérez         error_report("Unable to find SVQ address to unmap");
9165b590f51SEugenio Pérez         return;
91734e3c94eSEugenio Pérez     }
91834e3c94eSEugenio Pérez 
9198e3b0cbbSMarc-André Lureau     size = ROUND_UP(result->size, qemu_real_host_page_size());
92034e3c94eSEugenio Pérez     r = vhost_vdpa_dma_unmap(v, result->iova, size);
921b37c12beSEugenio Pérez     if (unlikely(r < 0)) {
922b37c12beSEugenio Pérez         error_report("Unable to unmap SVQ vring: %s (%d)", g_strerror(-r), -r);
9235b590f51SEugenio Pérez         return;
924b37c12beSEugenio Pérez     }
925b37c12beSEugenio Pérez 
926b37c12beSEugenio Pérez     vhost_iova_tree_remove(v->iova_tree, *result);
927100890f7SEugenio Pérez }
928100890f7SEugenio Pérez 
9295b590f51SEugenio Pérez static void vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
930100890f7SEugenio Pérez                                        const VhostShadowVirtqueue *svq)
931100890f7SEugenio Pérez {
932100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
933100890f7SEugenio Pérez     struct vhost_vring_addr svq_addr;
934100890f7SEugenio Pérez 
935100890f7SEugenio Pérez     vhost_svq_get_vring_addr(svq, &svq_addr);
936100890f7SEugenio Pérez 
9378b6d6119SEugenio Pérez     vhost_vdpa_svq_unmap_ring(v, svq_addr.desc_user_addr);
938100890f7SEugenio Pérez 
9398b6d6119SEugenio Pérez     vhost_vdpa_svq_unmap_ring(v, svq_addr.used_user_addr);
94034e3c94eSEugenio Pérez }
94134e3c94eSEugenio Pérez 
94234e3c94eSEugenio Pérez /**
94334e3c94eSEugenio Pérez  * Map the SVQ area in the device
94434e3c94eSEugenio Pérez  *
94534e3c94eSEugenio Pérez  * @v: Vhost-vdpa device
94634e3c94eSEugenio Pérez  * @needle: The area to map; on success the allocated iova is stored in it
94734e3c94eSEugenio Pérez  * @errp: Error pointer
94834e3c94eSEugenio Pérez  */
94934e3c94eSEugenio Pérez static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
95034e3c94eSEugenio Pérez                                     Error **errp)
95134e3c94eSEugenio Pérez {
95234e3c94eSEugenio Pérez     int r;
95334e3c94eSEugenio Pérez 
95434e3c94eSEugenio Pérez     r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
95534e3c94eSEugenio Pérez     if (unlikely(r != IOVA_OK)) {
95634e3c94eSEugenio Pérez         error_setg(errp, "Cannot allocate iova (%d)", r);
95734e3c94eSEugenio Pérez         return false;
95834e3c94eSEugenio Pérez     }
95934e3c94eSEugenio Pérez 
96034e3c94eSEugenio Pérez     r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
96134e3c94eSEugenio Pérez                            (void *)(uintptr_t)needle->translated_addr,
96234e3c94eSEugenio Pérez                            needle->perm == IOMMU_RO);
96334e3c94eSEugenio Pérez     if (unlikely(r != 0)) {
96434e3c94eSEugenio Pérez         error_setg_errno(errp, -r, "Cannot map region to device");
96569292a8eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, *needle);
96634e3c94eSEugenio Pérez     }
96734e3c94eSEugenio Pérez 
96834e3c94eSEugenio Pérez     return r == 0;
969100890f7SEugenio Pérez }
970100890f7SEugenio Pérez 
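/*
 * Usage sketch: let the IOVA tree pick an IOVA for a host VA range and
 * program it into the device ('v', 'host_ptr' and 'len' are assumed names).
 *
 *     DMAMap needle = {
 *         .translated_addr = (hwaddr)(uintptr_t)host_ptr,
 *         .size = len - 1,             // DMAMap.size is an inclusive size
 *         .perm = IOMMU_RW,
 *     };
 *     if (vhost_vdpa_svq_map_ring(v, &needle, errp)) {
 *         // needle.iova now holds the IOVA the device must use
 *     }
 */
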
971100890f7SEugenio Pérez /**
972100890f7SEugenio Pérez  * Map the shadow virtqueue rings in the device
973100890f7SEugenio Pérez  *
974100890f7SEugenio Pérez  * @dev: The vhost device
975100890f7SEugenio Pérez  * @svq: The shadow virtqueue
976100890f7SEugenio Pérez  * @addr: Assigned IOVA addresses
977100890f7SEugenio Pérez  * @errp: Error pointer
978100890f7SEugenio Pérez  */
979100890f7SEugenio Pérez static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
980100890f7SEugenio Pérez                                      const VhostShadowVirtqueue *svq,
981100890f7SEugenio Pérez                                      struct vhost_vring_addr *addr,
982100890f7SEugenio Pérez                                      Error **errp)
983100890f7SEugenio Pérez {
98405e385d2SMarkus Armbruster     ERRP_GUARD();
98534e3c94eSEugenio Pérez     DMAMap device_region, driver_region;
98634e3c94eSEugenio Pérez     struct vhost_vring_addr svq_addr;
987100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
988100890f7SEugenio Pérez     size_t device_size = vhost_svq_device_area_size(svq);
989100890f7SEugenio Pérez     size_t driver_size = vhost_svq_driver_area_size(svq);
99034e3c94eSEugenio Pérez     size_t avail_offset;
99134e3c94eSEugenio Pérez     bool ok;
992100890f7SEugenio Pérez 
99334e3c94eSEugenio Pérez     vhost_svq_get_vring_addr(svq, &svq_addr);
994100890f7SEugenio Pérez 
99534e3c94eSEugenio Pérez     driver_region = (DMAMap) {
99634e3c94eSEugenio Pérez         .translated_addr = svq_addr.desc_user_addr,
99734e3c94eSEugenio Pérez         .size = driver_size - 1,
99834e3c94eSEugenio Pérez         .perm = IOMMU_RO,
99934e3c94eSEugenio Pérez     };
100034e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
100134e3c94eSEugenio Pérez     if (unlikely(!ok)) {
100234e3c94eSEugenio Pérez         error_prepend(errp, "Cannot create vq driver region: ");
1003100890f7SEugenio Pérez         return false;
1004100890f7SEugenio Pérez     }
100534e3c94eSEugenio Pérez     addr->desc_user_addr = driver_region.iova;
100634e3c94eSEugenio Pérez     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
100734e3c94eSEugenio Pérez     addr->avail_user_addr = driver_region.iova + avail_offset;
1008100890f7SEugenio Pérez 
100934e3c94eSEugenio Pérez     device_region = (DMAMap) {
101034e3c94eSEugenio Pérez         .translated_addr = svq_addr.used_user_addr,
101134e3c94eSEugenio Pérez         .size = device_size - 1,
101234e3c94eSEugenio Pérez         .perm = IOMMU_RW,
101334e3c94eSEugenio Pérez     };
101434e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
101534e3c94eSEugenio Pérez     if (unlikely(!ok)) {
101634e3c94eSEugenio Pérez         error_prepend(errp, "Cannot create vq device region: ");
10178b6d6119SEugenio Pérez         vhost_vdpa_svq_unmap_ring(v, driver_region.translated_addr);
1018100890f7SEugenio Pérez     }
101934e3c94eSEugenio Pérez     addr->used_user_addr = device_region.iova;
1020100890f7SEugenio Pérez 
102134e3c94eSEugenio Pérez     return ok;
1022100890f7SEugenio Pérez }
1023100890f7SEugenio Pérez 
1024100890f7SEugenio Pérez static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
1025100890f7SEugenio Pérez                                  VhostShadowVirtqueue *svq, unsigned idx,
1026100890f7SEugenio Pérez                                  Error **errp)
1027100890f7SEugenio Pérez {
1028100890f7SEugenio Pérez     uint16_t vq_index = dev->vq_index + idx;
1029100890f7SEugenio Pérez     struct vhost_vring_state s = {
1030100890f7SEugenio Pérez         .index = vq_index,
1031100890f7SEugenio Pérez     };
1032100890f7SEugenio Pérez     int r;
1033100890f7SEugenio Pérez 
1034100890f7SEugenio Pérez     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1035100890f7SEugenio Pérez     if (unlikely(r)) {
1036100890f7SEugenio Pérez         error_setg_errno(errp, -r, "Cannot set vring base");
1037100890f7SEugenio Pérez         return false;
1038100890f7SEugenio Pérez     }
1039100890f7SEugenio Pérez 
1040100890f7SEugenio Pérez     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1041dff4426fSEugenio Pérez     return r == 0;
1042dff4426fSEugenio Pérez }
1043dff4426fSEugenio Pérez 
1044dff4426fSEugenio Pérez static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1045dff4426fSEugenio Pérez {
1046dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1047dff4426fSEugenio Pérez     Error *err = NULL;
1048dff4426fSEugenio Pérez     unsigned i;
1049dff4426fSEugenio Pérez 
1050712c1a31SEugenio Pérez     if (!v->shadow_vqs_enabled) {
1051dff4426fSEugenio Pérez         return true;
1052dff4426fSEugenio Pérez     }
1053dff4426fSEugenio Pérez 
1054dff4426fSEugenio Pérez     for (i = 0; i < v->shadow_vqs->len; ++i) {
1055100890f7SEugenio Pérez         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1056dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1057100890f7SEugenio Pérez         struct vhost_vring_addr addr = {
10581c82fdfeSEugenio Pérez             .index = dev->vq_index + i,
1059100890f7SEugenio Pérez         };
1060100890f7SEugenio Pérez         int r;
1061dff4426fSEugenio Pérez         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1062dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1063100890f7SEugenio Pérez             goto err;
1064100890f7SEugenio Pérez         }
1065100890f7SEugenio Pérez 
1066100890f7SEugenio Pérez         vhost_svq_start(svq, dev->vdev, vq);
1067100890f7SEugenio Pérez         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1068100890f7SEugenio Pérez         if (unlikely(!ok)) {
1069100890f7SEugenio Pérez             goto err_map;
1070100890f7SEugenio Pérez         }
1071100890f7SEugenio Pérez 
1072100890f7SEugenio Pérez         /* Override vring GPA set by vhost subsystem */
1073100890f7SEugenio Pérez         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1074100890f7SEugenio Pérez         if (unlikely(r != 0)) {
1075100890f7SEugenio Pérez             error_setg_errno(&err, -r, "Cannot set device address");
1076100890f7SEugenio Pérez             goto err_set_addr;
1077100890f7SEugenio Pérez         }
1078100890f7SEugenio Pérez     }
1079100890f7SEugenio Pérez 
1080100890f7SEugenio Pérez     return true;
1081100890f7SEugenio Pérez 
1082100890f7SEugenio Pérez err_set_addr:
1083100890f7SEugenio Pérez     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1084100890f7SEugenio Pérez 
1085100890f7SEugenio Pérez err_map:
1086100890f7SEugenio Pérez     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1087100890f7SEugenio Pérez 
1088100890f7SEugenio Pérez err:
1089dff4426fSEugenio Pérez     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1090100890f7SEugenio Pérez     for (unsigned j = 0; j < i; ++j) {
1091100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1092100890f7SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
1093100890f7SEugenio Pérez         vhost_svq_stop(svq);
1094100890f7SEugenio Pérez     }
1095100890f7SEugenio Pérez 
1096100890f7SEugenio Pérez     return false;
1097100890f7SEugenio Pérez }
1098100890f7SEugenio Pérez 
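/*
 * Unmap every shadow virtqueue ring from the device IOVA space and clean
 * up the device-facing kick/call event notifiers.
 */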
10995b590f51SEugenio Pérez static void vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1100100890f7SEugenio Pérez {
1101100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1102100890f7SEugenio Pérez 
1103712c1a31SEugenio Pérez     if (!v->shadow_vqs_enabled) {
11045b590f51SEugenio Pérez         return;
1105100890f7SEugenio Pérez     }
1106100890f7SEugenio Pérez 
1107100890f7SEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1108100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
11095b590f51SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
1110*3cfb4d06SEugenio Pérez 
1111*3cfb4d06SEugenio Pérez         event_notifier_cleanup(&svq->hdev_kick);
1112*3cfb4d06SEugenio Pérez         event_notifier_cleanup(&svq->hdev_call);
1113dff4426fSEugenio Pérez     }
1114dff4426fSEugenio Pérez }
1115dff4426fSEugenio Pérez 
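/*
 * Start or stop the device.  Host notifiers, shadow virtqueues and vring
 * ready state are handled per vhost_dev; the memory listener and the
 * DRIVER_OK / reset status transitions are deferred until the last
 * virtqueue group of the device has been processed.
 */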
1116108a6481SCindy Lu static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1117108a6481SCindy Lu {
1118108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
1119dff4426fSEugenio Pérez     bool ok;
1120778e67deSLaurent Vivier     trace_vhost_vdpa_dev_start(dev, started);
11214d191cfdSJason Wang 
11224d191cfdSJason Wang     if (started) {
11234d191cfdSJason Wang         vhost_vdpa_host_notifiers_init(dev);
1124dff4426fSEugenio Pérez         ok = vhost_vdpa_svqs_start(dev);
1125dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1126dff4426fSEugenio Pérez             return -1;
1127dff4426fSEugenio Pérez         }
11284d191cfdSJason Wang         vhost_vdpa_set_vring_ready(dev);
11294d191cfdSJason Wang     } else {
11305b590f51SEugenio Pérez         vhost_vdpa_svqs_stop(dev);
11314d191cfdSJason Wang         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
11324d191cfdSJason Wang     }
11334d191cfdSJason Wang 
1134245cf2c2SEugenio Pérez     if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
11354d191cfdSJason Wang         return 0;
11364d191cfdSJason Wang     }
11374d191cfdSJason Wang 
1138108a6481SCindy Lu     if (started) {
1139108a6481SCindy Lu         memory_listener_register(&v->listener, &address_space_memory);
11403631151bSRoman Kagan         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1141108a6481SCindy Lu     } else {
1142108a6481SCindy Lu         vhost_vdpa_reset_device(dev);
1143108a6481SCindy Lu         vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1144108a6481SCindy Lu                                    VIRTIO_CONFIG_S_DRIVER);
1145108a6481SCindy Lu         memory_listener_unregister(&v->listener);
1146108a6481SCindy Lu 
1147108a6481SCindy Lu         return 0;
1148108a6481SCindy Lu     }
1149108a6481SCindy Lu }
1150108a6481SCindy Lu 
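/*
 * The dirty log base is only forwarded by the first vhost device, and not
 * at all when SVQ is enabled, since SVQ provides the logging capability
 * from within QEMU.
 */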
1151108a6481SCindy Lu static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1152108a6481SCindy Lu                                      struct vhost_log *log)
1153108a6481SCindy Lu {
1154773ebc95SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1155d71b0609SSi-Wei Liu     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
11564d191cfdSJason Wang         return 0;
11574d191cfdSJason Wang     }
11584d191cfdSJason Wang 
1159778e67deSLaurent Vivier     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1160778e67deSLaurent Vivier                                   log->log);
1161108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1162108a6481SCindy Lu }
1163108a6481SCindy Lu 
1164108a6481SCindy Lu static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1165108a6481SCindy Lu                                        struct vhost_vring_addr *addr)
1166108a6481SCindy Lu {
1167d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1168d96be4c8SEugenio Pérez 
1169d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1170d96be4c8SEugenio Pérez         /*
1171d96be4c8SEugenio Pérez          * Device vring addr was set at device start. SVQ addresses are
1172d96be4c8SEugenio Pérez          * handled by VirtQueue code.
1173d96be4c8SEugenio Pérez          */
1174d96be4c8SEugenio Pérez         return 0;
1175d96be4c8SEugenio Pérez     }
1176d96be4c8SEugenio Pérez 
1177d96be4c8SEugenio Pérez     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1178108a6481SCindy Lu }
1179108a6481SCindy Lu 
1180108a6481SCindy Lu static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1181108a6481SCindy Lu                                       struct vhost_vring_state *ring)
1182108a6481SCindy Lu {
1183778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1184108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1185108a6481SCindy Lu }
1186108a6481SCindy Lu 
1187108a6481SCindy Lu static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1188108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1189108a6481SCindy Lu {
1190d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
11912fdac348SEugenio Pérez     VirtQueue *vq = virtio_get_queue(dev->vdev, ring->index);
1192d96be4c8SEugenio Pérez 
11932fdac348SEugenio Pérez     /*
11942fdac348SEugenio Pérez      * vhost-vdpa devices do not support in-flight requests. Set all of them
11952fdac348SEugenio Pérez      * as available.
11962fdac348SEugenio Pérez      *
11972fdac348SEugenio Pérez      * TODO: This is ok for networking, but other kinds of devices might
11982fdac348SEugenio Pérez      * have problems with these retransmissions.
11992fdac348SEugenio Pérez      */
12002fdac348SEugenio Pérez     while (virtqueue_rewind(vq, 1)) {
12012fdac348SEugenio Pérez         continue;
12022fdac348SEugenio Pérez     }
1203d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1204d96be4c8SEugenio Pérez         /*
1205d96be4c8SEugenio Pérez          * Device vring base was set at device start. SVQ base is handled by
1206d96be4c8SEugenio Pérez          * VirtQueue code.
1207d96be4c8SEugenio Pérez          */
1208d96be4c8SEugenio Pérez         return 0;
1209d96be4c8SEugenio Pérez     }
1210d96be4c8SEugenio Pérez 
1211d96be4c8SEugenio Pérez     return vhost_vdpa_set_dev_vring_base(dev, ring);
1212108a6481SCindy Lu }
1213108a6481SCindy Lu 
1214108a6481SCindy Lu static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1215108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1216108a6481SCindy Lu {
12176d0b2226SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1218778e67deSLaurent Vivier     int ret;
1219778e67deSLaurent Vivier 
12206d0b2226SEugenio Pérez     if (v->shadow_vqs_enabled) {
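        /*
         * In SVQ mode the guest-visible ring index is tracked by the
         * VirtIO device, so report that value instead of querying the
         * vDPA backend.
         */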
12212fdac348SEugenio Pérez         ring->num = virtio_queue_get_last_avail_idx(dev->vdev, ring->index);
12226d0b2226SEugenio Pérez         return 0;
12236d0b2226SEugenio Pérez     }
12246d0b2226SEugenio Pérez 
1225778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1226778e67deSLaurent Vivier     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
1227778e67deSLaurent Vivier     return ret;
1228108a6481SCindy Lu }
1229108a6481SCindy Lu 
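/*
 * Route the guest's kick eventfd: with SVQ enabled it becomes the SVQ
 * kick source, otherwise it is passed straight to the vDPA device.
 */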
1230108a6481SCindy Lu static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1231108a6481SCindy Lu                                        struct vhost_vring_file *file)
1232108a6481SCindy Lu {
1233dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1234dff4426fSEugenio Pérez     int vdpa_idx = file->index - dev->vq_index;
1235dff4426fSEugenio Pérez 
1236dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
1237dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1238dff4426fSEugenio Pérez         vhost_svq_set_svq_kick_fd(svq, file->fd);
1239dff4426fSEugenio Pérez         return 0;
1240dff4426fSEugenio Pérez     } else {
1241dff4426fSEugenio Pérez         return vhost_vdpa_set_vring_dev_kick(dev, file);
1242dff4426fSEugenio Pérez     }
1243108a6481SCindy Lu }
1244108a6481SCindy Lu 
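/*
 * Route the guest's call eventfd: with SVQ enabled it is used by the SVQ
 * to notify the guest, otherwise it is handed to the vDPA device.
 */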
1245108a6481SCindy Lu static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1246108a6481SCindy Lu                                        struct vhost_vring_file *file)
1247108a6481SCindy Lu {
1248a8ac8858SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1249a8ac8858SEugenio Pérez 
1250a8ac8858SEugenio Pérez     if (v->shadow_vqs_enabled) {
1251a8ac8858SEugenio Pérez         int vdpa_idx = file->index - dev->vq_index;
1252a8ac8858SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1253a8ac8858SEugenio Pérez 
1254a8ac8858SEugenio Pérez         vhost_svq_set_svq_call_fd(svq, file->fd);
1255a8ac8858SEugenio Pérez         return 0;
1256a8ac8858SEugenio Pérez     } else {
1257a8ac8858SEugenio Pérez         return vhost_vdpa_set_vring_dev_call(dev, file);
1258a8ac8858SEugenio Pérez     }
1259108a6481SCindy Lu }
1260108a6481SCindy Lu 
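/*
 * Report the features offered to the guest.  With SVQ enabled QEMU can
 * emulate dirty logging, so VHOST_F_LOG_ALL is advertised on top of the
 * device features.
 */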
1261108a6481SCindy Lu static int vhost_vdpa_get_features(struct vhost_dev *dev,
1262108a6481SCindy Lu                                      uint64_t *features)
1263108a6481SCindy Lu {
126412a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
126512a195faSEugenio Pérez     int ret = vhost_vdpa_get_dev_features(dev, features);
1266778e67deSLaurent Vivier 
126712a195faSEugenio Pérez     if (ret == 0 && v->shadow_vqs_enabled) {
126812a195faSEugenio Pérez         /* Add SVQ logging capabilities */
126912a195faSEugenio Pérez         *features |= BIT_ULL(VHOST_F_LOG_ALL);
127012a195faSEugenio Pérez     }
127112a195faSEugenio Pérez 
1272778e67deSLaurent Vivier     return ret;
1273108a6481SCindy Lu }
1274108a6481SCindy Lu 
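/* VHOST_SET_OWNER only needs to be issued once, by the first vhost device. */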
1275108a6481SCindy Lu static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1276108a6481SCindy Lu {
1277d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
12784d191cfdSJason Wang         return 0;
12794d191cfdSJason Wang     }
12804d191cfdSJason Wang 
1281778e67deSLaurent Vivier     trace_vhost_vdpa_set_owner(dev);
1282108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1283108a6481SCindy Lu }
1284108a6481SCindy Lu 
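/*
 * Report the vrings' guest physical addresses to the vhost core;
 * vhost-vdpa works with GPA/IOVA rather than QEMU virtual addresses.
 */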
1285108a6481SCindy Lu static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1286108a6481SCindy Lu                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1287108a6481SCindy Lu {
1288108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1289108a6481SCindy Lu     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1290108a6481SCindy Lu     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1291108a6481SCindy Lu     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1292778e67deSLaurent Vivier     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1293778e67deSLaurent Vivier                                  addr->avail_user_addr, addr->used_user_addr);
1294108a6481SCindy Lu     return 0;
1295108a6481SCindy Lu }
1296108a6481SCindy Lu 
1297108a6481SCindy Lu static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1298108a6481SCindy Lu {
1299108a6481SCindy Lu     return true;
1300108a6481SCindy Lu }
1301108a6481SCindy Lu 
1302108a6481SCindy Lu const VhostOps vdpa_ops = {
1303108a6481SCindy Lu         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1304108a6481SCindy Lu         .vhost_backend_init = vhost_vdpa_init,
1305108a6481SCindy Lu         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1306108a6481SCindy Lu         .vhost_set_log_base = vhost_vdpa_set_log_base,
1307108a6481SCindy Lu         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1308108a6481SCindy Lu         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1309108a6481SCindy Lu         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1310108a6481SCindy Lu         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1311108a6481SCindy Lu         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1312108a6481SCindy Lu         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1313108a6481SCindy Lu         .vhost_get_features = vhost_vdpa_get_features,
1314a5bd0580SJason Wang         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1315108a6481SCindy Lu         .vhost_set_owner = vhost_vdpa_set_owner,
1316108a6481SCindy Lu         .vhost_set_vring_endian = NULL,
1317108a6481SCindy Lu         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1318108a6481SCindy Lu         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1319108a6481SCindy Lu         .vhost_set_features = vhost_vdpa_set_features,
1320108a6481SCindy Lu         .vhost_reset_device = vhost_vdpa_reset_device,
1321108a6481SCindy Lu         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1322108a6481SCindy Lu         .vhost_get_config  = vhost_vdpa_get_config,
1323108a6481SCindy Lu         .vhost_set_config = vhost_vdpa_set_config,
1324108a6481SCindy Lu         .vhost_requires_shm_log = NULL,
1325108a6481SCindy Lu         .vhost_migration_done = NULL,
1326108a6481SCindy Lu         .vhost_backend_can_merge = NULL,
1327108a6481SCindy Lu         .vhost_net_set_mtu = NULL,
1328108a6481SCindy Lu         .vhost_set_iotlb_callback = NULL,
1329108a6481SCindy Lu         .vhost_send_device_iotlb_msg = NULL,
1330108a6481SCindy Lu         .vhost_dev_start = vhost_vdpa_dev_start,
1331108a6481SCindy Lu         .vhost_get_device_id = vhost_vdpa_get_device_id,
1332108a6481SCindy Lu         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1333108a6481SCindy Lu         .vhost_force_iommu = vhost_vdpa_force_iommu,
1334108a6481SCindy Lu };
1335