xref: /qemu/hw/virtio/vhost-vdpa.c (revision e966c0b781aebabd2c0f5eef91678f08ce1d068c)
1108a6481SCindy Lu /*
2108a6481SCindy Lu  * vhost-vdpa
3108a6481SCindy Lu  *
4108a6481SCindy Lu  *  Copyright(c) 2017-2018 Intel Corporation.
5108a6481SCindy Lu  *  Copyright(c) 2020 Red Hat, Inc.
6108a6481SCindy Lu  *
7108a6481SCindy Lu  * This work is licensed under the terms of the GNU GPL, version 2 or later.
8108a6481SCindy Lu  * See the COPYING file in the top-level directory.
9108a6481SCindy Lu  *
10108a6481SCindy Lu  */
11108a6481SCindy Lu 
12108a6481SCindy Lu #include "qemu/osdep.h"
13108a6481SCindy Lu #include <linux/vhost.h>
14108a6481SCindy Lu #include <linux/vfio.h>
15108a6481SCindy Lu #include <sys/eventfd.h>
16108a6481SCindy Lu #include <sys/ioctl.h>
17108a6481SCindy Lu #include "hw/virtio/vhost.h"
18108a6481SCindy Lu #include "hw/virtio/vhost-backend.h"
19108a6481SCindy Lu #include "hw/virtio/virtio-net.h"
20dff4426fSEugenio Pérez #include "hw/virtio/vhost-shadow-virtqueue.h"
21108a6481SCindy Lu #include "hw/virtio/vhost-vdpa.h"
22df77d45aSXie Yongji #include "exec/address-spaces.h"
23415b7327SMarc-André Lureau #include "qemu/cutils.h"
24108a6481SCindy Lu #include "qemu/main-loop.h"
254dc5acc0SCindy Lu #include "cpu.h"
26778e67deSLaurent Vivier #include "trace.h"
27dff4426fSEugenio Pérez #include "qapi/error.h"
28108a6481SCindy Lu 
29032e4d68SEugenio Pérez /*
30032e4d68SEugenio Pérez  * Return one past the end of the section. Be careful with uint64_t
31032e4d68SEugenio Pérez  * conversions!
32032e4d68SEugenio Pérez  */
33032e4d68SEugenio Pérez static Int128 vhost_vdpa_section_end(const MemoryRegionSection *section)
34032e4d68SEugenio Pérez {
35032e4d68SEugenio Pérez     Int128 llend = int128_make64(section->offset_within_address_space);
36032e4d68SEugenio Pérez     llend = int128_add(llend, section->size);
37032e4d68SEugenio Pérez     llend = int128_and(llend, int128_exts64(TARGET_PAGE_MASK));
38032e4d68SEugenio Pérez 
39032e4d68SEugenio Pérez     return llend;
40032e4d68SEugenio Pérez }
41032e4d68SEugenio Pérez 
42013108b6SEugenio Pérez static bool vhost_vdpa_listener_skipped_section(MemoryRegionSection *section,
43013108b6SEugenio Pérez                                                 uint64_t iova_min,
44013108b6SEugenio Pérez                                                 uint64_t iova_max)
45108a6481SCindy Lu {
46013108b6SEugenio Pérez     Int128 llend;
47013108b6SEugenio Pérez 
48013108b6SEugenio Pérez     if ((!memory_region_is_ram(section->mr) &&
49108a6481SCindy Lu          !memory_region_is_iommu(section->mr)) ||
50c64038c9SEugenio Pérez         memory_region_is_protected(section->mr) ||
51d60c75d2SJason Wang         /* vhost-vDPA doesn't allow MMIO to be mapped */
52013108b6SEugenio Pérez         memory_region_is_ram_device(section->mr)) {
53013108b6SEugenio Pérez         return true;
54013108b6SEugenio Pérez     }
55013108b6SEugenio Pérez 
56013108b6SEugenio Pérez     if (section->offset_within_address_space < iova_min) {
57013108b6SEugenio Pérez         error_report("RAM section out of device range (min=0x%" PRIx64
58013108b6SEugenio Pérez                      ", addr=0x%" HWADDR_PRIx ")",
59013108b6SEugenio Pérez                      iova_min, section->offset_within_address_space);
60013108b6SEugenio Pérez         return true;
61013108b6SEugenio Pérez     }
62013108b6SEugenio Pérez 
63013108b6SEugenio Pérez     llend = vhost_vdpa_section_end(section);
64013108b6SEugenio Pérez     if (int128_gt(llend, int128_make64(iova_max))) {
65013108b6SEugenio Pérez         error_report("RAM section out of device range (max=0x%" PRIx64
66013108b6SEugenio Pérez                      ", end addr=0x%" PRIx64 ")",
67013108b6SEugenio Pérez                      iova_max, int128_get64(llend));
68013108b6SEugenio Pérez         return true;
69013108b6SEugenio Pérez     }
70013108b6SEugenio Pérez 
71013108b6SEugenio Pérez     return false;
72108a6481SCindy Lu }
73108a6481SCindy Lu 
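/*
 * Map [iova, iova + size) to the host virtual address @vaddr in the device
 * IOTLB by writing a VHOST_IOTLB_UPDATE message (struct vhost_msg_v2) to the
 * vhost-vdpa device fd.  Returns 0 on success or -EIO if the write fails.
 */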
74108a6481SCindy Lu static int vhost_vdpa_dma_map(struct vhost_vdpa *v, hwaddr iova, hwaddr size,
75108a6481SCindy Lu                               void *vaddr, bool readonly)
76108a6481SCindy Lu {
77386494f2SCindy Lu     struct vhost_msg_v2 msg = {};
78108a6481SCindy Lu     int fd = v->device_fd;
79108a6481SCindy Lu     int ret = 0;
80108a6481SCindy Lu 
81108a6481SCindy Lu     msg.type = v->msg_type;
82108a6481SCindy Lu     msg.iotlb.iova = iova;
83108a6481SCindy Lu     msg.iotlb.size = size;
84108a6481SCindy Lu     msg.iotlb.uaddr = (uint64_t)(uintptr_t)vaddr;
85108a6481SCindy Lu     msg.iotlb.perm = readonly ? VHOST_ACCESS_RO : VHOST_ACCESS_RW;
86108a6481SCindy Lu     msg.iotlb.type = VHOST_IOTLB_UPDATE;
87108a6481SCindy Lu 
88778e67deSLaurent Vivier     trace_vhost_vdpa_dma_map(v, fd, msg.type, msg.iotlb.iova, msg.iotlb.size,
89778e67deSLaurent Vivier                              msg.iotlb.uaddr, msg.iotlb.perm, msg.iotlb.type);
90778e67deSLaurent Vivier 
91108a6481SCindy Lu     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
92108a6481SCindy Lu         error_report("failed to write, fd=%d, errno=%d (%s)",
93108a6481SCindy Lu             fd, errno, strerror(errno));
94108a6481SCindy Lu         return -EIO;
95108a6481SCindy Lu     }
96108a6481SCindy Lu 
97108a6481SCindy Lu     return ret;
98108a6481SCindy Lu }
99108a6481SCindy Lu 
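/*
 * Remove the IOTLB mapping for [iova, iova + size) by writing a
 * VHOST_IOTLB_INVALIDATE message to the device fd.
 */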
100108a6481SCindy Lu static int vhost_vdpa_dma_unmap(struct vhost_vdpa *v, hwaddr iova,
101108a6481SCindy Lu                                 hwaddr size)
102108a6481SCindy Lu {
103386494f2SCindy Lu     struct vhost_msg_v2 msg = {};
104108a6481SCindy Lu     int fd = v->device_fd;
105108a6481SCindy Lu     int ret = 0;
106108a6481SCindy Lu 
107108a6481SCindy Lu     msg.type = v->msg_type;
108108a6481SCindy Lu     msg.iotlb.iova = iova;
109108a6481SCindy Lu     msg.iotlb.size = size;
110108a6481SCindy Lu     msg.iotlb.type = VHOST_IOTLB_INVALIDATE;
111108a6481SCindy Lu 
112778e67deSLaurent Vivier     trace_vhost_vdpa_dma_unmap(v, fd, msg.type, msg.iotlb.iova,
113778e67deSLaurent Vivier                                msg.iotlb.size, msg.iotlb.type);
114778e67deSLaurent Vivier 
115108a6481SCindy Lu     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
116108a6481SCindy Lu         error_report("failed to write, fd=%d, errno=%d (%s)",
117108a6481SCindy Lu             fd, errno, strerror(errno));
118108a6481SCindy Lu         return -EIO;
119108a6481SCindy Lu     }
120108a6481SCindy Lu 
121108a6481SCindy Lu     return ret;
122108a6481SCindy Lu }
123108a6481SCindy Lu 
124e6db5df7SEugenio Pérez static void vhost_vdpa_listener_begin_batch(struct vhost_vdpa *v)
125a5bd0580SJason Wang {
126a5bd0580SJason Wang     int fd = v->device_fd;
127e6db5df7SEugenio Pérez     struct vhost_msg_v2 msg = {
128e6db5df7SEugenio Pérez         .type = v->msg_type,
129e6db5df7SEugenio Pérez         .iotlb.type = VHOST_IOTLB_BATCH_BEGIN,
130e6db5df7SEugenio Pérez     };
131a5bd0580SJason Wang 
1325580b9f0SEugenio Pérez     trace_vhost_vdpa_listener_begin_batch(v, fd, msg.type, msg.iotlb.type);
133a5bd0580SJason Wang     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
134a5bd0580SJason Wang         error_report("failed to write, fd=%d, errno=%d (%s)",
135a5bd0580SJason Wang                      fd, errno, strerror(errno));
136a5bd0580SJason Wang     }
137a5bd0580SJason Wang }
138a5bd0580SJason Wang 
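/*
 * Send VHOST_IOTLB_BATCH_BEGIN at most once per batch.  The listener commit
 * callback sends the matching VHOST_IOTLB_BATCH_END and clears
 * iotlb_batch_begin_sent again.
 */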
139e6db5df7SEugenio Pérez static void vhost_vdpa_iotlb_batch_begin_once(struct vhost_vdpa *v)
140e6db5df7SEugenio Pérez {
141e6db5df7SEugenio Pérez     if (v->dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH) &&
142e6db5df7SEugenio Pérez         !v->iotlb_batch_begin_sent) {
143e6db5df7SEugenio Pérez         vhost_vdpa_listener_begin_batch(v);
144e6db5df7SEugenio Pérez     }
145e6db5df7SEugenio Pérez 
146e6db5df7SEugenio Pérez     v->iotlb_batch_begin_sent = true;
147e6db5df7SEugenio Pérez }
148e6db5df7SEugenio Pérez 
149a5bd0580SJason Wang static void vhost_vdpa_listener_commit(MemoryListener *listener)
150a5bd0580SJason Wang {
151a5bd0580SJason Wang     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
152a5bd0580SJason Wang     struct vhost_dev *dev = v->dev;
1538acb3218SPhilippe Mathieu-Daudé     struct vhost_msg_v2 msg = {};
154a5bd0580SJason Wang     int fd = v->device_fd;
155a5bd0580SJason Wang 
156a5bd0580SJason Wang     if (!(dev->backend_cap & (0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH))) {
157a5bd0580SJason Wang         return;
158a5bd0580SJason Wang     }
159a5bd0580SJason Wang 
160e6db5df7SEugenio Pérez     if (!v->iotlb_batch_begin_sent) {
161e6db5df7SEugenio Pérez         return;
162e6db5df7SEugenio Pérez     }
163e6db5df7SEugenio Pérez 
164a5bd0580SJason Wang     msg.type = v->msg_type;
165a5bd0580SJason Wang     msg.iotlb.type = VHOST_IOTLB_BATCH_END;
166a5bd0580SJason Wang 
1675580b9f0SEugenio Pérez     trace_vhost_vdpa_listener_commit(v, fd, msg.type, msg.iotlb.type);
168a5bd0580SJason Wang     if (write(fd, &msg, sizeof(msg)) != sizeof(msg)) {
169a5bd0580SJason Wang         error_report("failed to write, fd=%d, errno=%d (%s)",
170a5bd0580SJason Wang                      fd, errno, strerror(errno));
171a5bd0580SJason Wang     }
172e6db5df7SEugenio Pérez 
173e6db5df7SEugenio Pérez     v->iotlb_batch_begin_sent = false;
174a5bd0580SJason Wang }
175a5bd0580SJason Wang 
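/*
 * Map a newly added RAM section into the device.  When shadow virtqueues are
 * enabled, the IOVA is allocated from the IOVA tree instead of reusing the
 * section's guest physical address.
 */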
176108a6481SCindy Lu static void vhost_vdpa_listener_region_add(MemoryListener *listener,
177108a6481SCindy Lu                                            MemoryRegionSection *section)
178108a6481SCindy Lu {
179108a6481SCindy Lu     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
180108a6481SCindy Lu     hwaddr iova;
181108a6481SCindy Lu     Int128 llend, llsize;
182108a6481SCindy Lu     void *vaddr;
183108a6481SCindy Lu     int ret;
184108a6481SCindy Lu 
185013108b6SEugenio Pérez     if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
186013108b6SEugenio Pérez                                             v->iova_range.last)) {
187108a6481SCindy Lu         return;
188108a6481SCindy Lu     }
189108a6481SCindy Lu 
190108a6481SCindy Lu     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
191108a6481SCindy Lu                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
192108a6481SCindy Lu         error_report("%s received unaligned region", __func__);
193108a6481SCindy Lu         return;
194108a6481SCindy Lu     }
195108a6481SCindy Lu 
196108a6481SCindy Lu     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
197032e4d68SEugenio Pérez     llend = vhost_vdpa_section_end(section);
198108a6481SCindy Lu     if (int128_ge(int128_make64(iova), llend)) {
199108a6481SCindy Lu         return;
200108a6481SCindy Lu     }
201108a6481SCindy Lu 
202108a6481SCindy Lu     memory_region_ref(section->mr);
203108a6481SCindy Lu 
204108a6481SCindy Lu     /* Here we assume that memory_region_is_ram(section->mr)==true */
205108a6481SCindy Lu 
206108a6481SCindy Lu     vaddr = memory_region_get_ram_ptr(section->mr) +
207108a6481SCindy Lu             section->offset_within_region +
208108a6481SCindy Lu             (iova - section->offset_within_address_space);
209108a6481SCindy Lu 
210778e67deSLaurent Vivier     trace_vhost_vdpa_listener_region_add(v, iova, int128_get64(llend),
211778e67deSLaurent Vivier                                          vaddr, section->readonly);
212778e67deSLaurent Vivier 
213108a6481SCindy Lu     llsize = int128_sub(llend, int128_make64(iova));
21434e3c94eSEugenio Pérez     if (v->shadow_vqs_enabled) {
21534e3c94eSEugenio Pérez         DMAMap mem_region = {
21634e3c94eSEugenio Pérez             .translated_addr = (hwaddr)(uintptr_t)vaddr,
21734e3c94eSEugenio Pérez             .size = int128_get64(llsize) - 1,
21834e3c94eSEugenio Pérez             .perm = IOMMU_ACCESS_FLAG(true, section->readonly),
21934e3c94eSEugenio Pérez         };
22034e3c94eSEugenio Pérez 
22134e3c94eSEugenio Pérez         int r = vhost_iova_tree_map_alloc(v->iova_tree, &mem_region);
22234e3c94eSEugenio Pérez         if (unlikely(r != IOVA_OK)) {
22334e3c94eSEugenio Pérez             error_report("Can't allocate a mapping (%d)", r);
22434e3c94eSEugenio Pérez             goto fail;
22534e3c94eSEugenio Pérez         }
22634e3c94eSEugenio Pérez 
22734e3c94eSEugenio Pérez         iova = mem_region.iova;
22834e3c94eSEugenio Pérez     }
229108a6481SCindy Lu 
230e6db5df7SEugenio Pérez     vhost_vdpa_iotlb_batch_begin_once(v);
231108a6481SCindy Lu     ret = vhost_vdpa_dma_map(v, iova, int128_get64(llsize),
232108a6481SCindy Lu                              vaddr, section->readonly);
233108a6481SCindy Lu     if (ret) {
234108a6481SCindy Lu         error_report("vhost-vdpa: DMA map failed");
235108a6481SCindy Lu         goto fail;
236108a6481SCindy Lu     }
237108a6481SCindy Lu 
238108a6481SCindy Lu     return;
239108a6481SCindy Lu 
240108a6481SCindy Lu fail:
241108a6481SCindy Lu     /*
242108a6481SCindy Lu      * On the initfn path, store the first error in the container so we
243108a6481SCindy Lu      * can gracefully fail.  At runtime, there's not much we can do other
244108a6481SCindy Lu      * than throw a hardware error.
245108a6481SCindy Lu      */
246108a6481SCindy Lu     error_report("vhost-vdpa: DMA mapping failed, unable to continue");
247108a6481SCindy Lu     return;
248108a6481SCindy Lu 
249108a6481SCindy Lu }
250108a6481SCindy Lu 
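/*
 * Undo a previous region_add: with shadow virtqueues enabled, look up the
 * IOVA that was allocated for this host address range before unmapping it
 * from the device.
 */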
251108a6481SCindy Lu static void vhost_vdpa_listener_region_del(MemoryListener *listener,
252108a6481SCindy Lu                                            MemoryRegionSection *section)
253108a6481SCindy Lu {
254108a6481SCindy Lu     struct vhost_vdpa *v = container_of(listener, struct vhost_vdpa, listener);
255108a6481SCindy Lu     hwaddr iova;
256108a6481SCindy Lu     Int128 llend, llsize;
257108a6481SCindy Lu     int ret;
258108a6481SCindy Lu 
259013108b6SEugenio Pérez     if (vhost_vdpa_listener_skipped_section(section, v->iova_range.first,
260013108b6SEugenio Pérez                                             v->iova_range.last)) {
261108a6481SCindy Lu         return;
262108a6481SCindy Lu     }
263108a6481SCindy Lu 
264108a6481SCindy Lu     if (unlikely((section->offset_within_address_space & ~TARGET_PAGE_MASK) !=
265108a6481SCindy Lu                  (section->offset_within_region & ~TARGET_PAGE_MASK))) {
266108a6481SCindy Lu         error_report("%s received unaligned region", __func__);
267108a6481SCindy Lu         return;
268108a6481SCindy Lu     }
269108a6481SCindy Lu 
270108a6481SCindy Lu     iova = TARGET_PAGE_ALIGN(section->offset_within_address_space);
271032e4d68SEugenio Pérez     llend = vhost_vdpa_section_end(section);
272108a6481SCindy Lu 
273778e67deSLaurent Vivier     trace_vhost_vdpa_listener_region_del(v, iova, int128_get64(llend));
274778e67deSLaurent Vivier 
275108a6481SCindy Lu     if (int128_ge(int128_make64(iova), llend)) {
276108a6481SCindy Lu         return;
277108a6481SCindy Lu     }
278108a6481SCindy Lu 
279108a6481SCindy Lu     llsize = int128_sub(llend, int128_make64(iova));
280108a6481SCindy Lu 
28134e3c94eSEugenio Pérez     if (v->shadow_vqs_enabled) {
28234e3c94eSEugenio Pérez         const DMAMap *result;
28334e3c94eSEugenio Pérez         const void *vaddr = memory_region_get_ram_ptr(section->mr) +
28434e3c94eSEugenio Pérez             section->offset_within_region +
28534e3c94eSEugenio Pérez             (iova - section->offset_within_address_space);
28634e3c94eSEugenio Pérez         DMAMap mem_region = {
28734e3c94eSEugenio Pérez             .translated_addr = (hwaddr)(uintptr_t)vaddr,
28834e3c94eSEugenio Pérez             .size = int128_get64(llsize) - 1,
28934e3c94eSEugenio Pérez         };
29034e3c94eSEugenio Pérez 
29134e3c94eSEugenio Pérez         result = vhost_iova_tree_find_iova(v->iova_tree, &mem_region);
29234e3c94eSEugenio Pérez         iova = result->iova;
29334e3c94eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, &mem_region);
29434e3c94eSEugenio Pérez     }
295e6db5df7SEugenio Pérez     vhost_vdpa_iotlb_batch_begin_once(v);
296108a6481SCindy Lu     ret = vhost_vdpa_dma_unmap(v, iova, int128_get64(llsize));
297108a6481SCindy Lu     if (ret) {
298108a6481SCindy Lu         error_report("vhost-vdpa: DMA unmap failed");
299108a6481SCindy Lu     }
300108a6481SCindy Lu 
301108a6481SCindy Lu     memory_region_unref(section->mr);
302108a6481SCindy Lu }
303108a6481SCindy Lu /*
304ef4ff56cSStefano Garzarella  * The IOTLB API is used by vhost-vdpa, which requires incremental
305108a6481SCindy Lu  * updating of the mapping. So we cannot use the generic vhost memory
306108a6481SCindy Lu  * listener, which depends on addnop().
307108a6481SCindy Lu  */
308108a6481SCindy Lu static const MemoryListener vhost_vdpa_memory_listener = {
309142518bdSPeter Xu     .name = "vhost-vdpa",
310a5bd0580SJason Wang     .commit = vhost_vdpa_listener_commit,
311108a6481SCindy Lu     .region_add = vhost_vdpa_listener_region_add,
312108a6481SCindy Lu     .region_del = vhost_vdpa_listener_region_del,
313108a6481SCindy Lu };
314108a6481SCindy Lu 
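/*
 * Issue an ioctl on the vhost-vdpa device fd, converting the -1/errno
 * convention of ioctl(2) into a negative errno return value.
 */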
315108a6481SCindy Lu static int vhost_vdpa_call(struct vhost_dev *dev, unsigned long int request,
316108a6481SCindy Lu                              void *arg)
317108a6481SCindy Lu {
318108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
319108a6481SCindy Lu     int fd = v->device_fd;
320f2a6e6c4SKevin Wolf     int ret;
321108a6481SCindy Lu 
322108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
323108a6481SCindy Lu 
324f2a6e6c4SKevin Wolf     ret = ioctl(fd, request, arg);
325f2a6e6c4SKevin Wolf     return ret < 0 ? -errno : ret;
326108a6481SCindy Lu }
327108a6481SCindy Lu 
3283631151bSRoman Kagan static int vhost_vdpa_add_status(struct vhost_dev *dev, uint8_t status)
329108a6481SCindy Lu {
330108a6481SCindy Lu     uint8_t s;
3313631151bSRoman Kagan     int ret;
332108a6481SCindy Lu 
333778e67deSLaurent Vivier     trace_vhost_vdpa_add_status(dev, status);
3343631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
3353631151bSRoman Kagan     if (ret < 0) {
3363631151bSRoman Kagan         return ret;
337108a6481SCindy Lu     }
338108a6481SCindy Lu 
339108a6481SCindy Lu     s |= status;
340108a6481SCindy Lu 
3413631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &s);
3423631151bSRoman Kagan     if (ret < 0) {
3433631151bSRoman Kagan         return ret;
3443631151bSRoman Kagan     }
3453631151bSRoman Kagan 
3463631151bSRoman Kagan     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_STATUS, &s);
3473631151bSRoman Kagan     if (ret < 0) {
3483631151bSRoman Kagan         return ret;
3493631151bSRoman Kagan     }
3503631151bSRoman Kagan 
3513631151bSRoman Kagan     if (!(s & status)) {
3523631151bSRoman Kagan         return -EIO;
3533631151bSRoman Kagan     }
3543631151bSRoman Kagan 
3553631151bSRoman Kagan     return 0;
356108a6481SCindy Lu }
357108a6481SCindy Lu 
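/*
 * Query the usable IOVA range from the device; if VHOST_VDPA_GET_IOVA_RANGE
 * is not supported, fall back to the whole 64-bit range.
 */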
358013108b6SEugenio Pérez static void vhost_vdpa_get_iova_range(struct vhost_vdpa *v)
359013108b6SEugenio Pérez {
360013108b6SEugenio Pérez     int ret = vhost_vdpa_call(v->dev, VHOST_VDPA_GET_IOVA_RANGE,
361013108b6SEugenio Pérez                               &v->iova_range);
362013108b6SEugenio Pérez     if (ret != 0) {
363013108b6SEugenio Pérez         v->iova_range.first = 0;
364013108b6SEugenio Pérez         v->iova_range.last = UINT64_MAX;
365013108b6SEugenio Pérez     }
366013108b6SEugenio Pérez 
367013108b6SEugenio Pérez     trace_vhost_vdpa_get_iova_range(v->dev, v->iova_range.first,
368013108b6SEugenio Pérez                                     v->iova_range.last);
369013108b6SEugenio Pérez }
370013108b6SEugenio Pérez 
371d71b0609SSi-Wei Liu /*
372d71b0609SSi-Wei Liu  * The use of this function is for requests that only need to be
373d71b0609SSi-Wei Liu  * applied once. Typically such a request occurs at the beginning
374d71b0609SSi-Wei Liu  * of operation, before setting up the queues. It should not be
375d71b0609SSi-Wei Liu  * used for requests that must wait until all queues are set up;
376d71b0609SSi-Wei Liu  * those need to check dev->vq_index_end instead.
377d71b0609SSi-Wei Liu  */
378d71b0609SSi-Wei Liu static bool vhost_vdpa_first_dev(struct vhost_dev *dev)
3794d191cfdSJason Wang {
3804d191cfdSJason Wang     struct vhost_vdpa *v = dev->opaque;
3814d191cfdSJason Wang 
382d71b0609SSi-Wei Liu     return v->index == 0;
3834d191cfdSJason Wang }
3844d191cfdSJason Wang 
38512a195faSEugenio Pérez static int vhost_vdpa_get_dev_features(struct vhost_dev *dev,
38612a195faSEugenio Pérez                                        uint64_t *features)
38712a195faSEugenio Pérez {
38812a195faSEugenio Pérez     int ret;
38912a195faSEugenio Pérez 
39012a195faSEugenio Pérez     ret = vhost_vdpa_call(dev, VHOST_GET_FEATURES, features);
39112a195faSEugenio Pérez     trace_vhost_vdpa_get_features(dev, *features);
39212a195faSEugenio Pérez     return ret;
39312a195faSEugenio Pérez }
39412a195faSEugenio Pérez 
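/*
 * Allocate one shadow virtqueue per device virtqueue, after checking that
 * the device features are usable by SVQ.  Does nothing if shadow virtqueues
 * are not enabled for this device.
 */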
395dff4426fSEugenio Pérez static int vhost_vdpa_init_svq(struct vhost_dev *hdev, struct vhost_vdpa *v,
396dff4426fSEugenio Pérez                                Error **errp)
397dff4426fSEugenio Pérez {
398dff4426fSEugenio Pérez     g_autoptr(GPtrArray) shadow_vqs = NULL;
3994725a418SEugenio Pérez     uint64_t dev_features, svq_features;
4004725a418SEugenio Pérez     int r;
4014725a418SEugenio Pérez     bool ok;
402dff4426fSEugenio Pérez 
403dff4426fSEugenio Pérez     if (!v->shadow_vqs_enabled) {
404dff4426fSEugenio Pérez         return 0;
405dff4426fSEugenio Pérez     }
406dff4426fSEugenio Pérez 
40712a195faSEugenio Pérez     r = vhost_vdpa_get_dev_features(hdev, &dev_features);
4084725a418SEugenio Pérez     if (r != 0) {
4094725a418SEugenio Pérez         error_setg_errno(errp, -r, "Can't get vdpa device features");
4104725a418SEugenio Pérez         return r;
4114725a418SEugenio Pérez     }
4124725a418SEugenio Pérez 
4134725a418SEugenio Pérez     svq_features = dev_features;
4144725a418SEugenio Pérez     ok = vhost_svq_valid_features(svq_features, errp);
4154725a418SEugenio Pérez     if (unlikely(!ok)) {
4164725a418SEugenio Pérez         return -1;
4174725a418SEugenio Pérez     }
4184725a418SEugenio Pérez 
419dff4426fSEugenio Pérez     shadow_vqs = g_ptr_array_new_full(hdev->nvqs, vhost_svq_free);
420dff4426fSEugenio Pérez     for (unsigned n = 0; n < hdev->nvqs; ++n) {
421*e966c0b7SEugenio Pérez         g_autoptr(VhostShadowVirtqueue) svq;
422dff4426fSEugenio Pérez 
423*e966c0b7SEugenio Pérez         svq = vhost_svq_new(v->iova_tree, NULL, NULL);
424dff4426fSEugenio Pérez         if (unlikely(!svq)) {
425dff4426fSEugenio Pérez             error_setg(errp, "Cannot create svq %u", n);
426dff4426fSEugenio Pérez             return -1;
427dff4426fSEugenio Pérez         }
428dff4426fSEugenio Pérez         g_ptr_array_add(shadow_vqs, g_steal_pointer(&svq));
429dff4426fSEugenio Pérez     }
430dff4426fSEugenio Pérez 
431dff4426fSEugenio Pérez     v->shadow_vqs = g_steal_pointer(&shadow_vqs);
432dff4426fSEugenio Pérez     return 0;
433dff4426fSEugenio Pérez }
434dff4426fSEugenio Pérez 
43528770ff9SKevin Wolf static int vhost_vdpa_init(struct vhost_dev *dev, void *opaque, Error **errp)
436108a6481SCindy Lu {
437108a6481SCindy Lu     struct vhost_vdpa *v;
438108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
439778e67deSLaurent Vivier     trace_vhost_vdpa_init(dev, opaque);
440e1c1915bSDavid Hildenbrand     int ret;
441e1c1915bSDavid Hildenbrand 
442e1c1915bSDavid Hildenbrand     /*
443e1c1915bSDavid Hildenbrand      * Similar to VFIO, we end up pinning all guest memory and have to
444e1c1915bSDavid Hildenbrand      * disable discarding of RAM.
445e1c1915bSDavid Hildenbrand      */
446e1c1915bSDavid Hildenbrand     ret = ram_block_discard_disable(true);
447e1c1915bSDavid Hildenbrand     if (ret) {
448e1c1915bSDavid Hildenbrand         error_report("Cannot disable discarding of RAM");
449e1c1915bSDavid Hildenbrand         return ret;
450e1c1915bSDavid Hildenbrand     }
451108a6481SCindy Lu 
452108a6481SCindy Lu     v = opaque;
453a5bd0580SJason Wang     v->dev = dev;
454108a6481SCindy Lu     dev->opaque = opaque;
455108a6481SCindy Lu     v->listener = vhost_vdpa_memory_listener;
456108a6481SCindy Lu     v->msg_type = VHOST_IOTLB_MSG_V2;
457dff4426fSEugenio Pérez     ret = vhost_vdpa_init_svq(dev, v, errp);
458dff4426fSEugenio Pérez     if (ret) {
459dff4426fSEugenio Pérez         goto err;
460dff4426fSEugenio Pérez     }
461108a6481SCindy Lu 
462013108b6SEugenio Pérez     vhost_vdpa_get_iova_range(v);
4634d191cfdSJason Wang 
464d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
4654d191cfdSJason Wang         return 0;
4664d191cfdSJason Wang     }
4674d191cfdSJason Wang 
468108a6481SCindy Lu     vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
469108a6481SCindy Lu                                VIRTIO_CONFIG_S_DRIVER);
470108a6481SCindy Lu 
471108a6481SCindy Lu     return 0;
472dff4426fSEugenio Pérez 
473dff4426fSEugenio Pérez err:
474dff4426fSEugenio Pérez     ram_block_discard_disable(false);
475dff4426fSEugenio Pérez     return ret;
476108a6481SCindy Lu }
477108a6481SCindy Lu 
478d0416d48SJason Wang static void vhost_vdpa_host_notifier_uninit(struct vhost_dev *dev,
479d0416d48SJason Wang                                             int queue_index)
480d0416d48SJason Wang {
4818e3b0cbbSMarc-André Lureau     size_t page_size = qemu_real_host_page_size();
482d0416d48SJason Wang     struct vhost_vdpa *v = dev->opaque;
483d0416d48SJason Wang     VirtIODevice *vdev = dev->vdev;
484d0416d48SJason Wang     VhostVDPAHostNotifier *n;
485d0416d48SJason Wang 
486d0416d48SJason Wang     n = &v->notifier[queue_index];
487d0416d48SJason Wang 
488d0416d48SJason Wang     if (n->addr) {
489d0416d48SJason Wang         virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, false);
490d0416d48SJason Wang         object_unparent(OBJECT(&n->mr));
491d0416d48SJason Wang         munmap(n->addr, page_size);
492d0416d48SJason Wang         n->addr = NULL;
493d0416d48SJason Wang     }
494d0416d48SJason Wang }
495d0416d48SJason Wang 
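/*
 * Each host notifier is one host page, mmap'ed from the device fd at
 * queue_index * page_size and exposed to the guest as a ram-device memory
 * region via virtio_queue_set_host_notifier_mr().
 */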
496d0416d48SJason Wang static int vhost_vdpa_host_notifier_init(struct vhost_dev *dev, int queue_index)
497d0416d48SJason Wang {
4988e3b0cbbSMarc-André Lureau     size_t page_size = qemu_real_host_page_size();
499d0416d48SJason Wang     struct vhost_vdpa *v = dev->opaque;
500d0416d48SJason Wang     VirtIODevice *vdev = dev->vdev;
501d0416d48SJason Wang     VhostVDPAHostNotifier *n;
502d0416d48SJason Wang     int fd = v->device_fd;
503d0416d48SJason Wang     void *addr;
504d0416d48SJason Wang     char *name;
505d0416d48SJason Wang 
506d0416d48SJason Wang     vhost_vdpa_host_notifier_uninit(dev, queue_index);
507d0416d48SJason Wang 
508d0416d48SJason Wang     n = &v->notifier[queue_index];
509d0416d48SJason Wang 
510d0416d48SJason Wang     addr = mmap(NULL, page_size, PROT_WRITE, MAP_SHARED, fd,
511d0416d48SJason Wang                 queue_index * page_size);
512d0416d48SJason Wang     if (addr == MAP_FAILED) {
513d0416d48SJason Wang         goto err;
514d0416d48SJason Wang     }
515d0416d48SJason Wang 
516d0416d48SJason Wang     name = g_strdup_printf("vhost-vdpa/host-notifier@%p mmaps[%d]",
517d0416d48SJason Wang                            v, queue_index);
518d0416d48SJason Wang     memory_region_init_ram_device_ptr(&n->mr, OBJECT(vdev), name,
519d0416d48SJason Wang                                       page_size, addr);
520d0416d48SJason Wang     g_free(name);
521d0416d48SJason Wang 
522d0416d48SJason Wang     if (virtio_queue_set_host_notifier_mr(vdev, queue_index, &n->mr, true)) {
52398f7607eSLaurent Vivier         object_unparent(OBJECT(&n->mr));
524d0416d48SJason Wang         munmap(addr, page_size);
525d0416d48SJason Wang         goto err;
526d0416d48SJason Wang     }
527d0416d48SJason Wang     n->addr = addr;
528d0416d48SJason Wang 
529d0416d48SJason Wang     return 0;
530d0416d48SJason Wang 
531d0416d48SJason Wang err:
532d0416d48SJason Wang     return -1;
533d0416d48SJason Wang }
534d0416d48SJason Wang 
535b1f030a0SLaurent Vivier static void vhost_vdpa_host_notifiers_uninit(struct vhost_dev *dev, int n)
536b1f030a0SLaurent Vivier {
537b1f030a0SLaurent Vivier     int i;
538b1f030a0SLaurent Vivier 
539b1f030a0SLaurent Vivier     for (i = dev->vq_index; i < dev->vq_index + n; i++) {
540b1f030a0SLaurent Vivier         vhost_vdpa_host_notifier_uninit(dev, i);
541b1f030a0SLaurent Vivier     }
542b1f030a0SLaurent Vivier }
543b1f030a0SLaurent Vivier 
544d0416d48SJason Wang static void vhost_vdpa_host_notifiers_init(struct vhost_dev *dev)
545d0416d48SJason Wang {
546dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
547d0416d48SJason Wang     int i;
548d0416d48SJason Wang 
549dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
550dff4426fSEugenio Pérez         /* FIXME SVQ is not compatible with host notifiers mr */
551dff4426fSEugenio Pérez         return;
552dff4426fSEugenio Pérez     }
553dff4426fSEugenio Pérez 
554d0416d48SJason Wang     for (i = dev->vq_index; i < dev->vq_index + dev->nvqs; i++) {
555d0416d48SJason Wang         if (vhost_vdpa_host_notifier_init(dev, i)) {
556d0416d48SJason Wang             goto err;
557d0416d48SJason Wang         }
558d0416d48SJason Wang     }
559d0416d48SJason Wang 
560d0416d48SJason Wang     return;
561d0416d48SJason Wang 
562d0416d48SJason Wang err:
563b1f030a0SLaurent Vivier     vhost_vdpa_host_notifiers_uninit(dev, i - dev->vq_index);
564d0416d48SJason Wang     return;
565d0416d48SJason Wang }
566d0416d48SJason Wang 
567dff4426fSEugenio Pérez static void vhost_vdpa_svq_cleanup(struct vhost_dev *dev)
568dff4426fSEugenio Pérez {
569dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
570dff4426fSEugenio Pérez     size_t idx;
571dff4426fSEugenio Pérez 
572dff4426fSEugenio Pérez     if (!v->shadow_vqs) {
573dff4426fSEugenio Pérez         return;
574dff4426fSEugenio Pérez     }
575dff4426fSEugenio Pérez 
576dff4426fSEugenio Pérez     for (idx = 0; idx < v->shadow_vqs->len; ++idx) {
577dff4426fSEugenio Pérez         vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, idx));
578dff4426fSEugenio Pérez     }
579dff4426fSEugenio Pérez     g_ptr_array_free(v->shadow_vqs, true);
580dff4426fSEugenio Pérez }
581dff4426fSEugenio Pérez 
582108a6481SCindy Lu static int vhost_vdpa_cleanup(struct vhost_dev *dev)
583108a6481SCindy Lu {
584108a6481SCindy Lu     struct vhost_vdpa *v;
585108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
586108a6481SCindy Lu     v = dev->opaque;
587778e67deSLaurent Vivier     trace_vhost_vdpa_cleanup(dev, v);
588d0416d48SJason Wang     vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
589108a6481SCindy Lu     memory_listener_unregister(&v->listener);
590dff4426fSEugenio Pérez     vhost_vdpa_svq_cleanup(dev);
591108a6481SCindy Lu 
592108a6481SCindy Lu     dev->opaque = NULL;
593e1c1915bSDavid Hildenbrand     ram_block_discard_disable(false);
594e1c1915bSDavid Hildenbrand 
595108a6481SCindy Lu     return 0;
596108a6481SCindy Lu }
597108a6481SCindy Lu 
598108a6481SCindy Lu static int vhost_vdpa_memslots_limit(struct vhost_dev *dev)
599108a6481SCindy Lu {
600778e67deSLaurent Vivier     trace_vhost_vdpa_memslots_limit(dev, INT_MAX);
601108a6481SCindy Lu     return INT_MAX;
602108a6481SCindy Lu }
603108a6481SCindy Lu 
604108a6481SCindy Lu static int vhost_vdpa_set_mem_table(struct vhost_dev *dev,
605108a6481SCindy Lu                                     struct vhost_memory *mem)
606108a6481SCindy Lu {
607d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
6084d191cfdSJason Wang         return 0;
6094d191cfdSJason Wang     }
6104d191cfdSJason Wang 
611778e67deSLaurent Vivier     trace_vhost_vdpa_set_mem_table(dev, mem->nregions, mem->padding);
612778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_MEM_TABLE) &&
613778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_REGIONS)) {
614778e67deSLaurent Vivier         int i;
615778e67deSLaurent Vivier         for (i = 0; i < mem->nregions; i++) {
616778e67deSLaurent Vivier             trace_vhost_vdpa_dump_regions(dev, i,
617778e67deSLaurent Vivier                                           mem->regions[i].guest_phys_addr,
618778e67deSLaurent Vivier                                           mem->regions[i].memory_size,
619778e67deSLaurent Vivier                                           mem->regions[i].userspace_addr,
620778e67deSLaurent Vivier                                           mem->regions[i].flags_padding);
621778e67deSLaurent Vivier         }
622778e67deSLaurent Vivier     }
623108a6481SCindy Lu     if (mem->padding) {
6243631151bSRoman Kagan         return -EINVAL;
625108a6481SCindy Lu     }
626108a6481SCindy Lu 
627108a6481SCindy Lu     return 0;
628108a6481SCindy Lu }
629108a6481SCindy Lu 
630108a6481SCindy Lu static int vhost_vdpa_set_features(struct vhost_dev *dev,
631108a6481SCindy Lu                                    uint64_t features)
632108a6481SCindy Lu {
63312a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
634108a6481SCindy Lu     int ret;
6354d191cfdSJason Wang 
636d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
6374d191cfdSJason Wang         return 0;
6384d191cfdSJason Wang     }
6394d191cfdSJason Wang 
64012a195faSEugenio Pérez     if (v->shadow_vqs_enabled) {
64112a195faSEugenio Pérez         if ((v->acked_features ^ features) == BIT_ULL(VHOST_F_LOG_ALL)) {
64212a195faSEugenio Pérez             /*
64312a195faSEugenio Pérez              * QEMU is just trying to enable or disable logging. SVQ handles
64412a195faSEugenio Pérez              * this separately, so no need to forward this.
64512a195faSEugenio Pérez              */
64612a195faSEugenio Pérez             v->acked_features = features;
64712a195faSEugenio Pérez             return 0;
64812a195faSEugenio Pérez         }
64912a195faSEugenio Pérez 
65012a195faSEugenio Pérez         v->acked_features = features;
65112a195faSEugenio Pérez 
65212a195faSEugenio Pérez         /* We must not ack _F_LOG if SVQ is enabled */
65312a195faSEugenio Pérez         features &= ~BIT_ULL(VHOST_F_LOG_ALL);
65412a195faSEugenio Pérez     }
65512a195faSEugenio Pérez 
656778e67deSLaurent Vivier     trace_vhost_vdpa_set_features(dev, features);
657108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_SET_FEATURES, &features);
658108a6481SCindy Lu     if (ret) {
659108a6481SCindy Lu         return ret;
660108a6481SCindy Lu     }
661108a6481SCindy Lu 
6623631151bSRoman Kagan     return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_FEATURES_OK);
663108a6481SCindy Lu }
664108a6481SCindy Lu 
665a5bd0580SJason Wang static int vhost_vdpa_set_backend_cap(struct vhost_dev *dev)
666a5bd0580SJason Wang {
667a5bd0580SJason Wang     uint64_t features;
668a5bd0580SJason Wang     uint64_t f = 0x1ULL << VHOST_BACKEND_F_IOTLB_MSG_V2 |
669a5bd0580SJason Wang         0x1ULL << VHOST_BACKEND_F_IOTLB_BATCH;
670a5bd0580SJason Wang     int r;
671a5bd0580SJason Wang 
672a5bd0580SJason Wang     if (vhost_vdpa_call(dev, VHOST_GET_BACKEND_FEATURES, &features)) {
6732a83e97eSJason Wang         return -EFAULT;
674a5bd0580SJason Wang     }
675a5bd0580SJason Wang 
676a5bd0580SJason Wang     features &= f;
6774d191cfdSJason Wang 
678d71b0609SSi-Wei Liu     if (vhost_vdpa_first_dev(dev)) {
679a5bd0580SJason Wang         r = vhost_vdpa_call(dev, VHOST_SET_BACKEND_FEATURES, &features);
680a5bd0580SJason Wang         if (r) {
6812a83e97eSJason Wang             return -EFAULT;
682a5bd0580SJason Wang         }
6834d191cfdSJason Wang     }
684a5bd0580SJason Wang 
685a5bd0580SJason Wang     dev->backend_cap = features;
686a5bd0580SJason Wang 
687a5bd0580SJason Wang     return 0;
688a5bd0580SJason Wang }
689a5bd0580SJason Wang 
690c232b8f4SZenghui Yu static int vhost_vdpa_get_device_id(struct vhost_dev *dev,
691108a6481SCindy Lu                                     uint32_t *device_id)
692108a6481SCindy Lu {
693778e67deSLaurent Vivier     int ret;
694778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_DEVICE_ID, device_id);
695778e67deSLaurent Vivier     trace_vhost_vdpa_get_device_id(dev, *device_id);
696778e67deSLaurent Vivier     return ret;
697108a6481SCindy Lu }
698108a6481SCindy Lu 
699dff4426fSEugenio Pérez static void vhost_vdpa_reset_svq(struct vhost_vdpa *v)
700dff4426fSEugenio Pérez {
701dff4426fSEugenio Pérez     if (!v->shadow_vqs_enabled) {
702dff4426fSEugenio Pérez         return;
703dff4426fSEugenio Pérez     }
704dff4426fSEugenio Pérez 
705dff4426fSEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
706dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
707dff4426fSEugenio Pérez         vhost_svq_stop(svq);
708dff4426fSEugenio Pérez     }
709dff4426fSEugenio Pérez }
710dff4426fSEugenio Pérez 
711108a6481SCindy Lu static int vhost_vdpa_reset_device(struct vhost_dev *dev)
712108a6481SCindy Lu {
713dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
714778e67deSLaurent Vivier     int ret;
715108a6481SCindy Lu     uint8_t status = 0;
716108a6481SCindy Lu 
717dff4426fSEugenio Pérez     vhost_vdpa_reset_svq(v);
718dff4426fSEugenio Pérez 
719778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_STATUS, &status);
720778e67deSLaurent Vivier     trace_vhost_vdpa_reset_device(dev, status);
721778e67deSLaurent Vivier     return ret;
722108a6481SCindy Lu }
723108a6481SCindy Lu 
724108a6481SCindy Lu static int vhost_vdpa_get_vq_index(struct vhost_dev *dev, int idx)
725108a6481SCindy Lu {
726108a6481SCindy Lu     assert(idx >= dev->vq_index && idx < dev->vq_index + dev->nvqs);
727108a6481SCindy Lu 
728353244d8SJason Wang     trace_vhost_vdpa_get_vq_index(dev, idx, idx);
729353244d8SJason Wang     return idx;
730108a6481SCindy Lu }
731108a6481SCindy Lu 
732108a6481SCindy Lu static int vhost_vdpa_set_vring_ready(struct vhost_dev *dev)
733108a6481SCindy Lu {
734108a6481SCindy Lu     int i;
735778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_ready(dev);
736108a6481SCindy Lu     for (i = 0; i < dev->nvqs; ++i) {
737108a6481SCindy Lu         struct vhost_vring_state state = {
738108a6481SCindy Lu             .index = dev->vq_index + i,
739108a6481SCindy Lu             .num = 1,
740108a6481SCindy Lu         };
741108a6481SCindy Lu         vhost_vdpa_call(dev, VHOST_VDPA_SET_VRING_ENABLE, &state);
742108a6481SCindy Lu     }
743108a6481SCindy Lu     return 0;
744108a6481SCindy Lu }
745108a6481SCindy Lu 
746778e67deSLaurent Vivier static void vhost_vdpa_dump_config(struct vhost_dev *dev, const uint8_t *config,
747778e67deSLaurent Vivier                                    uint32_t config_len)
748778e67deSLaurent Vivier {
749778e67deSLaurent Vivier     int b, len;
750778e67deSLaurent Vivier     char line[QEMU_HEXDUMP_LINE_LEN];
751778e67deSLaurent Vivier 
752778e67deSLaurent Vivier     for (b = 0; b < config_len; b += 16) {
753778e67deSLaurent Vivier         len = config_len - b;
754778e67deSLaurent Vivier         qemu_hexdump_line(line, b, config, len, false);
755778e67deSLaurent Vivier         trace_vhost_vdpa_dump_config(dev, line);
756778e67deSLaurent Vivier     }
757778e67deSLaurent Vivier }
758778e67deSLaurent Vivier 
759108a6481SCindy Lu static int vhost_vdpa_set_config(struct vhost_dev *dev, const uint8_t *data,
760108a6481SCindy Lu                                    uint32_t offset, uint32_t size,
761108a6481SCindy Lu                                    uint32_t flags)
762108a6481SCindy Lu {
763108a6481SCindy Lu     struct vhost_vdpa_config *config;
764108a6481SCindy Lu     int ret;
765108a6481SCindy Lu     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
766986d4f78SLi Qiang 
767778e67deSLaurent Vivier     trace_vhost_vdpa_set_config(dev, offset, size, flags);
768108a6481SCindy Lu     config = g_malloc(size + config_size);
769108a6481SCindy Lu     config->off = offset;
770108a6481SCindy Lu     config->len = size;
771108a6481SCindy Lu     memcpy(config->buf, data, size);
772778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_SET_CONFIG) &&
773778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
774778e67deSLaurent Vivier         vhost_vdpa_dump_config(dev, data, size);
775778e67deSLaurent Vivier     }
776108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_VDPA_SET_CONFIG, config);
777108a6481SCindy Lu     g_free(config);
778108a6481SCindy Lu     return ret;
779108a6481SCindy Lu }
780108a6481SCindy Lu 
781108a6481SCindy Lu static int vhost_vdpa_get_config(struct vhost_dev *dev, uint8_t *config,
78250de5138SKevin Wolf                                    uint32_t config_len, Error **errp)
783108a6481SCindy Lu {
784108a6481SCindy Lu     struct vhost_vdpa_config *v_config;
785108a6481SCindy Lu     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
786108a6481SCindy Lu     int ret;
787108a6481SCindy Lu 
788778e67deSLaurent Vivier     trace_vhost_vdpa_get_config(dev, config, config_len);
789108a6481SCindy Lu     v_config = g_malloc(config_len + config_size);
790108a6481SCindy Lu     v_config->len = config_len;
791108a6481SCindy Lu     v_config->off = 0;
792108a6481SCindy Lu     ret = vhost_vdpa_call(dev, VHOST_VDPA_GET_CONFIG, v_config);
793108a6481SCindy Lu     memcpy(config, v_config->buf, config_len);
794108a6481SCindy Lu     g_free(v_config);
795778e67deSLaurent Vivier     if (trace_event_get_state_backends(TRACE_VHOST_VDPA_GET_CONFIG) &&
796778e67deSLaurent Vivier         trace_event_get_state_backends(TRACE_VHOST_VDPA_DUMP_CONFIG)) {
797778e67deSLaurent Vivier         vhost_vdpa_dump_config(dev, config, config_len);
798778e67deSLaurent Vivier     }
799108a6481SCindy Lu     return ret;
800108a6481SCindy Lu }
801108a6481SCindy Lu 
802d96be4c8SEugenio Pérez static int vhost_vdpa_set_dev_vring_base(struct vhost_dev *dev,
803d96be4c8SEugenio Pérez                                          struct vhost_vring_state *ring)
804d96be4c8SEugenio Pérez {
805d96be4c8SEugenio Pérez     trace_vhost_vdpa_set_vring_base(dev, ring->index, ring->num);
806d96be4c8SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_BASE, ring);
807d96be4c8SEugenio Pérez }
808d96be4c8SEugenio Pérez 
809dff4426fSEugenio Pérez static int vhost_vdpa_set_vring_dev_kick(struct vhost_dev *dev,
810dff4426fSEugenio Pérez                                          struct vhost_vring_file *file)
811dff4426fSEugenio Pérez {
812dff4426fSEugenio Pérez     trace_vhost_vdpa_set_vring_kick(dev, file->index, file->fd);
813dff4426fSEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_KICK, file);
814dff4426fSEugenio Pérez }
815dff4426fSEugenio Pérez 
816a8ac8858SEugenio Pérez static int vhost_vdpa_set_vring_dev_call(struct vhost_dev *dev,
817a8ac8858SEugenio Pérez                                          struct vhost_vring_file *file)
818a8ac8858SEugenio Pérez {
819a8ac8858SEugenio Pérez     trace_vhost_vdpa_set_vring_call(dev, file->index, file->fd);
820a8ac8858SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_CALL, file);
821a8ac8858SEugenio Pérez }
822a8ac8858SEugenio Pérez 
823d96be4c8SEugenio Pérez static int vhost_vdpa_set_vring_dev_addr(struct vhost_dev *dev,
824d96be4c8SEugenio Pérez                                          struct vhost_vring_addr *addr)
825d96be4c8SEugenio Pérez {
826d96be4c8SEugenio Pérez     trace_vhost_vdpa_set_vring_addr(dev, addr->index, addr->flags,
827d96be4c8SEugenio Pérez                                 addr->desc_user_addr, addr->used_user_addr,
828d96be4c8SEugenio Pérez                                 addr->avail_user_addr,
829d96be4c8SEugenio Pérez                                 addr->log_guest_addr);
830d96be4c8SEugenio Pérez 
831d96be4c8SEugenio Pérez     return vhost_vdpa_call(dev, VHOST_SET_VRING_ADDR, addr);
832d96be4c8SEugenio Pérez 
833d96be4c8SEugenio Pérez }
834d96be4c8SEugenio Pérez 
835dff4426fSEugenio Pérez /**
836dff4426fSEugenio Pérez  * Set the shadow virtqueue descriptors to the device
837dff4426fSEugenio Pérez  *
838dff4426fSEugenio Pérez  * @dev: The vhost device model
839dff4426fSEugenio Pérez  * @svq: The shadow virtqueue
840dff4426fSEugenio Pérez  * @idx: The index of the virtqueue in the vhost device
841dff4426fSEugenio Pérez  * @errp: Error
842a8ac8858SEugenio Pérez  *
843a8ac8858SEugenio Pérez  * Note that this function does not rewind the kick file descriptor if it
844a8ac8858SEugenio Pérez  * cannot set the call one.
845dff4426fSEugenio Pérez  */
846100890f7SEugenio Pérez static int vhost_vdpa_svq_set_fds(struct vhost_dev *dev,
847dff4426fSEugenio Pérez                                   VhostShadowVirtqueue *svq, unsigned idx,
848dff4426fSEugenio Pérez                                   Error **errp)
849dff4426fSEugenio Pérez {
850dff4426fSEugenio Pérez     struct vhost_vring_file file = {
851dff4426fSEugenio Pérez         .index = dev->vq_index + idx,
852dff4426fSEugenio Pérez     };
853dff4426fSEugenio Pérez     const EventNotifier *event_notifier = &svq->hdev_kick;
854dff4426fSEugenio Pérez     int r;
855dff4426fSEugenio Pérez 
856dff4426fSEugenio Pérez     file.fd = event_notifier_get_fd(event_notifier);
857dff4426fSEugenio Pérez     r = vhost_vdpa_set_vring_dev_kick(dev, &file);
858dff4426fSEugenio Pérez     if (unlikely(r != 0)) {
859dff4426fSEugenio Pérez         error_setg_errno(errp, -r, "Can't set device kick fd");
860100890f7SEugenio Pérez         return r;
861a8ac8858SEugenio Pérez     }
862a8ac8858SEugenio Pérez 
863a8ac8858SEugenio Pérez     event_notifier = &svq->hdev_call;
864a8ac8858SEugenio Pérez     file.fd = event_notifier_get_fd(event_notifier);
865a8ac8858SEugenio Pérez     r = vhost_vdpa_set_vring_dev_call(dev, &file);
866a8ac8858SEugenio Pérez     if (unlikely(r != 0)) {
867a8ac8858SEugenio Pérez         error_setg_errno(errp, -r, "Can't set device call fd");
868dff4426fSEugenio Pérez     }
869dff4426fSEugenio Pérez 
870100890f7SEugenio Pérez     return r;
871100890f7SEugenio Pérez }
872100890f7SEugenio Pérez 
873100890f7SEugenio Pérez /**
874100890f7SEugenio Pérez  * Unmap a SVQ area in the device
875100890f7SEugenio Pérez  */
87634e3c94eSEugenio Pérez static bool vhost_vdpa_svq_unmap_ring(struct vhost_vdpa *v,
87734e3c94eSEugenio Pérez                                       const DMAMap *needle)
878100890f7SEugenio Pérez {
87934e3c94eSEugenio Pérez     const DMAMap *result = vhost_iova_tree_find_iova(v->iova_tree, needle);
88034e3c94eSEugenio Pérez     hwaddr size;
881100890f7SEugenio Pérez     int r;
882100890f7SEugenio Pérez 
88334e3c94eSEugenio Pérez     if (unlikely(!result)) {
88434e3c94eSEugenio Pérez         error_report("Unable to find SVQ address to unmap");
88534e3c94eSEugenio Pérez         return false;
88634e3c94eSEugenio Pérez     }
88734e3c94eSEugenio Pérez 
8888e3b0cbbSMarc-André Lureau     size = ROUND_UP(result->size, qemu_real_host_page_size());
88934e3c94eSEugenio Pérez     r = vhost_vdpa_dma_unmap(v, result->iova, size);
890100890f7SEugenio Pérez     return r == 0;
891100890f7SEugenio Pérez }
892100890f7SEugenio Pérez 
893100890f7SEugenio Pérez static bool vhost_vdpa_svq_unmap_rings(struct vhost_dev *dev,
894100890f7SEugenio Pérez                                        const VhostShadowVirtqueue *svq)
895100890f7SEugenio Pérez {
89634e3c94eSEugenio Pérez     DMAMap needle = {};
897100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
898100890f7SEugenio Pérez     struct vhost_vring_addr svq_addr;
899100890f7SEugenio Pérez     bool ok;
900100890f7SEugenio Pérez 
901100890f7SEugenio Pérez     vhost_svq_get_vring_addr(svq, &svq_addr);
902100890f7SEugenio Pérez 
90334e3c94eSEugenio Pérez     needle.translated_addr = svq_addr.desc_user_addr;
90434e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_unmap_ring(v, &needle);
905100890f7SEugenio Pérez     if (unlikely(!ok)) {
906100890f7SEugenio Pérez         return false;
907100890f7SEugenio Pérez     }
908100890f7SEugenio Pérez 
90934e3c94eSEugenio Pérez     needle.translated_addr = svq_addr.used_user_addr;
91034e3c94eSEugenio Pérez     return vhost_vdpa_svq_unmap_ring(v, &needle);
91134e3c94eSEugenio Pérez }
91234e3c94eSEugenio Pérez 
91334e3c94eSEugenio Pérez /**
91434e3c94eSEugenio Pérez  * Map the SVQ area in the device
91534e3c94eSEugenio Pérez  *
91634e3c94eSEugenio Pérez  * @v: Vhost-vdpa device
91734e3c94eSEugenio Pérez  * @needle: The area to search iova
91834e3c94eSEugenio Pérez  * @errp: Error pointer
91934e3c94eSEugenio Pérez  */
92034e3c94eSEugenio Pérez static bool vhost_vdpa_svq_map_ring(struct vhost_vdpa *v, DMAMap *needle,
92134e3c94eSEugenio Pérez                                     Error **errp)
92234e3c94eSEugenio Pérez {
92334e3c94eSEugenio Pérez     int r;
92434e3c94eSEugenio Pérez 
92534e3c94eSEugenio Pérez     r = vhost_iova_tree_map_alloc(v->iova_tree, needle);
92634e3c94eSEugenio Pérez     if (unlikely(r != IOVA_OK)) {
92734e3c94eSEugenio Pérez         error_setg(errp, "Cannot allocate iova (%d)", r);
92834e3c94eSEugenio Pérez         return false;
92934e3c94eSEugenio Pérez     }
93034e3c94eSEugenio Pérez 
93134e3c94eSEugenio Pérez     r = vhost_vdpa_dma_map(v, needle->iova, needle->size + 1,
93234e3c94eSEugenio Pérez                            (void *)(uintptr_t)needle->translated_addr,
93334e3c94eSEugenio Pérez                            needle->perm == IOMMU_RO);
93434e3c94eSEugenio Pérez     if (unlikely(r != 0)) {
93534e3c94eSEugenio Pérez         error_setg_errno(errp, -r, "Cannot map region to device");
93634e3c94eSEugenio Pérez         vhost_iova_tree_remove(v->iova_tree, needle);
93734e3c94eSEugenio Pérez     }
93834e3c94eSEugenio Pérez 
93934e3c94eSEugenio Pérez     return r == 0;
940100890f7SEugenio Pérez }
941100890f7SEugenio Pérez 
942100890f7SEugenio Pérez /**
943100890f7SEugenio Pérez  * Map the shadow virtqueue rings in the device
944100890f7SEugenio Pérez  *
945100890f7SEugenio Pérez  * @dev: The vhost device
946100890f7SEugenio Pérez  * @svq: The shadow virtqueue
947100890f7SEugenio Pérez  * @addr: Assigned IOVA addresses
948100890f7SEugenio Pérez  * @errp: Error pointer
949100890f7SEugenio Pérez  */
950100890f7SEugenio Pérez static bool vhost_vdpa_svq_map_rings(struct vhost_dev *dev,
951100890f7SEugenio Pérez                                      const VhostShadowVirtqueue *svq,
952100890f7SEugenio Pérez                                      struct vhost_vring_addr *addr,
953100890f7SEugenio Pérez                                      Error **errp)
954100890f7SEugenio Pérez {
95534e3c94eSEugenio Pérez     DMAMap device_region, driver_region;
95634e3c94eSEugenio Pérez     struct vhost_vring_addr svq_addr;
957100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
958100890f7SEugenio Pérez     size_t device_size = vhost_svq_device_area_size(svq);
959100890f7SEugenio Pérez     size_t driver_size = vhost_svq_driver_area_size(svq);
96034e3c94eSEugenio Pérez     size_t avail_offset;
96134e3c94eSEugenio Pérez     bool ok;
962100890f7SEugenio Pérez 
963100890f7SEugenio Pérez     ERRP_GUARD();
96434e3c94eSEugenio Pérez     vhost_svq_get_vring_addr(svq, &svq_addr);
965100890f7SEugenio Pérez 
96634e3c94eSEugenio Pérez     driver_region = (DMAMap) {
96734e3c94eSEugenio Pérez         .translated_addr = svq_addr.desc_user_addr,
96834e3c94eSEugenio Pérez         .size = driver_size - 1,
96934e3c94eSEugenio Pérez         .perm = IOMMU_RO,
97034e3c94eSEugenio Pérez     };
97134e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_map_ring(v, &driver_region, errp);
97234e3c94eSEugenio Pérez     if (unlikely(!ok)) {
97334e3c94eSEugenio Pérez         error_prepend(errp, "Cannot create vq driver region: ");
974100890f7SEugenio Pérez         return false;
975100890f7SEugenio Pérez     }
97634e3c94eSEugenio Pérez     addr->desc_user_addr = driver_region.iova;
97734e3c94eSEugenio Pérez     avail_offset = svq_addr.avail_user_addr - svq_addr.desc_user_addr;
97834e3c94eSEugenio Pérez     addr->avail_user_addr = driver_region.iova + avail_offset;
979100890f7SEugenio Pérez 
98034e3c94eSEugenio Pérez     device_region = (DMAMap) {
98134e3c94eSEugenio Pérez         .translated_addr = svq_addr.used_user_addr,
98234e3c94eSEugenio Pérez         .size = device_size - 1,
98334e3c94eSEugenio Pérez         .perm = IOMMU_RW,
98434e3c94eSEugenio Pérez     };
98534e3c94eSEugenio Pérez     ok = vhost_vdpa_svq_map_ring(v, &device_region, errp);
98634e3c94eSEugenio Pérez     if (unlikely(!ok)) {
98734e3c94eSEugenio Pérez         error_prepend(errp, "Cannot create vq device region: ");
98834e3c94eSEugenio Pérez         vhost_vdpa_svq_unmap_ring(v, &driver_region);
989100890f7SEugenio Pérez     }
99034e3c94eSEugenio Pérez     addr->used_user_addr = device_region.iova;
991100890f7SEugenio Pérez 
99234e3c94eSEugenio Pérez     return ok;
993100890f7SEugenio Pérez }
994100890f7SEugenio Pérez 
995100890f7SEugenio Pérez static bool vhost_vdpa_svq_setup(struct vhost_dev *dev,
996100890f7SEugenio Pérez                                  VhostShadowVirtqueue *svq, unsigned idx,
997100890f7SEugenio Pérez                                  Error **errp)
998100890f7SEugenio Pérez {
999100890f7SEugenio Pérez     uint16_t vq_index = dev->vq_index + idx;
1000100890f7SEugenio Pérez     struct vhost_vring_state s = {
1001100890f7SEugenio Pérez         .index = vq_index,
1002100890f7SEugenio Pérez     };
1003100890f7SEugenio Pérez     int r;
1004100890f7SEugenio Pérez 
1005100890f7SEugenio Pérez     r = vhost_vdpa_set_dev_vring_base(dev, &s);
1006100890f7SEugenio Pérez     if (unlikely(r)) {
1007100890f7SEugenio Pérez         error_setg_errno(errp, -r, "Cannot set vring base");
1008100890f7SEugenio Pérez         return false;
1009100890f7SEugenio Pérez     }
1010100890f7SEugenio Pérez 
1011100890f7SEugenio Pérez     r = vhost_vdpa_svq_set_fds(dev, svq, idx, errp);
1012dff4426fSEugenio Pérez     return r == 0;
1013dff4426fSEugenio Pérez }
1014dff4426fSEugenio Pérez 
1015dff4426fSEugenio Pérez static bool vhost_vdpa_svqs_start(struct vhost_dev *dev)
1016dff4426fSEugenio Pérez {
1017dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1018dff4426fSEugenio Pérez     Error *err = NULL;
1019dff4426fSEugenio Pérez     unsigned i;
1020dff4426fSEugenio Pérez 
1021dff4426fSEugenio Pérez     if (!v->shadow_vqs) {
1022dff4426fSEugenio Pérez         return true;
1023dff4426fSEugenio Pérez     }
1024dff4426fSEugenio Pérez 
1025dff4426fSEugenio Pérez     for (i = 0; i < v->shadow_vqs->len; ++i) {
1026100890f7SEugenio Pérez         VirtQueue *vq = virtio_get_queue(dev->vdev, dev->vq_index + i);
1027dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1028100890f7SEugenio Pérez         struct vhost_vring_addr addr = {
10291c82fdfeSEugenio Pérez             .index = dev->vq_index + i,
1030100890f7SEugenio Pérez         };
1031100890f7SEugenio Pérez         int r;
1032dff4426fSEugenio Pérez         bool ok = vhost_vdpa_svq_setup(dev, svq, i, &err);
1033dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1034100890f7SEugenio Pérez             goto err;
1035100890f7SEugenio Pérez         }
1036100890f7SEugenio Pérez 
1037100890f7SEugenio Pérez         vhost_svq_start(svq, dev->vdev, vq);
1038100890f7SEugenio Pérez         ok = vhost_vdpa_svq_map_rings(dev, svq, &addr, &err);
1039100890f7SEugenio Pérez         if (unlikely(!ok)) {
1040100890f7SEugenio Pérez             goto err_map;
1041100890f7SEugenio Pérez         }
1042100890f7SEugenio Pérez 
1043100890f7SEugenio Pérez         /* Override vring GPA set by vhost subsystem */
1044100890f7SEugenio Pérez         r = vhost_vdpa_set_vring_dev_addr(dev, &addr);
1045100890f7SEugenio Pérez         if (unlikely(r != 0)) {
1046100890f7SEugenio Pérez             error_setg_errno(&err, -r, "Cannot set device address");
1047100890f7SEugenio Pérez             goto err_set_addr;
1048100890f7SEugenio Pérez         }
1049100890f7SEugenio Pérez     }
1050100890f7SEugenio Pérez 
1051100890f7SEugenio Pérez     return true;
1052100890f7SEugenio Pérez 
1053100890f7SEugenio Pérez err_set_addr:
1054100890f7SEugenio Pérez     vhost_vdpa_svq_unmap_rings(dev, g_ptr_array_index(v->shadow_vqs, i));
1055100890f7SEugenio Pérez 
1056100890f7SEugenio Pérez err_map:
1057100890f7SEugenio Pérez     vhost_svq_stop(g_ptr_array_index(v->shadow_vqs, i));
1058100890f7SEugenio Pérez 
1059100890f7SEugenio Pérez err:
1060dff4426fSEugenio Pérez     error_reportf_err(err, "Cannot setup SVQ %u: ", i);
1061100890f7SEugenio Pérez     for (unsigned j = 0; j < i; ++j) {
1062100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, j);
1063100890f7SEugenio Pérez         vhost_vdpa_svq_unmap_rings(dev, svq);
1064100890f7SEugenio Pérez         vhost_svq_stop(svq);
1065100890f7SEugenio Pérez     }
1066100890f7SEugenio Pérez 
1067100890f7SEugenio Pérez     return false;
1068100890f7SEugenio Pérez }
1069100890f7SEugenio Pérez 
1070100890f7SEugenio Pérez static bool vhost_vdpa_svqs_stop(struct vhost_dev *dev)
1071100890f7SEugenio Pérez {
1072100890f7SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1073100890f7SEugenio Pérez 
1074100890f7SEugenio Pérez     if (!v->shadow_vqs) {
1075100890f7SEugenio Pérez         return true;
1076100890f7SEugenio Pérez     }
1077100890f7SEugenio Pérez 
1078100890f7SEugenio Pérez     for (unsigned i = 0; i < v->shadow_vqs->len; ++i) {
1079100890f7SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, i);
1080100890f7SEugenio Pérez         bool ok = vhost_vdpa_svq_unmap_rings(dev, svq);
1081100890f7SEugenio Pérez         if (unlikely(!ok)) {
1082dff4426fSEugenio Pérez             return false;
1083dff4426fSEugenio Pérez         }
1084dff4426fSEugenio Pérez     }
1085dff4426fSEugenio Pérez 
1086dff4426fSEugenio Pérez     return true;
1087dff4426fSEugenio Pérez }
1088dff4426fSEugenio Pérez 
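/*
 * Start or stop one vhost_dev of the device.  On start, host notifiers and
 * (when enabled) shadow virtqueues are brought up and the vrings are marked
 * ready; on stop, the shadow virtqueues are unmapped and the notifiers
 * removed.  Only the last vhost_dev of the device (vq_index_end) registers
 * the memory listener and sets DRIVER_OK, or resets the device on stop.
 */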
1089108a6481SCindy Lu static int vhost_vdpa_dev_start(struct vhost_dev *dev, bool started)
1090108a6481SCindy Lu {
1091108a6481SCindy Lu     struct vhost_vdpa *v = dev->opaque;
1092dff4426fSEugenio Pérez     bool ok;
1093778e67deSLaurent Vivier     trace_vhost_vdpa_dev_start(dev, started);
10944d191cfdSJason Wang 
10954d191cfdSJason Wang     if (started) {
10964d191cfdSJason Wang         vhost_vdpa_host_notifiers_init(dev);
1097dff4426fSEugenio Pérez         ok = vhost_vdpa_svqs_start(dev);
1098dff4426fSEugenio Pérez         if (unlikely(!ok)) {
1099dff4426fSEugenio Pérez             return -1;
1100dff4426fSEugenio Pérez         }
11014d191cfdSJason Wang         vhost_vdpa_set_vring_ready(dev);
11024d191cfdSJason Wang     } else {
1103100890f7SEugenio Pérez         ok = vhost_vdpa_svqs_stop(dev);
1104100890f7SEugenio Pérez         if (unlikely(!ok)) {
1105100890f7SEugenio Pérez             return -1;
1106100890f7SEugenio Pérez         }
11074d191cfdSJason Wang         vhost_vdpa_host_notifiers_uninit(dev, dev->nvqs);
11084d191cfdSJason Wang     }
11094d191cfdSJason Wang 
1110245cf2c2SEugenio Pérez     if (dev->vq_index + dev->nvqs != dev->vq_index_end) {
11114d191cfdSJason Wang         return 0;
11124d191cfdSJason Wang     }
11134d191cfdSJason Wang 
1114108a6481SCindy Lu     if (started) {
1115108a6481SCindy Lu         memory_listener_register(&v->listener, &address_space_memory);
11163631151bSRoman Kagan         return vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_DRIVER_OK);
1117108a6481SCindy Lu     } else {
1118108a6481SCindy Lu         vhost_vdpa_reset_device(dev);
1119108a6481SCindy Lu         vhost_vdpa_add_status(dev, VIRTIO_CONFIG_S_ACKNOWLEDGE |
1120108a6481SCindy Lu                                    VIRTIO_CONFIG_S_DRIVER);
1121108a6481SCindy Lu         memory_listener_unregister(&v->listener);
1122108a6481SCindy Lu 
1123108a6481SCindy Lu         return 0;
1124108a6481SCindy Lu     }
1125108a6481SCindy Lu }
1126108a6481SCindy Lu 
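/*
 * With shadow virtqueues enabled, dirty pages are tracked by SVQ in QEMU,
 * so no log base is forwarded to the device.  The plain backend call is
 * also issued only once, by the first vhost_dev of the device.
 */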
1127108a6481SCindy Lu static int vhost_vdpa_set_log_base(struct vhost_dev *dev, uint64_t base,
1128108a6481SCindy Lu                                      struct vhost_log *log)
1129108a6481SCindy Lu {
1130773ebc95SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1131d71b0609SSi-Wei Liu     if (v->shadow_vqs_enabled || !vhost_vdpa_first_dev(dev)) {
11324d191cfdSJason Wang         return 0;
11334d191cfdSJason Wang     }
11344d191cfdSJason Wang 
1135778e67deSLaurent Vivier     trace_vhost_vdpa_set_log_base(dev, base, log->size, log->refcnt, log->fd,
1136778e67deSLaurent Vivier                                   log->log);
1137108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_LOG_BASE, &base);
1138108a6481SCindy Lu }
1139108a6481SCindy Lu 
1140108a6481SCindy Lu static int vhost_vdpa_set_vring_addr(struct vhost_dev *dev,
1141108a6481SCindy Lu                                        struct vhost_vring_addr *addr)
1142108a6481SCindy Lu {
1143d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1144d96be4c8SEugenio Pérez 
1145d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1146d96be4c8SEugenio Pérez         /*
1147d96be4c8SEugenio Pérez          * The device vring address was set at device start. The SVQ vring
1148d96be4c8SEugenio Pérez          * address is handled by the VirtQueue code.
1149d96be4c8SEugenio Pérez          */
1150d96be4c8SEugenio Pérez         return 0;
1151d96be4c8SEugenio Pérez     }
1152d96be4c8SEugenio Pérez 
1153d96be4c8SEugenio Pérez     return vhost_vdpa_set_vring_dev_addr(dev, addr);
1154108a6481SCindy Lu }
1155108a6481SCindy Lu 
1156108a6481SCindy Lu static int vhost_vdpa_set_vring_num(struct vhost_dev *dev,
1157108a6481SCindy Lu                                       struct vhost_vring_state *ring)
1158108a6481SCindy Lu {
1159778e67deSLaurent Vivier     trace_vhost_vdpa_set_vring_num(dev, ring->index, ring->num);
1160108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_VRING_NUM, ring);
1161108a6481SCindy Lu }
1162108a6481SCindy Lu 
1163108a6481SCindy Lu static int vhost_vdpa_set_vring_base(struct vhost_dev *dev,
1164108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1165108a6481SCindy Lu {
1166d96be4c8SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1167d96be4c8SEugenio Pérez 
1168d96be4c8SEugenio Pérez     if (v->shadow_vqs_enabled) {
1169d96be4c8SEugenio Pérez          * The device vring base was set at device start. The SVQ vring
1170d96be4c8SEugenio Pérez          * base is handled by the VirtQueue code.
1171d96be4c8SEugenio Pérez          * VirtQueue code.
1172d96be4c8SEugenio Pérez          */
1173d96be4c8SEugenio Pérez         return 0;
1174d96be4c8SEugenio Pérez     }
1175d96be4c8SEugenio Pérez 
1176d96be4c8SEugenio Pérez     return vhost_vdpa_set_dev_vring_base(dev, ring);
1177108a6481SCindy Lu }
1178108a6481SCindy Lu 
1179108a6481SCindy Lu static int vhost_vdpa_get_vring_base(struct vhost_dev *dev,
1180108a6481SCindy Lu                                        struct vhost_vring_state *ring)
1181108a6481SCindy Lu {
11826d0b2226SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
118363903647SEugenio Pérez     int vdpa_idx = ring->index - dev->vq_index;
1184778e67deSLaurent Vivier     int ret;
1185778e67deSLaurent Vivier 
11866d0b2226SEugenio Pérez     if (v->shadow_vqs_enabled) {
118763903647SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
11886d0b2226SEugenio Pérez 
11896d0b2226SEugenio Pérez         /*
11906d0b2226SEugenio Pérez          * Set the base to the last used idx, so the destination will see as
11916d0b2226SEugenio Pérez          * available all the entries that the device did not use, including
11926d0b2226SEugenio Pérez          * the ones still being processed in flight.
11936d0b2226SEugenio Pérez          *
11946d0b2226SEugenio Pérez          * TODO: This is fine for networking, but other kinds of devices
11956d0b2226SEugenio Pérez          * might have problems with these retransmissions.
11966d0b2226SEugenio Pérez          */
11976d0b2226SEugenio Pérez         ring->num = svq->last_used_idx;
11986d0b2226SEugenio Pérez         return 0;
11996d0b2226SEugenio Pérez     }
12006d0b2226SEugenio Pérez 
1201778e67deSLaurent Vivier     ret = vhost_vdpa_call(dev, VHOST_GET_VRING_BASE, ring);
1202778e67deSLaurent Vivier     trace_vhost_vdpa_get_vring_base(dev, ring->index, ring->num);
1203778e67deSLaurent Vivier     return ret;
1204108a6481SCindy Lu }
1205108a6481SCindy Lu 
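/*
 * With shadow virtqueues enabled, the guest kick fd is handed to the SVQ,
 * which polls it and forwards the notification to the device through its
 * own kick fd; otherwise the fd is passed straight to the backend.
 */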
1206108a6481SCindy Lu static int vhost_vdpa_set_vring_kick(struct vhost_dev *dev,
1207108a6481SCindy Lu                                        struct vhost_vring_file *file)
1208108a6481SCindy Lu {
1209dff4426fSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1210dff4426fSEugenio Pérez     int vdpa_idx = file->index - dev->vq_index;
1211dff4426fSEugenio Pérez 
1212dff4426fSEugenio Pérez     if (v->shadow_vqs_enabled) {
1213dff4426fSEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1214dff4426fSEugenio Pérez         vhost_svq_set_svq_kick_fd(svq, file->fd);
1215dff4426fSEugenio Pérez         return 0;
1216dff4426fSEugenio Pérez     } else {
1217dff4426fSEugenio Pérez         return vhost_vdpa_set_vring_dev_kick(dev, file);
1218dff4426fSEugenio Pérez     }
1219108a6481SCindy Lu }
1220108a6481SCindy Lu 
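/*
 * Like the kick fd: with shadow virtqueues enabled, the guest call fd is
 * owned by the SVQ, which uses it to notify the guest after forwarding the
 * device's used buffers; otherwise it is passed straight to the backend.
 */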
1221108a6481SCindy Lu static int vhost_vdpa_set_vring_call(struct vhost_dev *dev,
1222108a6481SCindy Lu                                        struct vhost_vring_file *file)
1223108a6481SCindy Lu {
1224a8ac8858SEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
1225a8ac8858SEugenio Pérez 
1226a8ac8858SEugenio Pérez     if (v->shadow_vqs_enabled) {
1227a8ac8858SEugenio Pérez         int vdpa_idx = file->index - dev->vq_index;
1228a8ac8858SEugenio Pérez         VhostShadowVirtqueue *svq = g_ptr_array_index(v->shadow_vqs, vdpa_idx);
1229a8ac8858SEugenio Pérez 
1230a8ac8858SEugenio Pérez         vhost_svq_set_svq_call_fd(svq, file->fd);
1231a8ac8858SEugenio Pérez         return 0;
1232a8ac8858SEugenio Pérez     } else {
1233a8ac8858SEugenio Pérez         return vhost_vdpa_set_vring_dev_call(dev, file);
1234a8ac8858SEugenio Pérez     }
1235108a6481SCindy Lu }
1236108a6481SCindy Lu 
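/*
 * With shadow virtqueues enabled, VHOST_F_LOG_ALL is advertised on top of
 * the device features because SVQ can track dirty memory on its own.
 */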
1237108a6481SCindy Lu static int vhost_vdpa_get_features(struct vhost_dev *dev,
1238108a6481SCindy Lu                                      uint64_t *features)
1239108a6481SCindy Lu {
124012a195faSEugenio Pérez     struct vhost_vdpa *v = dev->opaque;
124112a195faSEugenio Pérez     int ret = vhost_vdpa_get_dev_features(dev, features);
1242778e67deSLaurent Vivier 
124312a195faSEugenio Pérez     if (ret == 0 && v->shadow_vqs_enabled) {
124412a195faSEugenio Pérez         /* Add SVQ logging capabilities */
124512a195faSEugenio Pérez         *features |= BIT_ULL(VHOST_F_LOG_ALL);
124612a195faSEugenio Pérez     }
124712a195faSEugenio Pérez 
1248778e67deSLaurent Vivier     return ret;
1249108a6481SCindy Lu }
1250108a6481SCindy Lu 
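/* VHOST_SET_OWNER only needs to be issued once, by the first vhost_dev. */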
1251108a6481SCindy Lu static int vhost_vdpa_set_owner(struct vhost_dev *dev)
1252108a6481SCindy Lu {
1253d71b0609SSi-Wei Liu     if (!vhost_vdpa_first_dev(dev)) {
12544d191cfdSJason Wang         return 0;
12554d191cfdSJason Wang     }
12564d191cfdSJason Wang 
1257778e67deSLaurent Vivier     trace_vhost_vdpa_set_owner(dev);
1258108a6481SCindy Lu     return vhost_vdpa_call(dev, VHOST_SET_OWNER, NULL);
1259108a6481SCindy Lu }
1260108a6481SCindy Lu 
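/*
 * vDPA devices work with guest physical addresses, so report the GPAs
 * recorded in vhost_virtqueue (*_phys) rather than QEMU userspace
 * addresses.
 */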
1261108a6481SCindy Lu static int vhost_vdpa_vq_get_addr(struct vhost_dev *dev,
1262108a6481SCindy Lu                     struct vhost_vring_addr *addr, struct vhost_virtqueue *vq)
1263108a6481SCindy Lu {
1264108a6481SCindy Lu     assert(dev->vhost_ops->backend_type == VHOST_BACKEND_TYPE_VDPA);
1265108a6481SCindy Lu     addr->desc_user_addr = (uint64_t)(unsigned long)vq->desc_phys;
1266108a6481SCindy Lu     addr->avail_user_addr = (uint64_t)(unsigned long)vq->avail_phys;
1267108a6481SCindy Lu     addr->used_user_addr = (uint64_t)(unsigned long)vq->used_phys;
1268778e67deSLaurent Vivier     trace_vhost_vdpa_vq_get_addr(dev, vq, addr->desc_user_addr,
1269778e67deSLaurent Vivier                                  addr->avail_user_addr, addr->used_user_addr);
1270108a6481SCindy Lu     return 0;
1271108a6481SCindy Lu }
1272108a6481SCindy Lu 
1273108a6481SCindy Lu static bool vhost_vdpa_force_iommu(struct vhost_dev *dev)
1274108a6481SCindy Lu {
1275108a6481SCindy Lu     return true;
1276108a6481SCindy Lu }
1277108a6481SCindy Lu 
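/*
 * vhost backend operations for vhost-vdpa.  Callbacks left NULL are either
 * not needed by vDPA or handled elsewhere; in particular, IOTLB updates go
 * through the memory listener rather than the vhost IOTLB callbacks.
 */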
1278108a6481SCindy Lu const VhostOps vdpa_ops = {
1279108a6481SCindy Lu         .backend_type = VHOST_BACKEND_TYPE_VDPA,
1280108a6481SCindy Lu         .vhost_backend_init = vhost_vdpa_init,
1281108a6481SCindy Lu         .vhost_backend_cleanup = vhost_vdpa_cleanup,
1282108a6481SCindy Lu         .vhost_set_log_base = vhost_vdpa_set_log_base,
1283108a6481SCindy Lu         .vhost_set_vring_addr = vhost_vdpa_set_vring_addr,
1284108a6481SCindy Lu         .vhost_set_vring_num = vhost_vdpa_set_vring_num,
1285108a6481SCindy Lu         .vhost_set_vring_base = vhost_vdpa_set_vring_base,
1286108a6481SCindy Lu         .vhost_get_vring_base = vhost_vdpa_get_vring_base,
1287108a6481SCindy Lu         .vhost_set_vring_kick = vhost_vdpa_set_vring_kick,
1288108a6481SCindy Lu         .vhost_set_vring_call = vhost_vdpa_set_vring_call,
1289108a6481SCindy Lu         .vhost_get_features = vhost_vdpa_get_features,
1290a5bd0580SJason Wang         .vhost_set_backend_cap = vhost_vdpa_set_backend_cap,
1291108a6481SCindy Lu         .vhost_set_owner = vhost_vdpa_set_owner,
1292108a6481SCindy Lu         .vhost_set_vring_endian = NULL,
1293108a6481SCindy Lu         .vhost_backend_memslots_limit = vhost_vdpa_memslots_limit,
1294108a6481SCindy Lu         .vhost_set_mem_table = vhost_vdpa_set_mem_table,
1295108a6481SCindy Lu         .vhost_set_features = vhost_vdpa_set_features,
1296108a6481SCindy Lu         .vhost_reset_device = vhost_vdpa_reset_device,
1297108a6481SCindy Lu         .vhost_get_vq_index = vhost_vdpa_get_vq_index,
1298108a6481SCindy Lu         .vhost_get_config  = vhost_vdpa_get_config,
1299108a6481SCindy Lu         .vhost_set_config = vhost_vdpa_set_config,
1300108a6481SCindy Lu         .vhost_requires_shm_log = NULL,
1301108a6481SCindy Lu         .vhost_migration_done = NULL,
1302108a6481SCindy Lu         .vhost_backend_can_merge = NULL,
1303108a6481SCindy Lu         .vhost_net_set_mtu = NULL,
1304108a6481SCindy Lu         .vhost_set_iotlb_callback = NULL,
1305108a6481SCindy Lu         .vhost_send_device_iotlb_msg = NULL,
1306108a6481SCindy Lu         .vhost_dev_start = vhost_vdpa_dev_start,
1307108a6481SCindy Lu         .vhost_get_device_id = vhost_vdpa_get_device_id,
1308108a6481SCindy Lu         .vhost_vq_get_addr = vhost_vdpa_vq_get_addr,
1309108a6481SCindy Lu         .vhost_force_iommu = vhost_vdpa_force_iommu,
1310108a6481SCindy Lu };
1311