/*
 * vhost-vdpa.c
 *
 * Copyright(c) 2017-2018 Intel Corporation.
 * Copyright(c) 2020 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "clients.h"
#include "hw/virtio/virtio-net.h"
#include "net/vhost_net.h"
#include "net/vhost-vdpa.h"
#include "hw/virtio/vhost-vdpa.h"
#include "qemu/config-file.h"
#include "qemu/error-report.h"
#include "qemu/log.h"
#include "qemu/memalign.h"
#include "qemu/option.h"
#include "qapi/error.h"
#include <linux/vhost.h>
#include <sys/ioctl.h>
#include <err.h>
#include "standard-headers/linux/virtio_net.h"
#include "monitor/monitor.h"
#include "migration/misc.h"
#include "hw/virtio/vhost.h"
#include "trace.h"

/* TODO: add multiqueue support here */
typedef struct VhostVDPAState {
    NetClientState nc;
    struct vhost_vdpa vhost_vdpa;
    NotifierWithReturn migration_state;
    VHostNetState *vhost_net;

    /* Control commands shadow buffers */
    void *cvq_cmd_out_buffer;
    virtio_net_ctrl_ack *status;

    /* The device always has SVQ enabled */
    bool always_svq;

    /* The device can isolate CVQ in its own ASID */
    bool cvq_isolated;

    bool started;
} VhostVDPAState;

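/*
 * One VhostVDPAState exists per queue pair NetClientState (plus one for
 * the CVQ). Device-wide state hangs off vhost_vdpa.shared and is owned
 * by the client with index 0 (see vhost_vdpa_cleanup()).
 */
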
/*
 * The array is sorted alphabetically in ascending order,
 * with the exception of VHOST_INVALID_FEATURE_BIT,
 * which should always be the last entry.
 */
const int vdpa_feature_bits[] = {
    VIRTIO_F_ANY_LAYOUT,
    VIRTIO_F_IN_ORDER,
    VIRTIO_F_IOMMU_PLATFORM,
    VIRTIO_F_NOTIFICATION_DATA,
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_F_RING_PACKED,
    VIRTIO_F_RING_RESET,
    VIRTIO_F_VERSION_1,
    VIRTIO_NET_F_CSUM,
    VIRTIO_NET_F_CTRL_GUEST_OFFLOADS,
    VIRTIO_NET_F_CTRL_MAC_ADDR,
    VIRTIO_NET_F_CTRL_RX,
    VIRTIO_NET_F_CTRL_RX_EXTRA,
    VIRTIO_NET_F_CTRL_VLAN,
    VIRTIO_NET_F_CTRL_VQ,
    VIRTIO_NET_F_GSO,
    VIRTIO_NET_F_GUEST_CSUM,
    VIRTIO_NET_F_GUEST_ECN,
    VIRTIO_NET_F_GUEST_TSO4,
    VIRTIO_NET_F_GUEST_TSO6,
    VIRTIO_NET_F_GUEST_UFO,
    VIRTIO_NET_F_GUEST_USO4,
    VIRTIO_NET_F_GUEST_USO6,
    VIRTIO_NET_F_HASH_REPORT,
    VIRTIO_NET_F_HOST_ECN,
    VIRTIO_NET_F_HOST_TSO4,
    VIRTIO_NET_F_HOST_TSO6,
    VIRTIO_NET_F_HOST_UFO,
    VIRTIO_NET_F_HOST_USO,
    VIRTIO_NET_F_MQ,
    VIRTIO_NET_F_MRG_RXBUF,
    VIRTIO_NET_F_MTU,
    VIRTIO_NET_F_RSC_EXT,
    VIRTIO_NET_F_RSS,
    VIRTIO_NET_F_STATUS,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_RING_F_INDIRECT_DESC,

    /* VHOST_INVALID_FEATURE_BIT should always be the last entry */
    VHOST_INVALID_FEATURE_BIT
};

/** Supported device-specific feature bits with SVQ */
static const uint64_t vdpa_svq_device_features =
    BIT_ULL(VIRTIO_NET_F_CSUM) |
    BIT_ULL(VIRTIO_NET_F_GUEST_CSUM) |
    BIT_ULL(VIRTIO_NET_F_CTRL_GUEST_OFFLOADS) |
    BIT_ULL(VIRTIO_NET_F_MTU) |
    BIT_ULL(VIRTIO_NET_F_MAC) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_GUEST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_GUEST_ECN) |
    BIT_ULL(VIRTIO_NET_F_GUEST_UFO) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO4) |
    BIT_ULL(VIRTIO_NET_F_HOST_TSO6) |
    BIT_ULL(VIRTIO_NET_F_HOST_ECN) |
    BIT_ULL(VIRTIO_NET_F_HOST_UFO) |
    BIT_ULL(VIRTIO_NET_F_MRG_RXBUF) |
    BIT_ULL(VIRTIO_NET_F_STATUS) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VQ) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX) |
    BIT_ULL(VIRTIO_NET_F_CTRL_VLAN) |
    BIT_ULL(VIRTIO_NET_F_CTRL_RX_EXTRA) |
    BIT_ULL(VIRTIO_NET_F_MQ) |
    BIT_ULL(VIRTIO_F_ANY_LAYOUT) |
    BIT_ULL(VIRTIO_NET_F_CTRL_MAC_ADDR) |
    /* VHOST_F_LOG_ALL is exposed by SVQ */
    BIT_ULL(VHOST_F_LOG_ALL) |
    BIT_ULL(VIRTIO_NET_F_HASH_REPORT) |
    BIT_ULL(VIRTIO_NET_F_RSS) |
    BIT_ULL(VIRTIO_NET_F_RSC_EXT) |
    BIT_ULL(VIRTIO_NET_F_STANDBY) |
    BIT_ULL(VIRTIO_NET_F_SPEED_DUPLEX);

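/*
 * ASID (address space ID) the shadowed CVQ is moved to when it can be
 * isolated. Data virtqueues stay in the default VHOST_VDPA_GUEST_PA_ASID,
 * so shadow CVQ buffers can be mapped without disturbing the guest's
 * memory maps.
 */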
#define VHOST_VDPA_NET_CVQ_ASID 1

VHostNetState *vhost_vdpa_get_vhost_net(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    return s->vhost_net;
}

static size_t vhost_vdpa_net_cvq_cmd_len(void)
{
    /*
     * MAC_TABLE_SET is the ctrl command that produces the longest out buffer.
     * The in buffer is always 1 byte, so it always fits here.
     */
    return sizeof(struct virtio_net_ctrl_hdr) +
           2 * sizeof(struct virtio_net_ctrl_mac) +
           MAC_TABLE_ENTRIES * ETH_ALEN;
}

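/*
 * A worked example, assuming the usual MAC_TABLE_ENTRIES == 64 and
 * ETH_ALEN == 6: the longest command is 2 (ctrl hdr) + 2 * 4 (two MAC
 * table headers) + 64 * 6 (MAC entries) = 394 bytes, so the page-aligned
 * buffer below is a single page on 4 KiB hosts.
 */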
static size_t vhost_vdpa_net_cvq_cmd_page_len(void)
{
    return ROUND_UP(vhost_vdpa_net_cvq_cmd_len(), qemu_real_host_page_size());
}

static bool vhost_vdpa_net_valid_svq_features(uint64_t features, Error **errp)
{
    uint64_t invalid_dev_features =
        features & ~vdpa_svq_device_features &
        /* Transport features are all accepted at this point */
        ~MAKE_64BIT_MASK(VIRTIO_TRANSPORT_F_START,
                         VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START);

    if (invalid_dev_features) {
        error_setg(errp, "vdpa svq does not work with features 0x%" PRIx64,
                   invalid_dev_features);
        return false;
    }

    return vhost_svq_valid_features(features, errp);
}

static int vhost_vdpa_net_check_device_id(struct vhost_net *net)
{
    uint32_t device_id;
    int ret;
    struct vhost_dev *hdev;

    hdev = (struct vhost_dev *)&net->dev;
    ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id);
    if (ret < 0) {
        /* Don't inspect device_id if the ioctl failed: it is uninitialized */
        return ret;
    }
    if (device_id != VIRTIO_ID_NET) {
        return -ENOTSUP;
    }
    return ret;
}

static int vhost_vdpa_add(NetClientState *ncs, void *be,
                          int queue_pair_index, int nvqs)
{
    VhostNetOptions options;
    struct vhost_net *net = NULL;
    VhostVDPAState *s;
    int ret;

    options.backend_type = VHOST_BACKEND_TYPE_VDPA;
    assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    s = DO_UPCAST(VhostVDPAState, nc, ncs);
    options.net_backend = ncs;
    options.opaque      = be;
    options.busyloop_timeout = 0;
    options.nvqs = nvqs;

    net = vhost_net_init(&options);
    if (!net) {
        error_report("failed to init vhost_net for queue");
        goto err_init;
    }
    s->vhost_net = net;
    ret = vhost_vdpa_net_check_device_id(net);
    if (ret) {
        goto err_check;
    }
    return 0;
err_check:
    vhost_net_cleanup(net);
    g_free(net);
err_init:
    return -1;
}

static void vhost_vdpa_cleanup(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
    if (s->vhost_net) {
        vhost_net_cleanup(s->vhost_net);
        g_free(s->vhost_net);
        s->vhost_net = NULL;
    }
    /* The shared state is owned by the client of the first queue pair */
    if (s->vhost_vdpa.index != 0) {
        return;
    }
    qemu_close(s->vhost_vdpa.shared->device_fd);
    g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete);
    g_free(s->vhost_vdpa.shared);
}

/** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
static bool vhost_vdpa_set_steering_ebpf(NetClientState *nc, int prog_fd)
{
    return true;
}

static bool vhost_vdpa_has_vnet_hdr(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    return true;
}

static bool vhost_vdpa_has_ufo(NetClientState *nc)
{
    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    uint64_t features = 0;
    features |= (1ULL << VIRTIO_NET_F_HOST_UFO);
    features = vhost_net_get_features(s->vhost_net, features);
    return !!(features & (1ULL << VIRTIO_NET_F_HOST_UFO));
}

/*
 * FIXME: vhost_vdpa doesn't have an API to "set h/w endianness". But it's
 * reasonable to assume that h/w is LE by default, because LE is what
 * virtio 1.0 and later ask for. So, this function just says "yes, the h/w is
 * LE". Otherwise, on a BE machine, higher-level code would mistakenly think
 * the h/w is BE and can't support VDPA for a virtio 1.0 client.
 */
static int vhost_vdpa_set_vnet_le(NetClientState *nc, bool enable)
{
    return 0;
}

static bool vhost_vdpa_check_peer_type(NetClientState *nc, ObjectClass *oc,
                                       Error **errp)
{
    const char *driver = object_class_get_name(oc);

    if (!g_str_has_prefix(driver, "virtio-net-")) {
        error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*");
        return false;
    }

    return true;
}

/** Dummy receive in case qemu falls back to userland tap networking */
static ssize_t vhost_vdpa_receive(NetClientState *nc, const uint8_t *buf,
                                  size_t size)
{
    return size;
}

/** From any vdpa net client, get the netclient of the i-th queue pair */
static VhostVDPAState *vhost_vdpa_net_get_nc_vdpa(VhostVDPAState *s, int i)
{
    NICState *nic = qemu_get_nic(s->nc.peer);
    NetClientState *nc_i = qemu_get_peer(nic->ncs, i);

    return DO_UPCAST(VhostVDPAState, nc, nc_i);
}

static VhostVDPAState *vhost_vdpa_net_first_nc_vdpa(VhostVDPAState *s)
{
    return vhost_vdpa_net_get_nc_vdpa(s, 0);
}

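/*
 * Toggle shadow virtqueues on all data queues by restarting vhost-net.
 * SVQ is what lets QEMU expose VHOST_F_LOG_ALL (dirty page tracking) to
 * migration on vdpa devices that don't offer it natively.
 */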
static void vhost_vdpa_net_log_global_enable(VhostVDPAState *s, bool enable)
{
    struct vhost_vdpa *v = &s->vhost_vdpa;
    VirtIONet *n;
    VirtIODevice *vdev;
    int data_queue_pairs, cvq, r;

    /* We are only called on the first data vq and only if x-svq is not set */
    if (s->vhost_vdpa.shadow_vqs_enabled == enable) {
        return;
    }

    vdev = v->dev->vdev;
    n = VIRTIO_NET(vdev);
    if (!n->vhost_started) {
        return;
    }

    data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1;
    cvq = virtio_vdev_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) ?
                                  n->max_ncs - n->max_queue_pairs : 0;
    v->shared->svq_switching = enable ?
        SVQ_TSTATE_ENABLING : SVQ_TSTATE_DISABLING;
    /*
     * TODO: vhost_net_stop does suspend, get_base and reset. We can be smarter
     * in the future and resume the device if read-only operations between
     * suspend and reset go wrong.
     */
    vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq);

    /* Start will check migration setup_or_active to decide whether to use SVQ */
    r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq);
    if (unlikely(r < 0)) {
        error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r);
    }
    v->shared->svq_switching = SVQ_TSTATE_DONE;
}

static int vdpa_net_migration_state_notifier(NotifierWithReturn *notifier,
                                             MigrationEvent *e, Error **errp)
{
    VhostVDPAState *s = container_of(notifier, VhostVDPAState, migration_state);

    if (e->type == MIG_EVENT_PRECOPY_SETUP) {
        vhost_vdpa_net_log_global_enable(s, true);
    } else if (e->type == MIG_EVENT_PRECOPY_FAILED) {
        vhost_vdpa_net_log_global_enable(s, false);
    }
    return 0;
}

static void vhost_vdpa_net_data_start_first(VhostVDPAState *s)
{
    migration_add_notifier(&s->migration_state,
                           vdpa_net_migration_state_notifier);
}

static int vhost_vdpa_net_data_start(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->always_svq || migration_is_running()) {
        v->shadow_vqs_enabled = true;
    } else {
        v->shadow_vqs_enabled = false;
    }

    if (v->index == 0) {
        v->shared->shadow_data = v->shadow_vqs_enabled;
        vhost_vdpa_net_data_start_first(s);
    }

    return 0;
}

static int vhost_vdpa_net_data_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    /* An odd vq_index_end means the device has a control virtqueue */
    bool has_cvq = v->dev->vq_index_end % 2;

    if (has_cvq) {
        /* The CVQ client enables all vrings, in order, in its load op */
        return 0;
    }

    for (int i = 0; i < v->dev->nvqs; ++i) {
        int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index);
        if (ret < 0) {
            return ret;
        }
    }
    return 0;
}

static void vhost_vdpa_net_client_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.index == 0) {
        migration_remove_notifier(&s->migration_state);
    }
}

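/* NetClient callbacks for the data virtqueue clients */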
static NetClientInfo net_vhost_vdpa_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_data_start,
    .load = vhost_vdpa_net_data_load,
    .stop = vhost_vdpa_net_client_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .set_vnet_le = vhost_vdpa_set_vnet_le,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

static int64_t vhost_vdpa_get_vring_group(int device_fd, unsigned vq_index,
                                          Error **errp)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };
    int r = ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state);

    if (unlikely(r < 0)) {
        r = -errno;
        error_setg_errno(errp, errno, "Cannot get VQ %u group", vq_index);
        return r;
    }

    return state.num;
}

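/*
 * Bind the virtqueue group @vq_group to @asid_num, so DMA mappings made
 * in that ASID apply only to its virtqueues. This is what lets the CVQ be
 * shadowed while the data virtqueues keep the guest's own mappings.
 */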
static int vhost_vdpa_set_address_space_id(struct vhost_vdpa *v,
                                           unsigned vq_group,
                                           unsigned asid_num)
{
    struct vhost_vring_state asid = {
        .index = vq_group,
        .num = asid_num,
    };
    int r;

    trace_vhost_vdpa_set_address_space_id(v, vq_group, asid_num);

    r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid);
    if (unlikely(r < 0)) {
        error_report("Can't set vq group %u asid %u, errno=%d (%s)",
                     asid.index, asid.num, errno, g_strerror(errno));
    }
    return r;
}

static void vhost_vdpa_cvq_unmap_buf(struct vhost_vdpa *v, void *addr)
{
    VhostIOVATree *tree = v->shared->iova_tree;
    DMAMap needle = {
        /*
         * No need to specify size or to look for more translations since
         * this contiguous chunk was allocated by us.
         */
        .translated_addr = (hwaddr)(uintptr_t)addr,
    };
    const DMAMap *map = vhost_iova_tree_find_iova(tree, &needle);
    int r;

    if (unlikely(!map)) {
        error_report("Cannot locate expected map");
        return;
    }

    r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova,
                             map->size + 1);
    if (unlikely(r != 0)) {
        error_report("Device cannot unmap: %s(%d)", g_strerror(r), r);
    }

    vhost_iova_tree_remove(tree, *map);
}

/** Map CVQ buffer. */
static int vhost_vdpa_cvq_map_buf(struct vhost_vdpa *v, void *buf, size_t size,
                                  bool write)
{
    DMAMap map = {};
    hwaddr taddr = (hwaddr)(uintptr_t)buf;
    int r;

    map.size = size - 1;
    map.perm = write ? IOMMU_RW : IOMMU_RO;
    r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr);
    if (unlikely(r != IOVA_OK)) {
        error_report("Cannot map injected element");

        if (map.translated_addr == taddr) {
            error_report("Insertion to IOVA->HVA tree failed");
            /* Remove the mapping from the IOVA-only tree */
            goto dma_map_err;
        }
        return r;
    }

    r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova,
                           vhost_vdpa_net_cvq_cmd_page_len(), buf, !write);
    if (unlikely(r < 0)) {
        goto dma_map_err;
    }

    return 0;

dma_map_err:
    vhost_iova_tree_remove(v->shared->iova_tree, map);
    return r;
}

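/*
 * "start" op of the CVQ client: probe whether the CVQ can be shadowed
 * (valid SVQ features and a CVQ isolated in its own group), move it to
 * its own ASID if so, and map the shadow command/status buffers into
 * the device.
 */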
static int vhost_vdpa_net_cvq_start(NetClientState *nc)
{
    VhostVDPAState *s, *s0;
    struct vhost_vdpa *v;
    int64_t cvq_group;
    int r;
    Error *err = NULL;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    s = DO_UPCAST(VhostVDPAState, nc, nc);
    v = &s->vhost_vdpa;

    s0 = vhost_vdpa_net_first_nc_vdpa(s);
    v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID;

    if (v->shared->shadow_data) {
        /* SVQ is already configured for all virtqueues */
        goto out;
    }

    /*
     * If we return early in these cases, SVQ will not be enabled. Migration
     * will be blocked as long as the vhost-vdpa backend does not offer _F_LOG.
     */
    if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) {
        return 0;
    }

    if (!s->cvq_isolated) {
        return 0;
    }

    cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd,
                                           v->dev->vq_index_end - 1,
                                           &err);
    if (unlikely(cvq_group < 0)) {
        error_report_err(err);
        return cvq_group;
    }

    r = vhost_vdpa_set_address_space_id(v, cvq_group, VHOST_VDPA_NET_CVQ_ASID);
    if (unlikely(r < 0)) {
        return r;
    }

    v->shadow_vqs_enabled = true;
    s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID;

out:
    if (!s->vhost_vdpa.shadow_vqs_enabled) {
        return 0;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer,
                               vhost_vdpa_net_cvq_cmd_page_len(), false);
    if (unlikely(r < 0)) {
        return r;
    }

    r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status,
                               vhost_vdpa_net_cvq_cmd_page_len(), true);
    if (unlikely(r < 0)) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
    }

    return r;
}

static void vhost_vdpa_net_cvq_stop(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    if (s->vhost_vdpa.shadow_vqs_enabled) {
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer);
        vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status);
    }

    vhost_vdpa_net_client_stop(nc);
}

static ssize_t vhost_vdpa_net_cvq_add(VhostVDPAState *s,
                                    const struct iovec *out_sg, size_t out_num,
                                    const struct iovec *in_sg, size_t in_num)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    int r;

    r = vhost_svq_add(svq, out_sg, out_num, NULL, in_sg, in_num, NULL, NULL);
    if (unlikely(r != 0)) {
        if (unlikely(r == -ENOSPC)) {
            qemu_log_mask(LOG_GUEST_ERROR, "%s: No space on device queue\n",
                          __func__);
        }
    }

    return r;
}

/*
 * Convenience wrapper to poll SVQ for multiple control commands.
 *
 * Caller should hold the BQL when invoking this function, and should
 * consume the answer before the BQL is released, at which point SVQ may
 * start polling by itself.
 */
static ssize_t vhost_vdpa_net_svq_poll(VhostVDPAState *s, size_t cmds_in_flight)
{
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);
    return vhost_svq_poll(svq, cmds_in_flight);
}

static void vhost_vdpa_net_load_cursor_reset(VhostVDPAState *s,
                                             struct iovec *out_cursor,
                                             struct iovec *in_cursor)
{
    /* reset the cursor of the output buffer for the device */
    out_cursor->iov_base = s->cvq_cmd_out_buffer;
    out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();

    /* reset the cursor of the in buffer for the device */
    in_cursor->iov_base = s->status;
    in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len();
}

/*
 * Poll SVQ for multiple pending control commands and check the device's ack.
 *
 * Caller should hold the BQL when invoking this function.
 *
 * @s: The VhostVDPAState
 * @len: The length of the pending status shadow buffer
 */
static ssize_t vhost_vdpa_net_svq_flush(VhostVDPAState *s, size_t len)
{
    /* the device uses a one-byte ack for each control command */
    ssize_t dev_written = vhost_vdpa_net_svq_poll(s, len);
    if (unlikely(dev_written != len)) {
        return -EIO;
    }

    /* check the device's ack */
    for (int i = 0; i < len; ++i) {
        if (s->status[i] != VIRTIO_NET_OK) {
            return -EIO;
        }
    }
    return 0;
}

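/*
 * Queue one control command into the shadow buffers without waiting for
 * its completion. Commands are laid out back to back behind @out_cursor
 * and @in_cursor; they are only flushed (and their acks checked) when the
 * SVQ or the shadow buffers run out of room, or at the end of the load
 * phase in vhost_vdpa_net_cvq_load().
 */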
static ssize_t vhost_vdpa_net_load_cmd(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor, uint8_t class,
                                       uint8_t cmd, const struct iovec *data_sg,
                                       size_t data_num)
{
    const struct virtio_net_ctrl_hdr ctrl = {
        .class = class,
        .cmd = cmd,
    };
    size_t data_size = iov_size(data_sg, data_num), cmd_size;
    struct iovec out, in;
    ssize_t r;
    unsigned dummy_cursor_iov_cnt;
    VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0);

    assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl));
    cmd_size = sizeof(ctrl) + data_size;
    trace_vhost_vdpa_net_load_cmd(s, class, cmd, data_num, data_size);
    if (vhost_svq_available_slots(svq) < 2 ||
        iov_size(out_cursor, 1) < cmd_size) {
        /*
         * It is time to flush all pending control commands if SVQ is full
         * or control commands shadow buffers are full.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base -
                                     (void *)s->status);
        if (unlikely(r < 0)) {
            return r;
        }

        vhost_vdpa_net_load_cursor_reset(s, out_cursor, in_cursor);
    }

    /* pack the CVQ command header */
    iov_from_buf(out_cursor, 1, 0, &ctrl, sizeof(ctrl));
    /* pack the CVQ command's command-specific data */
    iov_to_buf(data_sg, data_num, 0,
               out_cursor->iov_base + sizeof(ctrl), data_size);

    /* extract the required buffer from the cursor for output */
    iov_copy(&out, 1, out_cursor, 1, 0, cmd_size);
    /* extract the required buffer from the cursor for input */
    iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status));

    r = vhost_vdpa_net_cvq_add(s, &out, 1, &in, 1);
    if (unlikely(r < 0)) {
        trace_vhost_vdpa_net_load_cmd_retval(s, class, cmd, r);
        return r;
    }

    /* advance the cursors past this command */
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&out_cursor, &dummy_cursor_iov_cnt, cmd_size);
    dummy_cursor_iov_cnt = 1;
    iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status));

    return 0;
}

static int vhost_vdpa_net_load_mac(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor)
{
    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) {
        const struct iovec data = {
            .iov_base = (void *)n->mac,
            .iov_len = sizeof(n->mac),
        };
        ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                            VIRTIO_NET_CTRL_MAC,
                                            VIRTIO_NET_CTRL_MAC_ADDR_SET,
                                            &data, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to the VirtIO standard, "The device MUST have an
     * empty MAC filtering table on reset.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver also sets an empty MAC filter table, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) ||
        n->mac_table.in_use == 0) {
        return 0;
    }

    uint32_t uni_entries = n->mac_table.first_multi,
             uni_macs_size = uni_entries * ETH_ALEN,
             mul_entries = n->mac_table.in_use - uni_entries,
             mul_macs_size = mul_entries * ETH_ALEN;
    struct virtio_net_ctrl_mac uni = {
        .entries = cpu_to_le32(uni_entries),
    };
    struct virtio_net_ctrl_mac mul = {
        .entries = cpu_to_le32(mul_entries),
    };
    const struct iovec data[] = {
        {
            .iov_base = &uni,
            .iov_len = sizeof(uni),
        }, {
            .iov_base = n->mac_table.macs,
            .iov_len = uni_macs_size,
        }, {
            .iov_base = &mul,
            .iov_len = sizeof(mul),
        }, {
            .iov_base = &n->mac_table.macs[uni_macs_size],
            .iov_len = mul_macs_size,
        },
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_MAC,
                                        VIRTIO_NET_CTRL_MAC_TABLE_SET,
                                        data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rss(VhostVDPAState *s, const VirtIONet *n,
                                   struct iovec *out_cursor,
                                   struct iovec *in_cursor, bool do_rss)
{
    struct virtio_net_rss_config cfg = {};
    ssize_t r;
    g_autofree uint16_t *table = NULL;

    /*
     * According to the VirtIO standard, "Initially the device has all hash
     * types disabled and reports only VIRTIO_NET_HASH_REPORT_NONE.".
     *
     * Therefore, there is no need to send this CVQ command if the
     * driver disables all hash types, which aligns with
     * the device's defaults.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->rss_data.enabled ||
        n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) {
        return 0;
    }

    table = g_malloc_n(n->rss_data.indirections_len,
                       sizeof(n->rss_data.indirections_table[0]));
    cfg.hash_types = cpu_to_le32(n->rss_data.hash_types);

    if (do_rss) {
        /*
         * According to the VirtIO standard, "Number of entries in
         * indirection_table is (indirection_table_mask + 1)".
         */
        cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len -
                                                 1);
        cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue);
        for (int i = 0; i < n->rss_data.indirections_len; ++i) {
            table[i] = cpu_to_le16(n->rss_data.indirections_table[i]);
        }
        cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs);
    } else {
        /*
         * According to the VirtIO standard, "Field reserved MUST contain
         * zeroes. It is defined to make the structure to match the layout
         * of virtio_net_rss_config structure, defined in 5.1.6.5.7.".
         *
         * Therefore, we need to zero the fields in
         * struct virtio_net_rss_config, which correspond to the
         * `reserved` field in struct virtio_net_hash_config.
         *
         * Note that all other fields are zeroed at their definitions,
         * except for the `indirection_table` field, where the actual data
         * is stored in the `table` variable to ensure compatibility
         * with the RSS case. Therefore, we need to zero the `table`
         * variable here.
         */
        table[0] = 0;
    }

    /*
     * Considering that virtio_net_handle_rss() currently does not restore
     * the hash key length parsed from the CVQ command sent from the guest
     * into n->rss_data and uses the maximum key length in other code, we
     * also employ the maximum key length here.
     */
    cfg.hash_key_length = sizeof(n->rss_data.key);

    const struct iovec data[] = {
        {
            .iov_base = &cfg,
            .iov_len = offsetof(struct virtio_net_rss_config,
                                indirection_table),
        }, {
            .iov_base = table,
            .iov_len = n->rss_data.indirections_len *
                       sizeof(n->rss_data.indirections_table[0]),
        }, {
            .iov_base = &cfg.max_tx_vq,
            .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) -
                       offsetof(struct virtio_net_rss_config, max_tx_vq),
        }, {
            .iov_base = (void *)n->rss_data.key,
            .iov_len = sizeof(n->rss_data.key),
        }
    };

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                do_rss ? VIRTIO_NET_CTRL_MQ_RSS_CONFIG :
                                VIRTIO_NET_CTRL_MQ_HASH_CONFIG,
                                data, ARRAY_SIZE(data));
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_mq(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    struct virtio_net_ctrl_mq mq;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) {
        return 0;
    }

    trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs);

    mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs);
    const struct iovec data = {
        .iov_base = &mq,
        .iov_len = sizeof(mq),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_MQ,
                                VIRTIO_NET_CTRL_MQ_VQ_PAIRS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) {
        /* load the receive-side scaling state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, true);
        if (unlikely(r < 0)) {
            return r;
        }
    } else if (virtio_vdev_has_feature(&n->parent_obj,
                                       VIRTIO_NET_F_HASH_REPORT)) {
        /* load the hash calculation state */
        r = vhost_vdpa_net_load_rss(s, n, out_cursor, in_cursor, false);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_offloads(VhostVDPAState *s,
                                        const VirtIONet *n,
                                        struct iovec *out_cursor,
                                        struct iovec *in_cursor)
{
    uint64_t offloads;
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj,
                                 VIRTIO_NET_F_CTRL_GUEST_OFFLOADS)) {
        return 0;
    }

    if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) {
        /*
         * According to the VirtIO standard, "Upon feature negotiation
         * corresponding offload gets enabled to preserve
         * backward compatibility.".
         *
         * Therefore, there is no need to send this CVQ command if the
         * driver also enables all supported offloads, which aligns with
         * the device's defaults.
         *
         * Note that the device's defaults can mismatch the driver's
         * configuration only at live migration.
         */
        return 0;
    }

    offloads = cpu_to_le64(n->curr_guest_offloads);
    const struct iovec data = {
        .iov_base = &offloads,
        .iov_len = sizeof(offloads),
    };
    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS,
                                VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET,
                                &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx_mode(VhostVDPAState *s,
                                       struct iovec *out_cursor,
                                       struct iovec *in_cursor,
                                       uint8_t cmd,
                                       uint8_t on)
{
    const struct iovec data = {
        .iov_base = &on,
        .iov_len = sizeof(on),
    };
    ssize_t r;

    r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                VIRTIO_NET_CTRL_RX, cmd, &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_rx(VhostVDPAState *s,
                                  const VirtIONet *n,
                                  struct iovec *out_cursor,
                                  struct iovec *in_cursor)
{
    ssize_t r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns promiscuous mode
     * on by default.
     *
     * Additionally, according to the VirtIO standard, "Since there are
     * no guarantees, it can use a hash filter or silently switch to
     * allmulti or promiscuous mode if it is given too many addresses.".
     * QEMU marks `n->mac_table.uni_overflow` if the guest sets too many
     * non-multicast MAC addresses, indicating that promiscuous mode
     * should be enabled.
     *
     * Therefore, QEMU should only send this CVQ command (turning
     * promiscuous mode off) if `n->mac_table.uni_overflow` is not marked
     * and `n->promisc` is off, which differs from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (!n->mac_table.uni_overflow && !n->promisc) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_PROMISC, 0);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns all-multicast mode
     * off by default.
     *
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.". QEMU marks
     * `n->mac_table.multi_overflow` if the guest sets too many
     * multicast MAC addresses.
     *
     * Therefore, QEMU should only send this CVQ command (turning
     * all-multicast mode on) if `n->mac_table.multi_overflow` is marked
     * or `n->allmulti` is on, which differs from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->mac_table.multi_overflow || n->allmulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLMULTI, 1);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) {
        return 0;
    }

    /*
     * According to virtio_net_reset(), the device turns all-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets all-unicast mode on, which differs from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->alluni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_ALLUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-multicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-multicast mode on, which differs from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nomulti) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOMULTI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-unicast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-unicast mode on, which differs from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nouni) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOUNI, 1);
        if (r < 0) {
            return r;
        }
    }

    /*
     * According to virtio_net_reset(), the device turns non-broadcast mode
     * off by default.
     *
     * Therefore, QEMU should only send this CVQ command if the driver
     * sets non-broadcast mode on, which differs from the device's default.
     *
     * Note that the device's defaults can mismatch the driver's
     * configuration only at live migration.
     */
    if (n->nobcast) {
        r = vhost_vdpa_net_load_rx_mode(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_RX_NOBCAST, 1);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

static int vhost_vdpa_net_load_single_vlan(VhostVDPAState *s,
                                           const VirtIONet *n,
                                           struct iovec *out_cursor,
                                           struct iovec *in_cursor,
                                           uint16_t vid)
{
    const struct iovec data = {
        .iov_base = &vid,
        .iov_len = sizeof(vid),
    };
    ssize_t r = vhost_vdpa_net_load_cmd(s, out_cursor, in_cursor,
                                        VIRTIO_NET_CTRL_VLAN,
                                        VIRTIO_NET_CTRL_VLAN_ADD,
                                        &data, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    return 0;
}

static int vhost_vdpa_net_load_vlan(VhostVDPAState *s,
                                    const VirtIONet *n,
                                    struct iovec *out_cursor,
                                    struct iovec *in_cursor)
{
    int r;

    if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) {
        return 0;
    }

    /* Replay every VLAN id set in the n->vlans bitmap, 32 ids per word */
    for (int i = 0; i < MAX_VLAN >> 5; i++) {
        for (int j = 0; n->vlans[i] && j <= 0x1f; j++) {
            if (n->vlans[i] & (1U << j)) {
                r = vhost_vdpa_net_load_single_vlan(s, n, out_cursor,
                                                    in_cursor, (i << 5) + j);
                if (unlikely(r != 0)) {
                    return r;
                }
            }
        }
    }

    return 0;
}

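/*
 * "load" op of the CVQ client: replay the device model's state (MAC, MQ,
 * offloads, RX filters, VLANs) into the vdpa device through shadow CVQ
 * commands, then enable the data virtqueues.
 */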
static int vhost_vdpa_net_cvq_load(NetClientState *nc)
{
    VhostVDPAState *s = DO_UPCAST(VhostVDPAState, nc, nc);
    struct vhost_vdpa *v = &s->vhost_vdpa;
    const VirtIONet *n;
    int r;
    struct iovec out_cursor, in_cursor;

    assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA);

    r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index);
    if (unlikely(r < 0)) {
        return r;
    }

    if (v->shadow_vqs_enabled) {
        n = VIRTIO_NET(v->dev->vdev);
        vhost_vdpa_net_load_cursor_reset(s, &out_cursor, &in_cursor);
        r = vhost_vdpa_net_load_mac(s, n, &out_cursor, &in_cursor);
        if (unlikely(r < 0)) {
            return r;
        }
        r = vhost_vdpa_net_load_mq(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_offloads(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_rx(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }
        r = vhost_vdpa_net_load_vlan(s, n, &out_cursor, &in_cursor);
        if (unlikely(r)) {
            return r;
        }

        /*
         * We need to poll and check all of the device's pending used
         * buffers.
         *
         * We can poll here since we've had BQL from the time
         * we sent the descriptor.
         */
        r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status);
        if (unlikely(r)) {
            return r;
        }
    }

    /* CVQ is ready and loaded: now enable all the data virtqueues */
    for (int i = 0; i < v->dev->vq_index; ++i) {
        r = vhost_vdpa_set_vring_ready(v, i);
        if (unlikely(r < 0)) {
            return r;
        }
    }

    return 0;
}

static NetClientInfo net_vhost_vdpa_cvq_info = {
    .type = NET_CLIENT_DRIVER_VHOST_VDPA,
    .size = sizeof(VhostVDPAState),
    .receive = vhost_vdpa_receive,
    .start = vhost_vdpa_net_cvq_start,
    .load = vhost_vdpa_net_cvq_load,
    .stop = vhost_vdpa_net_cvq_stop,
    .cleanup = vhost_vdpa_cleanup,
    .has_vnet_hdr = vhost_vdpa_has_vnet_hdr,
    .has_ufo = vhost_vdpa_has_ufo,
    .check_peer_type = vhost_vdpa_check_peer_type,
    .set_steering_ebpf = vhost_vdpa_set_steering_ebpf,
};

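/*
 * For reference, the VIRTIO_NET_CTRL_MAC_TABLE_SET payload parsed below
 * consists of two back-to-back MAC tables (counts are little-endian):
 *
 *   struct virtio_net_ctrl_hdr hdr;          class/cmd
 *   le32 entries;                            N unicast MACs
 *   u8 uni_macs[N][ETH_ALEN];
 *   le32 entries;                            M multicast MACs
 *   u8 mul_macs[M][ETH_ALEN];
 */
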
/*
 * Forward the excessive VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command to
 * the vdpa device.
 *
 * Considering that QEMU cannot send the entire filter table to the
 * vdpa device, it should send the VIRTIO_NET_CTRL_RX_PROMISC CVQ
 * command to enable promiscuous mode to receive all packets,
 * according to the VirtIO standard, "Since there are no guarantees,
 * it can use a hash filter or silently switch to allmulti or
 * promiscuous mode if it is given too many addresses.".
 *
 * Since QEMU ignores MAC addresses beyond `MAC_TABLE_ENTRIES` and
 * marks `n->mac_table.x_overflow` accordingly, it should have
 * the same effect on the device model to receive
 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
 * The same applies to multicast MAC addresses.
 *
 * Therefore, QEMU can provide the device model with a fake
 * VIRTIO_NET_CTRL_MAC_TABLE_SET command with (`MAC_TABLE_ENTRIES` + 1)
 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
 * MAC addresses. This ensures that the device model marks
 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
 * allowing all packets to be received, which aligns with the
 * state of the vdpa device.
 */
static int vhost_vdpa_net_excessive_mac_filter_cvq_add(VhostVDPAState *s,
                                                       VirtQueueElement *elem,
                                                       struct iovec *out,
                                                       const struct iovec *in)
{
    struct virtio_net_ctrl_mac mac_data, *mac_ptr;
    struct virtio_net_ctrl_hdr *hdr_ptr;
    uint32_t cursor;
    ssize_t r;
    uint8_t on = 1;

    /* parse the non-multicast MAC address entries from the CVQ command */
    cursor = sizeof(*hdr_ptr);
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (unlikely(r != sizeof(mac_data))) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* parse the multicast MAC address entries from the CVQ command */
    r = iov_to_buf(elem->out_sg, elem->out_num, cursor,
                   &mac_data, sizeof(mac_data));
    if (r != sizeof(mac_data)) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }
    cursor += sizeof(mac_data) + le32_to_cpu(mac_data.entries) * ETH_ALEN;

    /* validate the CVQ command */
    if (iov_size(elem->out_sg, elem->out_num) != cursor) {
        /*
         * If the CVQ command is invalid, we should simulate the vdpa device
         * rejecting the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
         */
        *s->status = VIRTIO_NET_ERR;
        return sizeof(*s->status);
    }

    /*
     * According to the VirtIO standard, "Since there are no guarantees,
     * it can use a hash filter or silently switch to allmulti or
     * promiscuous mode if it is given too many addresses.".
     *
     * Therefore, considering that QEMU is unable to send the entire
     * filter table to the vdpa device, it should send the
     * VIRTIO_NET_CTRL_RX_PROMISC CVQ command to enable promiscuous mode
     */
    hdr_ptr = out->iov_base;
    out->iov_len = sizeof(*hdr_ptr) + sizeof(on);

    hdr_ptr->class = VIRTIO_NET_CTRL_RX;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC;
    iov_from_buf(out, 1, sizeof(*hdr_ptr), &on, sizeof(on));
    r = vhost_vdpa_net_cvq_add(s, out, 1, in, 1);
    if (unlikely(r < 0)) {
        return r;
    }

    /*
     * We can poll here since we've had BQL from the time
     * we sent the descriptor.
     */
    r = vhost_vdpa_net_svq_poll(s, 1);
    if (unlikely(r < sizeof(*s->status))) {
        return r;
    }
    if (*s->status != VIRTIO_NET_OK) {
        return sizeof(*s->status);
    }

    /*
     * QEMU should also send a fake VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ
     * command to the device model, including (`MAC_TABLE_ENTRIES` + 1)
     * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1)
     * multicast MAC addresses.
     *
     * By doing so, the device model can mark `n->mac_table.uni_overflow`
     * and `n->mac_table.multi_overflow`, enabling all packets to be
     * received, which aligns with the state of the vdpa device.
     */
    cursor = 0;
    uint32_t fake_uni_entries = MAC_TABLE_ENTRIES + 1,
             fake_mul_entries = MAC_TABLE_ENTRIES + 1,
             fake_cvq_size = sizeof(struct virtio_net_ctrl_hdr) +
                             sizeof(mac_data) + fake_uni_entries * ETH_ALEN +
                             sizeof(mac_data) + fake_mul_entries * ETH_ALEN;

    assert(fake_cvq_size < vhost_vdpa_net_cvq_cmd_page_len());
    out->iov_len = fake_cvq_size;

    /* pack the header for the fake CVQ command */
    hdr_ptr = out->iov_base + cursor;
    hdr_ptr->class = VIRTIO_NET_CTRL_MAC;
    hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET;
    cursor += sizeof(*hdr_ptr);

    /*
     * Pack the non-multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_uni_entries);
    cursor += sizeof(*mac_ptr) + fake_uni_entries * ETH_ALEN;

    /*
     * Pack the multicast MAC addresses part for the fake CVQ command.
     *
     * According to virtio_net_handle_mac(), QEMU doesn't verify the MAC
     * addresses provided in the CVQ command. Therefore, only the entries
     * field needs to be prepared in the CVQ command.
     */
    mac_ptr = out->iov_base + cursor;
    mac_ptr->entries = cpu_to_le32(fake_mul_entries);

    /*
     * Simulate QEMU polling a vdpa device used buffer
     * for the VIRTIO_NET_CTRL_MAC_TABLE_SET CVQ command
     */
    return sizeof(*s->status);
}

1451 /**
1452  * Validate and copy control virtqueue commands.
1453  *
1454  * Following QEMU guidelines, we offer a copy of the buffers to the device to
1455  * prevent TOCTOU bugs.
1456  */
1457 static int vhost_vdpa_net_handle_ctrl_avail(VhostShadowVirtqueue *svq,
1458                                             VirtQueueElement *elem,
1459                                             void *opaque)
1460 {
1461     VhostVDPAState *s = opaque;
1462     size_t in_len;
1463     const struct virtio_net_ctrl_hdr *ctrl;
1464     virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
1465     /* Out buffer sent to both the vdpa device and the device model */
1466     struct iovec out = {
1467         .iov_base = s->cvq_cmd_out_buffer,
1468     };
1469     /* in buffer used for device model */
1470     const struct iovec model_in = {
1471         .iov_base = &status,
1472         .iov_len = sizeof(status),
1473     };
1474     /* in buffer used for vdpa device */
1475     const struct iovec vdpa_in = {
1476         .iov_base = s->status,
1477         .iov_len = sizeof(*s->status),
1478     };
1479     ssize_t dev_written = -EINVAL;
1480 
1481     out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0,
1482                              s->cvq_cmd_out_buffer,
1483                              vhost_vdpa_net_cvq_cmd_page_len());
1484 
1485     ctrl = s->cvq_cmd_out_buffer;
1486     if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) {
1487         /*
1488          * The guest announce capability is emulated by QEMU, so don't
1489          * forward it to the device.
1490          */
1491         dev_written = sizeof(status);
1492         *s->status = VIRTIO_NET_OK;
1493     } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC &&
1494                         ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET &&
1495                         iov_size(elem->out_sg, elem->out_num) > out.iov_len)) {
1496         /*
1497          * Due to the size limitation of the out buffer sent to the vdpa device,
1498          * which is determined by vhost_vdpa_net_cvq_cmd_page_len(), excessive
1499          * MAC addresses set by the driver for the filter table can cause
1500          * truncation of the CVQ command in QEMU. As a result, the vdpa device
1501          * rejects the flawed CVQ command.
1502          *
1503          * Therefore, QEMU must handle this situation instead of sending
1504          * the CVQ command directly.
1505          */
1506         dev_written = vhost_vdpa_net_excessive_mac_filter_cvq_add(s, elem,
1507                                                             &out, &vdpa_in);
1508         if (unlikely(dev_written < 0)) {
1509             goto out;
1510         }
1511     } else {
1512         ssize_t r;
1513         r = vhost_vdpa_net_cvq_add(s, &out, 1, &vdpa_in, 1);
1514         if (unlikely(r < 0)) {
1515             dev_written = r;
1516             goto out;
1517         }
1518 
1519         /*
1520          * We can poll here since we have held the BQL from the time we
1521          * sent the descriptor.
1522          */
1523         dev_written = vhost_vdpa_net_svq_poll(s, 1);
1524     }
1525 
1526     if (unlikely(dev_written < sizeof(status))) {
1527         error_report("Insufficient written data (%zd)", dev_written);
1528         goto out;
1529     }
1530 
1531     if (*s->status != VIRTIO_NET_OK) {
1532         goto out;
1533     }
1534 
1535     status = VIRTIO_NET_ERR;
1536     virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1);
1537     if (status != VIRTIO_NET_OK) {
1538         error_report("Bad CVQ processing in model");
1539     }
1540 
1541 out:
1542     in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status,
1543                           sizeof(status));
1544     if (unlikely(in_len < sizeof(status))) {
1545         error_report("Bad device CVQ written length");
1546     }
1547     vhost_svq_push_elem(svq, elem, MIN(in_len, sizeof(status)));
1548     /*
1549      * `elem` belongs to vhost_vdpa_net_handle_ctrl_avail() only when
1550      * the function successfully forwards the CVQ command, indicated
1551      * by a non-negative value of `dev_written`. Otherwise, it still
1552      * belongs to SVQ.
1553      * This function should free `elem` only when it owns it.
1554      */
1555     if (dev_written >= 0) {
1556         g_free(elem);
1557     }
1558     return dev_written < 0 ? dev_written : 0;
1559 }
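
/*
 * For reference, the bounce-buffer copy above uses the qemu/iov.h helpers.
 * A minimal sketch of the "flatten and bound" step (the helper name is
 * illustrative, not part of this file); copying at most one page bounds
 * the command, and a shorter source simply yields a shorter copy:
 */
#if 0
static size_t example_copy_cmd(const VirtQueueElement *elem, void *bounce)
{
    /* Flatten the driver's out descriptors into the bounce buffer */
    return iov_to_buf(elem->out_sg, elem->out_num, 0, bounce,
                      vhost_vdpa_net_cvq_cmd_page_len());
}
#endif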
1560 
1561 static const VhostShadowVirtqueueOps vhost_vdpa_net_svq_ops = {
1562     .avail_handler = vhost_vdpa_net_handle_ctrl_avail,
1563 };
1564 
1565 /**
1566  * Probe if CVQ is isolated
1567  *
1568  * @device_fd         The vdpa device fd
1569  * @features          Features offered by the device.
1570  * @cvq_index         The control vq pair index
1571  *
1572  * Returns <0 in case of failure, 0 if false and 1 if true.
1573  */
1574 static int vhost_vdpa_probe_cvq_isolation(int device_fd, uint64_t features,
1575                                           int cvq_index, Error **errp)
1576 {
1577     ERRP_GUARD();
1578     uint64_t backend_features;
1579     int64_t cvq_group;
1580     uint8_t status = VIRTIO_CONFIG_S_ACKNOWLEDGE |
1581                      VIRTIO_CONFIG_S_DRIVER;
1582     int r;
1583 
1584     r = ioctl(device_fd, VHOST_GET_BACKEND_FEATURES, &backend_features);
1585     if (unlikely(r < 0)) {
1586         error_setg_errno(errp, errno, "Cannot get vdpa backend_features");
1587         return r;
1588     }
1589 
1590     if (!(backend_features & BIT_ULL(VHOST_BACKEND_F_IOTLB_ASID))) {
1591         return 0;
1592     }
1593 
1594     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1595     if (unlikely(r)) {
1596         error_setg_errno(errp, errno, "Cannot set device status");
1597         goto out;
1598     }
1599 
1600     r = ioctl(device_fd, VHOST_SET_FEATURES, &features);
1601     if (unlikely(r)) {
1602         error_setg_errno(errp, errno, "Cannot set features");
1603         goto out;
1604     }
1605 
1606     status |= VIRTIO_CONFIG_S_FEATURES_OK;
1607     r = ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1608     if (unlikely(r)) {
1609         error_setg_errno(errp, errno, "Cannot set device status");
1610         goto out;
1611     }
1612 
1613     cvq_group = vhost_vdpa_get_vring_group(device_fd, cvq_index, errp);
1614     if (unlikely(cvq_group < 0)) {
1615         if (cvq_group != -ENOTSUP) {
1616             r = cvq_group;
1617             goto out;
1618         }
1619 
1620         /*
1621          * The kernel reports VHOST_BACKEND_F_IOTLB_ASID if the vdpa frontend
1622          * supports ASID even if the parent driver does not. The CVQ cannot be
1623          * isolated in this case.
1624          */
1625         error_free(*errp);
1626         *errp = NULL;
1627         r = 0;
1628         goto out;
1629     }
1630 
1631     for (int i = 0; i < cvq_index; ++i) {
1632         int64_t group = vhost_vdpa_get_vring_group(device_fd, i, errp);
1633         if (unlikely(group < 0)) {
1634             r = group;
1635             goto out;
1636         }
1637 
1638         if (group == (int64_t)cvq_group) {
1639             r = 0;
1640             goto out;
1641         }
1642     }
1643 
1644     r = 1;
1645 
1646 out:
1647     status = 0;
1648     ioctl(device_fd, VHOST_VDPA_SET_STATUS, &status);
1649     return r;
1650 }
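
/*
 * For reference, a minimal sketch of the vring group query that the probe
 * above relies on. The in-tree helper, vhost_vdpa_get_vring_group() earlier
 * in this file, additionally reports the failure through an Error parameter;
 * the helper name below is illustrative only.
 */
#if 0
static int64_t example_get_vring_group(int device_fd, unsigned vq_index)
{
    struct vhost_vring_state state = {
        .index = vq_index,
    };

    if (ioctl(device_fd, VHOST_VDPA_GET_VRING_GROUP, &state) < 0) {
        /* e.g. -ENOTSUP when the parent driver lacks ASID support */
        return -errno;
    }

    /* The ASID group this virtqueue belongs to */
    return state.num;
}
#endif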
1651 
1652 static NetClientState *net_vhost_vdpa_init(NetClientState *peer,
1653                                        const char *device,
1654                                        const char *name,
1655                                        int vdpa_device_fd,
1656                                        int queue_pair_index,
1657                                        int nvqs,
1658                                        bool is_datapath,
1659                                        bool svq,
1660                                        struct vhost_vdpa_iova_range iova_range,
1661                                        uint64_t features,
1662                                        VhostVDPAShared *shared,
1663                                        Error **errp)
1664 {
1665     NetClientState *nc = NULL;
1666     VhostVDPAState *s;
1667     int ret = 0;
1668     assert(name);
1669     int cvq_isolated = 0;
1670 
1671     if (is_datapath) {
1672         nc = qemu_new_net_client(&net_vhost_vdpa_info, peer, device,
1673                                  name);
1674     } else {
1675         cvq_isolated = vhost_vdpa_probe_cvq_isolation(vdpa_device_fd, features,
1676                                                       queue_pair_index * 2,
1677                                                       errp);
1678         if (unlikely(cvq_isolated < 0)) {
1679             return NULL;
1680         }
1681 
1682         nc = qemu_new_net_control_client(&net_vhost_vdpa_cvq_info, peer,
1683                                          device, name);
1684     }
1685     qemu_set_info_str(nc, TYPE_VHOST_VDPA);
1686     s = DO_UPCAST(VhostVDPAState, nc, nc);
1687 
1688     s->vhost_vdpa.index = queue_pair_index;
1689     s->always_svq = svq;
1690     s->migration_state.notify = NULL;
1691     s->vhost_vdpa.shadow_vqs_enabled = svq;
1692     if (queue_pair_index == 0) {
1693         vhost_vdpa_net_valid_svq_features(features,
1694                                           &s->vhost_vdpa.migration_blocker);
1695         s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1);
1696         s->vhost_vdpa.shared->device_fd = vdpa_device_fd;
1697         s->vhost_vdpa.shared->iova_range = iova_range;
1698         s->vhost_vdpa.shared->shadow_data = svq;
1699         s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first,
1700                                                               iova_range.last);
1701     } else if (!is_datapath) {
1702         s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1703                                      PROT_READ | PROT_WRITE,
1704                                      MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1705         s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(),
1706                          PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS,
1707                          -1, 0);
1708 
1709         s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops;
1710         s->vhost_vdpa.shadow_vq_ops_opaque = s;
1711         s->cvq_isolated = cvq_isolated;
1712     }
1713     if (queue_pair_index != 0) {
1714         s->vhost_vdpa.shared = shared;
1715     }
1716 
1717     ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs);
1718     if (ret) {
1719         qemu_del_net_client(nc);
1720         return NULL;
1721     }
1722 
1723     return nc;
1724 }
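
/*
 * The two CVQ shadow buffers mapped above live for the lifetime of the net
 * client; the cleanup callback earlier in this file unmaps them. A sketch
 * of the matching teardown, assuming both mappings succeeded (the helper
 * name is illustrative only):
 */
#if 0
static void example_cvq_buffers_teardown(VhostVDPAState *s)
{
    munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len());
    munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len());
}
#endif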
1725 
1726 static int vhost_vdpa_get_features(int fd, uint64_t *features, Error **errp)
1727 {
1728     int ret = ioctl(fd, VHOST_GET_FEATURES, features);
1729     if (unlikely(ret < 0)) {
1730         error_setg_errno(errp, errno,
1731                          "Failed to query features from vhost-vDPA device");
1732     }
1733     return ret;
1734 }
1735 
1736 static int vhost_vdpa_get_max_queue_pairs(int fd, uint64_t features,
1737                                           int *has_cvq, Error **errp)
1738 {
1739     unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
1740     g_autofree struct vhost_vdpa_config *config = NULL;
1741     __virtio16 *max_queue_pairs;
1742     int ret;
1743 
1744     if (features & (1 << VIRTIO_NET_F_CTRL_VQ)) {
1745         *has_cvq = 1;
1746     } else {
1747         *has_cvq = 0;
1748     }
1749 
1750     if (features & (1 << VIRTIO_NET_F_MQ)) {
1751         config = g_malloc0(config_size + sizeof(*max_queue_pairs));
1752         config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs);
1753         config->len = sizeof(*max_queue_pairs);
1754 
1755         ret = ioctl(fd, VHOST_VDPA_GET_CONFIG, config);
1756         if (ret) {
1757             error_setg(errp, "Failed to get config from vhost-vDPA device");
1758             return -errno;
1759         }
1760 
1761         max_queue_pairs = (__virtio16 *)&config->buf;
1762 
1763         return lduw_le_p(max_queue_pairs);
1764     }
1765 
1766     return 1;
1767 }
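
/*
 * The same off/len protocol of VHOST_VDPA_GET_CONFIG can read any other
 * field of struct virtio_net_config. A hedged sketch reading the device
 * MTU, only meaningful when VIRTIO_NET_F_MTU is offered (the helper name
 * is illustrative, not part of this file):
 */
#if 0
static int example_get_mtu(int fd, uint16_t *mtu)
{
    unsigned long config_size = offsetof(struct vhost_vdpa_config, buf);
    g_autofree struct vhost_vdpa_config *config =
        g_malloc0(config_size + sizeof(__virtio16));

    config->off = offsetof(struct virtio_net_config, mtu);
    config->len = sizeof(__virtio16);
    if (ioctl(fd, VHOST_VDPA_GET_CONFIG, config)) {
        return -errno;
    }

    *mtu = lduw_le_p(&config->buf);
    return 0;
}
#endif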
1768 
1769 int net_init_vhost_vdpa(const Netdev *netdev, const char *name,
1770                         NetClientState *peer, Error **errp)
1771 {
1772     ERRP_GUARD();
1773     const NetdevVhostVDPAOptions *opts;
1774     uint64_t features;
1775     int vdpa_device_fd;
1776     g_autofree NetClientState **ncs = NULL;
1777     struct vhost_vdpa_iova_range iova_range;
1778     NetClientState *nc;
1779     int queue_pairs, r, i = 0, has_cvq = 0;
1780 
1781     assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA);
1782     opts = &netdev->u.vhost_vdpa;
1783     if (!opts->vhostdev && !opts->vhostfd) {
1784         error_setg(errp,
1785                    "vhost-vdpa: neither vhostdev= nor vhostfd= was specified");
1786         return -1;
1787     }
1788 
1789     if (opts->vhostdev && opts->vhostfd) {
1790         error_setg(errp,
1791                    "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive");
1792         return -1;
1793     }
1794 
1795     if (opts->vhostdev) {
1796         vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp);
1797         if (vdpa_device_fd == -1) {
1798             return -errno;
1799         }
1800     } else {
1801         /* has_vhostfd */
1802         vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp);
1803         if (vdpa_device_fd == -1) {
1804             error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: ");
1805             return -1;
1806         }
1807     }
1808 
1809     r = vhost_vdpa_get_features(vdpa_device_fd, &features, errp);
1810     if (unlikely(r < 0)) {
1811         goto err;
1812     }
1813 
1814     queue_pairs = vhost_vdpa_get_max_queue_pairs(vdpa_device_fd, features,
1815                                                  &has_cvq, errp);
1816     if (queue_pairs < 0) {
1817         qemu_close(vdpa_device_fd);
1818         return queue_pairs;
1819     }
1820 
1821     r = vhost_vdpa_get_iova_range(vdpa_device_fd, &iova_range);
1822     if (unlikely(r < 0)) {
1823         error_setg(errp, "vhost-vdpa: get iova range failed: %s",
1824                    strerror(-r));
1825         goto err;
1826     }
1827 
1828     if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) {
1829         goto err;
1830     }
1831 
1832     ncs = g_malloc0(sizeof(*ncs) * queue_pairs);
1833 
1834     for (i = 0; i < queue_pairs; i++) {
1835         VhostVDPAShared *shared = NULL;
1836 
1837         if (i) {
1838             shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared;
1839         }
1840         ncs[i] = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1841                                      vdpa_device_fd, i, 2, true, opts->x_svq,
1842                                      iova_range, features, shared, errp);
1843         if (!ncs[i])
1844             goto err;
1845     }
1846 
1847     if (has_cvq) {
1848         VhostVDPAState *s0 = DO_UPCAST(VhostVDPAState, nc, ncs[0]);
1849         VhostVDPAShared *shared = s0->vhost_vdpa.shared;
1850 
1851         nc = net_vhost_vdpa_init(peer, TYPE_VHOST_VDPA, name,
1852                                  vdpa_device_fd, i, 1, false,
1853                                  opts->x_svq, iova_range, features, shared,
1854                                  errp);
1855         if (!nc)
1856             goto err;
1857     }
1858 
1859     return 0;
1860 
1861 err:
1862     if (i) {
1863         for (i--; i >= 0; i--) {
1864             qemu_del_net_client(ncs[i]);
1865         }
1866     }
1867 
1868     qemu_close(vdpa_device_fd);
1869 
1870     return -1;
1871 }
1872
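
/*
 * Example command line wiring for this backend (illustrative; the
 * /dev/vhost-vdpa-0 node name depends on the host):
 *
 *   -netdev type=vhost-vdpa,vhostdev=/dev/vhost-vdpa-0,id=vdpa0 \
 *   -device virtio-net-pci,netdev=vdpa0
 *
 * An already-open file descriptor can be passed instead with vhostfd=,
 * and the experimental x-svq=on option enables shadow virtqueues for all
 * queues from the start.
 */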