Lines Matching +full:stop +full:- +full:ack
2 * vhost-vdpa.c
4 * Copyright(c) 2017-2018 Intel Corporation.
8 * See the COPYING file in the top-level directory.
14 #include "hw/virtio/virtio-net.h"
16 #include "net/vhost-vdpa.h"
17 #include "hw/virtio/vhost-vdpa.h"
18 #include "qemu/config-file.h"
19 #include "qemu/error-report.h"
27 #include "standard-headers/linux/virtio_net.h"
138 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_get_vhost_net()
139 return s->vhost_net; in vhost_vdpa_get_vhost_net()
164 VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START); in vhost_vdpa_net_valid_svq_features()
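The VIRTIO_TRANSPORT_F_END - VIRTIO_TRANSPORT_F_START expression above sizes a bitmask over the transport feature bits, which the SVQ feature check must cover before a ring can be shadowed. A minimal standalone sketch of that mask construction; the START/END values here are illustrative stand-ins for the virtio header constants, and make_64bit_mask mimics QEMU's MAKE_64BIT_MASK:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for VIRTIO_TRANSPORT_F_START/END. */
    #define TRANSPORT_F_START 28
    #define TRANSPORT_F_END   38

    /* Build a mask covering bits [start, start + length). */
    static uint64_t make_64bit_mask(unsigned start, unsigned length)
    {
        return (~0ULL >> (64 - length)) << start;
    }

    int main(void)
    {
        uint64_t mask = make_64bit_mask(TRANSPORT_F_START,
                                        TRANSPORT_F_END - TRANSPORT_F_START);
        printf("transport feature mask: 0x%016" PRIx64 "\n", mask);
        return 0;
    }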
181 hdev = (struct vhost_dev *)&net->dev; in vhost_vdpa_net_check_device_id()
182 ret = hdev->vhost_ops->vhost_get_device_id(hdev, &device_id); in vhost_vdpa_net_check_device_id()
184 return -ENOTSUP; in vhost_vdpa_net_check_device_id()
198 assert(ncs->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_add()
210 s->vhost_net = net; in vhost_vdpa_add()
220 return -1; in vhost_vdpa_add()
227 munmap(s->cvq_cmd_out_buffer, vhost_vdpa_net_cvq_cmd_page_len()); in vhost_vdpa_cleanup()
228 munmap(s->status, vhost_vdpa_net_cvq_cmd_page_len()); in vhost_vdpa_cleanup()
229 if (s->vhost_net) { in vhost_vdpa_cleanup()
230 vhost_net_cleanup(s->vhost_net); in vhost_vdpa_cleanup()
231 g_free(s->vhost_net); in vhost_vdpa_cleanup()
232 s->vhost_net = NULL; in vhost_vdpa_cleanup()
234 if (s->vhost_vdpa.index != 0) { in vhost_vdpa_cleanup()
237 qemu_close(s->vhost_vdpa.shared->device_fd); in vhost_vdpa_cleanup()
238 g_clear_pointer(&s->vhost_vdpa.shared->iova_tree, vhost_iova_tree_delete); in vhost_vdpa_cleanup()
239 g_free(s->vhost_vdpa.shared); in vhost_vdpa_cleanup()
242 /** Dummy SetSteeringEBPF to support RSS for vhost-vdpa backend */
250 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_has_vnet_hdr()
257 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_has_ufo()
261 features = vhost_net_get_features(s->vhost_net, features); in vhost_vdpa_has_ufo()
270 * LE". Otherwise, on a BE machine, higher-level code would mistakely think
283 if (!g_str_has_prefix(driver, "virtio-net-")) { in vhost_vdpa_check_peer_type()
284 error_setg(errp, "vhost-vdpa requires frontend driver virtio-net-*"); in vhost_vdpa_check_peer_type()
299 /** From any vdpa net client, get the netclient of the i-th queue pair */
302 NICState *nic = qemu_get_nic(s->nc.peer); in vhost_vdpa_net_get_nc_vdpa()
303 NetClientState *nc_i = qemu_get_peer(nic->ncs, i); in vhost_vdpa_net_get_nc_vdpa()
315 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_log_global_enable()
320 /* We are only called on the first data vqs and only if x-svq is not set */ in vhost_vdpa_net_log_global_enable()
321 if (s->vhost_vdpa.shadow_vqs_enabled == enable) { in vhost_vdpa_net_log_global_enable()
325 vdev = v->dev->vdev; in vhost_vdpa_net_log_global_enable()
327 if (!n->vhost_started) { in vhost_vdpa_net_log_global_enable()
331 data_queue_pairs = n->multiqueue ? n->max_queue_pairs : 1; in vhost_vdpa_net_log_global_enable()
333 n->max_ncs - n->max_queue_pairs : 0; in vhost_vdpa_net_log_global_enable()
334 v->shared->svq_switching = enable ? in vhost_vdpa_net_log_global_enable()
338 * in the future and resume the device if read-only operations between in vhost_vdpa_net_log_global_enable()
341 vhost_net_stop(vdev, n->nic->ncs, data_queue_pairs, cvq); in vhost_vdpa_net_log_global_enable()
344 r = vhost_net_start(vdev, n->nic->ncs, data_queue_pairs, cvq); in vhost_vdpa_net_log_global_enable()
346 error_report("unable to start vhost net: %s(%d)", g_strerror(-r), -r); in vhost_vdpa_net_log_global_enable()
348 v->shared->svq_switching = SVQ_TSTATE_DONE; in vhost_vdpa_net_log_global_enable()
356 if (e->type == MIG_EVENT_PRECOPY_SETUP) { in vdpa_net_migration_state_notifier()
358 } else if (e->type == MIG_EVENT_PRECOPY_FAILED) { in vdpa_net_migration_state_notifier()
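The two branches above switch shadow virtqueues on when precopy migration is being set up (so dirty pages can be tracked in software) and back off if it fails. A reduced sketch of that dispatch, with hypothetical event names standing in for QEMU's MIG_EVENT_* constants:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-ins for MIG_EVENT_PRECOPY_SETUP/FAILED. */
    enum mig_event { EV_PRECOPY_SETUP, EV_PRECOPY_FAILED, EV_OTHER };

    static void log_global_enable(bool enable)
    {
        /* In QEMU this restarts vhost-net with SVQ toggled. */
        printf("shadow vqs -> %s\n", enable ? "on" : "off");
    }

    static void migration_state_notifier(enum mig_event e)
    {
        if (e == EV_PRECOPY_SETUP) {
            log_global_enable(true);
        } else if (e == EV_PRECOPY_FAILED) {
            log_global_enable(false);
        }
    }

    int main(void)
    {
        migration_state_notifier(EV_PRECOPY_SETUP);
        migration_state_notifier(EV_PRECOPY_FAILED);
        return 0;
    }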
366 migration_add_notifier(&s->migration_state, in vhost_vdpa_net_data_start_first()
373 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_data_start()
375 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_data_start()
377 if (s->always_svq || migration_is_running()) { in vhost_vdpa_net_data_start()
378 v->shadow_vqs_enabled = true; in vhost_vdpa_net_data_start()
380 v->shadow_vqs_enabled = false; in vhost_vdpa_net_data_start()
383 if (v->index == 0) { in vhost_vdpa_net_data_start()
384 v->shared->shadow_data = v->shadow_vqs_enabled; in vhost_vdpa_net_data_start()
395 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_data_load()
396 bool has_cvq = v->dev->vq_index_end % 2; in vhost_vdpa_net_data_load()
402 for (int i = 0; i < v->dev->nvqs; ++i) { in vhost_vdpa_net_data_load()
403 int ret = vhost_vdpa_set_vring_ready(v, i + v->dev->vq_index); in vhost_vdpa_net_data_load()
415 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_client_stop()
417 if (s->vhost_vdpa.index == 0) { in vhost_vdpa_net_client_stop()
418 migration_remove_notifier(&s->migration_state); in vhost_vdpa_net_client_stop()
428 .stop = vhost_vdpa_net_client_stop,
446 r = -errno; in vhost_vdpa_get_vring_group()
466 r = ioctl(v->shared->device_fd, VHOST_VDPA_SET_GROUP_ASID, &asid); in vhost_vdpa_set_address_space_id()
476 VhostIOVATree *tree = v->shared->iova_tree; in vhost_vdpa_cvq_unmap_buf()
492 r = vhost_vdpa_dma_unmap(v->shared, v->address_space_id, map->iova, in vhost_vdpa_cvq_unmap_buf()
493 map->size + 1); in vhost_vdpa_cvq_unmap_buf()
509 map.size = size - 1; in vhost_vdpa_cvq_map_buf()
511 r = vhost_iova_tree_map_alloc(v->shared->iova_tree, &map, taddr); in vhost_vdpa_cvq_map_buf()
516 error_report("Insertion to IOVA->HVA tree failed"); in vhost_vdpa_cvq_map_buf()
517 /* Remove the mapping from the IOVA-only tree */ in vhost_vdpa_cvq_map_buf()
523 r = vhost_vdpa_dma_map(v->shared, v->address_space_id, map.iova, in vhost_vdpa_cvq_map_buf()
532 vhost_iova_tree_remove(v->shared->iova_tree, map); in vhost_vdpa_cvq_map_buf()
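Note the asymmetry in the map and unmap paths above: mapping records map.size = size - 1 while unmapping passes map->size + 1. QEMU's DMAMap stores an inclusive size (the offset of the mapping's last byte), so both directions convert at the boundary. A tiny sketch of that convention with an illustrative struct:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative mirror of QEMU's DMAMap: size is inclusive, i.e. the
     * offset of the mapping's last byte. */
    struct dma_map {
        uint64_t iova;
        uint64_t size;
    };

    int main(void)
    {
        uint64_t len = 4096;
        struct dma_map map = { .iova = 0x10000, .size = len - 1 };

        /* The unmap path converts back to a byte length. */
        printf("unmap %" PRIu64 " bytes at 0x%" PRIx64 "\n",
               map.size + 1, map.iova);
        return 0;
    }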
544 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_cvq_start()
547 v = &s->vhost_vdpa; in vhost_vdpa_net_cvq_start()
550 v->shadow_vqs_enabled = s0->vhost_vdpa.shadow_vqs_enabled; in vhost_vdpa_net_cvq_start()
551 s->vhost_vdpa.address_space_id = VHOST_VDPA_GUEST_PA_ASID; in vhost_vdpa_net_cvq_start()
553 if (v->shared->shadow_data) { in vhost_vdpa_net_cvq_start()
560 * will be blocked as long as vhost-vdpa backends do not offer _F_LOG. in vhost_vdpa_net_cvq_start()
562 if (!vhost_vdpa_net_valid_svq_features(v->dev->features, NULL)) { in vhost_vdpa_net_cvq_start()
566 if (!s->cvq_isolated) { in vhost_vdpa_net_cvq_start()
570 cvq_group = vhost_vdpa_get_vring_group(v->shared->device_fd, in vhost_vdpa_net_cvq_start()
571 v->dev->vq_index_end - 1, in vhost_vdpa_net_cvq_start()
583 v->shadow_vqs_enabled = true; in vhost_vdpa_net_cvq_start()
584 s->vhost_vdpa.address_space_id = VHOST_VDPA_NET_CVQ_ASID; in vhost_vdpa_net_cvq_start()
587 if (!s->vhost_vdpa.shadow_vqs_enabled) { in vhost_vdpa_net_cvq_start()
591 r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer, in vhost_vdpa_net_cvq_start()
597 r = vhost_vdpa_cvq_map_buf(&s->vhost_vdpa, s->status, in vhost_vdpa_net_cvq_start()
600 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); in vhost_vdpa_net_cvq_start()
610 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_cvq_stop()
612 if (s->vhost_vdpa.shadow_vqs_enabled) { in vhost_vdpa_net_cvq_stop()
613 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->cvq_cmd_out_buffer); in vhost_vdpa_net_cvq_stop()
614 vhost_vdpa_cvq_unmap_buf(&s->vhost_vdpa, s->status); in vhost_vdpa_net_cvq_stop()
624 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); in vhost_vdpa_net_cvq_add()
629 if (unlikely(r == -ENOSPC)) { in vhost_vdpa_net_cvq_add()
646 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); in vhost_vdpa_net_svq_poll()
655 out_cursor->iov_base = s->cvq_cmd_out_buffer; in vhost_vdpa_net_load_cursor_reset()
656 out_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); in vhost_vdpa_net_load_cursor_reset()
659 in_cursor->iov_base = s->status; in vhost_vdpa_net_load_cursor_reset()
660 in_cursor->iov_len = vhost_vdpa_net_cvq_cmd_page_len(); in vhost_vdpa_net_load_cursor_reset()
664 * Poll SVQ for multiple pending control commands and check the device's ack.
673 /* the device returns a one-byte ack for each control command */ in vhost_vdpa_net_svq_flush()
676 return -EIO; in vhost_vdpa_net_svq_flush()
679 /* check the device's ack */ in vhost_vdpa_net_svq_flush()
681 if (s->status[i] != VIRTIO_NET_OK) { in vhost_vdpa_net_svq_flush()
682 return -EIO; in vhost_vdpa_net_svq_flush()
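The flush above polls the SVQ once per queued command and then walks the shared status page, failing on the first command the device rejected. A minimal sketch of that ack scan, assuming the caller has already polled all in-flight descriptors:

    #include <errno.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint8_t virtio_net_ctrl_ack;   /* one byte, per the virtio spec */
    #define VIRTIO_NET_OK 0

    static int check_acks(const virtio_net_ctrl_ack *status, size_t n_cmds)
    {
        for (size_t i = 0; i < n_cmds; i++) {
            if (status[i] != VIRTIO_NET_OK) {
                return -EIO;    /* device rejected command i */
            }
        }
        return 0;
    }

    int main(void)
    {
        virtio_net_ctrl_ack status[2] = { VIRTIO_NET_OK, VIRTIO_NET_OK };
        printf("flush: %d\n", check_acks(status, 2));
        return 0;
    }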
702 VhostShadowVirtqueue *svq = g_ptr_array_index(s->vhost_vdpa.shadow_vqs, 0); in vhost_vdpa_net_load_cmd()
704 assert(data_size < vhost_vdpa_net_cvq_cmd_page_len() - sizeof(ctrl)); in vhost_vdpa_net_load_cmd()
716 r = vhost_vdpa_net_svq_flush(s, in_cursor->iov_base - in vhost_vdpa_net_load_cmd()
717 (void *)s->status); in vhost_vdpa_net_load_cmd()
727 /* pack the CVQ command's command-specific data */ in vhost_vdpa_net_load_cmd()
729 out_cursor->iov_base + sizeof(ctrl), data_size); in vhost_vdpa_net_load_cmd()
734 iov_copy(&in, 1, in_cursor, 1, 0, sizeof(*s->status)); in vhost_vdpa_net_load_cmd()
746 iov_discard_front(&in_cursor, &dummy_cursor_iov_cnt, sizeof(*s->status)); in vhost_vdpa_net_load_cmd()
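The cursor handling above lets several CVQ commands be packed back-to-back into one shared buffer before a single flush: after each command is queued, both iovec cursors are advanced past the bytes just consumed (iov_discard_front does this for the status cursor). A self-contained sketch of that advance:

    #include <stddef.h>
    #include <stdio.h>
    #include <sys/uio.h>

    /* Advance an iovec cursor past `consumed` bytes, as the load path does
     * after queuing each command. */
    static void cursor_advance(struct iovec *cur, size_t consumed)
    {
        cur->iov_base = (char *)cur->iov_base + consumed;
        cur->iov_len -= consumed;
    }

    int main(void)
    {
        char page[4096];
        struct iovec out = { .iov_base = page, .iov_len = sizeof(page) };

        cursor_advance(&out, 64);   /* one header plus payload consumed */
        printf("%zu bytes left for the next command\n", out.iov_len);
        return 0;
    }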
755 if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_MAC_ADDR)) { in vhost_vdpa_net_load_mac()
757 .iov_base = (void *)n->mac, in vhost_vdpa_net_load_mac()
758 .iov_len = sizeof(n->mac), in vhost_vdpa_net_load_mac()
780 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX) || in vhost_vdpa_net_load_mac()
781 n->mac_table.in_use == 0) { in vhost_vdpa_net_load_mac()
785 uint32_t uni_entries = n->mac_table.first_multi, in vhost_vdpa_net_load_mac()
787 mul_entries = n->mac_table.in_use - uni_entries, in vhost_vdpa_net_load_mac()
800 .iov_base = n->mac_table.macs, in vhost_vdpa_net_load_mac()
806 .iov_base = &n->mac_table.macs[uni_macs_size], in vhost_vdpa_net_load_mac()
840 if (!n->rss_data.enabled || in vhost_vdpa_net_load_rss()
841 n->rss_data.hash_types == VIRTIO_NET_HASH_REPORT_NONE) { in vhost_vdpa_net_load_rss()
845 table = g_malloc_n(n->rss_data.indirections_len, in vhost_vdpa_net_load_rss()
846 sizeof(n->rss_data.indirections_table[0])); in vhost_vdpa_net_load_rss()
847 cfg.hash_types = cpu_to_le32(n->rss_data.hash_types); in vhost_vdpa_net_load_rss()
854 cfg.indirection_table_mask = cpu_to_le16(n->rss_data.indirections_len - in vhost_vdpa_net_load_rss()
856 cfg.unclassified_queue = cpu_to_le16(n->rss_data.default_queue); in vhost_vdpa_net_load_rss()
857 for (int i = 0; i < n->rss_data.indirections_len; ++i) { in vhost_vdpa_net_load_rss()
858 table[i] = cpu_to_le16(n->rss_data.indirections_table[i]); in vhost_vdpa_net_load_rss()
860 cfg.max_tx_vq = cpu_to_le16(n->curr_queue_pairs); in vhost_vdpa_net_load_rss()
882 * into n->rss_data and uses the maximum key length in other code, so in vhost_vdpa_net_load_rss()
885 cfg.hash_key_length = sizeof(n->rss_data.key); in vhost_vdpa_net_load_rss()
894 .iov_len = n->rss_data.indirections_len * in vhost_vdpa_net_load_rss()
895 sizeof(n->rss_data.indirections_table[0]), in vhost_vdpa_net_load_rss()
898 .iov_len = offsetof(struct virtio_net_rss_config, hash_key_data) - in vhost_vdpa_net_load_rss()
901 .iov_base = (void *)n->rss_data.key, in vhost_vdpa_net_load_rss()
902 .iov_len = sizeof(n->rss_data.key), in vhost_vdpa_net_load_rss()
926 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_MQ)) { in vhost_vdpa_net_load_mq()
930 trace_vhost_vdpa_net_load_mq(s, n->curr_queue_pairs); in vhost_vdpa_net_load_mq()
932 mq.virtqueue_pairs = cpu_to_le16(n->curr_queue_pairs); in vhost_vdpa_net_load_mq()
945 if (virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_RSS)) { in vhost_vdpa_net_load_mq()
946 /* load the receive-side scaling state */ in vhost_vdpa_net_load_mq()
951 } else if (virtio_vdev_has_feature(&n->parent_obj, in vhost_vdpa_net_load_mq()
971 if (!virtio_vdev_has_feature(&n->parent_obj, in vhost_vdpa_net_load_offloads()
976 if (n->curr_guest_offloads == virtio_net_supported_guest_offloads(n)) { in vhost_vdpa_net_load_offloads()
992 offloads = cpu_to_le64(n->curr_guest_offloads); in vhost_vdpa_net_load_offloads()
1036 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX)) { in vhost_vdpa_net_load_rx()
1047 * QEMU marks `n->mac_table.uni_overflow` if guest sets too many in vhost_vdpa_net_load_rx()
1048 * non-multicast MAC addresses, indicating that promiscuous mode in vhost_vdpa_net_load_rx()
1052 * `n->mac_table.uni_overflow` is not marked and `n->promisc` is off, in vhost_vdpa_net_load_rx()
1058 if (!n->mac_table.uni_overflow && !n->promisc) { in vhost_vdpa_net_load_rx()
1067 * According to virtio_net_reset(), device turns all-multicast mode in vhost_vdpa_net_load_rx()
1073 * `n->mac_table.multi_overflow` if guest sets too many in vhost_vdpa_net_load_rx()
1074 * non-multicast MAC addresses. in vhost_vdpa_net_load_rx()
1077 * `n->mac_table.multi_overflow` is marked or `n->allmulti` is on, in vhost_vdpa_net_load_rx()
1078 * which sets all-multicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1083 if (n->mac_table.multi_overflow || n->allmulti) { in vhost_vdpa_net_load_rx()
1091 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_RX_EXTRA)) { in vhost_vdpa_net_load_rx()
1096 * According to virtio_net_reset(), device turns all-unicast mode in vhost_vdpa_net_load_rx()
1100 * sets all-unicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1105 if (n->alluni) { in vhost_vdpa_net_load_rx()
1114 * According to virtio_net_reset(), device turns non-multicast mode in vhost_vdpa_net_load_rx()
1118 * sets non-multicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1123 if (n->nomulti) { in vhost_vdpa_net_load_rx()
1132 * According to virtio_net_reset(), device turns non-unicast mode in vhost_vdpa_net_load_rx()
1136 * sets non-unicast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1141 if (n->nouni) { in vhost_vdpa_net_load_rx()
1150 * According to virtio_net_reset(), device turns non-broadcast mode in vhost_vdpa_net_load_rx()
1154 * sets non-broadcast mode on, different from the device's defaults. in vhost_vdpa_net_load_rx()
1159 if (n->nobcast) { in vhost_vdpa_net_load_rx()
1198 if (!virtio_vdev_has_feature(&n->parent_obj, VIRTIO_NET_F_CTRL_VLAN)) { in vhost_vdpa_net_load_vlan()
1203 for (int j = 0; n->vlans[i] && j <= 0x1f; j++) { in vhost_vdpa_net_load_vlan()
1204 if (n->vlans[i] & (1U << j)) { in vhost_vdpa_net_load_vlan()
1220 struct vhost_vdpa *v = &s->vhost_vdpa; in vhost_vdpa_net_cvq_load()
1225 assert(nc->info->type == NET_CLIENT_DRIVER_VHOST_VDPA); in vhost_vdpa_net_cvq_load()
1227 r = vhost_vdpa_set_vring_ready(v, v->dev->vq_index); in vhost_vdpa_net_cvq_load()
1232 if (v->shadow_vqs_enabled) { in vhost_vdpa_net_cvq_load()
1233 n = VIRTIO_NET(v->dev->vdev); in vhost_vdpa_net_cvq_load()
1262 r = vhost_vdpa_net_svq_flush(s, in_cursor.iov_base - (void *)s->status); in vhost_vdpa_net_cvq_load()
1268 for (int i = 0; i < v->dev->vq_index; ++i) { in vhost_vdpa_net_cvq_load()
1284 .stop = vhost_vdpa_net_cvq_stop,
1304 * marks `n->mac_table.x_overflow` accordingly, it should have
1306 * (`MAC_TABLE_ENTRIES` + 1) or more non-multicast MAC addresses.
1311 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) multicast
1313 * `n->mac_table.uni_overflow` and `n->mac_table.multi_overflow`,
1328 /* parse the non-multicast MAC address entries from CVQ command */ in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1330 r = iov_to_buf(elem->out_sg, elem->out_num, cursor, in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1337 *s->status = VIRTIO_NET_ERR; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1338 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1343 r = iov_to_buf(elem->out_sg, elem->out_num, cursor, in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1350 *s->status = VIRTIO_NET_ERR; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1351 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1356 if (iov_size(elem->out_sg, elem->out_num) != cursor) { in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1361 *s->status = VIRTIO_NET_ERR; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1362 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1374 hdr_ptr = out->iov_base; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1375 out->iov_len = sizeof(*hdr_ptr) + sizeof(on); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1377 hdr_ptr->class = VIRTIO_NET_CTRL_RX; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1378 hdr_ptr->cmd = VIRTIO_NET_CTRL_RX_PROMISC; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1390 if (unlikely(r < sizeof(*s->status))) { in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1393 if (*s->status != VIRTIO_NET_OK) { in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1394 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1400 * non-multicast MAC addresses and (`MAC_TABLE_ENTRIES` + 1) in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1403 * By doing so, the device model can mark `n->mac_table.uni_overflow` in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1404 * and `n->mac_table.multi_overflow`, enabling all packets to be in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1415 out->iov_len = fake_cvq_size; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1418 hdr_ptr = out->iov_base + cursor; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1419 hdr_ptr->class = VIRTIO_NET_CTRL_MAC; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1420 hdr_ptr->cmd = VIRTIO_NET_CTRL_MAC_TABLE_SET; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1424 * Pack the non-multicast MAC addresses part for fake CVQ command. in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1430 mac_ptr = out->iov_base + cursor; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1431 mac_ptr->entries = cpu_to_le32(fake_uni_entries); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1441 mac_ptr = out->iov_base + cursor; in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1442 mac_ptr->entries = cpu_to_le32(fake_mul_entries); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1448 return sizeof(*s->status); in vhost_vdpa_net_excessive_mac_filter_cvq_add()
1467 .iov_base = s->cvq_cmd_out_buffer, in vhost_vdpa_net_handle_ctrl_avail()
1476 .iov_base = s->status, in vhost_vdpa_net_handle_ctrl_avail()
1477 .iov_len = sizeof(*s->status), in vhost_vdpa_net_handle_ctrl_avail()
1479 ssize_t dev_written = -EINVAL; in vhost_vdpa_net_handle_ctrl_avail()
1481 out.iov_len = iov_to_buf(elem->out_sg, elem->out_num, 0, in vhost_vdpa_net_handle_ctrl_avail()
1482 s->cvq_cmd_out_buffer, in vhost_vdpa_net_handle_ctrl_avail()
1485 ctrl = s->cvq_cmd_out_buffer; in vhost_vdpa_net_handle_ctrl_avail()
1486 if (ctrl->class == VIRTIO_NET_CTRL_ANNOUNCE) { in vhost_vdpa_net_handle_ctrl_avail()
1492 *s->status = VIRTIO_NET_OK; in vhost_vdpa_net_handle_ctrl_avail()
1493 } else if (unlikely(ctrl->class == VIRTIO_NET_CTRL_MAC && in vhost_vdpa_net_handle_ctrl_avail()
1494 ctrl->cmd == VIRTIO_NET_CTRL_MAC_TABLE_SET && in vhost_vdpa_net_handle_ctrl_avail()
1495 iov_size(elem->out_sg, elem->out_num) > out.iov_len)) { in vhost_vdpa_net_handle_ctrl_avail()
1531 if (*s->status != VIRTIO_NET_OK) { in vhost_vdpa_net_handle_ctrl_avail()
1536 virtio_net_handle_ctrl_iov(svq->vdev, &model_in, 1, &out, 1); in vhost_vdpa_net_handle_ctrl_avail()
1542 in_len = iov_from_buf(elem->in_sg, elem->in_num, 0, &status, in vhost_vdpa_net_handle_ctrl_avail()
1551 * by a non-negative value of `dev_written`. Otherwise, it still in vhost_vdpa_net_handle_ctrl_avail()
1596 error_setg_errno(errp, -r, "Cannot set device status"); in vhost_vdpa_probe_cvq_isolation()
1602 error_setg_errno(errp, -r, "Cannot set features"); in vhost_vdpa_probe_cvq_isolation()
1609 error_setg_errno(errp, -r, "Cannot set device status"); in vhost_vdpa_probe_cvq_isolation()
1615 if (cvq_group != -ENOTSUP) { in vhost_vdpa_probe_cvq_isolation()
1688 s->vhost_vdpa.index = queue_pair_index; in net_vhost_vdpa_init()
1689 s->always_svq = svq; in net_vhost_vdpa_init()
1690 s->migration_state.notify = NULL; in net_vhost_vdpa_init()
1691 s->vhost_vdpa.shadow_vqs_enabled = svq; in net_vhost_vdpa_init()
1694 &s->vhost_vdpa.migration_blocker); in net_vhost_vdpa_init()
1695 s->vhost_vdpa.shared = g_new0(VhostVDPAShared, 1); in net_vhost_vdpa_init()
1696 s->vhost_vdpa.shared->device_fd = vdpa_device_fd; in net_vhost_vdpa_init()
1697 s->vhost_vdpa.shared->iova_range = iova_range; in net_vhost_vdpa_init()
1698 s->vhost_vdpa.shared->shadow_data = svq; in net_vhost_vdpa_init()
1699 s->vhost_vdpa.shared->iova_tree = vhost_iova_tree_new(iova_range.first, in net_vhost_vdpa_init()
1702 s->cvq_cmd_out_buffer = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), in net_vhost_vdpa_init()
1704 MAP_SHARED | MAP_ANONYMOUS, -1, 0); in net_vhost_vdpa_init()
1705 s->status = mmap(NULL, vhost_vdpa_net_cvq_cmd_page_len(), in net_vhost_vdpa_init()
1707 -1, 0); in net_vhost_vdpa_init()
1709 s->vhost_vdpa.shadow_vq_ops = &vhost_vdpa_net_svq_ops; in net_vhost_vdpa_init()
1710 s->vhost_vdpa.shadow_vq_ops_opaque = s; in net_vhost_vdpa_init()
1711 s->cvq_isolated = cvq_isolated; in net_vhost_vdpa_init()
1714 s->vhost_vdpa.shared = shared; in net_vhost_vdpa_init()
1717 ret = vhost_vdpa_add(nc, (void *)&s->vhost_vdpa, queue_pair_index, nvqs); in net_vhost_vdpa_init()
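The two mmap() calls a few lines above allocate page-aligned command and status buffers so they can later be DMA-mapped into the device's IOVA space. A self-contained sketch of that allocation pattern; the length here is illustrative, where QEMU derives it from the host page size:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        size_t len = (size_t)sysconf(_SC_PAGESIZE);

        /* Anonymous, shared, page-aligned: suitable for handing to a
         * DMA map call later. */
        void *out = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (out == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(out, 0, len);
        munmap(out, len);
        return 0;
    }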
1731 "Fail to query features from vhost-vDPA device"); in vhost_vdpa_get_features()
1752 config->off = offsetof(struct virtio_net_config, max_virtqueue_pairs); in vhost_vdpa_get_max_queue_pairs()
1753 config->len = sizeof(*max_queue_pairs); in vhost_vdpa_get_max_queue_pairs()
1757 error_setg(errp, "Fail to get config from vhost-vDPA device"); in vhost_vdpa_get_max_queue_pairs()
1758 return -ret; in vhost_vdpa_get_max_queue_pairs()
1761 max_queue_pairs = (__virtio16 *)&config->buf; in vhost_vdpa_get_max_queue_pairs()
1781 assert(netdev->type == NET_CLIENT_DRIVER_VHOST_VDPA); in net_init_vhost_vdpa()
1782 opts = &netdev->u.vhost_vdpa; in net_init_vhost_vdpa()
1783 if (!opts->vhostdev && !opts->vhostfd) { in net_init_vhost_vdpa()
1785 "vhost-vdpa: neither vhostdev= nor vhostfd= was specified"); in net_init_vhost_vdpa()
1786 return -1; in net_init_vhost_vdpa()
1789 if (opts->vhostdev && opts->vhostfd) { in net_init_vhost_vdpa()
1791 "vhost-vdpa: vhostdev= and vhostfd= are mutually exclusive"); in net_init_vhost_vdpa()
1792 return -1; in net_init_vhost_vdpa()
1795 if (opts->vhostdev) { in net_init_vhost_vdpa()
1796 vdpa_device_fd = qemu_open(opts->vhostdev, O_RDWR, errp); in net_init_vhost_vdpa()
1797 if (vdpa_device_fd == -1) { in net_init_vhost_vdpa()
1798 return -errno; in net_init_vhost_vdpa()
1802 vdpa_device_fd = monitor_fd_param(monitor_cur(), opts->vhostfd, errp); in net_init_vhost_vdpa()
1803 if (vdpa_device_fd == -1) { in net_init_vhost_vdpa()
1804 error_prepend(errp, "vhost-vdpa: unable to parse vhostfd: "); in net_init_vhost_vdpa()
1805 return -1; in net_init_vhost_vdpa()
1823 error_setg(errp, "vhost-vdpa: get iova range failed: %s", in net_init_vhost_vdpa()
1824 strerror(-r)); in net_init_vhost_vdpa()
1828 if (opts->x_svq && !vhost_vdpa_net_valid_svq_features(features, errp)) { in net_init_vhost_vdpa()
1838 shared = DO_UPCAST(VhostVDPAState, nc, ncs[0])->vhost_vdpa.shared; in net_init_vhost_vdpa()
1841 vdpa_device_fd, i, 2, true, opts->x_svq, in net_init_vhost_vdpa()
1849 VhostVDPAShared *shared = s0->vhost_vdpa.shared; in net_init_vhost_vdpa()
1853 opts->x_svq, iova_range, features, shared, in net_init_vhost_vdpa()
1863 for (i--; i >= 0; i--) { in net_init_vhost_vdpa()
1870 return -1; in net_init_vhost_vdpa()