Lines Matching +full:ctx +full:- +full:asid

1 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
47 #define MLX5_FEATURE(_mvdev, _feature) (!!((_mvdev)->actual_features & BIT_ULL(_feature)))
137 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) { in is_index_valid()
138 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in is_index_valid()
144 return idx <= mvdev->max_idx; in is_index_valid()
168 /* TODO: cross-endian support */
172 (mvdev->actual_features & BIT_ULL(VIRTIO_F_VERSION_1)); in mlx5_vdpa_is_little_endian()
187 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) in ctrl_vq_idx()
190 return mvdev->max_vqs; in ctrl_vq_idx()
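
The index checks above (is_index_valid(), ctrl_vq_idx()) imply a fixed layout: data virtqueues occupy indices 0..max_vqs-1 and the control VQ, when negotiated, sits at index max_vqs, collapsing to index 2 when VIRTIO_NET_F_MQ is off. Below is a standalone sketch of that layout, not the driver's code: it mirrors the matched lines, reconstructs the unmatched branches (idx < 2, idx < 3) as assumptions, and takes plain integers instead of a struct mlx5_vdpa_dev.

#include <stdbool.h>
#include <stdint.h>

#define BIT_ULL(n)           (1ULL << (n))
#define VIRTIO_NET_F_CTRL_VQ 17   /* virtio feature bit numbers from the spec */
#define VIRTIO_NET_F_MQ      22

/* Sketch: where the control VQ lands, given the negotiated features. */
static uint16_t ctrl_vq_index(uint64_t actual_features, uint16_t max_vqs)
{
	if (!(actual_features & BIT_ULL(VIRTIO_NET_F_MQ)))
		return 2;        /* single RX/TX pair, CVQ right behind it */
	return max_vqs;          /* CVQ follows all data virtqueues */
}

/* Sketch: range check mirroring is_index_valid(); the idx < 2 / idx < 3
 * branches are filled-in assumptions around the matched lines. */
static bool index_is_valid(uint64_t actual_features, uint16_t max_idx, uint16_t idx)
{
	if (!(actual_features & BIT_ULL(VIRTIO_NET_F_MQ))) {
		if (!(actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)))
			return idx < 2;
		return idx < 3;
	}
	return idx <= max_idx;
}
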
272 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
277 tisc = MLX5_ADDR_OF(create_tis_in, in, ctx); in create_tis()
278 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
279 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
288 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
296 struct mlx5_frag_buf *frag_buf = &buf->frag_buf; in cq_frag_buf_alloc()
301 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
302 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
306 mlx5_init_fbc(frag_buf->frags, log_wq_stride, log_wq_sz, &buf->fbc); in cq_frag_buf_alloc()
308 buf->cqe_size = MLX5_VDPA_CQE_SIZE; in cq_frag_buf_alloc()
309 buf->nent = nent; in cq_frag_buf_alloc()
316 struct mlx5_frag_buf *frag_buf = &umem->frag_buf; in umem_frag_buf_alloc()
318 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
319 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
324 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
329 return mlx5_frag_buf_get_wqe(&vcq->buf.fbc, n); in get_cqe()
338 for (i = 0; i < buf->nent; i++) { in cq_frag_buf_init()
341 cqe64->op_own = MLX5_CQE_INVALID << 4; in cq_frag_buf_init()
347 struct mlx5_cqe64 *cqe64 = get_cqe(cq, n & (cq->cqe - 1)); in get_sw_cqe()
350 !((cqe64->op_own & MLX5_CQE_OWNER_MASK) ^ !!(n & cq->cqe))) in get_sw_cqe()
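
The condition on line 350 is the usual mlx5 CQE ownership test: the owner bit of op_own must equal the parity of the consumer index wrapped around the (power-of-two) CQ size, so entries completed by hardware can be told apart without zeroing the ring on every lap. A compact standalone illustration of that parity check follows; the names are illustrative, not mlx5 definitions.

#include <stdbool.h>
#include <stdint.h>

#define CQE_OWNER_MASK 0x1   /* lowest bit of op_own carries ownership */

/* Sketch: the CQE at slot (ci & (nent - 1)) belongs to software when its
 * owner bit matches !!(ci & nent), a bit that toggles once per lap. */
static bool cqe_is_sw_owned(uint8_t op_own, uint32_t ci, uint32_t nent)
{
	bool owner_bit  = op_own & CQE_OWNER_MASK;
	bool lap_parity = !!(ci & nent);   /* nent must be a power of two */

	return owner_bit == lap_parity;
}
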
358 vqp->head += n; in rx_post()
359 vqp->db.db[0] = cpu_to_be32(vqp->head); in rx_post()
369 vqp = fw ? &mvq->fwqp : &mvq->vqqp; in qp_prepare()
370 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
372 if (vqp->fw) { in qp_prepare()
384 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
386 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
387 MLX5_SET(qpc, qpc, log_page_size, vqp->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in qp_prepare()
389 MLX5_SET(qpc, qpc, cqn_rcv, mvq->cq.mcq.cqn); in qp_prepare()
393 mlx5_fill_page_frag_array(&vqp->frag_buf, pas); in qp_prepare()
398 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
399 num_ent * sizeof(struct mlx5_wqe_data_seg), &vqp->frag_buf, in rq_buf_alloc()
400 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
405 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
411 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
418 if (!vqp->fw) { in qp_create()
419 vqp = &mvq->vqqp; in qp_create()
420 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
424 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
427 inlen += vqp->frag_buf.npages * sizeof(__be64); in qp_create()
432 err = -ENOMEM; in qp_create()
436 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
440 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
442 if (!vqp->fw) in qp_create()
443 MLX5_SET64(qpc, qpc, dbr_addr, vqp->db.dma); in qp_create()
450 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
451 vqp->mqp.qpn = MLX5_GET(create_qp_out, out, qpn); in qp_create()
453 if (!vqp->fw) in qp_create()
454 rx_post(vqp, mvq->num_ent); in qp_create()
459 if (!vqp->fw) in qp_create()
460 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
462 if (!vqp->fw) in qp_create()
473 MLX5_SET(destroy_qp_in, in, qpn, vqp->mqp.qpn); in qp_destroy()
474 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
475 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
476 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
477 if (!vqp->fw) { in qp_destroy()
478 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
485 return get_sw_cqe(cq, cq->mcq.cons_index); in next_cqe_sw()
494 return -EAGAIN; in mlx5_vdpa_poll_one()
496 vcq->mcq.cons_index++; in mlx5_vdpa_poll_one()
502 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions()
505 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
506 mlx5_cq_set_ci(&mvq->cq.mcq); in mlx5_vdpa_handle_completions()
512 rx_post(&mvq->vqqp, num); in mlx5_vdpa_handle_completions()
513 if (event_cb->callback) in mlx5_vdpa_handle_completions()
514 event_cb->callback(event_cb->private); in mlx5_vdpa_handle_completions()
520 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp()
521 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
524 while (!mlx5_vdpa_poll_one(&mvq->cq)) { in mlx5_vdpa_cq_comp()
526 if (num > mvq->num_ent / 2) { in mlx5_vdpa_cq_comp()
541 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in mlx5_vdpa_cq_comp()
546 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
547 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
548 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
550 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_create()
558 err = mlx5_db_alloc(mdev, &vcq->db); in cq_create()
562 vcq->mcq.set_ci_db = vcq->db.db; in cq_create()
563 vcq->mcq.arm_db = vcq->db.db + 1; in cq_create()
564 vcq->mcq.cqe_sz = 64; in cq_create()
566 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
570 cq_frag_buf_init(vcq, &vcq->buf); in cq_create()
573 MLX5_FLD_SZ_BYTES(create_cq_in, pas[0]) * vcq->buf.frag_buf.npages; in cq_create()
576 err = -ENOMEM; in cq_create()
580 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
582 mlx5_fill_page_frag_array(&vcq->buf.frag_buf, pas); in cq_create()
585 MLX5_SET(cqc, cqc, log_page_size, vcq->buf.frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in cq_create()
596 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
598 MLX5_SET64(cqc, cqc, dbr_addr, vcq->db.dma); in cq_create()
600 err = mlx5_core_create_cq(mdev, &vcq->mcq, in, inlen, out, sizeof(out)); in cq_create()
604 vcq->mcq.comp = mlx5_vdpa_cq_comp; in cq_create()
605 vcq->cqe = num_ent; in cq_create()
606 vcq->mcq.set_ci_db = vcq->db.db; in cq_create()
607 vcq->mcq.arm_db = vcq->db.db + 1; in cq_create()
608 mlx5_cq_arm(&mvq->cq.mcq, MLX5_CQ_DB_REQ_NOT, uar_page, mvq->cq.mcq.cons_index); in cq_create()
615 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
617 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
623 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
624 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
625 struct mlx5_vdpa_cq *vcq = &mvq->cq; in cq_destroy()
627 if (mlx5_core_destroy_cq(mdev, &vcq->mcq)) { in cq_destroy()
628 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
631 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
632 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
639 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
648 return -ENOMEM; in read_umem_params()
654 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
661 ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); in read_umem_params()
662 ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); in read_umem_params()
664 ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); in read_umem_params()
665 ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); in read_umem_params()
667 ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); in read_umem_params()
668 ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); in read_umem_params()
683 p_a = ndev->umem_1_buffer_param_a; in set_umem_size()
684 p_b = ndev->umem_1_buffer_param_b; in set_umem_size()
685 *umemp = &mvq->umem1; in set_umem_size()
688 p_a = ndev->umem_2_buffer_param_a; in set_umem_size()
689 p_b = ndev->umem_2_buffer_param_b; in set_umem_size()
690 *umemp = &mvq->umem2; in set_umem_size()
693 p_a = ndev->umem_3_buffer_param_a; in set_umem_size()
694 p_b = ndev->umem_3_buffer_param_b; in set_umem_size()
695 *umemp = &mvq->umem3; in set_umem_size()
699 (*umemp)->size = p_a * mvq->num_ent + p_b; in set_umem_size()
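
set_umem_size() computes each umem buffer as a linear function of the queue depth: size = param_a * num_ent + param_b, with a separate (a, b) pair per umem read from the virtio_emulation_cap capabilities in read_umem_params(). A tiny standalone example of that arithmetic; the parameter values below are invented purely for illustration.

#include <stdint.h>
#include <stdio.h>

/* Sketch: umem size is linear in the virtqueue depth, size = a * num_ent + b. */
static uint64_t umem_size(uint32_t param_a, uint32_t param_b, uint16_t num_ent)
{
	return (uint64_t)param_a * num_ent + param_b;
}

int main(void)
{
	uint32_t a = 128, b = 4096;   /* hypothetical umem_1 parameters */
	uint16_t depth = 256;         /* virtqueue size */

	printf("umem_1 bytes for depth %u: %llu\n", (unsigned)depth,
	       (unsigned long long)umem_size(a, b, depth));
	return 0;
}
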
704 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
718 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
722 inlen = MLX5_ST_SZ_BYTES(create_umem_in) + MLX5_ST_SZ_BYTES(mtt) * umem->frag_buf.npages; in create_umem()
726 err = -ENOMEM; in create_umem()
731 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
733 MLX5_SET(umem, um, log_page_size, umem->frag_buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT); in create_umem()
734 MLX5_SET64(umem, um, num_of_mtt, umem->frag_buf.npages); in create_umem()
737 mlx5_fill_page_frag_array_perm(&umem->frag_buf, pas, MLX5_MTT_PERM_RW); in create_umem()
739 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
741 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
746 umem->id = MLX5_GET(create_umem_out, out, umem_id); in create_umem()
765 umem = &mvq->umem1; in umem_destroy()
768 umem = &mvq->umem2; in umem_destroy()
771 umem = &mvq->umem3; in umem_destroy()
776 MLX5_SET(destroy_umem_in, in, umem_id, umem->id); in umem_destroy()
777 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
796 for (num--; num > 0; num--) in umems_create()
806 for (num = 3; num > 0; num--) in umems_destroy()
814 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
856 return MLX5_CAP_GEN_64(mvdev->mdev, general_obj_types) & in counters_supported()
862 return MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, event_mode) & in msix_mode_supported()
864 pci_msix_can_alloc_dyn(mvdev->mdev->pdev); in msix_mode_supported()
871 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_virtqueue()
887 err = -ENOMEM; in create_virtqueue()
891 mlx_features = get_features(ndev->mvdev.actual_features); in create_virtqueue()
896 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
899 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in create_virtqueue()
900 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in create_virtqueue()
908 if (vq_is_tx(mvq->index)) in create_virtqueue()
909 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
911 if (mvq->map.virq) { in create_virtqueue()
913 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->map.index); in create_virtqueue()
916 MLX5_SET(virtio_q, vq_ctx, event_qpn_or_msix, mvq->fwqp.mqp.qpn); in create_virtqueue()
919 MLX5_SET(virtio_q, vq_ctx, queue_index, mvq->index); in create_virtqueue()
920 MLX5_SET(virtio_q, vq_ctx, queue_size, mvq->num_ent); in create_virtqueue()
922 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
923 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in create_virtqueue()
924 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in create_virtqueue()
925 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in create_virtqueue()
926 vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in create_virtqueue()
928 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); in create_virtqueue()
930 vq_desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in create_virtqueue()
931 if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in create_virtqueue()
932 MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, vq_desc_mr->mkey); in create_virtqueue()
934 MLX5_SET(virtio_q, vq_ctx, umem_1_id, mvq->umem1.id); in create_virtqueue()
935 MLX5_SET(virtio_q, vq_ctx, umem_1_size, mvq->umem1.size); in create_virtqueue()
936 MLX5_SET(virtio_q, vq_ctx, umem_2_id, mvq->umem2.id); in create_virtqueue()
937 MLX5_SET(virtio_q, vq_ctx, umem_2_size, mvq->umem2.size); in create_virtqueue()
938 MLX5_SET(virtio_q, vq_ctx, umem_3_id, mvq->umem3.id); in create_virtqueue()
939 MLX5_SET(virtio_q, vq_ctx, umem_3_size, mvq->umem3.size); in create_virtqueue()
940 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
941 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
942 MLX5_SET(virtio_q, vq_ctx, counter_set_id, mvq->counter_set_id); in create_virtqueue()
944 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
948 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT; in create_virtqueue()
950 mvq->virtq_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in create_virtqueue()
953 mvq->vq_mr = vq_mr; in create_virtqueue()
955 if (vq_desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) { in create_virtqueue()
957 mvq->desc_mr = vq_desc_mr; in create_virtqueue()
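
In create_virtqueue() the mkeys written into the virtqueue context come from a two-step lookup: the virtqueue group (data VQs, or their descriptor area) maps to an ASID through group2asid[], and that ASID indexes the mr[] array holding the memory region for the address space. A standalone sketch of that indirection; the struct and constant names are simplified stand-ins for the driver's types, not its API.

#include <stdint.h>
#include <stddef.h>

/* Simplified stand-ins for the driver's groups and address-space count. */
enum { DATAVQ_GROUP, DATAVQ_DESC_GROUP, CVQ_GROUP, NUM_GROUPS };
#define NUM_AS 2

struct mr_stub { uint32_t mkey; };

struct vdpa_dev_stub {
	uint32_t group2asid[NUM_GROUPS];   /* group -> address space id */
	struct mr_stub *mr[NUM_AS];        /* asid  -> memory region    */
};

/* Sketch: resolve the mkey used for a virtqueue group, or 0 when that
 * address space has no mapping registered yet. */
static uint32_t mkey_for_group(const struct vdpa_dev_stub *dev, int group)
{
	const struct mr_stub *mr = dev->mr[dev->group2asid[group]];

	return mr ? mr->mkey : 0;
}
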
976 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.obj_id, mvq->virtq_id); in destroy_virtqueue()
977 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
980 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
981 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
984 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in destroy_virtqueue()
987 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); in destroy_virtqueue()
988 mvq->vq_mr = NULL; in destroy_virtqueue()
990 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); in destroy_virtqueue()
991 mvq->desc_mr = NULL; in destroy_virtqueue()
996 return fw ? mvq->vqqp.mqp.qpn : mvq->fwqp.mqp.qpn; in get_rqpn()
1001 return fw ? mvq->fwqp.mqp.qpn : mvq->vqqp.mqp.qpn; in get_qpn()
1020 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1032 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1049 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1067 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1109 return -ENOMEM; in modify_qp()
1111 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1165 return -ENOMEM; in query_virtqueue()
1171 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in query_virtqueue()
1172 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1173 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1179 attr->state = MLX5_GET(virtio_net_q_object, obj_context, state); in query_virtqueue()
1180 attr->available_index = MLX5_GET(virtio_net_q_object, obj_context, hw_available_index); in query_virtqueue()
1181 attr->used_index = MLX5_GET(virtio_net_q_object, obj_context, hw_used_index); in query_virtqueue()
1192 return ndev->mvdev.vdev.config->resume; in is_resumable()
1213 if (mvq->modified_fields & ~MLX5_VIRTQ_MODIFY_MASK_STATE) in modifiable_virtqueue_fields()
1214 return mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_INIT || in modifiable_virtqueue_fields()
1215 mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND; in modifiable_virtqueue_fields()
1226 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueue()
1236 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_NONE) in modify_virtqueue()
1240 return -EINVAL; in modify_virtqueue()
1244 return -ENOMEM; in modify_virtqueue()
1250 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->virtq_id); in modify_virtqueue()
1251 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1256 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_STATE) { in modify_virtqueue()
1257 if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { in modify_virtqueue()
1258 err = -EINVAL; in modify_virtqueue()
1266 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS) { in modify_virtqueue()
1267 MLX5_SET64(virtio_q, vq_ctx, desc_addr, mvq->desc_addr); in modify_virtqueue()
1268 MLX5_SET64(virtio_q, vq_ctx, used_addr, mvq->device_addr); in modify_virtqueue()
1269 MLX5_SET64(virtio_q, vq_ctx, available_addr, mvq->driver_addr); in modify_virtqueue()
1272 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX) in modify_virtqueue()
1273 MLX5_SET(virtio_net_q_object, obj_context, hw_available_index, mvq->avail_idx); in modify_virtqueue()
1275 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_USED_IDX) in modify_virtqueue()
1276 MLX5_SET(virtio_net_q_object, obj_context, hw_used_index, mvq->used_idx); in modify_virtqueue()
1278 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in modify_virtqueue()
1279 vq_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_GROUP]]; in modify_virtqueue()
1282 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, vq_mr->mkey); in modify_virtqueue()
1284 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY; in modify_virtqueue()
1287 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in modify_virtqueue()
1288 desc_mr = mvdev->mr[mvdev->group2asid[MLX5_VDPA_DATAVQ_DESC_GROUP]]; in modify_virtqueue()
1290 if (desc_mr && MLX5_CAP_DEV_VDPA_EMULATION(mvdev->mdev, desc_group_mkey_supported)) in modify_virtqueue()
1291 MLX5_SET(virtio_q, vq_ctx, desc_group_mkey, desc_mr->mkey); in modify_virtqueue()
1293 mvq->modified_fields &= ~MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY; in modify_virtqueue()
1296 MLX5_SET64(virtio_net_q_object, obj_context, modify_field_select, mvq->modified_fields); in modify_virtqueue()
1297 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1302 mvq->fw_state = state; in modify_virtqueue()
1304 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY) { in modify_virtqueue()
1305 mlx5_vdpa_put_mr(mvdev, mvq->vq_mr); in modify_virtqueue()
1307 mvq->vq_mr = vq_mr; in modify_virtqueue()
1310 if (mvq->modified_fields & MLX5_VIRTQ_MODIFY_MASK_DESC_GROUP_MKEY) { in modify_virtqueue()
1311 mlx5_vdpa_put_mr(mvdev, mvq->desc_mr); in modify_virtqueue()
1313 mvq->desc_mr = desc_mr; in modify_virtqueue()
1316 mvq->modified_fields = 0; in modify_virtqueue()
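
modify_virtqueue() batches changes: callers set bits in mvq->modified_fields, only the flagged attributes are copied into the command, the accumulated mask goes into modify_field_select, and the mask is cleared once firmware accepts the command. A minimal standalone sketch of that accumulate-then-flush pattern; the bit names and the apply callback are illustrative, not the driver's.

#include <stdint.h>

/* Illustrative modify-mask bits, not the driver's values. */
#define MODIFY_STATE      (1ULL << 0)
#define MODIFY_ADDRS      (1ULL << 1)
#define MODIFY_AVAIL_IDX  (1ULL << 2)

struct vq_stub {
	uint64_t modified_fields;   /* accumulated between flushes */
	int      state;
	uint64_t desc_addr;
};

/* Callers record what changed... */
static void vq_set_state(struct vq_stub *vq, int state)
{
	vq->state = state;
	vq->modified_fields |= MODIFY_STATE;
}

/* ...and one flush pushes exactly the flagged fields, then clears the mask
 * on success so the next modify starts clean. */
static int vq_flush(struct vq_stub *vq,
		    int (*apply)(uint64_t field_select, const struct vq_stub *vq))
{
	int err = apply(vq->modified_fields, vq);

	if (!err)
		vq->modified_fields = 0;
	return err;
}
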
1327 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_STATE; in modify_virtqueue_state()
1338 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1345 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1347 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1351 mvq->counter_set_id = MLX5_GET(general_obj_out_cmd_hdr, out, obj_id); in counter_set_alloc()
1361 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1365 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.obj_id, mvq->counter_set_id); in counter_set_dealloc()
1366 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1368 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1369 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1376 if (cb->callback) in mlx5_vdpa_int_handler()
1377 return cb->callback(cb->private); in mlx5_vdpa_int_handler()
1385 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in alloc_vector()
1390 for (i = 0; i < irqp->num_ent; i++) { in alloc_vector()
1391 ent = &irqp->entries[i]; in alloc_vector()
1392 if (!ent->used) { in alloc_vector()
1393 snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d", in alloc_vector()
1394 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1395 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1396 err = request_irq(ent->map.virq, mlx5_vdpa_int_handler, 0, in alloc_vector()
1397 ent->name, ent->dev_id); in alloc_vector()
1401 ent->used = true; in alloc_vector()
1402 mvq->map = ent->map; in alloc_vector()
1411 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in dealloc_vector()
1414 for (i = 0; i < irqp->num_ent; i++) in dealloc_vector()
1415 if (mvq->map.virq == irqp->entries[i].map.virq) { in dealloc_vector()
1416 free_irq(mvq->map.virq, irqp->entries[i].dev_id); in dealloc_vector()
1417 irqp->entries[i].used = false; in dealloc_vector()
1424 u16 idx = mvq->index; in setup_vq()
1427 if (!mvq->num_ent) in setup_vq()
1430 if (mvq->initialized) in setup_vq()
1433 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1437 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1441 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1458 if (mvq->ready) { in setup_vq()
1461 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1467 mvq->initialized = true; in setup_vq()
1476 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1478 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1488 if (!mvq->initialized) in suspend_vq()
1491 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in suspend_vq()
1495 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1498 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1501 mvq->avail_idx = attr.available_index; in suspend_vq()
1502 mvq->used_idx = attr.used_index; in suspend_vq()
1509 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1510 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1515 if (!mvq->initialized || !is_resumable(ndev)) in resume_vq()
1518 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND) in resume_vq()
1522 mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index); in resume_vq()
1527 for (int i = 0; i < ndev->mvdev.max_vqs; i++) in resume_vqs()
1528 resume_vq(ndev, &ndev->vqs[i]); in resume_vqs()
1533 if (!mvq->initialized) in teardown_vq()
1537 mvq->modified_fields = 0; in teardown_vq()
1541 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1542 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1543 cq_destroy(ndev, mvq->index); in teardown_vq()
1544 mvq->initialized = false; in teardown_vq()
1549 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1550 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1561 return -ENOMEM; in create_rqt()
1563 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1570 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1573 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1596 return -ENOMEM; in modify_rqt()
1598 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1600 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx); in modify_rqt()
1605 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1608 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1618 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1639 return -ENOMEM; in create_tir()
1641 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1642 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx); in create_tir()
1655 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1656 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1658 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1670 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1690 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1691 if (IS_ERR(node->ucast_counter.counter)) in add_steering_counters()
1692 return PTR_ERR(node->ucast_counter.counter); in add_steering_counters()
1694 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1695 if (IS_ERR(node->mcast_counter.counter)) { in add_steering_counters()
1696 err = PTR_ERR(node->mcast_counter.counter); in add_steering_counters()
1701 flow_act->action |= MLX5_FLOW_CONTEXT_ACTION_COUNT; in add_steering_counters()
1705 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1716 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1717 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1736 return -ENOMEM; in mlx5_vdpa_add_mac_vlan_rules()
1738 vid = key2vid(node->macvlan); in mlx5_vdpa_add_mac_vlan_rules()
1739 spec->match_criteria_enable = MLX5_MATCH_OUTER_HEADERS; in mlx5_vdpa_add_mac_vlan_rules()
1740 headers_c = MLX5_ADDR_OF(fte_match_param, spec->match_criteria, outer_headers); in mlx5_vdpa_add_mac_vlan_rules()
1741 headers_v = MLX5_ADDR_OF(fte_match_param, spec->match_value, outer_headers); in mlx5_vdpa_add_mac_vlan_rules()
1746 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1750 if (node->tagged) { in mlx5_vdpa_add_mac_vlan_rules()
1756 dests[0].tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1762 dests[1].counter_id = mlx5_fc_id(node->ucast_counter.counter); in mlx5_vdpa_add_mac_vlan_rules()
1764 node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1765 if (IS_ERR(node->ucast_rule)) { in mlx5_vdpa_add_mac_vlan_rules()
1766 err = PTR_ERR(node->ucast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1771 dests[1].counter_id = mlx5_fc_id(node->mcast_counter.counter); in mlx5_vdpa_add_mac_vlan_rules()
1778 node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1779 if (IS_ERR(node->mcast_rule)) { in mlx5_vdpa_add_mac_vlan_rules()
1780 err = PTR_ERR(node->mcast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1788 mlx5_del_flow_rules(node->ucast_rule); in mlx5_vdpa_add_mac_vlan_rules()
1800 mlx5_del_flow_rules(node->ucast_rule); in mlx5_vdpa_del_mac_vlan_rules()
1801 mlx5_del_flow_rules(node->mcast_rule); in mlx5_vdpa_del_mac_vlan_rules()
1828 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
1829 if (pos->macvlan == value) in mac_vlan_lookup()
1844 return -EEXIST; in mac_vlan_add()
1848 return -ENOMEM; in mac_vlan_add()
1850 ptr->tagged = tagged; in mac_vlan_add()
1851 ptr->macvlan = val; in mac_vlan_add()
1852 ptr->ndev = ndev; in mac_vlan_add()
1853 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr); in mac_vlan_add()
1858 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
1874 hlist_del(&ptr->hlist); in mac_vlan_del()
1887 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
1888 hlist_del(&pos->hlist); in clear_mac_vlan_table()
1905 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1907 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1908 return -EOPNOTSUPP; in setup_steering()
1911 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
1912 if (IS_ERR(ndev->rxft)) { in setup_steering()
1913 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1914 return PTR_ERR(ndev->rxft); in setup_steering()
1918 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
1926 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
1934 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
1940 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mac()
1946 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in handle_ctrl_mac()
1949 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)mac, ETH_ALEN); in handle_ctrl_mac()
1953 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
1961 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
1962 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1964 ndev->config.mac); in handle_ctrl_mac()
1978 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
1980 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
1986 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
1998 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
2000 ndev->config.mac); in handle_ctrl_mac()
2008 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
2010 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
2029 int cur_qps = ndev->cur_num_vqs / 2; in change_num_qps()
2038 for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) in change_num_qps()
2039 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2041 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
2043 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
2045 err = setup_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2056 for (--i; i >= 2 * cur_qps; --i) in change_num_qps()
2057 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2059 ndev->cur_num_vqs = 2 * cur_qps; in change_num_qps()
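
change_num_qps() reasons in virtqueue pairs: counts are compared as 2 * qps, surplus queues are torn down from the top when shrinking, and new ones are brought up when growing, with a rollback to 2 * cur_qps if any setup fails. A small standalone sketch of that grow/shrink bookkeeping; the RQT update done by the real function is omitted and the setup/teardown callbacks are simplified assumptions.

/* Sketch: scale a vq array between pair counts, rolling back on failure.
 * setup()/teardown() stand in for setup_vq()/teardown_vq(). */
static int change_num_qps_sketch(int *cur_num_vqs, int newqps,
				 int (*setup)(int idx), void (*teardown)(int idx))
{
	int cur_qps = *cur_num_vqs / 2;
	int i, err;

	if (cur_qps > newqps) {
		/* Shrink: drop queues from the top down to 2 * newqps. */
		for (i = *cur_num_vqs - 1; i >= 2 * newqps; i--)
			teardown(i);
		*cur_num_vqs = 2 * newqps;
	} else {
		/* Grow: bring up the missing queues, undo them on failure. */
		*cur_num_vqs = 2 * newqps;
		for (i = 2 * cur_qps; i < 2 * newqps; i++) {
			err = setup(i);
			if (err)
				goto clean_up;
		}
	}
	return 0;

clean_up:
	for (--i; i >= 2 * cur_qps; --i)
		teardown(i);
	*cur_num_vqs = 2 * cur_qps;
	return err;
}
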
2068 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_mq()
2075 /* This mq feature check aligns with pre-existing userspace in handle_ctrl_mq()
2079 * request down to a non-mq device that may cause kernel to in handle_ctrl_mq()
2082 * changing the number of vqs on a non-mq device. in handle_ctrl_mq()
2087 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, (void *)&mq, sizeof(mq)); in handle_ctrl_mq()
2093 newqps > ndev->rqt_size) in handle_ctrl_mq()
2096 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
2116 struct mlx5_control_vq *cvq = &mvdev->cvq; in handle_ctrl_vlan()
2121 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
2126 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2131 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
2137 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &vlan, sizeof(vlan)); in handle_ctrl_vlan()
2142 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
2164 mvdev = wqent->mvdev; in mlx5_cvq_kick_handler()
2166 cvq = &mvdev->cvq; in mlx5_cvq_kick_handler()
2168 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2170 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_cvq_kick_handler()
2173 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2176 if (!cvq->ready) in mlx5_cvq_kick_handler()
2180 err = vringh_getdesc_iotlb(&cvq->vring, &cvq->riov, &cvq->wiov, &cvq->head, in mlx5_cvq_kick_handler()
2185 read = vringh_iov_pull_iotlb(&cvq->vring, &cvq->riov, &ctrl, sizeof(ctrl)); in mlx5_cvq_kick_handler()
2189 cvq->received_desc++; in mlx5_cvq_kick_handler()
2207 write = vringh_iov_push_iotlb(&cvq->vring, &cvq->wiov, &status, sizeof(status)); in mlx5_cvq_kick_handler()
2208 vringh_complete_iotlb(&cvq->vring, cvq->head, write); in mlx5_cvq_kick_handler()
2209 vringh_kiov_cleanup(&cvq->riov); in mlx5_cvq_kick_handler()
2210 vringh_kiov_cleanup(&cvq->wiov); in mlx5_cvq_kick_handler()
2212 if (vringh_need_notify_iotlb(&cvq->vring)) in mlx5_cvq_kick_handler()
2213 vringh_notify(&cvq->vring); in mlx5_cvq_kick_handler()
2215 cvq->completed_desc++; in mlx5_cvq_kick_handler()
2216 queue_work(mvdev->wq, &wqent->work); in mlx5_cvq_kick_handler()
2221 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2234 if (!mvdev->wq || !mvdev->cvq.ready) in mlx5_vdpa_kick_vq()
2237 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2241 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2242 if (unlikely(!mvq->ready)) in mlx5_vdpa_kick_vq()
2245 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2256 return -EINVAL; in mlx5_vdpa_set_vq_address()
2259 mvdev->cvq.desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2260 mvdev->cvq.device_addr = device_area; in mlx5_vdpa_set_vq_address()
2261 mvdev->cvq.driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2265 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2266 mvq->desc_addr = desc_area; in mlx5_vdpa_set_vq_address()
2267 mvq->device_addr = device_area; in mlx5_vdpa_set_vq_address()
2268 mvq->driver_addr = driver_area; in mlx5_vdpa_set_vq_address()
2269 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_ADDRS; in mlx5_vdpa_set_vq_address()
2282 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2283 mvq->num_ent = num; in mlx5_vdpa_set_vq_num()
2291 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2293 mvdev->cvq.event_cb = *cb; in mlx5_vdpa_set_vq_cb()
2300 if (!cvq->event_cb.callback) in mlx5_cvq_notify()
2303 cvq->event_cb.callback(cvq->event_cb.private); in mlx5_cvq_notify()
2308 struct mlx5_control_vq *cvq = &mvdev->cvq; in set_cvq_ready()
2310 cvq->ready = ready; in set_cvq_ready()
2314 cvq->vring.notify = mlx5_cvq_notify; in set_cvq_ready()
2324 if (!mvdev->actual_features) in mlx5_vdpa_set_vq_ready()
2335 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2347 mvq->ready = ready; in mlx5_vdpa_set_vq_ready()
2359 return mvdev->cvq.ready; in mlx5_vdpa_get_vq_ready()
2361 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2372 return -EINVAL; in mlx5_vdpa_set_vq_state()
2375 mvdev->cvq.vring.last_avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2379 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2380 if (mvq->fw_state == MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) { in mlx5_vdpa_set_vq_state()
2382 return -EINVAL; in mlx5_vdpa_set_vq_state()
2385 mvq->used_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2386 mvq->avail_idx = state->split.avail_index; in mlx5_vdpa_set_vq_state()
2387 mvq->modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_AVAIL_IDX | in mlx5_vdpa_set_vq_state()
2401 return -EINVAL; in mlx5_vdpa_get_vq_state()
2404 state->split.avail_index = mvdev->cvq.vring.last_avail_idx; in mlx5_vdpa_get_vq_state()
2408 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2413 if (!mvq->initialized) { in mlx5_vdpa_get_vq_state()
2418 state->split.avail_index = mvq->used_idx; in mlx5_vdpa_get_vq_state()
2427 state->split.avail_index = attr.used_index; in mlx5_vdpa_get_vq_state()
2508 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2509 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2516 return -EOPNOTSUPP; in verify_driver_features()
2529 return -EINVAL; in verify_driver_features()
2540 for (i = 0; i < mvdev->max_vqs; i++) { in setup_virtqueues()
2541 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2549 for (--i; i >= 0; i--) in setup_virtqueues()
2550 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2560 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2561 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2562 if (!mvq->initialized) in teardown_virtqueues()
2574 mvdev->max_idx = mvdev->max_vqs; in update_cvq_info()
2579 mvdev->max_idx = 2; in update_cvq_info()
2583 mvdev->max_idx = 1; in update_cvq_info()
2608 if (query_vport_state(mvdev->mdev, MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT, 0) == in get_link_state()
2622 mvdev = wqent->mvdev; in update_carrier()
2625 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2627 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2629 if (ndev->config_cb.callback) in update_carrier()
2630 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
2641 return -ENOMEM; in queue_link_work()
2643 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2644 INIT_WORK(&wqent->work, update_carrier); in queue_link_work()
2645 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2656 switch (eqe->sub_type) { in event_handler()
2674 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2677 ndev->nb.notifier_call = event_handler; in register_link_notifier()
2678 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2679 ndev->nb_registered = true; in register_link_notifier()
2685 if (!ndev->nb_registered) in unregister_link_notifier()
2688 ndev->nb_registered = false; in unregister_link_notifier()
2689 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2690 if (ndev->mvdev.wq) in unregister_link_notifier()
2691 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2711 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2712 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2713 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2715 ndev->rqt_size = 1; in mlx5_vdpa_set_driver_features()
2725 ndev->cur_num_vqs = 2; in mlx5_vdpa_set_driver_features()
2736 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2760 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2761 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2766 struct mlx5_vq_restore_info *ri = &mvq->ri; in save_channel_info()
2770 if (mvq->initialized) { in save_channel_info()
2776 ri->avail_index = attr.available_index; in save_channel_info()
2777 ri->used_index = attr.used_index; in save_channel_info()
2778 ri->ready = mvq->ready; in save_channel_info()
2779 ri->num_ent = mvq->num_ent; in save_channel_info()
2780 ri->desc_addr = mvq->desc_addr; in save_channel_info()
2781 ri->device_addr = mvq->device_addr; in save_channel_info()
2782 ri->driver_addr = mvq->driver_addr; in save_channel_info()
2783 ri->map = mvq->map; in save_channel_info()
2784 ri->restore = true; in save_channel_info()
2792 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2793 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
2794 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
2803 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2804 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
2815 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2816 mvq = &ndev->vqs[i]; in restore_channels_info()
2817 ri = &mvq->ri; in restore_channels_info()
2818 if (!ri->restore) in restore_channels_info()
2821 mvq->avail_idx = ri->avail_index; in restore_channels_info()
2822 mvq->used_idx = ri->used_index; in restore_channels_info()
2823 mvq->ready = ri->ready; in restore_channels_info()
2824 mvq->num_ent = ri->num_ent; in restore_channels_info()
2825 mvq->desc_addr = ri->desc_addr; in restore_channels_info()
2826 mvq->device_addr = ri->device_addr; in restore_channels_info()
2827 mvq->driver_addr = ri->driver_addr; in restore_channels_info()
2828 mvq->map = ri->map; in restore_channels_info()
2834 unsigned int asid) in mlx5_vdpa_change_map() argument
2849 mlx5_vdpa_update_mr(mvdev, new_mr, asid); in mlx5_vdpa_change_map()
2851 for (int i = 0; i < ndev->cur_num_vqs; i++) in mlx5_vdpa_change_map()
2852 ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY | in mlx5_vdpa_change_map()
2855 if (!(mvdev->status & VIRTIO_CONFIG_S_DRIVER_OK) || mvdev->suspended) in mlx5_vdpa_change_map()
2876 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_driver()
2878 if (ndev->setup) { in setup_driver()
2912 ndev->setup = true; in setup_driver()
2932 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_driver()
2934 if (!ndev->setup) in teardown_driver()
2942 ndev->setup = false; in teardown_driver()
2949 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in clear_vqs_ready()
2950 ndev->vqs[i].ready = false; in clear_vqs_ready()
2951 ndev->vqs[i].modified_fields = 0; in clear_vqs_ready()
2954 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2959 struct mlx5_control_vq *cvq = &mvdev->cvq; in setup_cvq_vring()
2962 if (mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ)) { in setup_cvq_vring()
2963 u16 idx = cvq->vring.last_avail_idx; in setup_cvq_vring()
2965 err = vringh_init_iotlb(&cvq->vring, mvdev->actual_features, in setup_cvq_vring()
2967 (struct vring_desc *)(uintptr_t)cvq->desc_addr, in setup_cvq_vring()
2968 (struct vring_avail *)(uintptr_t)cvq->driver_addr, in setup_cvq_vring()
2969 (struct vring_used *)(uintptr_t)cvq->device_addr); in setup_cvq_vring()
2972 cvq->vring.last_avail_idx = cvq->vring.last_used_idx = idx; in setup_cvq_vring()
2985 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
2987 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
3006 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
3007 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3013 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_set_status()
3014 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
3016 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3023 /* default mapping all groups are mapped to asid 0 */ in init_group_to_asid_map()
3025 mvdev->group2asid[i] = 0; in init_group_to_asid_map()
3036 down_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3041 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_compat_reset()
3042 ndev->mvdev.status = 0; in mlx5_vdpa_compat_reset()
3043 ndev->mvdev.suspended = false; in mlx5_vdpa_compat_reset()
3044 ndev->cur_num_vqs = 0; in mlx5_vdpa_compat_reset()
3045 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_compat_reset()
3046 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_compat_reset()
3047 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_compat_reset()
3048 ndev->mvdev.actual_features = 0; in mlx5_vdpa_compat_reset()
3050 ++mvdev->generation; in mlx5_vdpa_compat_reset()
3053 MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_compat_reset()
3057 up_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3079 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
3092 return mvdev->generation; in mlx5_vdpa_get_generation()
3096 unsigned int asid) in set_map_data() argument
3101 if (asid >= MLX5_VDPA_NUM_AS) in set_map_data()
3102 return -EINVAL; in set_map_data()
3116 if (!mvdev->mr[asid]) { in set_map_data()
3117 mlx5_vdpa_update_mr(mvdev, new_mr, asid); in set_map_data()
3119 err = mlx5_vdpa_change_map(mvdev, new_mr, asid); in set_map_data()
3126 return mlx5_vdpa_update_cvq_iotlb(mvdev, iotlb, asid); in set_map_data()
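
set_map_data() shows the ASID-scoped mapping update: a region is built for the incoming iotlb, and if the target ASID has no region yet it is installed directly, otherwise the heavier mlx5_vdpa_change_map() path migrates live virtqueues to the new mkey; the control VQ iotlb is refreshed separately for its own ASID. A minimal standalone sketch of that branch, with hypothetical stub helpers in place of the driver's mlx5_vdpa_update_mr()/mlx5_vdpa_change_map().

#include <errno.h>

#define NUM_AS 2   /* stand-in for MLX5_VDPA_NUM_AS */

struct mr_stub;    /* opaque stand-in for a per-ASID memory region */

struct dev_stub {
	struct mr_stub *mr[NUM_AS];   /* one slot per address space */
};

/* Hypothetical placeholder for mlx5_vdpa_update_mr(). */
static int stub_update_mr(struct dev_stub *dev, struct mr_stub *mr, unsigned int asid)
{
	dev->mr[asid] = mr;
	return 0;
}

/* Hypothetical placeholder for mlx5_vdpa_change_map(). */
static int stub_change_map(struct dev_stub *dev, struct mr_stub *mr, unsigned int asid)
{
	dev->mr[asid] = mr;   /* real path also re-programs live virtqueues */
	return 0;
}

/* Sketch of the branch in set_map_data(): first mapping for an ASID is a
 * plain install; replacing a live one takes the change-map path. */
static int install_mr(struct dev_stub *dev, unsigned int asid, struct mr_stub *new_mr)
{
	if (asid >= NUM_AS)
		return -EINVAL;

	if (!dev->mr[asid])
		return stub_update_mr(dev, new_mr, asid);

	return stub_change_map(dev, new_mr, asid);
}
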
3133 static int mlx5_vdpa_set_map(struct vdpa_device *vdev, unsigned int asid, in mlx5_vdpa_set_map() argument
3138 int err = -EINVAL; in mlx5_vdpa_set_map()
3140 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
3141 err = set_map_data(mvdev, iotlb, asid); in mlx5_vdpa_set_map()
3142 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
3146 static int mlx5_vdpa_reset_map(struct vdpa_device *vdev, unsigned int asid) in mlx5_vdpa_reset_map() argument
3152 down_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3153 err = mlx5_vdpa_reset_mr(mvdev, asid); in mlx5_vdpa_reset_map()
3154 up_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3163 return &vdev->dev; in mlx5_get_vq_dma_dev()
3165 return mvdev->vdev.dma_dev; in mlx5_get_vq_dma_dev()
3173 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
3176 if (!ndev->irqp.entries) in free_irqs()
3179 for (i = ndev->irqp.num_ent - 1; i >= 0; i--) { in free_irqs()
3180 ent = ndev->irqp.entries + i; in free_irqs()
3181 if (ent->map.virq) in free_irqs()
3182 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
3184 kfree(ndev->irqp.entries); in free_irqs()
3197 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
3198 pfmdev = pci_get_drvdata(pci_physfn(mvdev->mdev->pdev)); in mlx5_vdpa_free()
3199 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
3201 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3203 kfree(ndev->event_cbs); in mlx5_vdpa_free()
3204 kfree(ndev->vqs); in mlx5_vdpa_free()
3221 if (MLX5_CAP_GEN(mvdev->mdev, log_min_sf_size) + 12 < PAGE_SHIFT) in mlx5_get_vq_notification()
3225 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3238 return -EINVAL; in mlx5_get_vq_irq()
3241 return -EOPNOTSUPP; in mlx5_get_vq_irq()
3243 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3244 if (!mvq->map.virq) in mlx5_get_vq_irq()
3245 return -EOPNOTSUPP; in mlx5_get_vq_irq()
3247 return mvq->map.virq; in mlx5_get_vq_irq()
3254 return mvdev->actual_features; in mlx5_vdpa_get_driver_features()
3263 void *ctx; in counter_set_query() local
3266 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3267 return -EOPNOTSUPP; in counter_set_query()
3269 if (mvq->fw_state != MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY) in counter_set_query()
3270 return -EAGAIN; in counter_set_query()
3276 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3277 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, obj_id, mvq->counter_set_id); in counter_set_query()
3279 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3283 ctx = MLX5_ADDR_OF(query_virtio_q_counters_out, out, counters); in counter_set_query()
3284 *received_desc = MLX5_GET64(virtio_q_counters, ctx, received_desc); in counter_set_query()
3285 *completed_desc = MLX5_GET64(virtio_q_counters, ctx, completed_desc); in counter_set_query()
3301 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3304 err = -EINVAL; in mlx5_vdpa_get_vendor_vq_stats()
3309 cvq = &mvdev->cvq; in mlx5_vdpa_get_vendor_vq_stats()
3310 received_desc = cvq->received_desc; in mlx5_vdpa_get_vendor_vq_stats()
3311 completed_desc = cvq->completed_desc; in mlx5_vdpa_get_vendor_vq_stats()
3315 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3323 err = -EMSGSIZE; in mlx5_vdpa_get_vendor_vq_stats()
3340 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3348 if (!(mvdev->actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_vdpa_cvq_suspend()
3351 cvq = &mvdev->cvq; in mlx5_vdpa_cvq_suspend()
3352 cvq->ready = false; in mlx5_vdpa_cvq_suspend()
3364 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
3366 for (i = 0; i < ndev->cur_num_vqs; i++) { in mlx5_vdpa_suspend()
3367 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
3371 mvdev->suspended = true; in mlx5_vdpa_suspend()
3372 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
3385 down_write(&ndev->reslock); in mlx5_vdpa_resume()
3386 mvdev->suspended = false; in mlx5_vdpa_resume()
3389 up_write(&ndev->reslock); in mlx5_vdpa_resume()
3394 unsigned int asid) in mlx5_set_group_asid() argument
3400 return -EINVAL; in mlx5_set_group_asid()
3402 mvdev->group2asid[group] = asid; in mlx5_set_group_asid()
3404 mutex_lock(&mvdev->mr_mtx); in mlx5_set_group_asid()
3405 if (group == MLX5_VDPA_CVQ_GROUP && mvdev->mr[asid]) in mlx5_set_group_asid()
3406 err = mlx5_vdpa_update_cvq_iotlb(mvdev, mvdev->mr[asid]->iotlb, asid); in mlx5_set_group_asid()
3407 mutex_unlock(&mvdev->mr_mtx); in mlx5_set_group_asid()
3461 *mtu = hw_mtu - MLX5V_ETH_HARD_MTU; in query_mtu()
3467 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
3470 if (res->valid) { in alloc_resources()
3471 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
3472 return -EEXIST; in alloc_resources()
3475 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
3483 res->valid = true; in alloc_resources()
3488 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
3494 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
3496 if (!res->valid) in free_resources()
3500 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
3501 res->valid = false; in free_resources()
3509 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
3510 mvq = &ndev->vqs[i]; in init_mvqs()
3512 mvq->index = i; in init_mvqs()
3513 mvq->ndev = ndev; in init_mvqs()
3514 mvq->fwqp.fw = true; in init_mvqs()
3515 mvq->fw_state = MLX5_VIRTIO_NET_Q_OBJECT_NONE; in init_mvqs()
3517 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
3518 mvq = &ndev->vqs[i]; in init_mvqs()
3520 mvq->index = i; in init_mvqs()
3521 mvq->ndev = ndev; in init_mvqs()
3540 return -ENOMEM; in config_func_mtu()
3559 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3562 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3565 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3566 if (!ndev->irqp.entries) in allocate_irqs()
3570 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3571 ent = ndev->irqp.entries + i; in allocate_irqs()
3572 snprintf(ent->name, MLX5_VDPA_IRQ_NAME_LEN, "%s-vq-%d", in allocate_irqs()
3573 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3574 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3575 if (!ent->map.virq) in allocate_irqs()
3578 ndev->irqp.num_ent++; in allocate_irqs()
3596 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3597 return -ENOSPC; in mlx5_vdpa_dev_add()
3599 mdev = mgtdev->madev->mdev; in mlx5_vdpa_dev_add()
3600 device_features = mgtdev->mgtdev.supported_features; in mlx5_vdpa_dev_add()
3601 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) { in mlx5_vdpa_dev_add()
3602 if (add_config->device_features & ~device_features) { in mlx5_vdpa_dev_add()
3603 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3605 add_config->device_features, device_features); in mlx5_vdpa_dev_add()
3606 return -EINVAL; in mlx5_vdpa_dev_add()
3608 device_features &= add_config->device_features; in mlx5_vdpa_dev_add()
3614 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3617 return -EOPNOTSUPP; in mlx5_vdpa_dev_add()
3622 dev_warn(mdev->device, "missing support for split virtqueues\n"); in mlx5_vdpa_dev_add()
3623 return -EOPNOTSUPP; in mlx5_vdpa_dev_add()
3629 dev_warn(mdev->device, in mlx5_vdpa_dev_add()
3632 return -EAGAIN; in mlx5_vdpa_dev_add()
3635 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MAX_VQP)) { in mlx5_vdpa_dev_add()
3636 if (add_config->net.max_vq_pairs > max_vqs / 2) in mlx5_vdpa_dev_add()
3637 return -EINVAL; in mlx5_vdpa_dev_add()
3638 max_vqs = min_t(u32, max_vqs, 2 * add_config->net.max_vq_pairs); in mlx5_vdpa_dev_add()
3643 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops, in mlx5_vdpa_dev_add()
3648 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3649 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3650 mvdev->mdev = mdev; in mlx5_vdpa_dev_add()
3652 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3653 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3654 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3655 err = -ENOMEM; in mlx5_vdpa_dev_add()
3661 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3662 config = &ndev->config; in mlx5_vdpa_dev_add()
3664 if (add_config->mask & BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MTU)) { in mlx5_vdpa_dev_add()
3665 err = config_func_mtu(mdev, add_config->net.mtu); in mlx5_vdpa_dev_add()
3675 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3680 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3682 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3685 if (add_config->mask & (1 << VDPA_ATTR_DEV_NET_CFG_MACADDR)) { in mlx5_vdpa_dev_add()
3686 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3688 } else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0 || in mlx5_vdpa_dev_add()
3690 err = mlx5_query_nic_vport_mac_address(mdev, 0, 0, config->mac); in mlx5_vdpa_dev_add()
3695 if (!is_zero_ether_addr(config->mac)) { in mlx5_vdpa_dev_add()
3696 pfmdev = pci_get_drvdata(pci_physfn(mdev->pdev)); in mlx5_vdpa_dev_add()
3697 err = mlx5_mpfs_add_mac(pfmdev, config->mac); in mlx5_vdpa_dev_add()
3700 } else if ((add_config->mask & BIT_ULL(VDPA_ATTR_DEV_FEATURES)) == 0) { in mlx5_vdpa_dev_add()
3710 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3712 err = -EINVAL; in mlx5_vdpa_dev_add()
3717 config->max_virtqueue_pairs = cpu_to_mlx5vdpa16(mvdev, max_vqs / 2); in mlx5_vdpa_dev_add()
3719 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3720 mvdev->vdev.dma_dev = &mdev->pdev->dev; in mlx5_vdpa_dev_add()
3721 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3725 INIT_LIST_HEAD(&mvdev->mr_list_head); in mlx5_vdpa_dev_add()
3727 if (MLX5_CAP_GEN(mvdev->mdev, umem_uid_0)) { in mlx5_vdpa_dev_add()
3737 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3738 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3739 mvdev->wq = create_singlethread_workqueue("mlx5_vdpa_wq"); in mlx5_vdpa_dev_add()
3740 if (!mvdev->wq) { in mlx5_vdpa_dev_add()
3741 err = -ENOMEM; in mlx5_vdpa_dev_add()
3745 mvdev->vdev.mdev = &mgtdev->mgtdev; in mlx5_vdpa_dev_add()
3746 err = _vdpa_register_device(&mvdev->vdev, max_vqs + 1); in mlx5_vdpa_dev_add()
3750 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
3754 destroy_workqueue(mvdev->wq); in mlx5_vdpa_dev_add()
3760 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3762 if (!is_zero_ether_addr(config->mac)) in mlx5_vdpa_dev_add()
3763 mlx5_mpfs_del_mac(pfmdev, config->mac); in mlx5_vdpa_dev_add()
3765 put_device(&mvdev->vdev.dev); in mlx5_vdpa_dev_add()
3778 wq = mvdev->wq; in mlx5_vdpa_dev_del()
3779 mvdev->wq = NULL; in mlx5_vdpa_dev_del()
3781 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()
3799 struct mlx5_core_dev *mdev = madev->mdev; in mlx5v_probe()
3805 return -ENOMEM; in mlx5v_probe()
3807 mgtdev->mgtdev.ops = &mdev_ops; in mlx5v_probe()
3808 mgtdev->mgtdev.device = mdev->device; in mlx5v_probe()
3809 mgtdev->mgtdev.id_table = id_table; in mlx5v_probe()
3810 mgtdev->mgtdev.config_attr_mask = BIT_ULL(VDPA_ATTR_DEV_NET_CFG_MACADDR) | in mlx5v_probe()
3814 mgtdev->mgtdev.max_supported_vqs = in mlx5v_probe()
3816 mgtdev->mgtdev.supported_features = get_supported_features(mdev); in mlx5v_probe()
3817 mgtdev->madev = madev; in mlx5v_probe()
3818 mgtdev->vdpa_ops = mlx5_vdpa_ops; in mlx5v_probe()
3821 mgtdev->vdpa_ops.get_vq_desc_group = NULL; in mlx5v_probe()
3824 mgtdev->vdpa_ops.resume = NULL; in mlx5v_probe()
3826 err = vdpa_mgmtdev_register(&mgtdev->mgtdev); in mlx5v_probe()
3844 vdpa_mgmtdev_unregister(&mgtdev->mgtdev); in mlx5v_remove()