Lines Matching full:ndev
112 struct mlx5_vdpa_net *ndev; member
143 static void free_resources(struct mlx5_vdpa_net *ndev);
144 static void init_mvqs(struct mlx5_vdpa_net *ndev);
145 static int setup_driver(struct mlx5_vdpa_net *ndev);
146 static void teardown_driver(struct mlx5_vdpa_net *ndev);
234 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
236 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
242 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
243 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
250 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
252 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
258 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
265 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
266 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
278 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
282 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
283 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
286 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
288 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
326 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
334 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
348 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
350 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
360 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
362 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
364 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
367 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
369 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
372 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
375 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
384 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
388 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
400 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
404 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
414 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
424 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
427 rq_buf_free(ndev, vqp); in qp_create()
432 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
438 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
439 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
440 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
442 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
443 rq_buf_free(ndev, vqp); in qp_destroy()
475 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
476 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
499 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
501 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
502 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
503 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
522 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
536 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
552 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
571 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
573 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
577 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
579 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
580 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
584 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
587 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
588 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
591 static int umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in umem_size() argument
594 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in umem_size()
618 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
620 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
623 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
634 size = umem_size(ndev, mvq, num, &umem); in create_umem()
639 err = umem_frag_buf_alloc(ndev, umem, size); in create_umem()
652 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
660 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
662 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
674 umem_frag_buf_free(ndev, umem); in create_umem()
678 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
698 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
701 umem_frag_buf_free(ndev, umem); in umem_destroy()
704 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
710 err = create_umem(ndev, mvq, num); in umems_create()
718 umem_destroy(ndev, mvq, num); in umems_create()
723 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
728 umem_destroy(ndev, mvq, num); in umems_destroy()
731 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
735 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
759 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
769 err = umems_create(ndev, mvq); in create_virtqueue()
783 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
788 get_features_12_3(ndev->mvdev.actual_features)); in create_virtqueue()
790 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
793 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
800 !!(ndev->mvdev.actual_features & VIRTIO_F_VERSION_1)); in create_virtqueue()
804 MLX5_SET(virtio_q, vq_ctx, virtio_q_mkey, ndev->mvdev.mr.mkey.key); in create_virtqueue()
811 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
812 if (MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, eth_frame_offload_type)) in create_virtqueue()
815 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
827 umems_destroy(ndev, mvq); in create_virtqueue()
831 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
839 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
842 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
843 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
846 umems_destroy(ndev, mvq); in destroy_virtqueue()
859 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
875 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
887 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
904 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
922 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
954 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
962 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
966 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
971 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
975 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
979 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
983 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
987 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
991 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
995 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
999 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
1007 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1026 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1027 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1043 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int state) in modify_virtqueue() argument
1061 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1067 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1075 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1084 mlx5_vdpa_warn(&ndev->mvdev, "attempt re init\n"); in setup_vq()
1088 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1092 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1096 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1100 err = connect_qps(ndev, mvq); in setup_vq()
1104 err = create_virtqueue(ndev, mvq); in setup_vq()
1109 err = modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1111 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1121 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1123 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1125 cq_destroy(ndev, idx); in setup_vq()
1129 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1139 if (modify_virtqueue(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1140 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1142 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1143 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1149 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1154 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1157 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1162 suspend_vq(ndev, mvq); in teardown_vq()
1163 destroy_virtqueue(ndev, mvq); in teardown_vq()
1164 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1165 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1166 cq_destroy(ndev, mvq->index); in teardown_vq()
1170 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1180 log_max_rqt = min_t(int, 1, MLX5_CAP_GEN(ndev->mvdev.mdev, log_max_rqt_size)); in create_rqt()
1189 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1196 for (i = 0, j = 0; j < ndev->mvdev.max_vqs; j++) { in create_rqt()
1197 if (!ndev->vqs[j].initialized) in create_rqt()
1200 if (!vq_is_tx(ndev->vqs[j].index)) { in create_rqt()
1201 list[i] = cpu_to_be32(ndev->vqs[j].virtq_id); in create_rqt()
1206 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1214 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1216 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1219 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1239 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1253 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1254 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1256 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1261 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1263 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1266 static int add_fwd_to_tir(struct mlx5_vdpa_net *ndev) in add_fwd_to_tir() argument
1278 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in add_fwd_to_tir()
1280 mlx5_vdpa_warn(&ndev->mvdev, "get flow namespace\n"); in add_fwd_to_tir()
1284 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in add_fwd_to_tir()
1285 if (IS_ERR(ndev->rxft)) in add_fwd_to_tir()
1286 return PTR_ERR(ndev->rxft); in add_fwd_to_tir()
1288 ndev->rx_counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_fwd_to_tir()
1289 if (IS_ERR(ndev->rx_counter)) { in add_fwd_to_tir()
1290 err = PTR_ERR(ndev->rx_counter); in add_fwd_to_tir()
1296 dest[0].tir_num = ndev->res.tirn; in add_fwd_to_tir()
1298 dest[1].counter_id = mlx5_fc_id(ndev->rx_counter); in add_fwd_to_tir()
1299 ndev->rx_rule = mlx5_add_flow_rules(ndev->rxft, NULL, &flow_act, dest, 2); in add_fwd_to_tir()
1300 if (IS_ERR(ndev->rx_rule)) { in add_fwd_to_tir()
1301 err = PTR_ERR(ndev->rx_rule); in add_fwd_to_tir()
1302 ndev->rx_rule = NULL; in add_fwd_to_tir()
1309 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in add_fwd_to_tir()
1311 mlx5_destroy_flow_table(ndev->rxft); in add_fwd_to_tir()
1315 static void remove_fwd_to_tir(struct mlx5_vdpa_net *ndev) in remove_fwd_to_tir() argument
1317 if (!ndev->rx_rule) in remove_fwd_to_tir()
1320 mlx5_del_flow_rules(ndev->rx_rule); in remove_fwd_to_tir()
1321 mlx5_fc_destroy(ndev->mvdev.mdev, ndev->rx_counter); in remove_fwd_to_tir()
1322 mlx5_destroy_flow_table(ndev->rxft); in remove_fwd_to_tir()
1324 ndev->rx_rule = NULL; in remove_fwd_to_tir()
1330 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
1331 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
1336 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
1343 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
1344 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
1355 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
1358 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
1365 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
1366 struct mlx5_vdpa_virtqueue *vq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_cb()
1374 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
1375 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
1378 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
1386 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
1387 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_ready()
1396 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
1397 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
1411 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
1412 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
1425 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
1464 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_features() local
1468 ndev->mvdev.mlx_features = mlx_to_vritio_features(dev_features); in mlx5_vdpa_get_features()
1470 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_VERSION_1); in mlx5_vdpa_get_features()
1471 ndev->mvdev.mlx_features |= BIT_ULL(VIRTIO_F_ACCESS_PLATFORM); in mlx5_vdpa_get_features()
1472 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_features()
1473 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_features()
1484 static int setup_virtqueues(struct mlx5_vdpa_net *ndev) in setup_virtqueues() argument
1489 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); i++) { in setup_virtqueues()
1490 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
1499 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
1504 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
1509 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
1510 mvq = &ndev->vqs[i]; in teardown_virtqueues()
1514 teardown_vq(ndev, mvq); in teardown_virtqueues()
1533 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_features() local
1542 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_features()
1543 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, ndev->mtu); in mlx5_vdpa_set_features()
1544 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_set_features()
1573 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
1575 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
1576 return ndev->mvdev.status; in mlx5_vdpa_get_status()
1579 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
1588 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
1603 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
1607 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
1608 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
1609 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
1614 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
1618 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
1619 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
1622 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
1628 mlx5_clear_vqs(ndev); in restore_channels_info()
1629 init_mvqs(ndev); in restore_channels_info()
1630 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
1631 mvq = &ndev->vqs[i]; in restore_channels_info()
1646 static int mlx5_vdpa_change_map(struct mlx5_vdpa_net *ndev, struct vhost_iotlb *iotlb) in mlx5_vdpa_change_map() argument
1650 suspend_vqs(ndev); in mlx5_vdpa_change_map()
1651 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
1655 teardown_driver(ndev); in mlx5_vdpa_change_map()
1656 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1657 err = mlx5_vdpa_create_mr(&ndev->mvdev, iotlb); in mlx5_vdpa_change_map()
1661 if (!(ndev->mvdev.status & VIRTIO_CONFIG_S_DRIVER_OK)) in mlx5_vdpa_change_map()
1664 restore_channels_info(ndev); in mlx5_vdpa_change_map()
1665 err = setup_driver(ndev); in mlx5_vdpa_change_map()
1672 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_change_map()
1677 static int setup_driver(struct mlx5_vdpa_net *ndev) in setup_driver() argument
1681 mutex_lock(&ndev->reslock); in setup_driver()
1682 if (ndev->setup) { in setup_driver()
1683 mlx5_vdpa_warn(&ndev->mvdev, "setup driver called for already setup driver\n"); in setup_driver()
1687 err = setup_virtqueues(ndev); in setup_driver()
1689 mlx5_vdpa_warn(&ndev->mvdev, "setup_virtqueues\n"); in setup_driver()
1693 err = create_rqt(ndev); in setup_driver()
1695 mlx5_vdpa_warn(&ndev->mvdev, "create_rqt\n"); in setup_driver()
1699 err = create_tir(ndev); in setup_driver()
1701 mlx5_vdpa_warn(&ndev->mvdev, "create_tir\n"); in setup_driver()
1705 err = add_fwd_to_tir(ndev); in setup_driver()
1707 mlx5_vdpa_warn(&ndev->mvdev, "add_fwd_to_tir\n"); in setup_driver()
1710 ndev->setup = true; in setup_driver()
1711 mutex_unlock(&ndev->reslock); in setup_driver()
1716 destroy_tir(ndev); in setup_driver()
1718 destroy_rqt(ndev); in setup_driver()
1720 teardown_virtqueues(ndev); in setup_driver()
1722 mutex_unlock(&ndev->reslock); in setup_driver()
1726 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
1728 mutex_lock(&ndev->reslock); in teardown_driver()
1729 if (!ndev->setup) in teardown_driver()
1732 remove_fwd_to_tir(ndev); in teardown_driver()
1733 destroy_tir(ndev); in teardown_driver()
1734 destroy_rqt(ndev); in teardown_driver()
1735 teardown_virtqueues(ndev); in teardown_driver()
1736 ndev->setup = false; in teardown_driver()
1738 mutex_unlock(&ndev->reslock); in teardown_driver()
1744 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
1750 teardown_driver(ndev); in mlx5_vdpa_set_status()
1751 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1752 ndev->mvdev.status = 0; in mlx5_vdpa_set_status()
1753 ndev->mvdev.mlx_features = 0; in mlx5_vdpa_set_status()
1758 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
1760 err = setup_driver(ndev); in mlx5_vdpa_set_status()
1771 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
1775 mlx5_vdpa_destroy_mr(&ndev->mvdev); in mlx5_vdpa_set_status()
1776 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
1783 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
1786 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
1805 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
1816 return mlx5_vdpa_change_map(ndev, iotlb); in mlx5_vdpa_set_map()
1824 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
1826 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
1828 free_resources(ndev); in mlx5_vdpa_free()
1829 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
1830 mutex_destroy(&ndev->reslock); in mlx5_vdpa_free()
1872 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
1874 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
1878 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
1882 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
1886 err = create_tis(ndev); in alloc_resources()
1895 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
1899 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
1901 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
1906 destroy_tis(ndev); in free_resources()
1907 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
1911 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
1916 for (i = 0; i < 2 * mlx5_vdpa_max_qps(ndev->mvdev.max_vqs); ++i) { in init_mvqs()
1917 mvq = &ndev->vqs[i]; in init_mvqs()
1920 mvq->ndev = ndev; in init_mvqs()
1923 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
1924 mvq = &ndev->vqs[i]; in init_mvqs()
1927 mvq->ndev = ndev; in init_mvqs()
1935 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_add_dev() local
1943 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mlx5_vdpa_ops, in mlx5_vdpa_add_dev()
1945 if (IS_ERR(ndev)) in mlx5_vdpa_add_dev()
1946 return ndev; in mlx5_vdpa_add_dev()
1948 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_add_dev()
1949 mvdev = &ndev->mvdev; in mlx5_vdpa_add_dev()
1951 init_mvqs(ndev); in mlx5_vdpa_add_dev()
1952 mutex_init(&ndev->reslock); in mlx5_vdpa_add_dev()
1953 config = &ndev->config; in mlx5_vdpa_add_dev()
1954 err = mlx5_query_nic_vport_mtu(mdev, &ndev->mtu); in mlx5_vdpa_add_dev()
1963 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
1967 err = alloc_resources(ndev); in mlx5_vdpa_add_dev()
1975 return ndev; in mlx5_vdpa_add_dev()
1978 free_resources(ndev); in mlx5_vdpa_add_dev()
1980 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_add_dev()
1982 mutex_destroy(&ndev->reslock); in mlx5_vdpa_add_dev()
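
Note on the recurring "ndev = to_mlx5_vdpa_ndev(mvdev)" locals in the vdpa callback hits above (for example mlx5_vdpa_kick_vq, mlx5_vdpa_set_vq_address and mlx5_vdpa_set_status): the driver-private net structure is recovered from the embedded mlx5_vdpa_dev/vdpa_device structures via container_of(). A minimal sketch of that pattern follows; the helper bodies are not part of this match list, and the to_mvdev name is used here purely for illustration, so treat this as an assumed reconstruction of the lookup rather than the verbatim source:

	/* Illustrative sketch, not taken from the listing above.
	 * struct mlx5_vdpa_net embeds struct mlx5_vdpa_dev as ->mvdev,
	 * which in turn embeds struct vdpa_device as ->vdev (see the
	 * vdpa_alloc_device() hit in mlx5_vdpa_add_dev() above), so two
	 * container_of() steps walk back out from the generic vdpa
	 * device handed to the callbacks to the driver-private net
	 * structure.
	 */
	static struct mlx5_vdpa_dev *to_mvdev(struct vdpa_device *vdev)	/* helper name assumed */
	{
		return container_of(vdev, struct mlx5_vdpa_dev, vdev);
	}

	static struct mlx5_vdpa_net *to_mlx5_vdpa_ndev(struct mlx5_vdpa_dev *mvdev)
	{
		return container_of(mvdev, struct mlx5_vdpa_net, mvdev);
	}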