Lines matching full:ndev — full-token occurrences of the identifier ndev in the mlx5 vDPA net driver, each shown with its source line number, the matched code, and the containing function; definition sites are tagged as member, argument, or local.

119 	struct mlx5_vdpa_net *ndev;  member
147 static void free_resources(struct mlx5_vdpa_net *ndev);
148 static void init_mvqs(struct mlx5_vdpa_net *ndev);
150 static void teardown_driver(struct mlx5_vdpa_net *ndev);
270 static int create_tis(struct mlx5_vdpa_net *ndev) in create_tis() argument
272 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_tis()
278 MLX5_SET(tisc, tisc, transport_domain, ndev->res.tdn); in create_tis()
279 err = mlx5_vdpa_create_tis(mvdev, in, &ndev->res.tisn); in create_tis()
286 static void destroy_tis(struct mlx5_vdpa_net *ndev) in destroy_tis() argument
288 mlx5_vdpa_destroy_tis(&ndev->mvdev, ndev->res.tisn); in destroy_tis()
294 static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent) in cq_frag_buf_alloc() argument
301 err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf, in cq_frag_buf_alloc()
302 ndev->mvdev.mdev->priv.numa_node); in cq_frag_buf_alloc()
314 static int umem_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem, int size) in umem_frag_buf_alloc() argument
318 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, size, frag_buf, in umem_frag_buf_alloc()
319 ndev->mvdev.mdev->priv.numa_node); in umem_frag_buf_alloc()
322 static void cq_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf) in cq_frag_buf_free() argument
324 mlx5_frag_buf_free(ndev->mvdev.mdev, &buf->frag_buf); in cq_frag_buf_free()
362 static void qp_prepare(struct mlx5_vdpa_net *ndev, bool fw, void *in, in qp_prepare() argument
370 MLX5_SET(create_qp_in, in, uid, ndev->mvdev.res.uid); in qp_prepare()
384 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_prepare()
386 MLX5_SET(qpc, qpc, uar_page, ndev->mvdev.res.uar->index); in qp_prepare()
396 static int rq_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp, u32 num_ent) in rq_buf_alloc() argument
398 return mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, in rq_buf_alloc()
400 ndev->mvdev.mdev->priv.numa_node); in rq_buf_alloc()
403 static void rq_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in rq_buf_free() argument
405 mlx5_frag_buf_free(ndev->mvdev.mdev, &vqp->frag_buf); in rq_buf_free()
408 static int qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in qp_create() argument
411 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in qp_create()
420 err = rq_buf_alloc(ndev, vqp, mvq->num_ent); in qp_create()
424 err = mlx5_db_alloc(ndev->mvdev.mdev, &vqp->db); in qp_create()
436 qp_prepare(ndev, vqp->fw, in, mvq, mvq->num_ent); in qp_create()
440 MLX5_SET(qpc, qpc, pd, ndev->mvdev.res.pdn); in qp_create()
450 vqp->mqp.uid = ndev->mvdev.res.uid; in qp_create()
460 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_create()
463 rq_buf_free(ndev, vqp); in qp_create()
468 static void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp) in qp_destroy() argument
474 MLX5_SET(destroy_qp_in, in, uid, ndev->mvdev.res.uid); in qp_destroy()
475 if (mlx5_cmd_exec_in(ndev->mvdev.mdev, destroy_qp, in)) in qp_destroy()
476 mlx5_vdpa_warn(&ndev->mvdev, "destroy qp 0x%x\n", vqp->mqp.qpn); in qp_destroy()
478 mlx5_db_free(ndev->mvdev.mdev, &vqp->db); in qp_destroy()
479 rq_buf_free(ndev, vqp); in qp_destroy()
502 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_handle_completions() local
505 event_cb = &ndev->event_cbs[mvq->index]; in mlx5_vdpa_handle_completions()
520 struct mlx5_vdpa_net *ndev = mvq->ndev; in mlx5_vdpa_cq_comp() local
521 void __iomem *uar_page = ndev->mvdev.res.uar->map; in mlx5_vdpa_cq_comp()
544 static int cq_create(struct mlx5_vdpa_net *ndev, u16 idx, u32 num_ent) in cq_create() argument
546 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_create()
547 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_create()
548 void __iomem *uar_page = ndev->mvdev.res.uar->map; in cq_create()
566 err = cq_frag_buf_alloc(ndev, &vcq->buf, num_ent); in cq_create()
580 MLX5_SET(create_cq_in, in, uid, ndev->mvdev.res.uid); in cq_create()
596 MLX5_SET(cqc, cqc, uar_page, ndev->mvdev.res.uar->index); in cq_create()
615 cq_frag_buf_free(ndev, &vcq->buf); in cq_create()
617 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_create()
621 static void cq_destroy(struct mlx5_vdpa_net *ndev, u16 idx) in cq_destroy() argument
623 struct mlx5_vdpa_virtqueue *mvq = &ndev->vqs[idx]; in cq_destroy()
624 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in cq_destroy()
628 mlx5_vdpa_warn(&ndev->mvdev, "destroy CQ 0x%x\n", vcq->mcq.cqn); in cq_destroy()
631 cq_frag_buf_free(ndev, &vcq->buf); in cq_destroy()
632 mlx5_db_free(ndev->mvdev.mdev, &vcq->db); in cq_destroy()
635 static int read_umem_params(struct mlx5_vdpa_net *ndev) in read_umem_params() argument
639 struct mlx5_core_dev *mdev = ndev->mvdev.mdev; in read_umem_params()
654 mlx5_vdpa_warn(&ndev->mvdev, in read_umem_params()
661 ndev->umem_1_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_a); in read_umem_params()
662 ndev->umem_1_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_1_buffer_param_b); in read_umem_params()
664 ndev->umem_2_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_a); in read_umem_params()
665 ndev->umem_2_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_2_buffer_param_b); in read_umem_params()
667 ndev->umem_3_buffer_param_a = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_a); in read_umem_params()
668 ndev->umem_3_buffer_param_b = MLX5_GET(virtio_emulation_cap, caps, umem_3_buffer_param_b); in read_umem_params()
675 static void set_umem_size(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num, in set_umem_size() argument
683 p_a = ndev->umem_1_buffer_param_a; in set_umem_size()
684 p_b = ndev->umem_1_buffer_param_b; in set_umem_size()
688 p_a = ndev->umem_2_buffer_param_a; in set_umem_size()
689 p_b = ndev->umem_2_buffer_param_b; in set_umem_size()
693 p_a = ndev->umem_3_buffer_param_a; in set_umem_size()
694 p_b = ndev->umem_3_buffer_param_b; in set_umem_size()
702 static void umem_frag_buf_free(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_umem *umem) in umem_frag_buf_free() argument
704 mlx5_frag_buf_free(ndev->mvdev.mdev, &umem->frag_buf); in umem_frag_buf_free()
707 static int create_umem(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in create_umem() argument
717 set_umem_size(ndev, mvq, num, &umem); in create_umem()
718 err = umem_frag_buf_alloc(ndev, umem, umem->size); in create_umem()
731 MLX5_SET(create_umem_in, in, uid, ndev->mvdev.res.uid); in create_umem()
739 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_umem()
741 mlx5_vdpa_warn(&ndev->mvdev, "create umem(%d)\n", err); in create_umem()
753 umem_frag_buf_free(ndev, umem); in create_umem()
757 static void umem_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, int num) in umem_destroy() argument
777 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in umem_destroy()
780 umem_frag_buf_free(ndev, umem); in umem_destroy()
783 static int umems_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_create() argument
789 err = create_umem(ndev, mvq, num); in umems_create()
797 umem_destroy(ndev, mvq, num); in umems_create()
802 static void umems_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in umems_destroy() argument
807 umem_destroy(ndev, mvq, num); in umems_destroy()
810 static int get_queue_type(struct mlx5_vdpa_net *ndev) in get_queue_type() argument
814 type_mask = MLX5_CAP_DEV_VDPA_EMULATION(ndev->mvdev.mdev, virtio_queue_type); in get_queue_type()
867 static int create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in create_virtqueue() argument
871 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in create_virtqueue()
881 err = umems_create(ndev, mvq); in create_virtqueue()
891 mlx_features = get_features(ndev->mvdev.actual_features); in create_virtqueue()
896 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in create_virtqueue()
906 MLX5_SET(virtio_q, vq_ctx, virtio_q_type, get_queue_type(ndev)); in create_virtqueue()
909 MLX5_SET(virtio_net_q_object, obj_context, tisn_or_qpn, ndev->res.tisn); in create_virtqueue()
922 !!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_F_VERSION_1))); in create_virtqueue()
940 MLX5_SET(virtio_q, vq_ctx, pd, ndev->mvdev.res.pdn); in create_virtqueue()
941 if (counters_supported(&ndev->mvdev)) in create_virtqueue()
944 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in create_virtqueue()
965 umems_destroy(ndev, mvq); in create_virtqueue()
969 static void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in destroy_virtqueue() argument
977 MLX5_SET(destroy_virtio_net_q_in, in, general_obj_out_cmd_hdr.uid, ndev->mvdev.res.uid); in destroy_virtqueue()
980 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) { in destroy_virtqueue()
981 mlx5_vdpa_warn(&ndev->mvdev, "destroy virtqueue 0x%x\n", mvq->virtq_id); in destroy_virtqueue()
985 umems_destroy(ndev, mvq); in destroy_virtqueue()
987 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->vq_mr); in destroy_virtqueue()
990 mlx5_vdpa_put_mr(&ndev->mvdev, mvq->desc_mr); in destroy_virtqueue()
1004 static void alloc_inout(struct mlx5_vdpa_net *ndev, int cmd, void **in, int *inlen, void **out, in alloc_inout() argument
1020 MLX5_SET(qp_2rst_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1032 MLX5_SET(rst2init_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1049 MLX5_SET(init2rtr_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1067 MLX5_SET(rtr2rts_qp_in, *in, uid, ndev->mvdev.res.uid); in alloc_inout()
1099 static int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, bool fw, int cmd) in modify_qp() argument
1107 alloc_inout(ndev, cmd, &in, &inlen, &out, &outlen, get_qpn(mvq, fw), get_rqpn(mvq, fw)); in modify_qp()
1111 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, outlen); in modify_qp()
1116 static int connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in connect_qps() argument
1120 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_2RST_QP); in connect_qps()
1124 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_2RST_QP); in connect_qps()
1128 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1132 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_RST2INIT_QP); in connect_qps()
1136 err = modify_qp(ndev, mvq, true, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1140 err = modify_qp(ndev, mvq, false, MLX5_CMD_OP_INIT2RTR_QP); in connect_qps()
1144 return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP); in connect_qps()
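
The connect_qps() matches above (source lines 1116-1144) walk the two per-virtqueue QPs through the usual state ladder. The standalone C sketch below restates only that ordering; the opaque struct declarations, the enum values, and the modify_qp() prototype are stand-ins for the driver's own definitions, and error handling is reduced to early returns:

        #include <stdbool.h>

        /* Opaque stand-ins for the driver's types (sketch only). */
        struct mlx5_vdpa_net;
        struct mlx5_vdpa_virtqueue;

        /* QP-modify opcodes named in the matches above (values are placeholders). */
        enum {
                MLX5_CMD_OP_2RST_QP,
                MLX5_CMD_OP_RST2INIT_QP,
                MLX5_CMD_OP_INIT2RTR_QP,
                MLX5_CMD_OP_RTR2RTS_QP,
        };

        /* Stand-in prototype: fw selects the firmware-owned QP (true) or the
         * software QP (false); returns 0 on success. */
        int modify_qp(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
                      bool fw, int cmd);

        /* Ordering traced by the connect_qps() lines above: both QPs are driven
         * RESET -> INIT -> RTR in lockstep (firmware QP first at every step),
         * and only the firmware QP is then moved on to RTS. */
        static int connect_qps_sketch(struct mlx5_vdpa_net *ndev,
                                      struct mlx5_vdpa_virtqueue *mvq)
        {
                static const int ladder[] = {
                        MLX5_CMD_OP_2RST_QP,
                        MLX5_CMD_OP_RST2INIT_QP,
                        MLX5_CMD_OP_INIT2RTR_QP,
                };
                int err;

                for (unsigned int i = 0; i < sizeof(ladder) / sizeof(ladder[0]); i++) {
                        err = modify_qp(ndev, mvq, true, ladder[i]);
                        if (err)
                                return err;
                        err = modify_qp(ndev, mvq, false, ladder[i]);
                        if (err)
                                return err;
                }

                return modify_qp(ndev, mvq, true, MLX5_CMD_OP_RTR2RTS_QP);
        }
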
1153 static int query_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in query_virtqueue() argument
1172 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in query_virtqueue()
1173 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, outlen); in query_virtqueue()
1190 static bool is_resumable(struct mlx5_vdpa_net *ndev) in is_resumable() argument
1192 return ndev->mvdev.vdev.config->resume; in is_resumable()
1220 static int modify_virtqueue(struct mlx5_vdpa_net *ndev, in modify_virtqueue() argument
1226 struct mlx5_vdpa_dev *mvdev = &ndev->mvdev; in modify_virtqueue()
1251 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in modify_virtqueue()
1257 if (!is_valid_state_change(mvq->fw_state, state, is_resumable(ndev))) { in modify_virtqueue()
1297 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, inlen, out, sizeof(out)); in modify_virtqueue()
1323 static int modify_virtqueue_state(struct mlx5_vdpa_net *ndev, in modify_virtqueue_state() argument
1328 return modify_virtqueue(ndev, mvq, state); in modify_virtqueue_state()
1331 static int counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_alloc() argument
1338 if (!counters_supported(&ndev->mvdev)) in counter_set_alloc()
1345 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_alloc()
1347 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_alloc()
1356 static void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in counter_set_dealloc() argument
1361 if (!counters_supported(&ndev->mvdev)) in counter_set_dealloc()
1366 MLX5_SET(destroy_virtio_q_counters_in, in, hdr.uid, ndev->mvdev.res.uid); in counter_set_dealloc()
1368 if (mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out))) in counter_set_dealloc()
1369 mlx5_vdpa_warn(&ndev->mvdev, "dealloc counter set 0x%x\n", mvq->counter_set_id); in counter_set_dealloc()
1382 static void alloc_vector(struct mlx5_vdpa_net *ndev, in alloc_vector() argument
1385 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in alloc_vector()
1394 dev_name(&ndev->mvdev.vdev.dev), mvq->index); in alloc_vector()
1395 ent->dev_id = &ndev->event_cbs[mvq->index]; in alloc_vector()
1408 static void dealloc_vector(struct mlx5_vdpa_net *ndev, in dealloc_vector() argument
1411 struct mlx5_vdpa_irq_pool *irqp = &ndev->irqp; in dealloc_vector()
1422 static int setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in setup_vq() argument
1433 err = cq_create(ndev, idx, mvq->num_ent); in setup_vq()
1437 err = qp_create(ndev, mvq, &mvq->fwqp); in setup_vq()
1441 err = qp_create(ndev, mvq, &mvq->vqqp); in setup_vq()
1445 err = connect_qps(ndev, mvq); in setup_vq()
1449 err = counter_set_alloc(ndev, mvq); in setup_vq()
1453 alloc_vector(ndev, mvq); in setup_vq()
1454 err = create_virtqueue(ndev, mvq); in setup_vq()
1459 err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in setup_vq()
1461 mlx5_vdpa_warn(&ndev->mvdev, "failed to modify to ready vq idx %d(%d)\n", in setup_vq()
1471 destroy_virtqueue(ndev, mvq); in setup_vq()
1473 dealloc_vector(ndev, mvq); in setup_vq()
1474 counter_set_dealloc(ndev, mvq); in setup_vq()
1476 qp_destroy(ndev, &mvq->vqqp); in setup_vq()
1478 qp_destroy(ndev, &mvq->fwqp); in setup_vq()
1480 cq_destroy(ndev, idx); in setup_vq()
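
setup_vq() (source lines 1422-1480 above) brings one virtqueue up in a fixed order and unwinds in reverse on failure; teardown_vq() (source lines 1531-1543, listed below) releases the same resources in that same reverse order. The following compact sketch restates that structure under simplified assumptions: the stand-in types, helper prototypes, label names, and the RDY constant's value are placeholders, not the driver's real definitions.

        #include <stdint.h>

        struct mlx5_vdpa_net;                   /* opaque stand-in */

        struct mlx5_vdpa_virtqueue {            /* reduced stand-in: only the
                                                   fields this sketch touches */
                uint16_t index;
                uint32_t num_ent;
        };

        struct mlx5_vdpa_qp;                    /* opaque; in the driver, fwqp and
                                                   vqqp are members of the virtqueue */

        enum { MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY = 1 };  /* placeholder value */

        /* Stand-in prototypes for the helpers named in the matches above. */
        int  cq_create(struct mlx5_vdpa_net *ndev, uint16_t idx, uint32_t num_ent);
        void cq_destroy(struct mlx5_vdpa_net *ndev, uint16_t idx);
        int  qp_create(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq,
                       struct mlx5_vdpa_qp *vqp);
        void qp_destroy(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_qp *vqp);
        int  connect_qps(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        int  counter_set_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        void counter_set_dealloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        void alloc_vector(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        void dealloc_vector(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        int  create_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        void destroy_virtqueue(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        int  modify_virtqueue_state(struct mlx5_vdpa_net *ndev,
                                    struct mlx5_vdpa_virtqueue *mvq, int state);

        /* Bring-up order traced by setup_vq() above, with the reverse-order unwind
         * shown in its error path; fwqp/vqqp are passed in here only to keep the
         * sketch self-contained. */
        static int setup_vq_sketch(struct mlx5_vdpa_net *ndev,
                                   struct mlx5_vdpa_virtqueue *mvq,
                                   struct mlx5_vdpa_qp *fwqp, struct mlx5_vdpa_qp *vqqp)
        {
                uint16_t idx = mvq->index;
                int err;

                err = cq_create(ndev, idx, mvq->num_ent);       /* completion queue */
                if (err)
                        return err;
                err = qp_create(ndev, mvq, fwqp);               /* firmware-side QP */
                if (err)
                        goto err_fwqp;
                err = qp_create(ndev, mvq, vqqp);               /* virtqueue-side QP */
                if (err)
                        goto err_vqqp;
                err = connect_qps(ndev, mvq);
                if (err)
                        goto err_connect;
                err = counter_set_alloc(ndev, mvq);
                if (err)
                        goto err_connect;

                alloc_vector(ndev, mvq);
                err = create_virtqueue(ndev, mvq);
                if (err)
                        goto err_vq;

                err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY);
                if (err)
                        goto err_modify;

                return 0;

        err_modify:
                destroy_virtqueue(ndev, mvq);
        err_vq:
                dealloc_vector(ndev, mvq);
                counter_set_dealloc(ndev, mvq);
        err_connect:
                qp_destroy(ndev, vqqp);
        err_vqqp:
                qp_destroy(ndev, fwqp);
        err_fwqp:
                cq_destroy(ndev, idx);
                return err;
        }
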
1484 static void suspend_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in suspend_vq() argument
1494 if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_SUSPEND)) in suspend_vq()
1495 mlx5_vdpa_warn(&ndev->mvdev, "modify to suspend failed\n"); in suspend_vq()
1497 if (query_virtqueue(ndev, mvq, &attr)) { in suspend_vq()
1498 mlx5_vdpa_warn(&ndev->mvdev, "failed to query virtqueue\n"); in suspend_vq()
1505 static void suspend_vqs(struct mlx5_vdpa_net *ndev) in suspend_vqs() argument
1509 for (i = 0; i < ndev->mvdev.max_vqs; i++) in suspend_vqs()
1510 suspend_vq(ndev, &ndev->vqs[i]); in suspend_vqs()
1513 static void resume_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in resume_vq() argument
1515 if (!mvq->initialized || !is_resumable(ndev)) in resume_vq()
1521 if (modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY)) in resume_vq()
1522 mlx5_vdpa_warn(&ndev->mvdev, "modify to resume failed for vq %u\n", mvq->index); in resume_vq()
1525 static void resume_vqs(struct mlx5_vdpa_net *ndev) in resume_vqs() argument
1527 for (int i = 0; i < ndev->mvdev.max_vqs; i++) in resume_vqs()
1528 resume_vq(ndev, &ndev->vqs[i]); in resume_vqs()
1531 static void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in teardown_vq() argument
1536 suspend_vq(ndev, mvq); in teardown_vq()
1538 destroy_virtqueue(ndev, mvq); in teardown_vq()
1539 dealloc_vector(ndev, mvq); in teardown_vq()
1540 counter_set_dealloc(ndev, mvq); in teardown_vq()
1541 qp_destroy(ndev, &mvq->vqqp); in teardown_vq()
1542 qp_destroy(ndev, &mvq->fwqp); in teardown_vq()
1543 cq_destroy(ndev, mvq->index); in teardown_vq()
1547 static int create_rqt(struct mlx5_vdpa_net *ndev) in create_rqt() argument
1549 int rqt_table_size = roundup_pow_of_two(ndev->rqt_size); in create_rqt()
1550 int act_sz = roundup_pow_of_two(ndev->cur_num_vqs / 2); in create_rqt()
1563 MLX5_SET(create_rqt_in, in, uid, ndev->mvdev.res.uid); in create_rqt()
1570 list[i] = cpu_to_be32(ndev->vqs[j % ndev->cur_num_vqs].virtq_id); in create_rqt()
1573 err = mlx5_vdpa_create_rqt(&ndev->mvdev, in, inlen, &ndev->res.rqtn); in create_rqt()
1583 static int modify_rqt(struct mlx5_vdpa_net *ndev, int num) in modify_rqt() argument
1598 MLX5_SET(modify_rqt_in, in, uid, ndev->mvdev.res.uid); in modify_rqt()
1605 list[i] = cpu_to_be32(ndev->vqs[j % num].virtq_id); in modify_rqt()
1608 err = mlx5_vdpa_modify_rqt(&ndev->mvdev, in, inlen, ndev->res.rqtn); in modify_rqt()
1616 static void destroy_rqt(struct mlx5_vdpa_net *ndev) in destroy_rqt() argument
1618 mlx5_vdpa_destroy_rqt(&ndev->mvdev, ndev->res.rqtn); in destroy_rqt()
1621 static int create_tir(struct mlx5_vdpa_net *ndev) in create_tir() argument
1641 MLX5_SET(create_tir_in, in, uid, ndev->mvdev.res.uid); in create_tir()
1655 MLX5_SET(tirc, tirc, indirect_table, ndev->res.rqtn); in create_tir()
1656 MLX5_SET(tirc, tirc, transport_domain, ndev->res.tdn); in create_tir()
1658 err = mlx5_vdpa_create_tir(&ndev->mvdev, in, &ndev->res.tirn); in create_tir()
1663 mlx5_vdpa_add_tirn(ndev); in create_tir()
1667 static void destroy_tir(struct mlx5_vdpa_net *ndev) in destroy_tir() argument
1669 mlx5_vdpa_remove_tirn(ndev); in destroy_tir()
1670 mlx5_vdpa_destroy_tir(&ndev->mvdev, ndev->res.tirn); in destroy_tir()
1682 static int add_steering_counters(struct mlx5_vdpa_net *ndev, in add_steering_counters() argument
1690 node->ucast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1694 node->mcast_counter.counter = mlx5_fc_create(ndev->mvdev.mdev, false); in add_steering_counters()
1705 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in add_steering_counters()
1712 static void remove_steering_counters(struct mlx5_vdpa_net *ndev, in remove_steering_counters() argument
1716 mlx5_fc_destroy(ndev->mvdev.mdev, node->mcast_counter.counter); in remove_steering_counters()
1717 mlx5_fc_destroy(ndev->mvdev.mdev, node->ucast_counter.counter); in remove_steering_counters()
1721 static int mlx5_vdpa_add_mac_vlan_rules(struct mlx5_vdpa_net *ndev, u8 *mac, in mlx5_vdpa_add_mac_vlan_rules() argument
1746 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN)) { in mlx5_vdpa_add_mac_vlan_rules()
1756 dests[0].tir_num = ndev->res.tirn; in mlx5_vdpa_add_mac_vlan_rules()
1757 err = add_steering_counters(ndev, node, &flow_act, dests); in mlx5_vdpa_add_mac_vlan_rules()
1764 node->ucast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1778 node->mcast_rule = mlx5_add_flow_rules(ndev->rxft, spec, &flow_act, dests, NUM_DESTS); in mlx5_vdpa_add_mac_vlan_rules()
1784 mlx5_vdpa_add_rx_counters(ndev, node); in mlx5_vdpa_add_mac_vlan_rules()
1790 remove_steering_counters(ndev, node); in mlx5_vdpa_add_mac_vlan_rules()
1796 static void mlx5_vdpa_del_mac_vlan_rules(struct mlx5_vdpa_net *ndev, in mlx5_vdpa_del_mac_vlan_rules() argument
1799 mlx5_vdpa_remove_rx_counters(ndev, node); in mlx5_vdpa_del_mac_vlan_rules()
1822 static struct macvlan_node *mac_vlan_lookup(struct mlx5_vdpa_net *ndev, u64 value) in mac_vlan_lookup() argument
1828 hlist_for_each_entry(pos, &ndev->macvlan_hash[idx], hlist) { in mac_vlan_lookup()
1835 static int mac_vlan_add(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vid, bool tagged) in mac_vlan_add() argument
1843 if (mac_vlan_lookup(ndev, val)) in mac_vlan_add()
1852 ptr->ndev = ndev; in mac_vlan_add()
1853 err = mlx5_vdpa_add_mac_vlan_rules(ndev, ndev->config.mac, ptr); in mac_vlan_add()
1858 hlist_add_head(&ptr->hlist, &ndev->macvlan_hash[idx]); in mac_vlan_add()
1866 static void mac_vlan_del(struct mlx5_vdpa_net *ndev, u8 *mac, u16 vlan, bool tagged) in mac_vlan_del() argument
1870 ptr = mac_vlan_lookup(ndev, search_val(mac, vlan, tagged)); in mac_vlan_del()
1875 mlx5_vdpa_del_mac_vlan_rules(ndev, ptr); in mac_vlan_del()
1876 remove_steering_counters(ndev, ptr); in mac_vlan_del()
1880 static void clear_mac_vlan_table(struct mlx5_vdpa_net *ndev) in clear_mac_vlan_table() argument
1887 hlist_for_each_entry_safe(pos, n, &ndev->macvlan_hash[i], hlist) { in clear_mac_vlan_table()
1889 mlx5_vdpa_del_mac_vlan_rules(ndev, pos); in clear_mac_vlan_table()
1890 remove_steering_counters(ndev, pos); in clear_mac_vlan_table()
1896 static int setup_steering(struct mlx5_vdpa_net *ndev) in setup_steering() argument
1905 ns = mlx5_get_flow_namespace(ndev->mvdev.mdev, MLX5_FLOW_NAMESPACE_BYPASS); in setup_steering()
1907 mlx5_vdpa_warn(&ndev->mvdev, "failed to get flow namespace\n"); in setup_steering()
1911 ndev->rxft = mlx5_create_auto_grouped_flow_table(ns, &ft_attr); in setup_steering()
1912 if (IS_ERR(ndev->rxft)) { in setup_steering()
1913 mlx5_vdpa_warn(&ndev->mvdev, "failed to create flow table\n"); in setup_steering()
1914 return PTR_ERR(ndev->rxft); in setup_steering()
1916 mlx5_vdpa_add_rx_flow_table(ndev); in setup_steering()
1918 err = mac_vlan_add(ndev, ndev->config.mac, 0, false); in setup_steering()
1925 mlx5_vdpa_remove_rx_flow_table(ndev); in setup_steering()
1926 mlx5_destroy_flow_table(ndev->rxft); in setup_steering()
1930 static void teardown_steering(struct mlx5_vdpa_net *ndev) in teardown_steering() argument
1932 clear_mac_vlan_table(ndev); in teardown_steering()
1933 mlx5_vdpa_remove_rx_flow_table(ndev); in teardown_steering()
1934 mlx5_destroy_flow_table(ndev->rxft); in teardown_steering()
1939 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mac() local
1953 if (!memcmp(ndev->config.mac, mac, 6)) { in handle_ctrl_mac()
1961 if (!is_zero_ether_addr(ndev->config.mac)) { in handle_ctrl_mac()
1962 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
1964 ndev->config.mac); in handle_ctrl_mac()
1978 memcpy(mac_back, ndev->config.mac, ETH_ALEN); in handle_ctrl_mac()
1980 memcpy(ndev->config.mac, mac, ETH_ALEN); in handle_ctrl_mac()
1984 mac_vlan_del(ndev, mac_back, 0, false); in handle_ctrl_mac()
1986 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) { in handle_ctrl_mac()
1998 if (mlx5_mpfs_del_mac(pfmdev, ndev->config.mac)) { in handle_ctrl_mac()
2000 ndev->config.mac); in handle_ctrl_mac()
2008 memcpy(ndev->config.mac, mac_back, ETH_ALEN); in handle_ctrl_mac()
2010 if (mac_vlan_add(ndev, ndev->config.mac, 0, false)) in handle_ctrl_mac()
2028 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in change_num_qps() local
2029 int cur_qps = ndev->cur_num_vqs / 2; in change_num_qps()
2034 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
2038 for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--) in change_num_qps()
2039 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2041 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
2043 ndev->cur_num_vqs = 2 * newqps; in change_num_qps()
2045 err = setup_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2049 err = modify_rqt(ndev, 2 * newqps); in change_num_qps()
2057 teardown_vq(ndev, &ndev->vqs[i]); in change_num_qps()
2059 ndev->cur_num_vqs = 2 * cur_qps; in change_num_qps()
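
The change_num_qps() matches above (source lines 2028-2059) suggest an asymmetric ordering when the number of queue pairs changes: shrinking repoints the RQT before tearing down the surplus virtqueues, while growing sets up the new virtqueues first and repoints the RQT last, undoing the added queues if either step fails. The sketch below is one reading of that control flow, not the driver's verbatim code; the reduced struct layout and the helper prototypes are placeholders:

        struct mlx5_vdpa_virtqueue { int index; };     /* reduced stand-in */

        struct mlx5_vdpa_net {                         /* reduced stand-in: only the
                                                          fields this sketch touches */
                int cur_num_vqs;
                struct mlx5_vdpa_virtqueue *vqs;       /* array sized to max_vqs */
        };

        /* Stand-in prototypes for the driver helpers listed in this section. */
        int  modify_rqt(struct mlx5_vdpa_net *ndev, int num);
        int  setup_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);
        void teardown_vq(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq);

        /* Shrink: repoint the RQT, then remove queues. Grow: create queues, then
         * repoint the RQT; on failure, tear the added queues back down. */
        static int change_num_qps_sketch(struct mlx5_vdpa_net *ndev, int newqps)
        {
                int cur_qps = ndev->cur_num_vqs / 2;
                int err, i;

                if (cur_qps > newqps) {
                        err = modify_rqt(ndev, 2 * newqps);
                        if (err)
                                return err;

                        for (i = ndev->cur_num_vqs - 1; i >= 2 * newqps; i--)
                                teardown_vq(ndev, &ndev->vqs[i]);

                        ndev->cur_num_vqs = 2 * newqps;
                } else {
                        ndev->cur_num_vqs = 2 * newqps;

                        for (i = 2 * cur_qps; i < 2 * newqps; i++) {
                                err = setup_vq(ndev, &ndev->vqs[i]);
                                if (err)
                                        goto clean_added;
                        }

                        err = modify_rqt(ndev, 2 * newqps);
                        if (err)
                                goto clean_added;
                }
                return 0;

        clean_added:
                for (--i; i >= 2 * cur_qps; i--)
                        teardown_vq(ndev, &ndev->vqs[i]);

                ndev->cur_num_vqs = 2 * cur_qps;
                return err;
        }
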
2066 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_mq() local
2093 newqps > ndev->rqt_size) in handle_ctrl_mq()
2096 if (ndev->cur_num_vqs == 2 * newqps) { in handle_ctrl_mq()
2114 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in handle_ctrl_vlan() local
2121 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VLAN))) in handle_ctrl_vlan()
2131 if (mac_vlan_add(ndev, ndev->config.mac, id, true)) in handle_ctrl_vlan()
2142 mac_vlan_del(ndev, ndev->config.mac, id, true); in handle_ctrl_vlan()
2159 struct mlx5_vdpa_net *ndev; in mlx5_cvq_kick_handler() local
2165 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_cvq_kick_handler()
2168 down_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2173 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_CTRL_VQ))) in mlx5_cvq_kick_handler()
2221 up_write(&ndev->reslock); in mlx5_cvq_kick_handler()
2227 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_kick_vq() local
2237 queue_work(mvdev->wq, &ndev->cvq_ent.work); in mlx5_vdpa_kick_vq()
2241 mvq = &ndev->vqs[idx]; in mlx5_vdpa_kick_vq()
2245 iowrite16(idx, ndev->mvdev.res.kick_addr); in mlx5_vdpa_kick_vq()
2252 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_address() local
2265 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_address()
2276 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_num() local
2282 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_num()
2289 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_cb() local
2291 ndev->event_cbs[idx] = *cb; in mlx5_vdpa_set_vq_cb()
2320 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_ready() local
2335 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_ready()
2337 suspend_vq(ndev, mvq); in mlx5_vdpa_set_vq_ready()
2339 err = modify_virtqueue_state(ndev, mvq, MLX5_VIRTIO_NET_Q_OBJECT_STATE_RDY); in mlx5_vdpa_set_vq_ready()
2353 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_ready() local
2361 return ndev->vqs[idx].ready; in mlx5_vdpa_get_vq_ready()
2368 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_vq_state() local
2379 mvq = &ndev->vqs[idx]; in mlx5_vdpa_set_vq_state()
2395 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vq_state() local
2408 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vq_state()
2422 err = query_virtqueue(ndev, mvq, &attr); in mlx5_vdpa_get_vq_state()
2506 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_device_features() local
2508 print_features(mvdev, ndev->mvdev.mlx_features, false); in mlx5_vdpa_get_device_features()
2509 return ndev->mvdev.mlx_features; in mlx5_vdpa_get_device_features()
2536 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_virtqueues() local
2541 err = setup_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2550 teardown_vq(ndev, &ndev->vqs[i]); in setup_virtqueues()
2555 static void teardown_virtqueues(struct mlx5_vdpa_net *ndev) in teardown_virtqueues() argument
2560 for (i = ndev->mvdev.max_vqs - 1; i >= 0; i--) { in teardown_virtqueues()
2561 mvq = &ndev->vqs[i]; in teardown_virtqueues()
2565 teardown_vq(ndev, mvq); in teardown_virtqueues()
2619 struct mlx5_vdpa_net *ndev; in update_carrier() local
2623 ndev = to_mlx5_vdpa_ndev(mvdev); in update_carrier()
2625 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in update_carrier()
2627 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in update_carrier()
2629 if (ndev->config_cb.callback) in update_carrier()
2630 ndev->config_cb.callback(ndev->config_cb.private); in update_carrier()
2635 static int queue_link_work(struct mlx5_vdpa_net *ndev) in queue_link_work() argument
2643 wqent->mvdev = &ndev->mvdev; in queue_link_work()
2645 queue_work(ndev->mvdev.wq, &wqent->work); in queue_link_work()
2651 struct mlx5_vdpa_net *ndev = container_of(nb, struct mlx5_vdpa_net, nb); in event_handler() local
2659 if (queue_link_work(ndev)) in event_handler()
2672 static void register_link_notifier(struct mlx5_vdpa_net *ndev) in register_link_notifier() argument
2674 if (!(ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_STATUS))) in register_link_notifier()
2677 ndev->nb.notifier_call = event_handler; in register_link_notifier()
2678 mlx5_notifier_register(ndev->mvdev.mdev, &ndev->nb); in register_link_notifier()
2679 ndev->nb_registered = true; in register_link_notifier()
2680 queue_link_work(ndev); in register_link_notifier()
2683 static void unregister_link_notifier(struct mlx5_vdpa_net *ndev) in unregister_link_notifier() argument
2685 if (!ndev->nb_registered) in unregister_link_notifier()
2688 ndev->nb_registered = false; in unregister_link_notifier()
2689 mlx5_notifier_unregister(ndev->mvdev.mdev, &ndev->nb); in unregister_link_notifier()
2690 if (ndev->mvdev.wq) in unregister_link_notifier()
2691 flush_workqueue(ndev->mvdev.wq); in unregister_link_notifier()
2702 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_driver_features() local
2711 ndev->mvdev.actual_features = features & ndev->mvdev.mlx_features; in mlx5_vdpa_set_driver_features()
2712 if (ndev->mvdev.actual_features & BIT_ULL(VIRTIO_NET_F_MQ)) in mlx5_vdpa_set_driver_features()
2713 ndev->rqt_size = mlx5vdpa16_to_cpu(mvdev, ndev->config.max_virtqueue_pairs); in mlx5_vdpa_set_driver_features()
2715 ndev->rqt_size = 1; in mlx5_vdpa_set_driver_features()
2725 ndev->cur_num_vqs = 2; in mlx5_vdpa_set_driver_features()
2734 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_config_cb() local
2736 ndev->config_cb = *cb; in mlx5_vdpa_set_config_cb()
2758 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_status() local
2760 print_status(mvdev, ndev->mvdev.status, false); in mlx5_vdpa_get_status()
2761 return ndev->mvdev.status; in mlx5_vdpa_get_status()
2764 static int save_channel_info(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq) in save_channel_info() argument
2771 err = query_virtqueue(ndev, mvq, &attr); in save_channel_info()
2788 static int save_channels_info(struct mlx5_vdpa_net *ndev) in save_channels_info() argument
2792 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in save_channels_info()
2793 memset(&ndev->vqs[i].ri, 0, sizeof(ndev->vqs[i].ri)); in save_channels_info()
2794 save_channel_info(ndev, &ndev->vqs[i]); in save_channels_info()
2799 static void mlx5_clear_vqs(struct mlx5_vdpa_net *ndev) in mlx5_clear_vqs() argument
2803 for (i = 0; i < ndev->mvdev.max_vqs; i++) in mlx5_clear_vqs()
2804 memset(&ndev->vqs[i], 0, offsetof(struct mlx5_vdpa_virtqueue, ri)); in mlx5_clear_vqs()
2807 static void restore_channels_info(struct mlx5_vdpa_net *ndev) in restore_channels_info() argument
2813 mlx5_clear_vqs(ndev); in restore_channels_info()
2814 init_mvqs(ndev); in restore_channels_info()
2815 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in restore_channels_info()
2816 mvq = &ndev->vqs[i]; in restore_channels_info()
2836 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_change_map() local
2837 bool teardown = !is_resumable(ndev); in mlx5_vdpa_change_map()
2840 suspend_vqs(ndev); in mlx5_vdpa_change_map()
2842 err = save_channels_info(ndev); in mlx5_vdpa_change_map()
2846 teardown_driver(ndev); in mlx5_vdpa_change_map()
2851 for (int i = 0; i < ndev->cur_num_vqs; i++) in mlx5_vdpa_change_map()
2852 ndev->vqs[i].modified_fields |= MLX5_VIRTQ_MODIFY_MASK_VIRTIO_Q_MKEY | in mlx5_vdpa_change_map()
2859 restore_channels_info(ndev); in mlx5_vdpa_change_map()
2865 resume_vqs(ndev); in mlx5_vdpa_change_map()
2873 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in setup_driver() local
2876 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in setup_driver()
2878 if (ndev->setup) { in setup_driver()
2883 mlx5_vdpa_add_debugfs(ndev); in setup_driver()
2885 err = read_umem_params(ndev); in setup_driver()
2895 err = create_rqt(ndev); in setup_driver()
2901 err = create_tir(ndev); in setup_driver()
2907 err = setup_steering(ndev); in setup_driver()
2912 ndev->setup = true; in setup_driver()
2917 destroy_tir(ndev); in setup_driver()
2919 destroy_rqt(ndev); in setup_driver()
2921 teardown_virtqueues(ndev); in setup_driver()
2923 mlx5_vdpa_remove_debugfs(ndev); in setup_driver()
2929 static void teardown_driver(struct mlx5_vdpa_net *ndev) in teardown_driver() argument
2932 WARN_ON(!rwsem_is_locked(&ndev->reslock)); in teardown_driver()
2934 if (!ndev->setup) in teardown_driver()
2937 mlx5_vdpa_remove_debugfs(ndev); in teardown_driver()
2938 teardown_steering(ndev); in teardown_driver()
2939 destroy_tir(ndev); in teardown_driver()
2940 destroy_rqt(ndev); in teardown_driver()
2941 teardown_virtqueues(ndev); in teardown_driver()
2942 ndev->setup = false; in teardown_driver()
2945 static void clear_vqs_ready(struct mlx5_vdpa_net *ndev) in clear_vqs_ready() argument
2949 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in clear_vqs_ready()
2950 ndev->vqs[i].ready = false; in clear_vqs_ready()
2951 ndev->vqs[i].modified_fields = 0; in clear_vqs_ready()
2954 ndev->mvdev.cvq.ready = false; in clear_vqs_ready()
2980 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_status() local
2985 down_write(&ndev->reslock); in mlx5_vdpa_set_status()
2987 if ((status ^ ndev->mvdev.status) & VIRTIO_CONFIG_S_DRIVER_OK) { in mlx5_vdpa_set_status()
2994 register_link_notifier(ndev); in mlx5_vdpa_set_status()
3006 ndev->mvdev.status = status; in mlx5_vdpa_set_status()
3007 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3011 unregister_link_notifier(ndev); in mlx5_vdpa_set_status()
3013 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_set_status()
3014 ndev->mvdev.status |= VIRTIO_CONFIG_S_FAILED; in mlx5_vdpa_set_status()
3016 up_write(&ndev->reslock); in mlx5_vdpa_set_status()
3031 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_compat_reset() local
3036 down_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3037 unregister_link_notifier(ndev); in mlx5_vdpa_compat_reset()
3038 teardown_driver(ndev); in mlx5_vdpa_compat_reset()
3039 clear_vqs_ready(ndev); in mlx5_vdpa_compat_reset()
3041 mlx5_vdpa_destroy_mr_resources(&ndev->mvdev); in mlx5_vdpa_compat_reset()
3042 ndev->mvdev.status = 0; in mlx5_vdpa_compat_reset()
3043 ndev->mvdev.suspended = false; in mlx5_vdpa_compat_reset()
3044 ndev->cur_num_vqs = 0; in mlx5_vdpa_compat_reset()
3045 ndev->mvdev.cvq.received_desc = 0; in mlx5_vdpa_compat_reset()
3046 ndev->mvdev.cvq.completed_desc = 0; in mlx5_vdpa_compat_reset()
3047 memset(ndev->event_cbs, 0, sizeof(*ndev->event_cbs) * (mvdev->max_vqs + 1)); in mlx5_vdpa_compat_reset()
3048 ndev->mvdev.actual_features = 0; in mlx5_vdpa_compat_reset()
3057 up_write(&ndev->reslock); in mlx5_vdpa_compat_reset()
3076 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_config() local
3079 memcpy(buf, (u8 *)&ndev->config + offset, len); in mlx5_vdpa_get_config()
3137 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_set_map() local
3140 down_write(&ndev->reslock); in mlx5_vdpa_set_map()
3142 up_write(&ndev->reslock); in mlx5_vdpa_set_map()
3149 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_reset_map() local
3152 down_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3154 up_write(&ndev->reslock); in mlx5_vdpa_reset_map()
3168 static void free_irqs(struct mlx5_vdpa_net *ndev) in free_irqs() argument
3173 if (!msix_mode_supported(&ndev->mvdev)) in free_irqs()
3176 if (!ndev->irqp.entries) in free_irqs()
3179 for (i = ndev->irqp.num_ent - 1; i >= 0; i--) { in free_irqs()
3180 ent = ndev->irqp.entries + i; in free_irqs()
3182 pci_msix_free_irq(ndev->mvdev.mdev->pdev, ent->map); in free_irqs()
3184 kfree(ndev->irqp.entries); in free_irqs()
3191 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_free() local
3193 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_free()
3195 free_resources(ndev); in mlx5_vdpa_free()
3197 if (!is_zero_ether_addr(ndev->config.mac)) { in mlx5_vdpa_free()
3199 mlx5_mpfs_del_mac(pfmdev, ndev->config.mac); in mlx5_vdpa_free()
3201 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_free()
3202 free_irqs(ndev); in mlx5_vdpa_free()
3203 kfree(ndev->event_cbs); in mlx5_vdpa_free()
3204 kfree(ndev->vqs); in mlx5_vdpa_free()
3211 struct mlx5_vdpa_net *ndev; in mlx5_get_vq_notification() local
3224 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_notification()
3225 addr = (phys_addr_t)ndev->mvdev.res.phys_kick_addr; in mlx5_get_vq_notification()
3234 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_get_vq_irq() local
3243 mvq = &ndev->vqs[idx]; in mlx5_get_vq_irq()
3257 static int counter_set_query(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_virtqueue *mvq, in counter_set_query() argument
3266 if (!counters_supported(&ndev->mvdev)) in counter_set_query()
3276 MLX5_SET(general_obj_in_cmd_hdr, cmd_hdr, uid, ndev->mvdev.res.uid); in counter_set_query()
3279 err = mlx5_cmd_exec(ndev->mvdev.mdev, in, sizeof(in), out, sizeof(out)); in counter_set_query()
3294 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_get_vendor_vq_stats() local
3301 down_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3315 mvq = &ndev->vqs[idx]; in mlx5_vdpa_get_vendor_vq_stats()
3316 err = counter_set_query(ndev, mvq, &received_desc, &completed_desc); in mlx5_vdpa_get_vendor_vq_stats()
3340 up_read(&ndev->reslock); in mlx5_vdpa_get_vendor_vq_stats()
3358 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_suspend() local
3364 down_write(&ndev->reslock); in mlx5_vdpa_suspend()
3365 unregister_link_notifier(ndev); in mlx5_vdpa_suspend()
3366 for (i = 0; i < ndev->cur_num_vqs; i++) { in mlx5_vdpa_suspend()
3367 mvq = &ndev->vqs[i]; in mlx5_vdpa_suspend()
3368 suspend_vq(ndev, mvq); in mlx5_vdpa_suspend()
3372 up_write(&ndev->reslock); in mlx5_vdpa_suspend()
3379 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_resume() local
3381 ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_resume()
3385 down_write(&ndev->reslock); in mlx5_vdpa_resume()
3387 resume_vqs(ndev); in mlx5_vdpa_resume()
3388 register_link_notifier(ndev); in mlx5_vdpa_resume()
3389 up_write(&ndev->reslock); in mlx5_vdpa_resume()
3465 static int alloc_resources(struct mlx5_vdpa_net *ndev) in alloc_resources() argument
3467 struct mlx5_vdpa_net_resources *res = &ndev->res; in alloc_resources()
3471 mlx5_vdpa_warn(&ndev->mvdev, "resources already allocated\n"); in alloc_resources()
3475 err = mlx5_vdpa_alloc_transport_domain(&ndev->mvdev, &res->tdn); in alloc_resources()
3479 err = create_tis(ndev); in alloc_resources()
3488 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in alloc_resources()
3492 static void free_resources(struct mlx5_vdpa_net *ndev) in free_resources() argument
3494 struct mlx5_vdpa_net_resources *res = &ndev->res; in free_resources()
3499 destroy_tis(ndev); in free_resources()
3500 mlx5_vdpa_dealloc_transport_domain(&ndev->mvdev, res->tdn); in free_resources()
3504 static void init_mvqs(struct mlx5_vdpa_net *ndev) in init_mvqs() argument
3509 for (i = 0; i < ndev->mvdev.max_vqs; ++i) { in init_mvqs()
3510 mvq = &ndev->vqs[i]; in init_mvqs()
3513 mvq->ndev = ndev; in init_mvqs()
3517 for (; i < ndev->mvdev.max_vqs; i++) { in init_mvqs()
3518 mvq = &ndev->vqs[i]; in init_mvqs()
3521 mvq->ndev = ndev; in init_mvqs()
3528 struct mlx5_vdpa_net *ndev; member
3554 static void allocate_irqs(struct mlx5_vdpa_net *ndev) in allocate_irqs() argument
3559 if (!msix_mode_supported(&ndev->mvdev)) in allocate_irqs()
3562 if (!ndev->mvdev.mdev->pdev) in allocate_irqs()
3565 ndev->irqp.entries = kcalloc(ndev->mvdev.max_vqs, sizeof(*ndev->irqp.entries), GFP_KERNEL); in allocate_irqs()
3566 if (!ndev->irqp.entries) in allocate_irqs()
3570 for (i = 0; i < ndev->mvdev.max_vqs; i++) { in allocate_irqs()
3571 ent = ndev->irqp.entries + i; in allocate_irqs()
3573 dev_name(&ndev->mvdev.vdev.dev), i); in allocate_irqs()
3574 ent->map = pci_msix_alloc_irq_at(ndev->mvdev.mdev->pdev, MSI_ANY_INDEX, NULL); in allocate_irqs()
3578 ndev->irqp.num_ent++; in allocate_irqs()
3589 struct mlx5_vdpa_net *ndev; in mlx5_vdpa_dev_add() local
3596 if (mgtdev->ndev) in mlx5_vdpa_dev_add()
3643 ndev = vdpa_alloc_device(struct mlx5_vdpa_net, mvdev.vdev, mdev->device, &mgtdev->vdpa_ops, in mlx5_vdpa_dev_add()
3645 if (IS_ERR(ndev)) in mlx5_vdpa_dev_add()
3646 return PTR_ERR(ndev); in mlx5_vdpa_dev_add()
3648 ndev->mvdev.max_vqs = max_vqs; in mlx5_vdpa_dev_add()
3649 mvdev = &ndev->mvdev; in mlx5_vdpa_dev_add()
3652 ndev->vqs = kcalloc(max_vqs, sizeof(*ndev->vqs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3653 ndev->event_cbs = kcalloc(max_vqs + 1, sizeof(*ndev->event_cbs), GFP_KERNEL); in mlx5_vdpa_dev_add()
3654 if (!ndev->vqs || !ndev->event_cbs) { in mlx5_vdpa_dev_add()
3659 init_mvqs(ndev); in mlx5_vdpa_dev_add()
3660 allocate_irqs(ndev); in mlx5_vdpa_dev_add()
3661 init_rwsem(&ndev->reslock); in mlx5_vdpa_dev_add()
3662 config = &ndev->config; in mlx5_vdpa_dev_add()
3675 ndev->config.mtu = cpu_to_mlx5vdpa16(mvdev, mtu); in mlx5_vdpa_dev_add()
3680 ndev->config.status |= cpu_to_mlx5vdpa16(mvdev, VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3682 ndev->config.status &= cpu_to_mlx5vdpa16(mvdev, ~VIRTIO_NET_S_LINK_UP); in mlx5_vdpa_dev_add()
3686 memcpy(ndev->config.mac, add_config->net.mac, ETH_ALEN); in mlx5_vdpa_dev_add()
3710 mlx5_vdpa_warn(&ndev->mvdev, in mlx5_vdpa_dev_add()
3719 ndev->mvdev.mlx_features = device_features; in mlx5_vdpa_dev_add()
3721 err = mlx5_vdpa_alloc_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3733 err = alloc_resources(ndev); in mlx5_vdpa_dev_add()
3737 ndev->cvq_ent.mvdev = mvdev; in mlx5_vdpa_dev_add()
3738 INIT_WORK(&ndev->cvq_ent.work, mlx5_cvq_kick_handler); in mlx5_vdpa_dev_add()
3750 mgtdev->ndev = ndev; in mlx5_vdpa_dev_add()
3756 free_resources(ndev); in mlx5_vdpa_dev_add()
3760 mlx5_vdpa_free_resources(&ndev->mvdev); in mlx5_vdpa_dev_add()
3773 struct mlx5_vdpa_net *ndev = to_mlx5_vdpa_ndev(mvdev); in mlx5_vdpa_dev_del() local
3776 unregister_link_notifier(ndev); in mlx5_vdpa_dev_del()
3781 mgtdev->ndev = NULL; in mlx5_vdpa_dev_del()