/linux/drivers/net/ethernet/huawei/hinic/
  hinic_hw_io.c:
      33   #define CI_TABLE_SIZE(num_qps) ((num_qps) * CI_Q_ADDR_SIZE)   [argument]
      197  * @num_qps: number of qps to write
      202  u16 num_qps)   in write_qp_ctxts()   [argument]
      204  return (write_sq_ctxts(func_to_io, base_qpn, num_qps) ||   in write_qp_ctxts()
      205  write_rq_ctxts(func_to_io, base_qpn, num_qps));   in write_qp_ctxts()
      365  * @num_qps: number queue pairs to create
      372  u16 base_qpn, int num_qps,   in hinic_io_create_qps()   [argument]
      381  func_to_io->qps = devm_kcalloc(&pdev->dev, num_qps,   in hinic_io_create_qps()
      386  func_to_io->sq_wq = devm_kcalloc(&pdev->dev, num_qps,   in hinic_io_create_qps()
      468  hinic_io_destroy_qps(struct hinic_func_to_io *func_to_io, int num_qps)   [argument]
      [all...]
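hinic_io_create_qps() above sizes several per-queue-pair arrays by num_qps, and write_qp_ctxts() folds the SQ and RQ context writes into a single error result. A minimal user-space sketch of those two patterns follows; the struct and helper names are invented stand-ins, not the hinic API.

    #include <stdio.h>
    #include <stdlib.h>

    struct qp { int id; };                  /* stand-in for struct hinic_qp         */
    struct io_ctx {                         /* stand-in for struct hinic_func_to_io */
        struct qp *qps;
        int num_qps;
    };

    /* hypothetical context writers: 0 on success, non-zero on failure */
    static int write_sq_ctxts(struct io_ctx *io, int base_qpn, int num_qps) { return 0; }
    static int write_rq_ctxts(struct io_ctx *io, int base_qpn, int num_qps) { return 0; }

    /* same shape as the driver's write_qp_ctxts(): either failure fails the call */
    static int write_qp_ctxts(struct io_ctx *io, int base_qpn, int num_qps)
    {
        return write_sq_ctxts(io, base_qpn, num_qps) ||
               write_rq_ctxts(io, base_qpn, num_qps);
    }

    int main(void)
    {
        struct io_ctx io = { .num_qps = 16 };

        /* size the per-QP array by num_qps, as devm_kcalloc() does in the driver */
        io.qps = calloc(io.num_qps, sizeof(*io.qps));
        if (!io.qps)
            return 1;

        printf("write_qp_ctxts: %d\n", write_qp_ctxts(&io, 0, io.num_qps));
        free(io.qps);
        return 0;
    }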
  hinic_hw_dev.c:
      65   nic_cap->num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;   in parse_capability()
      67   if (nic_cap->num_qps > HINIC_Q_CTXT_MAX)   in parse_capability()
      68   nic_cap->num_qps = HINIC_Q_CTXT_MAX;   in parse_capability()
      75   if (nic_cap->num_qps > nic_cap->max_qps)   in parse_capability()
      76   nic_cap->num_qps = nic_cap->max_qps;   in parse_capability()
      443  int err, num_aeqs, num_ceqs, num_qps;   in hinic_hwdev_ifup()   [local]
      472  num_qps = nic_cap->num_qps;   in hinic_hwdev_ifup()
      474  rq_msix_entries = &hwdev->msix_entries[num_aeqs + num_ceqs + num_qps];   in hinic_hwdev_ifup()
      476  err = hinic_io_create_qps(func_to_io, base_qpn, num_qps,   in hinic_hwdev_ifup()
      [all...]
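parse_capability() above derives num_qps from the interrupt budget (the vectors left after the AEQs and CEQs, two per queue pair, since line 474 places the RQ vectors after one SQ vector per QP) and then clamps it twice. A small sketch of that arithmetic, with a made-up value standing in for HINIC_Q_CTXT_MAX:

    #include <stdio.h>

    #define Q_CTXT_MAX 42    /* made-up stand-in for HINIC_Q_CTXT_MAX */

    /* mirror of the parse_capability() clamping: IRQs left over after the
     * AEQ/CEQ vectors, two vectors (SQ + RQ) per queue pair, then bounded
     * by the context-table and hardware limits */
    static unsigned int derive_num_qps(unsigned int num_irqs, unsigned int num_aeqs,
                                       unsigned int num_ceqs, unsigned int max_qps)
    {
        unsigned int num_qps = (num_irqs - (num_aeqs + num_ceqs)) / 2;

        if (num_qps > Q_CTXT_MAX)
            num_qps = Q_CTXT_MAX;
        if (num_qps > max_qps)
            num_qps = max_qps;

        return num_qps;
    }

    int main(void)
    {
        /* e.g. 64 vectors, 4 AEQs, 4 CEQs, hardware cap of 32 QPs -> 28 QPs */
        printf("num_qps = %u\n", derive_num_qps(64, 4, 4, 32));
        return 0;
    }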
  hinic_hw_io.h:
      110  u16 base_qpn, int num_qps,
      115  int num_qps);
  hinic_ethtool.c:
      643   if (queue >= nic_dev->num_qps) {   in __hinic_get_coalesce()
      697   q_id >= nic_dev->num_qps)   in set_queue_coalesce()
      731   if (queue >= nic_dev->num_qps) {   in __set_hw_coal_param()
      907   nic_dev->hwdev->nic_cap.num_qps = count;   in hinic_set_channels()
      916   nic_dev->hwdev->nic_cap.num_qps = count;   in hinic_set_channels()
      1112  cmd->data = nic_dev->num_qps;   in hinic_get_rxnfc()
      1358  for (qid = 0; qid < nic_dev->num_qps; qid++) {   in get_drv_queue_stats()
      1371  for (qid = 0; qid < nic_dev->num_qps; qid++) {   in get_drv_queue_stats()
      1440  q_num = nic_dev->num_qps;   in hinic_get_sset_count()
      1470  for (i = 0; i < nic_dev->num_qps;   in hinic_get_strings()
      [all...]
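Every per-queue ethtool path above guards the user-supplied queue index against num_qps before touching per-queue state. A trivial sketch of that guard, with hypothetical names rather than the hinic functions:

    #include <errno.h>
    #include <stdio.h>

    struct nic { unsigned short num_qps; };

    /* reject per-queue requests that address a queue the device did not create */
    static int check_queue_index(const struct nic *nic, unsigned int queue)
    {
        if (queue >= nic->num_qps)
            return -EINVAL;
        return 0;
    }

    int main(void)
    {
        struct nic nic = { .num_qps = 8 };

        printf("queue 3 -> %d\n", check_queue_index(&nic, 3));   /* 0       */
        printf("queue 9 -> %d\n", check_queue_index(&nic, 9));   /* -EINVAL */
        return 0;
    }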
  hinic_dev.h:
      86   u16 num_qps;   [member]
  hinic_hw_dev.h:
      35   u16 num_qps;   [member]
/linux/drivers/infiniband/hw/mlx5/
  gsi.c:
      95   int num_qps = 0;   in mlx5_ib_create_gsi()   [local]
      101  num_qps = pd->device->attrs.max_pkeys;   in mlx5_ib_create_gsi()
      103  num_qps = dev->lag_ports;   in mlx5_ib_create_gsi()
      107  gsi->tx_qps = kcalloc(num_qps, sizeof(*gsi->tx_qps), GFP_KERNEL);   in mlx5_ib_create_gsi()
      125  gsi->num_qps = num_qps;   in mlx5_ib_create_gsi()
      142  if (num_qps) {   in mlx5_ib_create_gsi()
      185  for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index) {   in mlx5_ib_destroy_gsi()
      340  for (qp_index = 0; qp_index < gsi->num_qps; ++qp_index)   in mlx5_ib_gsi_modify_qp()
      417  if (!gsi->num_qps)   in get_tx_qp()
      [all...]
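mlx5_ib_create_gsi() above picks the number of internal GSI transmit QPs either from the device's P_Key table size or from the number of LAG ports, allocates the tx_qps array with kcalloc(), and tolerates num_qps staying 0 (the if (num_qps) and !gsi->num_qps checks). A compact sketch of the selection and allocation, with invented fields standing in for the mlx5 structures:

    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* invented stand-ins for the mlx5 device/GSI structures */
    struct gsi_dev {
        bool per_pkey_qps;        /* invented flag for the driver's capability check */
        unsigned int max_pkeys;   /* as in device attrs.max_pkeys                    */
        unsigned int lag_ports;   /* number of LAG ports                             */
    };

    struct gsi {
        void **tx_qps;
        unsigned int num_qps;
    };

    static int create_gsi(struct gsi *gsi, const struct gsi_dev *dev)
    {
        unsigned int num_qps;

        /* same two sources as in mlx5_ib_create_gsi() */
        if (dev->per_pkey_qps)
            num_qps = dev->max_pkeys;
        else
            num_qps = dev->lag_ports;

        gsi->tx_qps = calloc(num_qps, sizeof(*gsi->tx_qps));
        if (!gsi->tx_qps && num_qps)
            return -1;

        gsi->num_qps = num_qps;   /* reused later by the modify/destroy loops */
        return 0;
    }

    int main(void)
    {
        struct gsi_dev dev = { .per_pkey_qps = false, .lag_ports = 2 };
        struct gsi gsi;

        if (create_gsi(&gsi, &dev) == 0)
            printf("allocated %u tx QPs\n", gsi.num_qps);
        free(gsi.tx_qps);
        return 0;
    }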
/linux/drivers/net/ethernet/huawei/hinic3/
  hinic3_nic_dev.h:
      45   u16 num_qps;   [member]
  hinic3_nic_io.h:
      100  u16 num_qps;   [member]
  hinic3_tx.c:
      571  if (unlikely(q_id >= nic_dev->q_params.num_qps))   in hinic3_xmit_frame()
      616  for (qid = 0; qid < nic_dev->q_params.num_qps; qid++) {   in hinic3_flush_txqs()
/linux/drivers/net/ethernet/qlogic/qed/
  qed_rdma.c:
      155  p_rdma_info->num_qps = num_cons;   in qed_rdma_alloc()
      157  p_rdma_info->num_qps = num_cons / 2; /* 2 cids per qp */   in qed_rdma_alloc()
      209  * the number of connections we support. (num_qps in iWARP or   in qed_rdma_alloc()
      210  * num_qps/2 in RoCE).   in qed_rdma_alloc()
      414  u32 num_qps;   in qed_rdma_init_devinfo()   [local]
      454  num_qps = ROCE_MAX_QPS;   in qed_rdma_init_devinfo()
      455  num_qps = min_t(u64, num_qps, p_hwfn->p_rdma_info->num_qps);   in qed_rdma_init_devinfo()
      456  dev->max_qp = num_qps;   in qed_rdma_init_devinfo()
      [all...]
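qed_rdma_alloc() above sizes num_qps from the connection-ID budget: one CID per QP for iWARP, two per QP for RoCE, and qed_rdma_init_devinfo() later caps the advertised max_qp with min_t(). A short sketch of that arithmetic; the ROCE_MAX_QPS value here is a placeholder, not the real constant:

    #include <stdbool.h>
    #include <stdio.h>

    #define ROCE_MAX_QPS  (32 * 1024)     /* placeholder value, not the real constant */

    static unsigned int min_u32(unsigned int a, unsigned int b)
    {
        return a < b ? a : b;             /* stand-in for the kernel's min_t() */
    }

    /* iWARP uses one CID per QP; RoCE needs two ("2 cids per qp" above) */
    static unsigned int qps_from_cids(unsigned int num_cons, bool is_iwarp)
    {
        return is_iwarp ? num_cons : num_cons / 2;
    }

    int main(void)
    {
        unsigned int num_cons = 65536;
        unsigned int num_qps = qps_from_cids(num_cons, false);    /* RoCE */

        /* mirror of qed_rdma_init_devinfo(): never advertise more than the cap */
        unsigned int max_qp = min_u32(ROCE_MAX_QPS, num_qps);

        printf("num_qps=%u max_qp=%u\n", num_qps, max_qp);
        return 0;
    }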
  qed_rdma.h:
      78   u32 num_qps;   [member]
/linux/drivers/net/ethernet/mellanox/mlx4/
  qp.c:
      426  (dev->caps.num_qps - 1), qp);   in mlx4_qp_alloc()
      524  radix_tree_delete(&dev->qp_table_tree, qp->qpn & (dev->caps.num_qps - 1));   in mlx4_qp_remove()
      574  err = mlx4_bitmap_init(*bitmap + MLX4_QP_TABLE_ZONE_GENERAL, dev->caps.num_qps,   in mlx4_create_zones()
      804  int last_base = dev->caps.num_qps;   in mlx4_init_qp_table()
      836  if (reserved_from_bot + reserved_from_top > dev->caps.num_qps) {   in mlx4_init_qp_table()
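The mlx4 snippets (and the mthca ones further down) key their QP lookup structures with qpn & (num_qps - 1). That mask is only a valid index when num_qps is a power of two, which the profile code arranges; a tiny sketch of why the mask then equals a modulo:

    #include <assert.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int num_qps = 1u << 16;      /* must be a power of two     */
        unsigned int qpn     = 0x412345;      /* arbitrary 24-bit QP number */

        /* power-of-two check: exactly one bit set */
        assert(num_qps && (num_qps & (num_qps - 1)) == 0);

        /* qpn & (num_qps - 1) equals qpn % num_qps for power-of-two num_qps */
        unsigned int key_mask = qpn & (num_qps - 1);
        unsigned int key_mod  = qpn % num_qps;

        printf("mask=0x%x mod=0x%x\n", key_mask, key_mod);
        assert(key_mask == key_mod);
        return 0;
    }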
  profile.c:
      184  dev->caps.num_qps = profile[i].num;   in mlx4_make_profile()
/linux/drivers/infiniband/hw/hns/
  hns_roce_main.c:
      179  props->max_qp = hr_dev->caps.num_qps;   in hns_roce_query_device()
      376  resp.qp_tab_size = hr_dev->caps.num_qps;   in hns_roce_alloc_ucontext()
      799  hr_dev->caps.num_qps);   in hns_roce_init_hem()
      809  hr_dev->caps.num_qps);   in hns_roce_init_hem()
      821  hr_dev->caps.num_qps);   in hns_roce_init_hem()
      854  hr_dev->caps.num_qps);   in hns_roce_init_hem()
/linux/drivers/net/ethernet/intel/i40e/
  i40e_virtchnl_pf.h:
      63   u16 num_qps; /* number of queue pairs requested by user */   [member]
  i40e_virtchnl_pf.c:
      348   if (queue_id < vf->ch[i].num_qps) {   in i40e_get_real_pf_qid()
      355   queue_id -= vf->ch[i].num_qps;   in i40e_get_real_pf_qid()
      899   qps = vf->ch[i].num_qps;   in i40e_map_pf_queues_to_vsi()
      947   qps = vf->ch[i].num_qps;   in i40e_map_pf_to_vf_queues()
      2354  num_qps_all += vf->ch[i].num_qps;   in i40e_vc_config_queues_msg()
      2409  if (j == (vf->ch[idx].num_qps - 1)) {   in i40e_vc_config_queues_msg()
      2426  vsi->num_queue_pairs = vf->ch[i].num_qps;   in i40e_vc_config_queues_msg()
      4100  vf->ch[i].num_qps = tci->list[i].count;   in i40e_vc_add_qch_msg()
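i40e_get_real_pf_qid() above walks the VF's ADq channels, subtracting each channel's num_qps from the VF-relative queue id until it falls inside the owning channel. A small sketch of that walk; the channel layout and the base_qid field are invented for illustration:

    #include <stdio.h>

    struct channel { unsigned int num_qps; unsigned int base_qid; };

    /* translate a VF-relative queue id to a PF queue id, mirroring the
     * subtract-per-channel walk in i40e_get_real_pf_qid() */
    static int map_queue(const struct channel *ch, int num_ch,
                         unsigned int queue_id, unsigned int *pf_qid)
    {
        for (int i = 0; i < num_ch; i++) {
            if (queue_id < ch[i].num_qps) {
                *pf_qid = ch[i].base_qid + queue_id;
                return i;                     /* owning channel index */
            }
            queue_id -= ch[i].num_qps;
        }
        return -1;                            /* queue id out of range */
    }

    int main(void)
    {
        /* hypothetical layout: 4 + 4 + 8 queue pairs across three channels */
        struct channel ch[] = {
            { .num_qps = 4, .base_qid = 0  },
            { .num_qps = 4, .base_qid = 16 },
            { .num_qps = 8, .base_qid = 32 },
        };
        unsigned int pf_qid = 0;
        int idx = map_queue(ch, 3, 6, &pf_qid);

        printf("channel=%d pf_qid=%u\n", idx, pf_qid);   /* channel=1 pf_qid=18 */
        return 0;
    }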
/linux/drivers/infiniband/hw/mthca/
  mthca_qp.c:
      245   qp = mthca_array_get(&dev->qp_table.qp, qpn & (dev->limits.num_qps - 1));   in mthca_qp_event()
      771   ((qp->qpn & (dev->limits.num_qps - 1)) * MTHCA_RDB_ENTRY_SIZE <<   in __mthca_modify_qp()
      1330  qp->qpn & (dev->limits.num_qps - 1), qp);   in mthca_alloc_qp()
      1462  qp->qpn & (dev->limits.num_qps - 1));   in mthca_free_qp()
      2273  dev->limits.num_qps,   in mthca_init_qp_table()
      2281  dev->limits.num_qps);   in mthca_init_qp_table()
      2302  mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);   in mthca_init_qp_table()
      2315  mthca_array_cleanup(&dev->qp_table.qp, dev->limits.num_qps);   in mthca_cleanup_qp_table()
  mthca_main.c:
      445  mdev->limits.num_qps,   in mthca_init_icm()
      456  mdev->limits.num_qps,   in mthca_init_icm()
      467  mdev->limits.num_qps <<   in mthca_init_icm()
  mthca_profile.c:
      173  dev->limits.num_qps = profile[i].num;   in mthca_make_profile()
  mthca_provider.c:
      90   props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;   in mthca_query_device()
      297  uresp.qp_tab_size = to_mdev(ibdev)->limits.num_qps;   in mthca_alloc_ucontext()
  mthca_dev.h:
      143  int num_qps;   [member]
/linux/include/linux/mlx4/
  qp.h:
      492  return radix_tree_lookup(&dev->qp_table_tree, qpn & (dev->caps.num_qps - 1));   in __mlx4_qp_lookup()
/linux/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_qp.c:
      228  if (!atomic_add_unless(&dev->num_qps, 1, dev->dsr->caps.max_qp))   in pvrdma_create_qp()
      426  atomic_dec(&dev->num_qps);   in pvrdma_create_qp()
      448  atomic_dec(&dev->num_qps);   in _pvrdma_free_qp()
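pvrdma caps the number of live QPs by doing atomic_add_unless(&dev->num_qps, 1, max_qp) on create and atomic_dec() on the destroy and error paths. A user-space sketch of the same bounded-counter idea with C11 atomics; the helper name is made up:

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* add 1 to *v unless it already equals limit; true if incremented.
     * Mimics the atomic_add_unless(v, 1, limit) use shown above. */
    static bool counter_inc_unless(atomic_int *v, int limit)
    {
        int cur = atomic_load(v);

        while (cur != limit) {
            /* on CAS failure, cur is refreshed with the current value */
            if (atomic_compare_exchange_weak(v, &cur, cur + 1))
                return true;
        }
        return false;
    }

    int main(void)
    {
        atomic_int num_qps = 2;
        int max_qp = 3;

        /* the first create fits under the cap, the second is refused */
        printf("create #1: %s\n", counter_inc_unless(&num_qps, max_qp) ? "ok" : "full");
        printf("create #2: %s\n", counter_inc_unless(&num_qps, max_qp) ? "ok" : "full");

        /* the destroy path simply decrements, as _pvrdma_free_qp() does */
        atomic_fetch_sub(&num_qps, 1);
        return 0;
    }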
  pvrdma.h:
      241  atomic_t num_qps;   [member]