Lines matching refs: rf

76 static void irdma_puda_ce_handler(struct irdma_pci_f *rf,  in irdma_puda_ce_handler()  argument
79 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_puda_ce_handler()
106 static void irdma_process_ceq(struct irdma_pci_f *rf, struct irdma_ceq *ceq) in irdma_process_ceq() argument
108 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_ceq()
128 queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work); in irdma_process_ceq()
131 irdma_puda_ce_handler(rf, cq); in irdma_process_ceq()
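
The matches above outline irdma_process_ceq()'s dispatch split: CQP completions are punted to a workqueue, while protocol (ILQ/IEQ) completions are handled inline. A minimal sketch of that loop, assuming a polling helper irdma_sc_process_ceq() and an IRDMA_CQ_TYPE_CQP tag (neither shown in the matches):

static void process_ceq_sketch(struct irdma_pci_f *rf, struct irdma_ceq *ceq)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	struct irdma_sc_cq *cq;

	/* Poll the CEQ for each CQ that reports new completions. */
	while ((cq = irdma_sc_process_ceq(dev, &ceq->sc_ceq))) {
		if (cq->cq_type == IRDMA_CQ_TYPE_CQP)
			/* Defer CQP completions to process context. */
			queue_work(rf->cqp_cmpl_wq, &rf->cqp_cmpl_work);
		else
			/* Handle ILQ/IEQ completions right here. */
			irdma_puda_ce_handler(rf, cq);
	}
}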
183 static void irdma_process_ae_def_cmpl(struct irdma_pci_f *rf, in irdma_process_ae_def_cmpl() argument
189 irdma_cqp_ce_handler(rf, &rf->ccq.sc_cq); in irdma_process_ae_def_cmpl()
191 irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, true, in irdma_process_ae_def_cmpl()
197 irdma_complete_cqp_request(&rf->cqp, cqp_request); in irdma_process_ae_def_cmpl()
198 irdma_sc_cqp_def_cmpl_ae_handler(&rf->sc_dev, info, false, in irdma_process_ae_def_cmpl()
207 static void irdma_process_aeq(struct irdma_pci_f *rf) in irdma_process_aeq() argument
209 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_process_aeq()
210 struct irdma_aeq *aeq = &rf->aeq; in irdma_process_aeq()
219 struct irdma_device *iwdev = rf->iwdev; in irdma_process_aeq()
236 rf->reset = true; in irdma_process_aeq()
237 rf->gen_ops.request_reset(rf); in irdma_process_aeq()
248 spin_lock_irqsave(&rf->qptable_lock, flags); in irdma_process_aeq()
249 iwqp = rf->qp_table[info->qp_cq_id]; in irdma_process_aeq()
251 spin_unlock_irqrestore(&rf->qptable_lock, in irdma_process_aeq()
263 spin_unlock_irqrestore(&rf->qptable_lock, flags); in irdma_process_aeq()
348 spin_lock_irqsave(&rf->cqtable_lock, flags); in irdma_process_aeq()
349 iwcq = rf->cq_table[info->qp_cq_id]; in irdma_process_aeq()
351 spin_unlock_irqrestore(&rf->cqtable_lock, in irdma_process_aeq()
358 spin_unlock_irqrestore(&rf->cqtable_lock, flags); in irdma_process_aeq()
381 irdma_process_ae_def_cmpl(rf, info); in irdma_process_aeq()
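
irdma_process_aeq() resolves each AE's qp_cq_id through rf->qp_table or rf->cq_table under the matching spinlock. A minimal sketch of that lookup idiom, assuming the driver's irdma_qp_add_ref() helper is what pins the QP before the lock is dropped:

static struct irdma_qp *aeq_get_qp_sketch(struct irdma_pci_f *rf, u32 qp_id)
{
	struct irdma_qp *iwqp;
	unsigned long flags;

	spin_lock_irqsave(&rf->qptable_lock, flags);
	iwqp = rf->qp_table[qp_id];
	if (iwqp)
		irdma_qp_add_ref(&iwqp->ibqp);	/* pin before unlocking */
	spin_unlock_irqrestore(&rf->qptable_lock, flags);

	return iwqp;	/* NULL if the QP is already gone */
}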
470 struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet); in irdma_dpc() local
472 if (rf->msix_shared) in irdma_dpc()
473 irdma_process_ceq(rf, rf->ceqlist); in irdma_dpc()
474 irdma_process_aeq(rf); in irdma_dpc()
475 irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx); in irdma_dpc()
485 struct irdma_pci_f *rf = iwceq->rf; in irdma_ceq_dpc() local
487 irdma_process_ceq(rf, iwceq); in irdma_ceq_dpc()
488 irdma_ena_intr(&rf->sc_dev, iwceq->msix_idx); in irdma_ceq_dpc()
498 static int irdma_save_msix_info(struct irdma_pci_f *rf) in irdma_save_msix_info() argument
507 if (!rf->msix_count) in irdma_save_msix_info()
510 size = sizeof(struct irdma_msix_vector) * rf->msix_count; in irdma_save_msix_info()
511 size += struct_size(iw_qvlist, qv_info, rf->msix_count); in irdma_save_msix_info()
512 rf->iw_msixtbl = kzalloc(size, GFP_KERNEL); in irdma_save_msix_info()
513 if (!rf->iw_msixtbl) in irdma_save_msix_info()
516 rf->iw_qvlist = (struct irdma_qvlist_info *) in irdma_save_msix_info()
517 (&rf->iw_msixtbl[rf->msix_count]); in irdma_save_msix_info()
518 iw_qvlist = rf->iw_qvlist; in irdma_save_msix_info()
520 iw_qvlist->num_vectors = rf->msix_count; in irdma_save_msix_info()
521 if (rf->msix_count <= num_online_cpus()) in irdma_save_msix_info()
522 rf->msix_shared = true; in irdma_save_msix_info()
524 pmsix = rf->msix_entries; in irdma_save_msix_info()
525 for (i = 0, ceq_idx = 0; i < rf->msix_count; i++, iw_qvinfo++) { in irdma_save_msix_info()
526 rf->iw_msixtbl[i].idx = pmsix->entry; in irdma_save_msix_info()
527 rf->iw_msixtbl[i].irq = pmsix->vector; in irdma_save_msix_info()
528 rf->iw_msixtbl[i].cpu_affinity = ceq_idx; in irdma_save_msix_info()
531 if (rf->msix_shared) in irdma_save_msix_info()
540 iw_qvinfo->v_idx = rf->iw_msixtbl[i].idx; in irdma_save_msix_info()
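
irdma_save_msix_info() packs the MSI-X table and the qvlist into one allocation, with struct_size() accounting for the qvlist's flexible qv_info[] array. Condensed from the matched lines (the -ENOMEM return is assumed):

	size = sizeof(struct irdma_msix_vector) * rf->msix_count;
	size += struct_size(iw_qvlist, qv_info, rf->msix_count);
	rf->iw_msixtbl = kzalloc(size, GFP_KERNEL);
	if (!rf->iw_msixtbl)
		return -ENOMEM;

	/* The qvlist begins immediately after the last table entry. */
	rf->iw_qvlist = (struct irdma_qvlist_info *)&rf->iw_msixtbl[rf->msix_count];
	rf->iw_qvlist->num_vectors = rf->msix_count;

	/* With no spare vectors, the AEQ shares vector 0 with CEQ 0. */
	if (rf->msix_count <= num_online_cpus())
		rf->msix_shared = true;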
554 struct irdma_pci_f *rf = data; in irdma_irq_handler() local
556 tasklet_schedule(&rf->dpc_tasklet); in irdma_irq_handler()
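
irdma_irq_handler() and irdma_dpc() together show the top-half/bottom-half split: the hard IRQ only schedules the tasklet, and the tasklet re-arms the vector after draining the queues. Assembled from the matches above:

static irqreturn_t irq_handler_sketch(int irq, void *data)
{
	struct irdma_pci_f *rf = data;

	tasklet_schedule(&rf->dpc_tasklet);	/* defer to softirq */

	return IRQ_HANDLED;
}

static void dpc_sketch(struct tasklet_struct *t)
{
	struct irdma_pci_f *rf = from_tasklet(rf, t, dpc_tasklet);

	if (rf->msix_shared)	/* vector 0 also carries CEQ 0 */
		irdma_process_ceq(rf, rf->ceqlist);
	irdma_process_aeq(rf);
	irdma_ena_intr(&rf->sc_dev, rf->iw_msixtbl[0].idx);	/* re-arm */
}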
571 ibdev_err(to_ibdev(&iwceq->rf->sc_dev), "expected irq = %d received irq = %d\n", in irdma_ceq_handler()
586 static void irdma_destroy_irq(struct irdma_pci_f *rf, in irdma_destroy_irq() argument
589 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_irq()
594 if (rf == dev_id) { in irdma_destroy_irq()
595 tasklet_kill(&rf->dpc_tasklet); in irdma_destroy_irq()
610 static void irdma_destroy_cqp(struct irdma_pci_f *rf) in irdma_destroy_cqp() argument
612 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_cqp()
613 struct irdma_cqp *cqp = &rf->cqp; in irdma_destroy_cqp()
620 irdma_cleanup_pending_cqp_op(rf); in irdma_destroy_cqp()
632 static void irdma_destroy_virt_aeq(struct irdma_pci_f *rf) in irdma_destroy_virt_aeq() argument
634 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_virt_aeq()
638 irdma_unmap_vm_page_list(&rf->hw, pg_arr, pg_cnt); in irdma_destroy_virt_aeq()
639 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_destroy_virt_aeq()
651 static void irdma_destroy_aeq(struct irdma_pci_f *rf) in irdma_destroy_aeq() argument
653 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_aeq()
654 struct irdma_aeq *aeq = &rf->aeq; in irdma_destroy_aeq()
657 if (!rf->msix_shared) { in irdma_destroy_aeq()
658 if (rf->sc_dev.privileged) in irdma_destroy_aeq()
659 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, in irdma_destroy_aeq()
660 rf->iw_msixtbl->idx, false); in irdma_destroy_aeq()
661 irdma_destroy_irq(rf, rf->iw_msixtbl, rf); in irdma_destroy_aeq()
663 if (rf->reset) in irdma_destroy_aeq()
673 irdma_destroy_virt_aeq(rf); in irdma_destroy_aeq()
689 static void irdma_destroy_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq) in irdma_destroy_ceq() argument
691 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ceq()
694 if (rf->reset) in irdma_destroy_ceq()
719 static void irdma_del_ceq_0(struct irdma_pci_f *rf) in irdma_del_ceq_0() argument
721 struct irdma_ceq *iwceq = rf->ceqlist; in irdma_del_ceq_0()
724 if (rf->msix_shared) { in irdma_del_ceq_0()
725 msix_vec = &rf->iw_msixtbl[0]; in irdma_del_ceq_0()
726 if (rf->sc_dev.privileged) in irdma_del_ceq_0()
727 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, in irdma_del_ceq_0()
730 irdma_destroy_irq(rf, msix_vec, rf); in irdma_del_ceq_0()
732 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceq_0()
733 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceq_0()
736 irdma_destroy_ceq(rf, iwceq); in irdma_del_ceq_0()
737 rf->sc_dev.ceq_valid = false; in irdma_del_ceq_0()
738 rf->ceqs_count = 0; in irdma_del_ceq_0()
748 static void irdma_del_ceqs(struct irdma_pci_f *rf) in irdma_del_ceqs() argument
750 struct irdma_ceq *iwceq = &rf->ceqlist[1]; in irdma_del_ceqs()
754 if (rf->msix_shared) in irdma_del_ceqs()
755 msix_vec = &rf->iw_msixtbl[1]; in irdma_del_ceqs()
757 msix_vec = &rf->iw_msixtbl[2]; in irdma_del_ceqs()
759 for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) { in irdma_del_ceqs()
760 if (rf->sc_dev.privileged) in irdma_del_ceqs()
761 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, in irdma_del_ceqs()
764 irdma_destroy_irq(rf, msix_vec, iwceq); in irdma_del_ceqs()
765 irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_del_ceqs()
767 dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size, in irdma_del_ceqs()
771 rf->ceqs_count = 1; in irdma_del_ceqs()
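
irdma_del_ceqs() walks CEQs 1..ceqs_count-1, unhooking each vector before destroying the queue and freeing its ring; which MSI-X entry pairs with CEQ 1 depends on whether vector 0 is shared. A hedged restatement (the irdma_cfg_ceq() argument order and the iwceq->mem fields are inferred from the truncated matches):

static void del_ceqs_sketch(struct irdma_pci_f *rf)
{
	struct irdma_ceq *iwceq = &rf->ceqlist[1];
	struct irdma_msix_vector *msix_vec;
	int i;

	/* Shared vector 0: CEQ 1 sits on entry 1; otherwise the AEQ owns
	 * entry 0, CEQ 0 owns entry 1, and CEQ 1 starts at entry 2. */
	msix_vec = rf->msix_shared ? &rf->iw_msixtbl[1] : &rf->iw_msixtbl[2];

	for (i = 1; i < rf->ceqs_count; i++, msix_vec++, iwceq++) {
		if (rf->sc_dev.privileged)
			rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, i,
							  msix_vec->idx, false);
		irdma_destroy_irq(rf, msix_vec, iwceq);
		irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq,
				  IRDMA_OP_CEQ_DESTROY);
		dma_free_coherent(rf->sc_dev.hw->device, iwceq->mem.size,
				  iwceq->mem.va, iwceq->mem.pa);
	}
	rf->ceqs_count = 1;	/* only CEQ 0 is left */
}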
781 static void irdma_destroy_ccq(struct irdma_pci_f *rf) in irdma_destroy_ccq() argument
783 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_destroy_ccq()
784 struct irdma_ccq *ccq = &rf->ccq; in irdma_destroy_ccq()
787 if (rf->cqp_cmpl_wq) in irdma_destroy_ccq()
788 destroy_workqueue(rf->cqp_cmpl_wq); in irdma_destroy_ccq()
790 if (!rf->reset) in irdma_destroy_ccq()
866 static int irdma_create_hmc_objs(struct irdma_pci_f *rf, bool privileged, in irdma_create_hmc_objs() argument
869 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_hmc_objs()
875 info.entry_type = rf->sd_type; in irdma_create_hmc_objs()
923 static int irdma_obj_aligned_mem(struct irdma_pci_f *rf, in irdma_obj_aligned_mem() argument
930 va = (unsigned long)rf->obj_next.va; in irdma_obj_aligned_mem()
936 memptr->pa = rf->obj_next.pa + extra; in irdma_obj_aligned_mem()
938 if (((u8 *)memptr->va + size) > ((u8 *)rf->obj_mem.va + rf->obj_mem.size)) in irdma_obj_aligned_mem()
941 rf->obj_next.va = (u8 *)memptr->va + size; in irdma_obj_aligned_mem()
942 rf->obj_next.pa = memptr->pa + size; in irdma_obj_aligned_mem()
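
irdma_obj_aligned_mem() is a bump allocator over the obj_mem DMA region: round the virtual address up to the requested alignment, offset the physical address by the same padding, bounds-check, and advance obj_next. A hedged reconstruction (the mask-to-alignment conversion is inferred):

static int obj_aligned_mem_sketch(struct irdma_pci_f *rf,
				  struct irdma_dma_mem *memptr,
				  u32 size, u32 mask)
{
	unsigned long va, newva;
	unsigned long extra;

	va = (unsigned long)rf->obj_next.va;
	newva = va;
	if (mask)
		newva = ALIGN(va, (unsigned long)mask + 1);
	extra = newva - va;			/* padding for alignment */
	memptr->va = (u8 *)va + extra;
	memptr->pa = rf->obj_next.pa + extra;	/* pa shifts by the same amount */
	memptr->size = size;

	if ((u8 *)memptr->va + size > (u8 *)rf->obj_mem.va + rf->obj_mem.size)
		return -ENOMEM;	/* would overrun the backing region */

	rf->obj_next.va = (u8 *)memptr->va + size;
	rf->obj_next.pa = memptr->pa + size;

	return 0;
}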
954 static int irdma_create_cqp(struct irdma_pci_f *rf) in irdma_create_cqp() argument
958 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_cqp()
960 struct irdma_cqp *cqp = &rf->cqp; in irdma_create_cqp()
992 status = irdma_obj_aligned_mem(rf, &mem, sizeof(struct irdma_cqp_ctx), in irdma_create_cqp()
1006 cqp_init_info.hmc_profile = rf->rsrc_profile; in irdma_create_cqp()
1008 cqp_init_info.protocol_used = rf->protocol_used; in irdma_create_cqp()
1010 switch (rf->rdma_ver) { in irdma_create_cqp()
1074 static int irdma_create_ccq(struct irdma_pci_f *rf) in irdma_create_ccq() argument
1076 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ccq()
1078 struct irdma_ccq *ccq = &rf->ccq; in irdma_create_ccq()
1085 ccq_size = (rf->rdma_ver >= IRDMA_GEN_3) ? IW_GEN_3_CCQ_SIZE : IW_CCQ_SIZE; in irdma_create_ccq()
1094 status = irdma_obj_aligned_mem(rf, &ccq->shadow_area, in irdma_create_ccq()
1110 info.vsi = &rf->default_vsi; in irdma_create_ccq()
1135 status = irdma_alloc_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1138 status = irdma_add_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1142 irdma_del_local_mac_entry(iwdev->rf, in irdma_alloc_set_mac()
1159 static int irdma_cfg_ceq_vector(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_cfg_ceq_vector() argument
1164 if (rf->msix_shared && !ceq_id) { in irdma_cfg_ceq_vector()
1166 "irdma-%s-AEQCEQ-0", dev_name(&rf->pcidev->dev)); in irdma_cfg_ceq_vector()
1167 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_ceq_vector()
1169 msix_vec->name, rf); in irdma_cfg_ceq_vector()
1173 dev_name(&rf->pcidev->dev), ceq_id); in irdma_cfg_ceq_vector()
1183 ibdev_dbg(&rf->iwdev->ibdev, "ERR: ceq irq config fail\n"); in irdma_cfg_ceq_vector()
1188 if (rf->sc_dev.privileged) in irdma_cfg_ceq_vector()
1189 rf->sc_dev.irq_ops->irdma_cfg_ceq(&rf->sc_dev, ceq_id, in irdma_cfg_ceq_vector()
1192 status = irdma_vchnl_req_ceq_vec_map(&rf->sc_dev, ceq_id, in irdma_cfg_ceq_vector()
1204 static int irdma_cfg_aeq_vector(struct irdma_pci_f *rf) in irdma_cfg_aeq_vector() argument
1206 struct irdma_msix_vector *msix_vec = rf->iw_msixtbl; in irdma_cfg_aeq_vector()
1209 if (!rf->msix_shared) { in irdma_cfg_aeq_vector()
1211 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev)); in irdma_cfg_aeq_vector()
1212 tasklet_setup(&rf->dpc_tasklet, irdma_dpc); in irdma_cfg_aeq_vector()
1214 msix_vec->name, rf); in irdma_cfg_aeq_vector()
1217 ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n"); in irdma_cfg_aeq_vector()
1221 if (rf->sc_dev.privileged) in irdma_cfg_aeq_vector()
1222 rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx, in irdma_cfg_aeq_vector()
1225 ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx); in irdma_cfg_aeq_vector()
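
irdma_cfg_aeq_vector() requests a dedicated IRQ only when the AEQ does not share vector 0 with CEQ 0, then programs the vector mapping directly when privileged or through the virtual channel otherwise. Sketch of that flow (request_irq() flags assumed):

static int cfg_aeq_vector_sketch(struct irdma_pci_f *rf)
{
	struct irdma_msix_vector *msix_vec = rf->iw_msixtbl;
	int ret = 0;

	if (!rf->msix_shared) {
		snprintf(msix_vec->name, sizeof(msix_vec->name) - 1,
			 "irdma-%s-AEQ", dev_name(&rf->pcidev->dev));
		tasklet_setup(&rf->dpc_tasklet, irdma_dpc);
		ret = request_irq(msix_vec->irq, irdma_irq_handler, 0,
				  msix_vec->name, rf);
	}
	if (ret) {
		ibdev_dbg(&rf->iwdev->ibdev, "ERR: aeq irq config fail\n");
		return ret;
	}

	if (rf->sc_dev.privileged)
		rf->sc_dev.irq_ops->irdma_cfg_aeq(&rf->sc_dev, msix_vec->idx,
						  true);
	else
		ret = irdma_vchnl_req_aeq_vec_map(&rf->sc_dev, msix_vec->idx);

	return ret;
}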
1240 static int irdma_create_ceq(struct irdma_pci_f *rf, struct irdma_ceq *iwceq, in irdma_create_ceq() argument
1245 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_ceq()
1249 iwceq->rf = rf; in irdma_create_ceq()
1250 ceq_size = min(rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt, in irdma_create_ceq()
1269 status = irdma_cqp_ceq_cmd(&rf->sc_dev, &iwceq->sc_ceq, in irdma_create_ceq()
1292 static int irdma_setup_ceq_0(struct irdma_pci_f *rf) in irdma_setup_ceq_0() argument
1300 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceq_0()
1301 rf->ceqlist = kcalloc(num_ceqs, sizeof(*rf->ceqlist), GFP_KERNEL); in irdma_setup_ceq_0()
1302 if (!rf->ceqlist) { in irdma_setup_ceq_0()
1307 iwceq = &rf->ceqlist[0]; in irdma_setup_ceq_0()
1308 status = irdma_create_ceq(rf, iwceq, 0, rf->default_vsi.vsi_idx); in irdma_setup_ceq_0()
1310 ibdev_dbg(&rf->iwdev->ibdev, "ERR: create ceq status = %d\n", in irdma_setup_ceq_0()
1316 i = rf->msix_shared ? 0 : 1; in irdma_setup_ceq_0()
1317 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceq_0()
1320 status = irdma_cfg_ceq_vector(rf, iwceq, 0, msix_vec); in irdma_setup_ceq_0()
1322 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceq_0()
1326 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceq_0()
1327 rf->ceqs_count++; in irdma_setup_ceq_0()
1330 if (status && !rf->ceqs_count) { in irdma_setup_ceq_0()
1331 kfree(rf->ceqlist); in irdma_setup_ceq_0()
1332 rf->ceqlist = NULL; in irdma_setup_ceq_0()
1335 rf->sc_dev.ceq_valid = true; in irdma_setup_ceq_0()
1349 static int irdma_setup_ceqs(struct irdma_pci_f *rf, u16 vsi_idx) in irdma_setup_ceqs() argument
1358 num_ceqs = min(rf->msix_count, rf->sc_dev.hmc_fpm_misc.max_ceqs); in irdma_setup_ceqs()
1359 i = (rf->msix_shared) ? 1 : 2; in irdma_setup_ceqs()
1361 iwceq = &rf->ceqlist[ceq_id]; in irdma_setup_ceqs()
1362 status = irdma_create_ceq(rf, iwceq, ceq_id, vsi_idx); in irdma_setup_ceqs()
1364 ibdev_dbg(&rf->iwdev->ibdev, in irdma_setup_ceqs()
1369 msix_vec = &rf->iw_msixtbl[i]; in irdma_setup_ceqs()
1372 status = irdma_cfg_ceq_vector(rf, iwceq, ceq_id, msix_vec); in irdma_setup_ceqs()
1374 irdma_destroy_ceq(rf, iwceq); in irdma_setup_ceqs()
1377 irdma_ena_intr(&rf->sc_dev, msix_vec->idx); in irdma_setup_ceqs()
1378 rf->ceqs_count++; in irdma_setup_ceqs()
1384 irdma_del_ceqs(rf); in irdma_setup_ceqs()
1389 static int irdma_create_virt_aeq(struct irdma_pci_f *rf, u32 size) in irdma_create_virt_aeq() argument
1391 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_virt_aeq()
1396 if (rf->rdma_ver < IRDMA_GEN_2) in irdma_create_virt_aeq()
1406 status = irdma_get_pble(rf->pble_rsrc, &aeq->palloc, pg_cnt, true); in irdma_create_virt_aeq()
1413 status = irdma_map_vm_page_list(&rf->hw, aeq->mem.va, pg_arr, pg_cnt); in irdma_create_virt_aeq()
1415 irdma_free_pble(rf->pble_rsrc, &aeq->palloc); in irdma_create_virt_aeq()
1430 static int irdma_create_aeq(struct irdma_pci_f *rf) in irdma_create_aeq() argument
1433 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_create_aeq()
1434 struct irdma_aeq *aeq = &rf->aeq; in irdma_create_aeq()
1435 struct irdma_hmc_info *hmc_info = rf->sc_dev.hmc_info; in irdma_create_aeq()
1437 u8 multiplier = (rf->protocol_used == IRDMA_IWARP_PROTOCOL_ONLY) ? 2 : 1; in irdma_create_aeq()
1444 if (rf->rdma_ver == IRDMA_GEN_3) in irdma_create_aeq()
1454 else if (rf->rdma_ver == IRDMA_GEN_3) in irdma_create_aeq()
1458 status = irdma_create_virt_aeq(rf, aeq_size); in irdma_create_aeq()
1472 info.msix_idx = rf->iw_msixtbl->idx; in irdma_create_aeq()
1485 irdma_destroy_virt_aeq(rf); in irdma_create_aeq()
1502 static int irdma_setup_aeq(struct irdma_pci_f *rf) in irdma_setup_aeq() argument
1504 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_setup_aeq()
1507 status = irdma_create_aeq(rf); in irdma_setup_aeq()
1511 status = irdma_cfg_aeq_vector(rf); in irdma_setup_aeq()
1513 irdma_destroy_aeq(rf); in irdma_setup_aeq()
1517 if (!rf->msix_shared) in irdma_setup_aeq()
1518 irdma_ena_intr(dev, rf->iw_msixtbl[0].idx); in irdma_setup_aeq()
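
irdma_setup_aeq() is a short create/configure ladder with an unwind: a vector-configuration failure destroys the freshly created AEQ before returning. Restated from the matches:

static int setup_aeq_sketch(struct irdma_pci_f *rf)
{
	struct irdma_sc_dev *dev = &rf->sc_dev;
	int status;

	status = irdma_create_aeq(rf);
	if (status)
		return status;

	status = irdma_cfg_aeq_vector(rf);
	if (status) {
		irdma_destroy_aeq(rf);	/* undo the create */
		return status;
	}

	if (!rf->msix_shared)	/* shared vector was armed with CEQ 0 */
		irdma_ena_intr(dev, rf->iw_msixtbl[0].idx);

	return 0;
}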
1540 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ilq()
1570 info.sq_size = min(iwdev->rf->max_qp / 2, (u32)32768); in irdma_initialize_ieq()
1588 struct irdma_pci_f *rf = iwdev->rf; in irdma_reinitialize_ieq() local
1592 iwdev->rf->reset = true; in irdma_reinitialize_ieq()
1593 rf->gen_ops.request_reset(rf); in irdma_reinitialize_ieq()
1605 static int irdma_hmc_setup(struct irdma_pci_f *rf) in irdma_hmc_setup() argument
1610 qpcnt = rsrc_limits_table[rf->limits_sel].qplimit; in irdma_hmc_setup()
1612 rf->sd_type = IRDMA_SD_TYPE_DIRECT; in irdma_hmc_setup()
1613 status = irdma_cfg_fpm_val(&rf->sc_dev, qpcnt); in irdma_hmc_setup()
1617 status = irdma_create_hmc_objs(rf, true, rf->rdma_ver); in irdma_hmc_setup()
1626 static void irdma_del_init_mem(struct irdma_pci_f *rf) in irdma_del_init_mem() argument
1628 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_del_init_mem()
1630 if (!rf->sc_dev.privileged) in irdma_del_init_mem()
1631 irdma_vchnl_req_put_hmc_fcn(&rf->sc_dev); in irdma_del_init_mem()
1634 vfree(rf->mem_rsrc); in irdma_del_init_mem()
1635 rf->mem_rsrc = NULL; in irdma_del_init_mem()
1636 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, in irdma_del_init_mem()
1637 rf->obj_mem.pa); in irdma_del_init_mem()
1638 rf->obj_mem.va = NULL; in irdma_del_init_mem()
1639 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_del_init_mem()
1640 bitmap_free(rf->allocated_ws_nodes); in irdma_del_init_mem()
1641 rf->allocated_ws_nodes = NULL; in irdma_del_init_mem()
1643 kfree(rf->ceqlist); in irdma_del_init_mem()
1644 rf->ceqlist = NULL; in irdma_del_init_mem()
1645 kfree(rf->iw_msixtbl); in irdma_del_init_mem()
1646 rf->iw_msixtbl = NULL; in irdma_del_init_mem()
1647 kfree(rf->hmc_info_mem); in irdma_del_init_mem()
1648 rf->hmc_info_mem = NULL; in irdma_del_init_mem()
1659 static int irdma_initialize_dev(struct irdma_pci_f *rf) in irdma_initialize_dev() argument
1662 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_initialize_dev()
1671 rf->hmc_info_mem = kzalloc(size, GFP_KERNEL); in irdma_initialize_dev()
1672 if (!rf->hmc_info_mem) in irdma_initialize_dev()
1675 rf->pble_rsrc = (struct irdma_hmc_pble_rsrc *)rf->hmc_info_mem; in irdma_initialize_dev()
1676 dev->hmc_info = &rf->hw.hmc; in irdma_initialize_dev()
1678 (rf->pble_rsrc + 1); in irdma_initialize_dev()
1680 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_QUERY_FPM_BUF_SIZE, in irdma_initialize_dev()
1688 status = irdma_obj_aligned_mem(rf, &mem, IRDMA_COMMIT_FPM_BUF_SIZE, in irdma_initialize_dev()
1696 info.bar0 = rf->hw.hw_addr; in irdma_initialize_dev()
1697 info.hmc_fn_id = rf->pf_id; in irdma_initialize_dev()
1698 info.protocol_used = rf->protocol_used; in irdma_initialize_dev()
1699 info.hw = &rf->hw; in irdma_initialize_dev()
1700 status = irdma_sc_dev_init(rf->rdma_ver, &rf->sc_dev, &info); in irdma_initialize_dev()
1706 kfree(rf->hmc_info_mem); in irdma_initialize_dev()
1707 rf->hmc_info_mem = NULL; in irdma_initialize_dev()
1725 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_deinit_hw()
1726 irdma_del_local_mac_entry(iwdev->rf, in irdma_rt_deinit_hw()
1732 iwdev->rf->reset); in irdma_rt_deinit_hw()
1738 iwdev->rf->reset); in irdma_rt_deinit_hw()
1754 static int irdma_setup_init_state(struct irdma_pci_f *rf) in irdma_setup_init_state() argument
1758 status = irdma_save_msix_info(rf); in irdma_setup_init_state()
1762 rf->hw.device = &rf->pcidev->dev; in irdma_setup_init_state()
1763 rf->obj_mem.size = ALIGN(8192, IRDMA_HW_PAGE_SIZE); in irdma_setup_init_state()
1764 rf->obj_mem.va = dma_alloc_coherent(rf->hw.device, rf->obj_mem.size, in irdma_setup_init_state()
1765 &rf->obj_mem.pa, GFP_KERNEL); in irdma_setup_init_state()
1766 if (!rf->obj_mem.va) { in irdma_setup_init_state()
1771 rf->obj_next = rf->obj_mem; in irdma_setup_init_state()
1772 status = irdma_initialize_dev(rf); in irdma_setup_init_state()
1779 dma_free_coherent(rf->hw.device, rf->obj_mem.size, rf->obj_mem.va, in irdma_setup_init_state()
1780 rf->obj_mem.pa); in irdma_setup_init_state()
1781 rf->obj_mem.va = NULL; in irdma_setup_init_state()
1783 kfree(rf->iw_msixtbl); in irdma_setup_init_state()
1784 rf->iw_msixtbl = NULL; in irdma_setup_init_state()
1796 iwdev->rf->used_pds = find_first_zero_bit(iwdev->rf->allocated_pds, in irdma_get_used_rsrc()
1797 iwdev->rf->max_pd); in irdma_get_used_rsrc()
1798 iwdev->rf->used_qps = find_first_zero_bit(iwdev->rf->allocated_qps, in irdma_get_used_rsrc()
1799 iwdev->rf->max_qp); in irdma_get_used_rsrc()
1800 iwdev->rf->used_cqs = find_first_zero_bit(iwdev->rf->allocated_cqs, in irdma_get_used_rsrc()
1801 iwdev->rf->max_cq); in irdma_get_used_rsrc()
1802 iwdev->rf->used_srqs = find_first_zero_bit(iwdev->rf->allocated_srqs, in irdma_get_used_rsrc()
1803 iwdev->rf->max_srq); in irdma_get_used_rsrc()
1804 iwdev->rf->used_mrs = find_first_zero_bit(iwdev->rf->allocated_mrs, in irdma_get_used_rsrc()
1805 iwdev->rf->max_mr); in irdma_get_used_rsrc()
1808 void irdma_ctrl_deinit_hw(struct irdma_pci_f *rf) in irdma_ctrl_deinit_hw() argument
1810 enum init_completion_state state = rf->init_state; in irdma_ctrl_deinit_hw()
1812 rf->init_state = INVALID_STATE; in irdma_ctrl_deinit_hw()
1816 irdma_destroy_aeq(rf); in irdma_ctrl_deinit_hw()
1819 irdma_destroy_pble_prm(rf->pble_rsrc); in irdma_ctrl_deinit_hw()
1822 irdma_del_ceqs(rf); in irdma_ctrl_deinit_hw()
1825 irdma_del_ceq_0(rf); in irdma_ctrl_deinit_hw()
1828 irdma_destroy_ccq(rf); in irdma_ctrl_deinit_hw()
1832 irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true, in irdma_ctrl_deinit_hw()
1833 rf->reset, rf->rdma_ver); in irdma_ctrl_deinit_hw()
1836 irdma_destroy_cqp(rf); in irdma_ctrl_deinit_hw()
1839 irdma_del_init_mem(rf); in irdma_ctrl_deinit_hw()
1843 ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", rf->init_state); in irdma_ctrl_deinit_hw()
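
irdma_ctrl_deinit_hw() tears down in reverse: it switches on how far irdma_ctrl_init_hw() got and falls through every stage below that point. A sketch of the ladder assembled from the matched calls and the init_state assignments further down; the case grouping for HW_RSRC_INITIALIZED and the fallthrough markers are assumptions:

static void ctrl_deinit_sketch(struct irdma_pci_f *rf)
{
	enum init_completion_state state = rf->init_state;

	rf->init_state = INVALID_STATE;	/* make re-entry harmless */

	switch (state) {
	case AEQ_CREATED:
		irdma_destroy_aeq(rf);
		fallthrough;
	case PBLE_CHUNK_MEM:
		irdma_destroy_pble_prm(rf->pble_rsrc);
		fallthrough;
	case CEQS_CREATED:
		irdma_del_ceqs(rf);
		fallthrough;
	case CEQ0_CREATED:
		irdma_del_ceq_0(rf);
		fallthrough;
	case CCQ_CREATED:
		irdma_destroy_ccq(rf);
		fallthrough;
	case HW_RSRC_INITIALIZED:
	case HMC_OBJS_CREATED:
		irdma_del_hmc_objects(&rf->sc_dev, rf->sc_dev.hmc_info, true,
				      rf->reset, rf->rdma_ver);
		fallthrough;
	case CQP_CREATED:
		irdma_destroy_cqp(rf);
		fallthrough;
	case INITIAL_STATE:
		irdma_del_init_mem(rf);
		break;
	default:
		ibdev_warn(&rf->iwdev->ibdev, "bad init_state = %d\n", state);
		break;
	}
}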
1859 struct irdma_pci_f *rf = iwdev->rf; in irdma_rt_init_hw() local
1860 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_rt_init_hw()
1869 vsi_info.register_qset = rf->gen_ops.register_qset; in irdma_rt_init_hw()
1870 vsi_info.unregister_qset = rf->gen_ops.unregister_qset; in irdma_rt_init_hw()
1874 status = irdma_setup_cm_core(iwdev, rf->rdma_ver); in irdma_rt_init_hw()
1902 if (iwdev->rf->sc_dev.hw_attrs.uk_attrs.hw_rev == IRDMA_GEN_1) in irdma_rt_init_hw()
1920 dev_err(&rf->pcidev->dev, "HW runtime init FAIL status = %d last cmpl = %d\n", in irdma_rt_init_hw()
1933 int irdma_ctrl_init_hw(struct irdma_pci_f *rf) in irdma_ctrl_init_hw() argument
1935 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_ctrl_init_hw()
1938 status = irdma_setup_init_state(rf); in irdma_ctrl_init_hw()
1941 rf->init_state = INITIAL_STATE; in irdma_ctrl_init_hw()
1943 status = irdma_create_cqp(rf); in irdma_ctrl_init_hw()
1946 rf->init_state = CQP_CREATED; in irdma_ctrl_init_hw()
1949 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_ctrl_init_hw()
1955 status = irdma_hmc_setup(rf); in irdma_ctrl_init_hw()
1958 rf->init_state = HMC_OBJS_CREATED; in irdma_ctrl_init_hw()
1960 status = irdma_initialize_hw_rsrc(rf); in irdma_ctrl_init_hw()
1963 rf->init_state = HW_RSRC_INITIALIZED; in irdma_ctrl_init_hw()
1965 status = irdma_create_ccq(rf); in irdma_ctrl_init_hw()
1968 rf->init_state = CCQ_CREATED; in irdma_ctrl_init_hw()
1970 status = irdma_setup_ceq_0(rf); in irdma_ctrl_init_hw()
1973 rf->init_state = CEQ0_CREATED; in irdma_ctrl_init_hw()
1975 rf->cqp_cmpl_wq = in irdma_ctrl_init_hw()
1977 if (!rf->cqp_cmpl_wq) { in irdma_ctrl_init_hw()
1981 INIT_WORK(&rf->cqp_cmpl_work, cqp_compl_worker); in irdma_ctrl_init_hw()
1984 status = irdma_setup_ceqs(rf, rf->iwdev ? rf->iwdev->vsi_num : 0); in irdma_ctrl_init_hw()
1988 rf->init_state = CEQS_CREATED; in irdma_ctrl_init_hw()
1990 status = irdma_hmc_init_pble(&rf->sc_dev, in irdma_ctrl_init_hw()
1991 rf->pble_rsrc); in irdma_ctrl_init_hw()
1995 rf->init_state = PBLE_CHUNK_MEM; in irdma_ctrl_init_hw()
1997 status = irdma_setup_aeq(rf); in irdma_ctrl_init_hw()
2000 rf->init_state = AEQ_CREATED; in irdma_ctrl_init_hw()
2005 dev_err(&rf->pcidev->dev, "IRDMA hardware initialization FAILED init_state=%d status=%d\n", in irdma_ctrl_init_hw()
2006 rf->init_state, status); in irdma_ctrl_init_hw()
2007 irdma_ctrl_deinit_hw(rf); in irdma_ctrl_init_hw()
2015 static void irdma_set_hw_rsrc(struct irdma_pci_f *rf) in irdma_set_hw_rsrc() argument
2017 rf->allocated_qps = (void *)(rf->mem_rsrc + in irdma_set_hw_rsrc()
2018 (sizeof(struct irdma_arp_entry) * rf->arp_table_size)); in irdma_set_hw_rsrc()
2019 rf->allocated_cqs = &rf->allocated_qps[BITS_TO_LONGS(rf->max_qp)]; in irdma_set_hw_rsrc()
2020 rf->allocated_srqs = &rf->allocated_cqs[BITS_TO_LONGS(rf->max_cq)]; in irdma_set_hw_rsrc()
2021 rf->allocated_mrs = &rf->allocated_srqs[BITS_TO_LONGS(rf->max_srq)]; in irdma_set_hw_rsrc()
2022 rf->allocated_pds = &rf->allocated_mrs[BITS_TO_LONGS(rf->max_mr)]; in irdma_set_hw_rsrc()
2023 rf->allocated_ahs = &rf->allocated_pds[BITS_TO_LONGS(rf->max_pd)]; in irdma_set_hw_rsrc()
2024 rf->allocated_mcgs = &rf->allocated_ahs[BITS_TO_LONGS(rf->max_ah)]; in irdma_set_hw_rsrc()
2025 rf->allocated_arps = &rf->allocated_mcgs[BITS_TO_LONGS(rf->max_mcg)]; in irdma_set_hw_rsrc()
2026 rf->qp_table = (struct irdma_qp **) in irdma_set_hw_rsrc()
2027 (&rf->allocated_arps[BITS_TO_LONGS(rf->arp_table_size)]); in irdma_set_hw_rsrc()
2028 rf->cq_table = (struct irdma_cq **)(&rf->qp_table[rf->max_qp]); in irdma_set_hw_rsrc()
2030 spin_lock_init(&rf->rsrc_lock); in irdma_set_hw_rsrc()
2031 spin_lock_init(&rf->arp_lock); in irdma_set_hw_rsrc()
2032 spin_lock_init(&rf->qptable_lock); in irdma_set_hw_rsrc()
2033 spin_lock_init(&rf->cqtable_lock); in irdma_set_hw_rsrc()
2034 spin_lock_init(&rf->qh_list_lock); in irdma_set_hw_rsrc()
2041 static u32 irdma_calc_mem_rsrc_size(struct irdma_pci_f *rf) in irdma_calc_mem_rsrc_size() argument
2045 rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size; in irdma_calc_mem_rsrc_size()
2046 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp); in irdma_calc_mem_rsrc_size()
2047 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mr); in irdma_calc_mem_rsrc_size()
2048 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_cq); in irdma_calc_mem_rsrc_size()
2049 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_srq); in irdma_calc_mem_rsrc_size()
2050 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_pd); in irdma_calc_mem_rsrc_size()
2051 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->arp_table_size); in irdma_calc_mem_rsrc_size()
2052 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_ah); in irdma_calc_mem_rsrc_size()
2053 rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_mcg); in irdma_calc_mem_rsrc_size()
2054 rsrc_size += sizeof(struct irdma_qp **) * rf->max_qp; in irdma_calc_mem_rsrc_size()
2055 rsrc_size += sizeof(struct irdma_cq **) * rf->max_cq; in irdma_calc_mem_rsrc_size()
2056 rsrc_size += sizeof(struct irdma_srq **) * rf->max_srq; in irdma_calc_mem_rsrc_size()
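
irdma_calc_mem_rsrc_size() and irdma_set_hw_rsrc() must stay in lockstep: every byte counted by the former is carved out of the single vzalloc'd rf->mem_rsrc blob by the latter, in the same order, with BITS_TO_LONGS() sizing each ID bitmap. A two-field illustration of the invariant:

	/* size pass: ARP table first, then the QP bitmap ... */
	rsrc_size = sizeof(struct irdma_arp_entry) * rf->arp_table_size;
	rsrc_size += sizeof(unsigned long) * BITS_TO_LONGS(rf->max_qp);

	rf->mem_rsrc = vzalloc(rsrc_size);

	/* carve pass: identical layout, identical order */
	rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc;
	rf->allocated_qps = (void *)(rf->mem_rsrc +
			    sizeof(struct irdma_arp_entry) * rf->arp_table_size);

	/* A field added to one pass but not the other silently corrupts
	 * every pointer carved after it. */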
2065 u32 irdma_initialize_hw_rsrc(struct irdma_pci_f *rf) in irdma_initialize_hw_rsrc() argument
2071 if (rf->rdma_ver != IRDMA_GEN_1) { in irdma_initialize_hw_rsrc()
2072 rf->allocated_ws_nodes = bitmap_zalloc(IRDMA_MAX_WS_NODES, in irdma_initialize_hw_rsrc()
2074 if (!rf->allocated_ws_nodes) in irdma_initialize_hw_rsrc()
2077 set_bit(0, rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2078 rf->max_ws_node_id = IRDMA_MAX_WS_NODES; in irdma_initialize_hw_rsrc()
2080 rf->max_cqe = rf->sc_dev.hw_attrs.uk_attrs.max_hw_cq_size; in irdma_initialize_hw_rsrc()
2081 rf->max_qp = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_QP].cnt; in irdma_initialize_hw_rsrc()
2082 rf->max_mr = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_MR].cnt; in irdma_initialize_hw_rsrc()
2083 rf->max_cq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_CQ].cnt; in irdma_initialize_hw_rsrc()
2084 rf->max_srq = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_SRQ].cnt; in irdma_initialize_hw_rsrc()
2085 rf->max_pd = rf->sc_dev.hw_attrs.max_hw_pds; in irdma_initialize_hw_rsrc()
2086 rf->arp_table_size = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_ARP].cnt; in irdma_initialize_hw_rsrc()
2087 rf->max_ah = rf->sc_dev.hmc_info->hmc_obj[IRDMA_HMC_IW_FSIAV].cnt; in irdma_initialize_hw_rsrc()
2088 rf->max_mcg = rf->max_qp; in irdma_initialize_hw_rsrc()
2090 rsrc_size = irdma_calc_mem_rsrc_size(rf); in irdma_initialize_hw_rsrc()
2091 rf->mem_rsrc = vzalloc(rsrc_size); in irdma_initialize_hw_rsrc()
2092 if (!rf->mem_rsrc) { in irdma_initialize_hw_rsrc()
2097 rf->arp_table = (struct irdma_arp_entry *)rf->mem_rsrc; in irdma_initialize_hw_rsrc()
2099 irdma_set_hw_rsrc(rf); in irdma_initialize_hw_rsrc()
2101 set_bit(0, rf->allocated_mrs); in irdma_initialize_hw_rsrc()
2102 set_bit(0, rf->allocated_qps); in irdma_initialize_hw_rsrc()
2103 set_bit(0, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2104 set_bit(0, rf->allocated_srqs); in irdma_initialize_hw_rsrc()
2105 set_bit(0, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2106 set_bit(0, rf->allocated_arps); in irdma_initialize_hw_rsrc()
2107 set_bit(0, rf->allocated_ahs); in irdma_initialize_hw_rsrc()
2108 set_bit(0, rf->allocated_mcgs); in irdma_initialize_hw_rsrc()
2109 set_bit(2, rf->allocated_qps); /* qp 2 IEQ */ in irdma_initialize_hw_rsrc()
2110 set_bit(1, rf->allocated_qps); /* qp 1 ILQ */ in irdma_initialize_hw_rsrc()
2111 set_bit(1, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2112 set_bit(1, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2113 set_bit(2, rf->allocated_cqs); in irdma_initialize_hw_rsrc()
2114 set_bit(2, rf->allocated_pds); in irdma_initialize_hw_rsrc()
2116 INIT_LIST_HEAD(&rf->mc_qht_list.list); in irdma_initialize_hw_rsrc()
2118 mrdrvbits = 24 - max(get_count_order(rf->max_mr), 14); in irdma_initialize_hw_rsrc()
2119 rf->mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits)); in irdma_initialize_hw_rsrc()
2124 bitmap_free(rf->allocated_ws_nodes); in irdma_initialize_hw_rsrc()
2125 rf->allocated_ws_nodes = NULL; in irdma_initialize_hw_rsrc()
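
The STag mask arithmetic at the end of irdma_initialize_hw_rsrc() reserves the top bits of the 32-bit STag for the driver. A worked example with an assumed MR count:

	/* Assume max_mr = 1 << 20 HMC MR objects. */
	u32 mrdrvbits = 24 - max(get_count_order(1 << 20), 14);	/* 24 - 20 = 4 */
	u32 mr_stagmask = ~(((1 << mrdrvbits) - 1) << (32 - mrdrvbits));
	/* mrdrvbits = 4, so mr_stagmask = 0x0fffffff: masking clears the
	 * top 4 STag bits, leaving 28 bits of MR index space. */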
2135 void irdma_cqp_ce_handler(struct irdma_pci_f *rf, struct irdma_sc_cq *cq) in irdma_cqp_ce_handler() argument
2138 struct irdma_sc_dev *dev = &rf->sc_dev; in irdma_cqp_ce_handler()
2146 spin_lock_irqsave(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2148 spin_unlock_irqrestore(&rf->cqp.compl_lock, flags); in irdma_cqp_ce_handler()
2157 ibdev_err(&rf->iwdev->ibdev, "cqp opcode = 0x%x maj_err_code = 0x%x min_err_code = 0x%x\n", in irdma_cqp_ce_handler()
2173 irdma_complete_cqp_request(&rf->cqp, in irdma_cqp_ce_handler()
2192 struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f, in cqp_compl_worker() local
2194 struct irdma_sc_cq *cq = &rf->ccq.sc_cq; in cqp_compl_worker()
2196 irdma_cqp_ce_handler(rf, cq); in cqp_compl_worker()
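
cqp_compl_worker() closes the loop opened in irdma_process_ceq(): the CEQ dispatch queued cqp_cmpl_work, and the worker recovers rf via container_of() and drains the CCQ in process context. Assembled from the matches above:

static void cqp_compl_worker_sketch(struct work_struct *work)
{
	struct irdma_pci_f *rf = container_of(work, struct irdma_pci_f,
					      cqp_cmpl_work);
	struct irdma_sc_cq *cq = &rf->ccq.sc_cq;

	irdma_cqp_ce_handler(rf, cq);	/* complete pending CQP requests */
}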
2257 void irdma_del_local_mac_entry(struct irdma_pci_f *rf, u16 idx) in irdma_del_local_mac_entry() argument
2259 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_del_local_mac_entry()
2275 irdma_handle_cqp_op(rf, cqp_request); in irdma_del_local_mac_entry()
2286 int irdma_add_local_mac_entry(struct irdma_pci_f *rf, const u8 *mac_addr, u16 idx) in irdma_add_local_mac_entry() argument
2289 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_add_local_mac_entry()
2308 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_add_local_mac_entry()
2323 int irdma_alloc_local_mac_entry(struct irdma_pci_f *rf, u16 *mac_tbl_idx) in irdma_alloc_local_mac_entry() argument
2325 struct irdma_cqp *iwcqp = &rf->cqp; in irdma_alloc_local_mac_entry()
2339 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_alloc_local_mac_entry()
2362 cqp_request = irdma_alloc_and_get_cqp_request(&iwdev->rf->cqp, add_port); in irdma_cqp_manage_apbvt_cmd()
2373 cqp_info->in.u.manage_apbvt_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_cqp_manage_apbvt_cmd()
2378 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2379 irdma_put_cqp_request(&iwdev->rf->cqp, cqp_request); in irdma_cqp_manage_apbvt_cmd()
2456 void irdma_manage_arp_cache(struct irdma_pci_f *rf, in irdma_manage_arp_cache() argument
2465 arp_index = irdma_arp_table(rf, ip_addr, ipv4, mac_addr, action); in irdma_manage_arp_cache()
2469 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false); in irdma_manage_arp_cache()
2483 cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2488 cqp_info->in.u.del_arp_cache_entry.cqp = &rf->cqp.sc_cqp; in irdma_manage_arp_cache()
2493 irdma_handle_cqp_op(rf, cqp_request); in irdma_manage_arp_cache()
2494 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_manage_arp_cache()
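
Every control-path helper from here down repeats the same CQP request lifecycle: allocate a request (waiting or fire-and-forget), fill in cqp_info, post it with irdma_handle_cqp_op(), and drop the reference. A sketch in the shape of irdma_manage_arp_cache(); the opcode constant and the scratch field are assumptions:

	struct irdma_cqp_request *cqp_request;
	struct cqp_cmds_info *cqp_info;

	cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, false);
	if (!cqp_request)
		return;

	cqp_info = &cqp_request->info;
	cqp_info->cqp_cmd = IRDMA_OP_ADD_ARP_CACHE_ENTRY;	/* assumed name */
	cqp_info->post_sq = 1;
	cqp_info->in.u.add_arp_cache_entry.cqp = &rf->cqp.sc_cqp;
	cqp_info->in.u.add_arp_cache_entry.scratch = (uintptr_t)cqp_request;

	irdma_handle_cqp_op(rf, cqp_request);		/* post; waits if requested */
	irdma_put_cqp_request(&rf->cqp, cqp_request);	/* drop our reference */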
2524 struct irdma_cqp *iwcqp = &iwdev->rf->cqp; in irdma_manage_qhash()
2589 cqp_info->in.u.manage_qhash_table_entry.cqp = &iwdev->rf->cqp.sc_cqp; in irdma_manage_qhash()
2593 status = irdma_handle_cqp_op(iwdev->rf, cqp_request); in irdma_manage_qhash()
2646 int irdma_hw_flush_wqes(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_hw_flush_wqes() argument
2655 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_hw_flush_wqes()
2668 status = irdma_handle_cqp_op(rf, cqp_request); in irdma_hw_flush_wqes()
2672 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2703 new_req = irdma_alloc_and_get_cqp_request(&rf->cqp, true); in irdma_hw_flush_wqes()
2716 status = irdma_handle_cqp_op(rf, new_req); in irdma_hw_flush_wqes()
2725 irdma_put_cqp_request(&rf->cqp, new_req); in irdma_hw_flush_wqes()
2736 ibdev_dbg(&rf->iwdev->ibdev, in irdma_hw_flush_wqes()
2738 iwqp->ibqp.qp_num, rf->protocol_used, iwqp->iwarp_state, in irdma_hw_flush_wqes()
2743 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_hw_flush_wqes()
2755 void irdma_gen_ae(struct irdma_pci_f *rf, struct irdma_sc_qp *qp, in irdma_gen_ae() argument
2762 cqp_request = irdma_alloc_and_get_cqp_request(&rf->cqp, wait); in irdma_gen_ae()
2774 irdma_handle_cqp_op(rf, cqp_request); in irdma_gen_ae()
2775 irdma_put_cqp_request(&rf->cqp, cqp_request); in irdma_gen_ae()
2781 struct irdma_pci_f *rf = iwqp->iwdev->rf; in irdma_flush_wqes() local
2786 ((flush_mask & IRDMA_REFLUSH) && rf->rdma_ver >= IRDMA_GEN_3)) in irdma_flush_wqes()
2823 (void)irdma_hw_flush_wqes(rf, &iwqp->sc_qp, &info, in irdma_flush_wqes()