Lines Matching +full:rpm +full:-stats
(Matched lines are excerpts from the Marvell OcteonTX2/CN10K nic driver header, drivers/net/ethernet/marvell/octeontx2/nic/otx2_common.h; each fragment is listed under the function or macro it appears in, with "..." marking elided lines.)
/* SPDX-License-Identifier: GPL-2.0 */

/* NIX TX stats */
/* NIX RX stats */
/* Driver counted stats */
/* MSI-X */
/* Stats */

/* Stats which need to be accumulated in software because ... */
struct cn10k_txsc_stats stats;
struct cn10k_rxsc_stats stats;
in is_otx2_lbkvf():
        return pdev->device == PCI_DEVID_OCTEONTX2_RVU_AFVF;

in is_96xx_A0():
        return (pdev->revision == 0x00) &&
               (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);

in is_96xx_B0():
        return (pdev->revision == 0x01) &&
               (pdev->subsystem_device == PCI_SUBSYS_DEVID_96XX_RVU_PFVF);

in is_dev_otx2():
        u8 midr = pdev->revision & 0xF0;

in is_dev_cn10kb():
        return pdev->subsystem_device == PCI_SUBSYS_DEVID_CN10K_B_RVU_PFVF;
in otx2_setup_dev_hw_settings():
        struct otx2_hw *hw = &pfvf->hw;

        pfvf->hw.cq_time_wait = CQ_TIMER_THRESH_DEFAULT;
        pfvf->hw.cq_ecount_wait = CQ_CQE_THRESH_DEFAULT;
        pfvf->hw.cq_qcount_wait = CQ_QCOUNT_DEFAULT;

        __set_bit(HW_TSO, &hw->cap_flag);

        /* 96xx A0 silicon: HW TSO is unusable and CQ/RQ settings need
         * errata workarounds.
         */
        if (is_96xx_A0(pfvf->pdev)) {
                __clear_bit(HW_TSO, &hw->cap_flag);
                pfvf->hw.cq_qcount_wait = 0x0;
                pfvf->hw.rq_skid = 600;
                pfvf->qset.rqe_cnt = Q_COUNT(Q_SIZE_1K);
        }
        if (is_96xx_B0(pfvf->pdev))
                __clear_bit(HW_TSO, &hw->cap_flag);

        /* CN10K (non-OcteonTX2) capability bits */
        if (!is_dev_otx2(pfvf->pdev)) {
                __set_bit(CN10K_MBOX, &hw->cap_flag);
                __set_bit(CN10K_LMTST, &hw->cap_flag);
                __set_bit(CN10K_RPM, &hw->cap_flag);
                __set_bit(CN10K_PTP_ONESTEP, &hw->cap_flag);
                __set_bit(QOS_CIR_PIR_SUPPORT, &hw->cap_flag);
        }

        if (is_dev_cn10kb(pfvf->pdev))
                __set_bit(CN10K_HW_MACSEC, &hw->cap_flag);
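The cap_flag bits set above are checked elsewhere in the driver with test_bit(). A minimal sketch of that pattern follows; the helper name is hypothetical and only the test_bit() usage mirrors the driver:

/* Hypothetical helper, not part of the driver: picks the MAC block name
 * the way the driver distinguishes the CN10K RPM MAC from OcteonTX2 CGX.
 */
static const char *example_mac_block_name(struct otx2_nic *pfvf)
{
        return test_bit(CN10K_RPM, &pfvf->hw.cap_flag) ? "rpm" : "cgx";
}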
in otx2_get_regaddr():
        blkaddr = nic->nix_blkaddr;
        ...
        return nic->reg_base + offset;
in otx2_mbox_bbuf_init():
        mbox->bbuf_base = devm_kmalloc(&pdev->dev, MBOX_SIZE, GFP_KERNEL);
        if (!mbox->bbuf_base)
                return -ENOMEM;

        /* Point both mailbox directions at the bounce buffer */
        otx2_mbox = &mbox->mbox;
        mdev = &otx2_mbox->dev[0];
        mdev->mbase = mbox->bbuf_base;

        otx2_mbox = &mbox->mbox_up;
        mdev = &otx2_mbox->dev[0];
        mdev->mbase = mbox->bbuf_base;
in otx2_sync_mbox_bbuf():
        void *hw_mbase = mbox->hwbase + (devid * MBOX_SIZE);
        struct otx2_mbox_dev *mdev = &mbox->dev[devid];
        ...
        /* No bounce buffer in use for this mbox; nothing to sync */
        if (mdev->mbase == hw_mbase)
                return;

        hdr = hw_mbase + mbox->rx_start;
        msg_size = hdr->msg_size;

        /* Clamp to the RX region */
        if (msg_size > mbox->rx_size - msgs_offset)
                msg_size = mbox->rx_size - msgs_offset;

        /* Copy received messages from HW mbox memory into the bounce buffer */
        memcpy(mdev->mbase + mbox->rx_start,
               hw_mbase + mbox->rx_start, msg_size + msgs_offset);
/* With the absence of API for 128-bit IO memory access for arm64, ... */
in __cn10k_aura_freeptr():
        lmt_info = per_cpu_ptr(pfvf->hw.lmt_info, smp_processor_id());
        ...
        val = (lmt_info->lmt_id & 0x7FF) | BIT_ULL(63);
        ...
        /* tar_addr[6:4] is LMTST size-1 in units of 128b */
        tar_addr |= ((size - 1) & 0x7) << 4;
        ...
        memcpy((u64 *)lmt_info->lmt_addr, ptrs, sizeof(u64) * num_ptrs);
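As a worked reading of the size comment above (an illustration, not an extra guarantee from the hardware documentation): each pointer is a u64, so num_ptrs pointers occupy num_ptrs / 2 (rounded up) 128-bit LMT words; freeing eight pointers therefore uses four words and tar_addr[6:4] is written as size - 1 = 3.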
in otx2_get_pool_idx():
        return pfvf->hw.rqpool_cnt + idx;
in otx2_sync_mbox_msg():
        if (!otx2_mbox_nonempty(&mbox->mbox, 0))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox, 0);
        err = otx2_mbox_wait_for_rsp(&mbox->mbox, 0);
        if (err)
                return err;
        return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);

in otx2_sync_mbox_up_msg():
        /* Same sequence on the mbox_up channel for the given devid */
        if (!otx2_mbox_nonempty(&mbox->mbox_up, devid))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox_up, devid);
        err = otx2_mbox_wait_for_rsp(&mbox->mbox_up, devid);
        if (err)
                return err;
        return otx2_mbox_check_rsp_msgs(&mbox->mbox_up, devid);

in otx2_sync_mbox_msg_busy_poll():
        /* Same sequence, but polls for the response instead of a sleeping wait */
        if (!otx2_mbox_nonempty(&mbox->mbox, 0))
                return 0;
        otx2_mbox_msg_send(&mbox->mbox, 0);
        err = otx2_mbox_busy_poll_for_rsp(&mbox->mbox, 0);
        if (err)
                return err;
        return otx2_mbox_check_rsp_msgs(&mbox->mbox, 0);
/* Body of the M() macro that generates one otx2_mbox_alloc_msg_<name>()
 * helper per MBOX_MESSAGES entry:
 */
        req = (struct _req_type *)otx2_mbox_alloc_msg_rsp(              \
                &mbox->mbox, 0, sizeof(struct _req_type),               \
                sizeof(struct _rsp_type));                              \
        ...
        req->hdr.sig = OTX2_MBOX_REQ_SIG;                               \
        req->hdr.id = _id;                                              \
        trace_otx2_msg_alloc(mbox->mbox.pdev, _id, sizeof(*req));      \
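For context, a minimal sketch of how these generated allocators are typically paired with otx2_sync_mbox_msg() above, loosely modeled on the driver's LMAC (CGX/RPM) stats refresh; the function name is illustrative and exact message names can differ between kernel versions:

static int example_refresh_mac_stats(struct otx2_nic *pfvf)
{
        struct msg_req *req;
        int err;

        mutex_lock(&pfvf->mbox.lock);
        /* otx2_mbox_alloc_msg_cgx_stats() is one of the M()-generated helpers */
        req = otx2_mbox_alloc_msg_cgx_stats(&pfvf->mbox);
        if (!req) {
                mutex_unlock(&pfvf->mbox.lock);
                return -ENOMEM;
        }

        err = otx2_sync_mbox_msg(&pfvf->mbox); /* send and wait for the AF response */
        mutex_unlock(&pfvf->mbox.lock);
        return err;
}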
in otx2_dma_map_page():
        iova = dma_map_page_attrs(pfvf->dev, page,
                                  offset, size, dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (unlikely(dma_mapping_error(pfvf->dev, iova)))
                return (dma_addr_t)NULL;

in otx2_dma_unmap_page():
        dma_unmap_page_attrs(pfvf->dev, addr, size,
                             dir, DMA_ATTR_SKIP_CPU_SYNC);
in otx2_get_smq_idx():
        /* PFC: priorities with a dedicated scheduler queue use their own SMQ */
        if (qidx < NIX_PF_PFC_PRIO_MAX && pfvf->pfc_alloc_status[qidx])
                return pfvf->pfc_schq_list[NIX_TXSCH_LVL_SMQ][qidx];

        /* QoS queues map via qid_to_sqmap; the rest use the default SMQ */
        if (qidx >= pfvf->hw.non_qos_queues)
                smq = pfvf->qos.qid_to_sqmap[qidx - pfvf->hw.non_qos_queues];
        else
                smq = pfvf->hw.txschq_list[NIX_TXSCH_LVL_SMQ][0];
in otx2_get_total_tx_queues():
        return pfvf->hw.non_qos_queues + pfvf->hw.tc_tx_queues;
in otx2_tc_flower_rule_cnt():
        if (!pfvf->flow_cfg)
                return 0;
        return pfvf->flow_cfg->nr_flows;
/* MSI-X APIs */

/* Device stats APIs */
void otx2_get_stats64(struct net_device *netdev,
                      struct rtnl_link_stats64 *stats);

/* CGX/RPM DMAC filters support */
in otx2_qos_init():
        struct otx2_hw *hw = &pfvf->hw;

        hw->tc_tx_queues = qos_txqs;
        INIT_LIST_HEAD(&pfvf->qos.qos_tree);
        mutex_init(&pfvf->qos.qos_lock);
in otx2_shutdown_qos():
        mutex_destroy(&pfvf->qos.qos_lock);