/linux/drivers/net/ethernet/marvell/octeon_ep_vf/
octep_vf_rx.c
    15  static void octep_vf_oq_reset_indices(struct octep_vf_oq *oq)
    17      oq->host_read_idx = 0;
    18      oq->host_refill_idx = 0;
    19      oq->refill_count = 0;
    20      oq->last_pkt_count = 0;
    21      oq->pkts_pending = 0;
    27   * @oq: Octeon Rx queue data structure.
    32  static int octep_vf_oq_fill_ring_buffers(struct octep_vf_oq *oq)
    34      struct octep_vf_oq_desc_hw *desc_ring = oq->desc_ring;
    38      for (i = 0; i < oq ...
    79  octep_vf_oq_refill(struct octep_vf_device *oct, struct octep_vf_oq *oq)
   125  struct octep_vf_oq *oq;    (local in octep_vf_setup_oq)
   200  octep_vf_oq_free_ring_buffers(struct octep_vf_oq *oq)
   227  octep_vf_free_oq(struct octep_vf_oq *oq)
   319  octep_vf_oq_check_hw_for_pkts(struct octep_vf_device *oct, struct octep_vf_oq *oq)
   355  __octep_vf_oq_process_rx(struct octep_vf_device *oct, struct octep_vf_oq *oq, u16 pkts_to_process)
   479  octep_vf_oq_process_rx(struct octep_vf_oq *oq, int budget)
        ... (more matches not shown)
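The octep_vf_rx.c hits above outline the life cycle of a VF Rx output queue: reset the ring indices, fill the descriptor ring with buffers, refill consumed slots, and process received packets against a budget. Below is a minimal userspace sketch of that index/refill bookkeeping; the struct, its field widths, and the malloc()-based allocation are simplifications assumed from the field names visible in the hits, not the driver's actual types.

/*
 * Simplified model of an Rx output queue (OQ) ring: the host reads
 * completed descriptors at host_read_idx and later refills the freed
 * slots starting at host_refill_idx.
 */
#include <stdlib.h>

struct oq_model {
	unsigned int max_count;        /* number of descriptors in the ring   */
	unsigned int host_read_idx;    /* next descriptor the host will read  */
	unsigned int host_refill_idx;  /* next slot to give a fresh buffer    */
	unsigned int refill_count;     /* slots consumed but not yet refilled */
	unsigned int pkts_pending;     /* packets seen by HW, not yet handled */
	void **buffers;                /* one receive buffer per descriptor   */
	size_t buf_size;
};

void oq_reset_indices(struct oq_model *oq)
{
	oq->host_read_idx = 0;
	oq->host_refill_idx = 0;
	oq->refill_count = 0;
	oq->pkts_pending = 0;
}

/* Refill every slot that has been consumed since the last refill. */
int oq_refill(struct oq_model *oq)
{
	int refilled = 0;

	while (oq->refill_count) {
		void *buf = malloc(oq->buf_size);

		if (!buf)
			break;	/* retry on the next pass */
		oq->buffers[oq->host_refill_idx] = buf;
		oq->host_refill_idx = (oq->host_refill_idx + 1) % oq->max_count;
		oq->refill_count--;
		refilled++;
	}
	return refilled;
}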
octep_vf_cn9k.c
   151  conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;    (in octep_vf_init_config_cn93_vf)
   152  conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
   153  conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
   154  conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
   155  conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
   201  struct octep_vf_oq *oq = oct->oq[oq_no];    (local in octep_vf_setup_oq_regs_cn93)
   227  octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
   228  octep_vf_write_csr64(oct, CN93_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);
   232  oq_ctl |= (oq ...
   273  struct octep_vf_oq *oq;    (local in octep_vf_ioq_intr_handler_cn93)
        ... (more matches not shown)
octep_vf_cnxk.c
   153  conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;    (in octep_vf_init_config_cnxk_vf)
   154  conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
   155  conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
   156  conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
   157  conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
   158  conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;
   204  struct octep_vf_oq *oq = oct->oq[oq_no];    (local in octep_vf_setup_oq_regs_cnxk)
   230  octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
   231  octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq ...
   284  struct octep_vf_oq *oq;    (local in octep_vf_ioq_intr_handler_cnxk)
        ... (more matches not shown)
/linux/drivers/net/ethernet/cavium/liquidio/
cn23xx_vf_regs.h
   164  #define CN23XX_VF_SLI_OQ_PKT_CONTROL(oq) \
   165          (CN23XX_VF_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_VF_OQ_OFFSET))
   167  #define CN23XX_VF_SLI_OQ_BASE_ADDR64(oq) \
   168          (CN23XX_VF_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_VF_OQ_OFFSET))
   170  #define CN23XX_VF_SLI_OQ_SIZE(oq) \
   171          (CN23XX_VF_SLI_OQ_SIZE_START + ((oq) * CN23XX_VF_OQ_OFFSET))
   173  #define CN23XX_VF_SLI_OQ_BUFF_INFO_SIZE(oq) \
   174          (CN23XX_VF_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_VF_OQ_OFFSET))
   176  #define CN23XX_VF_SLI_OQ_PKTS_SENT(oq) \
   177          (CN23XX_VF_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_VF_OQ_OFFSE ...
   179  CN23XX_VF_SLI_OQ_PKTS_CREDIT(oq)    (macro, definition elided)
   182  CN23XX_VF_SLI_OQ_PKT_INT_LEVELS(oq)    (macro, definition elided)
   186  CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_CNT(oq)    (macro, definition elided)
   189  CN23XX_VF_SLI_OQ_PKT_INT_LEVELS_TIME(oq)    (macro, definition elided)
        ... (more matches not shown)
cn23xx_pf_regs.h
   282  #define CN23XX_SLI_OQ_PKT_CONTROL(oq) \
   283          (CN23XX_SLI_OQ_PKT_CONTROL_START + ((oq) * CN23XX_OQ_OFFSET))
   285  #define CN23XX_SLI_OQ_BASE_ADDR64(oq) \
   286          (CN23XX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN23XX_OQ_OFFSET))
   288  #define CN23XX_SLI_OQ_SIZE(oq) \
   289          (CN23XX_SLI_OQ_SIZE_START + ((oq) * CN23XX_OQ_OFFSET))
   291  #define CN23XX_SLI_OQ_BUFF_INFO_SIZE(oq) \
   292          (CN23XX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN23XX_OQ_OFFSET))
   294  #define CN23XX_SLI_OQ_PKTS_SENT(oq) \
   295          (CN23XX_SLI_OQ_PKT_SENT_START + ((oq) * CN23XX_OQ_OFFSE ...
   297  CN23XX_SLI_OQ_PKTS_CREDIT(oq)    (macro, definition elided)
   300  CN23XX_SLI_OQ_PKT_INT_LEVELS(oq)    (macro, definition elided)
   305  CN23XX_SLI_OQ_PKT_INT_LEVELS_CNT(oq)    (macro, definition elided)
   309  CN23XX_SLI_OQ_PKT_INT_LEVELS_TIME(oq)    (macro, definition elided)
        ... (more matches not shown)
cn66xx_regs.h
   277  #define CN6XXX_SLI_OQ_BASE_ADDR64(oq) \
   278          (CN6XXX_SLI_OQ_BASE_ADDR_START64 + ((oq) * CN6XXX_OQ_OFFSET))
   280  #define CN6XXX_SLI_OQ_SIZE(oq) \
   281          (CN6XXX_SLI_OQ_SIZE_START + ((oq) * CN6XXX_OQ_OFFSET))
   283  #define CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq) \
   284          (CN6XXX_SLI_OQ0_BUFF_INFO_SIZE + ((oq) * CN6XXX_OQ_OFFSET))
   286  #define CN6XXX_SLI_OQ_PKTS_SENT(oq) \
   287          (CN6XXX_SLI_OQ_PKT_SENT_START + ((oq) * CN6XXX_OQ_OFFSET))
   289  #define CN6XXX_SLI_OQ_PKTS_CREDIT(oq) \
   290          (CN6XXX_SLI_OQ_PKT_CREDITS_START + ((oq) * CN6XXX_OQ_OFFSE ...
        ... (more matches not shown)
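The three register headers above (cn23xx_vf_regs.h, cn23xx_pf_regs.h, cn66xx_regs.h) all compute per-queue register addresses the same way: a fixed base plus the queue index times a per-queue stride. A small standalone sketch of that pattern follows; the base/stride values and the csr_write64() helper are invented for illustration and are not the real offsets from these headers.

#include <stdint.h>
#include <stdio.h>

#define OQ_QUEUE_STRIDE		0x20000ULL	/* hypothetical per-queue gap  */
#define OQ_PKTS_SENT_BASE	0x10100ULL	/* hypothetical first register */
#define OQ_PKTS_CREDIT_BASE	0x10200ULL

/* reg(q) = BASE + q * QUEUE_STRIDE, as in the macros above */
#define OQ_PKTS_SENT(q)		(OQ_PKTS_SENT_BASE + (uint64_t)(q) * OQ_QUEUE_STRIDE)
#define OQ_PKTS_CREDIT(q)	(OQ_PKTS_CREDIT_BASE + (uint64_t)(q) * OQ_QUEUE_STRIDE)

/* Stand-in for a CSR write; a real driver would use writeq()/iowrite64(). */
static void csr_write64(uint64_t offset, uint64_t val)
{
	printf("write 0x%016llx -> offset 0x%llx\n",
	       (unsigned long long)val, (unsigned long long)offset);
}

int main(void)
{
	/* Return 64 descriptors' worth of credits to Rx queue 3. */
	csr_write64(OQ_PKTS_CREDIT(3), 64);
	return 0;
}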
octeon_config.h
   131  #define CFG_GET_OQ_MAX_Q(cfg)            ((cfg)->oq.max_oqs)
   132  #define CFG_GET_OQ_PKTS_PER_INTR(cfg)    ((cfg)->oq.pkts_per_intr)
   133  #define CFG_GET_OQ_REFILL_THRESHOLD(cfg) ((cfg)->oq.refill_threshold)
   134  #define CFG_GET_OQ_INTR_PKT(cfg)         ((cfg)->oq.oq_intr_pkt)
   135  #define CFG_GET_OQ_INTR_TIME(cfg)        ((cfg)->oq.oq_intr_time)
   136  #define CFG_SET_OQ_INTR_PKT(cfg, val)    (cfg)->oq.oq_intr_pkt = val
   137  #define CFG_SET_OQ_INTR_TIME(cfg, val)   (cfg)->oq.oq_intr_time = val
   413  struct octeon_oq_config oq;    (struct member)
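octeon_config.h wraps the nested oq config struct in CFG_GET_/CFG_SET_ macros so callers never spell out the field path. A toy version of that accessor pattern follows, with an invented struct layout and _M-suffixed macro names to make clear it is not the real header.

#include <stdio.h>

struct oq_config_model {
	unsigned int max_oqs;
	unsigned int pkts_per_intr;
	unsigned int refill_threshold;
	unsigned int oq_intr_pkt;
	unsigned int oq_intr_time;
};

struct cfg_model {
	struct oq_config_model oq;	/* nested, like the ->oq member above */
};

#define CFG_GET_OQ_INTR_PKT_M(cfg)	((cfg)->oq.oq_intr_pkt)
#define CFG_SET_OQ_INTR_PKT_M(cfg, val)	((cfg)->oq.oq_intr_pkt = (val))

int main(void)
{
	struct cfg_model cfg = { .oq = { .max_oqs = 8, .oq_intr_pkt = 1 } };

	CFG_SET_OQ_INTR_PKT_M(&cfg, 32);	/* coalesce: interrupt every 32 pkts */
	printf("oq_intr_pkt = %u\n", CFG_GET_OQ_INTR_PKT_M(&cfg));
	return 0;
}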
cn66xx_device.c
   136  /* core clock per us / oq ticks will be fractional. TO avoid that    (in lio_cn6xxx_get_oq_ticks)
   143  /* This gives the oq ticks (1024 core clock cycles) per millisecond */
   146  /* time_intr is in microseconds. The next 2 steps gives the oq ticks
   343  mask |= oct->io_qmask.oq;    (in lio_cn6xxx_enable_io_queues)
   378  mask ^= oct->io_qmask.oq;    (in lio_cn6xxx_disable_io_queues)
   383  mask = (u32)oct->io_qmask.oq;
   393  if (!(oct->io_qmask.oq & BIT_ULL(i)))
   521  droq_mask &= oct->io_qmask.oq;    (in lio_cn6xxx_process_droq_intr_regs)
   557  droq_time_mask &= oct->io_qmask.oq;
   558  droq_cnt_mask &= oct->io_qmask.oq;
        ... (more matches not shown)
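The comments quoted from lio_cn6xxx_get_oq_ticks() describe converting the time-based interrupt threshold from microseconds into hardware "oq ticks" of 1024 core-clock cycles. The arithmetic below is one plausible way to stage that conversion so the intermediate values stay integral; the exact ordering in the driver may differ.

#include <stdint.h>
#include <stdio.h>

static uint32_t oq_ticks_from_us(uint64_t core_clock_hz, uint32_t time_intr_us)
{
	/* ticks per millisecond = (core clocks per ms) / 1024 */
	uint64_t ticks_per_ms = (core_clock_hz / 1000) / 1024;

	/* multiply by the microsecond value before the final divide
	 * so the result keeps its precision */
	return (uint32_t)((ticks_per_ms * time_intr_us) / 1000);
}

int main(void)
{
	/* e.g. a 1.2 GHz core clock and a 100 us coalescing time */
	printf("%u oq ticks\n", oq_ticks_from_us(1200000000ULL, 100));
	return 0;
}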
octeon_device.c
    52  .oq = {
   161  .oq = {
   327  .oq = {
   430  .oq = {
   651  if (oct->io_qmask.oq & BIT_ULL(i))    (in octeon_free_device_mem)
  1296  (oct->io_qmask.oq & BIT_ULL(q_no))    (in octeon_get_rx_qsize)
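octeon_device.c, like most of the liquidio files listed here, gates per-queue work on io_qmask.oq, a 64-bit mask with one bit per active output queue. A self-contained model of that mask-and-skip pattern follows (BIT_ULL is reimplemented locally; in the kernel it comes from linux/bits.h, and the queue count is a placeholder).

#include <stdint.h>
#include <stdio.h>

#define BIT_ULL(n)	(1ULL << (n))
#define MAX_OQS		64

struct io_qmask_model {
	uint64_t oq;	/* one bit per active output (Rx) queue */
};

int main(void)
{
	struct io_qmask_model qmask = { .oq = 0 };
	int i;

	/* mark queues 0-3 active */
	for (i = 0; i < 4; i++)
		qmask.oq |= BIT_ULL(i);

	/* typical walk: skip queues that were never initialized */
	for (i = 0; i < MAX_OQS; i++) {
		if (!(qmask.oq & BIT_ULL(i)))
			continue;
		printf("draining oq %d\n", i);
	}
	return 0;
}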
octeon_device.h
   189  u64 oq;    (struct member)
cn23xx_vf_device.c
    38  /* This gives the oq ticks (1024 core clock cycles) per millisecond */    (in cn23xx_vf_get_oq_ticks)
    41  /* time_intr is in microseconds. The next 2 steps gives the oq ticks
   345  if (oct->io_qmask.oq & BIT_ULL(q_no)) {    (in cn23xx_enable_vf_io_queues)
lio_ethtool.c
  1037  "Sending iq/oq config failed status: %x\n",    (in lio_23xx_reconfigure_queue_count)
  1050  "iq/oq config failed: %x\n", retval);
  1154  if (!(oct->io_qmask.oq & BIT_ULL(i)))    (in lio_reset_queues)
  1683  if (!(oct_dev->io_qmask.oq & BIT_ULL(j)))    (in lio_get_ethtool_stats)
  1882  if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))    (in lio_get_strings)
  1930  if (!(oct_dev->io_qmask.oq & BIT_ULL(i)))    (in lio_vf_get_strings)
lio_vf_main.c
    84  if (!(oct->io_qmask.oq & BIT_ULL(i)))    (in lio_wait_for_oq_pkts)
   545  if (!(oct->io_qmask.oq & BIT_ULL(i)))    (in octeon_destroy_resources)
  1995  "iq/oq config failed status: %x\n", retval);    (in setup_nic_devices)
  2011  "iq/oq config failed, retval = %d\n", retval);
  2249  /* only default iq and oq were initialized    (in liquidio_init_nic_module)
cn23xx_pf_device.c
   120  /* This gives the oq ticks (1024 core clock cycles) per millisecond */    (in cn23xx_pf_get_oq_ticks)
   123  /* time_intr is in microseconds. The next 2 steps gives the oq ticks
   685  if (oct->io_qmask.oq & BIT_ULL(q_no - srn)) {    (in cn23xx_enable_io_queues)
lio_main.c
   162  if (!(oct->io_qmask.oq & BIT_ULL(q_no)))    (in octeon_droq_bh)
   196  if (!(oct->io_qmask.oq & BIT_ULL(i)))    (in lio_wait_for_oq_pkts)
  1063  if (!(oct->io_qmask.oq & BIT_ULL(i)))    (in octeon_destroy_resources)
  3425  "iq/oq config failed status: %x\n",    (in setup_nic_devices)
  3441  dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
  3901  /* only default iq and oq were initialized    (in liquidio_init_nic_module)
lio_core.c
  1282  if (!(oct->io_qmask.oq & BIT_ULL(idx)))    (in lio_wait_for_clean_oq)
/linux/drivers/net/ethernet/marvell/octeon_ep/
octep_config.h
    67  #define CFG_GET_OQ_NUM_DESC(cfg)          ((cfg)->oq.num_descs)
    68  #define CFG_GET_OQ_BUF_SIZE(cfg)          ((cfg)->oq.buf_size)
    69  #define CFG_GET_OQ_REFILL_THRESHOLD(cfg)  ((cfg)->oq.refill_threshold)
    70  #define CFG_GET_OQ_INTR_PKT(cfg)          ((cfg)->oq.oq_intr_pkt)
    71  #define CFG_GET_OQ_INTR_TIME(cfg)         ((cfg)->oq.oq_intr_time)
    72  #define CFG_GET_OQ_WMARK(cfg)             ((cfg)->oq.wmark)
   235  struct octep_oq_config oq;    (struct member)
octep_cnxk_pf.c
   256  conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;    (in octep_init_config_cnxk_pf)
   257  conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
   258  conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
   259  conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
   260  conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
   261  conf->oq.wmark = OCTEP_OQ_WMARK_MIN;
   335  struct octep_oq *oq = oct->oq[oq_no];    (local in octep_setup_oq_regs_cnxk_pf)
   360  oq->desc_ring_dma);
   362  oq ...
   641  struct octep_oq *oq = vector->oq;    (local in octep_ioq_intr_handler_cnxk_pf)
        ... (more matches not shown)
octep_cn9k_pf.c
   237  conf->oq.num_descs = OCTEP_OQ_MAX_DESCRIPTORS;    (in octep_init_config_cn93_pf)
   238  conf->oq.buf_size = OCTEP_OQ_BUF_SIZE;
   239  conf->oq.refill_threshold = OCTEP_OQ_REFILL_THRESHOLD;
   240  conf->oq.oq_intr_pkt = OCTEP_OQ_INTR_PKT_THRESHOLD;
   241  conf->oq.oq_intr_time = OCTEP_OQ_INTR_TIME_THRESHOLD;
   315  struct octep_oq *oq = oct->oq[oq_no];    (local in octep_setup_oq_regs_cn93_pf)
   340  oq->desc_ring_dma);
   342  oq->max_count);
   346  oq_ctl |= (oq ...
   620  struct octep_oq *oq = vector->oq;    (local in octep_ioq_intr_handler_cn93_pf)
        ... (more matches not shown)
octep_main.c
    66  ioq_vector->oq = oct->oq[i];    (in octep_alloc_ioq_vectors)
   561   * @oq: Octeon Rx queue data structure.
   563  static void octep_enable_ioq_irq(struct octep_iq *iq, struct octep_oq *oq)
   565      u32 pkts_pend = oq->pkts_pending;
   573      if (oq->last_pkt_count - pkts_pend) {
   574          writel(oq->last_pkt_count - pkts_pend, oq->pkts_sent_reg);
   575          oq->last_pkt_count = pkts_pend;
   580      writeq(1UL << OCTEP_OQ_INTR_RESEND_BIT, oq ...
        ... (more matches not shown)
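The octep_enable_ioq_irq() hits show the re-arm sequence used before leaving polling: write back the number of packets already consumed so the hardware's pkts_sent counter stays in sync, then set a resend bit so any event that raced with the poll is delivered again. The sketch below models the two MMIO registers as plain variables and assumes a placeholder bit position; a real driver would use writel()/writeq() on mapped addresses.

#include <stdint.h>

#define OQ_INTR_RESEND_BIT	59	/* placeholder bit position */

struct oq_irq_model {
	uint32_t pkts_pending;		/* packets seen but not yet processed  */
	uint32_t last_pkt_count;	/* hardware's packet count at last ack */
	volatile uint32_t pkts_sent_reg;	/* stands in for the MMIO ack register */
	volatile uint64_t intr_resend_reg;	/* stands in for the resend register   */
};

void oq_enable_irq(struct oq_irq_model *oq)
{
	uint32_t pkts_pend = oq->pkts_pending;

	/* Ack only what was actually processed since the last ack. */
	if (oq->last_pkt_count - pkts_pend) {
		oq->pkts_sent_reg = oq->last_pkt_count - pkts_pend;
		oq->last_pkt_count = pkts_pend;
	}
	/* Ask the hardware to replay any interrupt raised meanwhile. */
	oq->intr_resend_reg = 1ULL << OQ_INTR_RESEND_BIT;
}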
octep_main.h
   151  struct octep_oq *oq;    (struct member)
   267  struct octep_oq *oq[OCTEP_MAX_OQ];    (struct member)
   269  /* Per oq stats */
   411  int octep_oq_process_rx(struct octep_oq *oq, int budget);
/linux/fs/xfs/
xfs_trans_dquot.c
    79  struct xfs_dqtrx *oq, *nq;    (local in xfs_trans_dup_dqinfo)
    97  oq = &oqa[i];
   100  if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
   101      blk_res_used = oq->qt_bcount_delta;
   103  nq->qt_dquot = oq->qt_dquot;
   110  nq->qt_blk_res = oq->qt_blk_res - blk_res_used;
   111  oq->qt_blk_res = blk_res_used;
   113  nq->qt_rtblk_res = oq->qt_rtblk_res -
   114  oq ...
        ... (more matches not shown)
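The xfs_trans_dup_dqinfo() hits show how a duplicated transaction inherits the unused portion of the original's quota block reservation: the old side keeps only what it has already consumed, and the remainder moves to the new side. A standalone sketch of that split, using simplified stand-in types rather than the XFS structures:

#include <stdint.h>
#include <stdio.h>

struct dqtrx_model {
	int64_t qt_blk_res;		/* blocks reserved against the quota */
	int64_t qt_bcount_delta;	/* blocks actually consumed so far   */
};

/* Move the unused part of oq's reservation into nq. */
void dup_blk_reservation(struct dqtrx_model *oq, struct dqtrx_model *nq)
{
	int64_t used = 0;

	if (oq->qt_blk_res && oq->qt_bcount_delta > 0)
		used = oq->qt_bcount_delta;

	nq->qt_blk_res = oq->qt_blk_res - used;	/* remainder moves over   */
	oq->qt_blk_res = used;			/* old side keeps its use */
}

int main(void)
{
	struct dqtrx_model oq = { .qt_blk_res = 100, .qt_bcount_delta = 30 };
	struct dqtrx_model nq = { 0 };

	dup_blk_reservation(&oq, &nq);
	printf("old keeps %lld, new gets %lld\n",
	       (long long)oq.qt_blk_res, (long long)nq.qt_blk_res);
	return 0;
}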
/linux/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_main.c
   589  struct sge_uld_txq *oq;    (local in fwevtq_handler)
   591  oq = container_of(txq, struct sge_uld_txq, q);
   592  tasklet_schedule(&oq->qresume_tsk);