// SPDX-License-Identifier: GPL-2.0
/* Marvell Octeon EP (EndPoint) VF Ethernet Driver
 *
 * Copyright (C) 2020 Marvell.
 *
 */

#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>

#include "octep_vf_config.h"
#include "octep_vf_main.h"
#include "octep_vf_regs_cnxk.h"

/* Dump useful hardware IQ/OQ CSRs for debug purposes */
static void cnxk_vf_dump_q_regs(struct octep_vf_device *oct, int qno)
{
	struct device *dev = &oct->pdev->dev;

	dev_info(dev, "IQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_IN_INSTR_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(qno)));
	dev_info(dev, "R[%d]_IN_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_CONTROL(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(qno)));
	dev_info(dev, "R[%d]_IN_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_ENABLE(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(qno)));
	dev_info(dev, "R[%d]_IN_INSTR_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(qno)));
	dev_info(dev, "R[%d]_IN_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_CNTS(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(qno)));
	dev_info(dev, "R[%d]_IN_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_INT_LEVELS(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_IN_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_PKT_CNT(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_IN_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_IN_BYTE_CNT(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(qno)));

	dev_info(dev, "OQ-%d register dump\n", qno);
	dev_info(dev, "R[%d]_OUT_SLIST_DBELL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(qno)));
	dev_info(dev, "R[%d]_OUT_CONTROL[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_CONTROL(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(qno)));
	dev_info(dev, "R[%d]_OUT_ENABLE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_ENABLE(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_BADDR[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(qno)));
	dev_info(dev, "R[%d]_OUT_SLIST_RSIZE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(qno)));
	dev_info(dev, "R[%d]_OUT_CNTS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_CNTS(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CNTS(qno)));
	dev_info(dev, "R[%d]_OUT_INT_LEVELS[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(qno)));
	dev_info(dev, "R[%d]_OUT_PKT_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_PKT_CNT(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(qno)));
	dev_info(dev, "R[%d]_OUT_BYTE_CNT[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_BYTE_CNT(qno)));
	dev_info(dev, "R[%d]_ERR_TYPE[0x%llx]: 0x%016llx\n",
		 qno, CNXK_VF_SDP_R_ERR_TYPE(qno),
		 octep_vf_read_csr64(oct, CNXK_VF_SDP_R_ERR_TYPE(qno)));
}

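/* Note on the reset helpers below: the doorbell CSRs appear to be
 * write-one-to-clear in their low 32 bits, so pending doorbell counts
 * are flushed by writing all-ones (GENMASK_ULL(31, 0)), while the
 * count CSRs are cleared by writing back the value just read. This is
 * inferred from the accesses in this file; the CNXK SDP register
 * reference is the authority on the exact semantics.
 */
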
/* Reset Hardware Tx queue */
static void cnxk_vf_reset_iq(struct octep_vf_device *oct, int q_no)
{
	u64 val = ULL(0);

	dev_dbg(&oct->pdev->dev, "Reset VF IQ-%d\n", q_no);

	/* Disable the Tx/Instruction Ring */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(q_no), val);

	/* Clear the Instruction Ring interrupt levels, packet/byte counts,
	 * base address and size CSRs
	 */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q_no), val);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_PKT_CNT(q_no), val);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_BYTE_CNT(q_no), val);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(q_no), val);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(q_no), val);

	/* Flush any pending doorbell count and reset the instruction count */
	val = GENMASK_ULL(31, 0);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(q_no), val);

	val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no));
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CNTS(q_no),
			     val & GENMASK_ULL(31, 0));
}

/* Reset Hardware Rx queue */
static void cnxk_vf_reset_oq(struct octep_vf_device *oct, int q_no)
{
	u64 val = ULL(0);

	/* Disable Output (Rx) Ring */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(q_no), val);

	/* Clear count CSRs */
	val = octep_vf_read_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no));
	octep_vf_write_csr(oct, CNXK_VF_SDP_R_OUT_CNTS(q_no), val);

	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_PKT_CNT(q_no), GENMASK_ULL(35, 0));
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(q_no), GENMASK_ULL(31, 0));
}

/* Reset all hardware Tx/Rx queues */
static void octep_vf_reset_io_queues_cnxk(struct octep_vf_device *oct)
{
	struct pci_dev *pdev = oct->pdev;
	int q;

	dev_dbg(&pdev->dev, "Reset OCTEP_CNXK VF IO Queues\n");

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		cnxk_vf_reset_iq(oct, q);
		cnxk_vf_reset_oq(oct, q);
	}
}

/* Initialize configuration limits and initial active config */
static void octep_vf_init_config_cnxk_vf(struct octep_vf_device *oct)
{
	struct octep_vf_config *conf = oct->conf;
	u64 reg_val;

	/* The RPVF field of R[0]_IN_CONTROL advertises the number of rings
	 * the PF has provisioned for this VF; activate all of them.
	 */
	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(0));
	conf->ring_cfg.max_io_rings = (reg_val >> CNXK_VF_R_IN_CTL_RPVF_POS) &
				      CNXK_VF_R_IN_CTL_RPVF_MASK;
	conf->ring_cfg.active_io_rings = conf->ring_cfg.max_io_rings;

	conf->iq.num_descs = OCTEP_VF_IQ_MAX_DESCRIPTORS;
	conf->iq.instr_type = OCTEP_VF_64BYTE_INSTR;
	conf->iq.db_min = OCTEP_VF_DB_MIN;
	conf->iq.intr_threshold = OCTEP_VF_IQ_INTR_THRESHOLD;

	conf->oq.num_descs = OCTEP_VF_OQ_MAX_DESCRIPTORS;
	conf->oq.buf_size = OCTEP_VF_OQ_BUF_SIZE;
	conf->oq.refill_threshold = OCTEP_VF_OQ_REFILL_THRESHOLD;
	conf->oq.oq_intr_pkt = OCTEP_VF_OQ_INTR_PKT_THRESHOLD;
	conf->oq.oq_intr_time = OCTEP_VF_OQ_INTR_TIME_THRESHOLD;
	conf->oq.wmark = OCTEP_VF_OQ_WMARK_MIN;

	/* One MSI-X vector per active Tx/Rx ring pair */
	conf->msix_cfg.ioq_msix = conf->ring_cfg.active_io_rings;
}

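/* Layout of R[]_IN_INT_LEVELS as used by this driver (inferred from the
 * accesses below and in the interrupt enable/disable paths): bits 31:0
 * hold the instruction-count interrupt threshold, and bit 62 arms the
 * ring interrupt.
 */
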
/* Setup registers for a hardware Tx Queue */
static void octep_vf_setup_iq_regs_cnxk(struct octep_vf_device *oct, int iq_no)
{
	struct octep_vf_iq *iq = oct->iq[iq_no];
	u32 reset_instr_cnt;
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));

	/* Wait for the IDLE bit to be set */
	if (!(reg_val & CNXK_VF_R_IN_CTL_IDLE)) {
		do {
			reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no));
		} while (!(reg_val & CNXK_VF_R_IN_CTL_IDLE));
	}

	reg_val |= CNXK_VF_R_IN_CTL_RDSIZE;
	reg_val |= CNXK_VF_R_IN_CTL_IS_64B;
	reg_val |= CNXK_VF_R_IN_CTL_ESR;
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_CONTROL(iq_no), reg_val);

	/* Write the start of the input queue's ring and its size */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_BADDR(iq_no), iq->desc_ring_dma);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_RSIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this queue */
	iq->doorbell_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_CNTS(iq_no);
	iq->intr_lvl_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no);

	/* Store the current instruction counter (used in flush_iq calculation) */
	reset_instr_cnt = readl(iq->inst_cnt_reg);
	writel(reset_instr_cnt, iq->inst_cnt_reg);

	/* INTR_THRESHOLD is set to max (0xFFFFFFFF) to disable the INTR */
	reg_val = CFG_GET_IQ_INTR_THRESHOLD(oct->conf) & GENMASK_ULL(31, 0);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);
}

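/* The Rx ring setup below clears the ROR/NSR/ES attribute bits for the
 * info (_I), data (_D) and pointer (_P) channels and keeps only ES_P
 * set; per Octeon SDP conventions these are presumed to be the
 * relaxed-ordering, no-snoop and endian-swap PCIe attributes.
 */
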
/* Setup registers for a hardware Rx Queue */
static void octep_vf_setup_oq_regs_cnxk(struct octep_vf_device *oct, int oq_no)
{
	struct octep_vf_oq *oq = oct->oq[oq_no];
	u32 time_threshold = 0;
	u64 oq_ctl = ULL(0);
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));

	/* Wait for the IDLE bit to be set */
	if (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE)) {
		do {
			reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
		} while (!(reg_val & CNXK_VF_R_OUT_CTL_IDLE));
	}

	reg_val &= ~(CNXK_VF_R_OUT_CTL_IMODE);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_P);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_P);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_I);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_I);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_I);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_ROR_D);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_NSR_D);
	reg_val &= ~(CNXK_VF_R_OUT_CTL_ES_D);
	reg_val |= (CNXK_VF_R_OUT_CTL_ES_P);

	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), reg_val);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_BADDR(oq_no), oq->desc_ring_dma);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_RSIZE(oq_no), oq->max_count);

	oq_ctl = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no));
	/* Clear the ISIZE and BSIZE fields (bits 22-0) */
	oq_ctl &= ~GENMASK_ULL(22, 0);
	/* Populate the BSIZE field (bits 15-0) */
	oq_ctl |= (oq->buffer_size & GENMASK_ULL(15, 0));
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_CONTROL(oq_no), oq_ctl);

	/* Get the mapped address of the pkts_sent and pkts_credit regs */
	oq->pkts_sent_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_CNTS(oq_no);
	oq->pkts_credit_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no);

	/* Pack the time threshold into the upper 32 bits and the
	 * packet-count threshold into the lower 32 bits
	 */
	time_threshold = CFG_GET_OQ_INTR_TIME(oct->conf);
	reg_val = ((u64)time_threshold << 32) | CFG_GET_OQ_INTR_PKT(oct->conf);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);

	/* Set the watermark for backpressure */
	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no));
	reg_val &= ~GENMASK_ULL(31, 0);
	reg_val |= CFG_GET_OQ_WMARK(oct->conf);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_WMARK(oq_no), reg_val);
}

/* Setup registers for a VF mailbox */
static void octep_vf_setup_mbox_regs_cnxk(struct octep_vf_device *oct, int q_no)
{
	struct octep_vf_mbox *mbox = oct->mbox;

	/* PF to VF DATA reg. VF reads from this reg */
	mbox->mbox_read_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_DATA(q_no);

	/* VF mbox interrupt reg */
	mbox->mbox_int_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_PF_VF_INT(q_no);

	/* VF to PF DATA reg. VF writes into this reg */
	mbox->mbox_write_reg = oct->mmio.hw_addr + CNXK_VF_SDP_R_MBOX_VF_PF_DATA(q_no);
}

/* Mailbox Interrupt handler */
static void cnxk_handle_vf_mbox_intr(struct octep_vf_device *oct)
{
	if (oct->mbox)
		schedule_work(&oct->mbox->wk.work);
	else
		dev_err(&oct->pdev->dev, "cannot schedule work on invalid mbox\n");
}

/* Tx/Rx queue interrupt handler */
static irqreturn_t octep_vf_ioq_intr_handler_cnxk(void *data)
{
	struct octep_vf_ioq_vector *vector = data;
	struct octep_vf_device *oct;
	struct octep_vf_oq *oq;
	u64 reg_val;

	oct = vector->octep_vf_dev;
	oq = vector->oq;
	/* Mailbox interrupt arrives along with interrupt of Tx/Rx ring pair 0 */
	if (oq->q_no == 0) {
		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0));
		if (reg_val & CNXK_VF_SDP_R_MBOX_PF_VF_INT_STATUS) {
			cnxk_handle_vf_mbox_intr(oct);
			octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), reg_val);
		}
	}
	napi_schedule_irqoff(oq->napi);
	return IRQ_HANDLED;
}

/* Re-initialize Octeon hardware registers */
static void octep_vf_reinit_regs_cnxk(struct octep_vf_device *oct)
{
	u32 i;

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		oct->hw_ops.setup_iq_regs(oct, i);

	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		oct->hw_ops.setup_oq_regs(oct, i);

	oct->hw_ops.enable_interrupts(oct);
	oct->hw_ops.enable_io_queues(oct);

	/* Restore the Rx buffer credits for each ring */
	for (i = 0; i < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); i++)
		writel(oct->oq[i]->max_count, oct->oq[i]->pkts_credit_reg);
}

/* Enable all interrupts */
static void octep_vf_enable_interrupts_cnxk(struct octep_vf_device *oct)
{
	int num_rings, q;
	u64 reg_val;

	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
	for (q = 0; q < num_rings; q++) {
		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
		reg_val |= BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);

		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
		reg_val |= BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
	}
	/* Enable PF to VF mbox interrupt by setting the 2nd bit */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0),
			     CNXK_VF_SDP_R_MBOX_PF_VF_INT_ENAB);
}

/* Disable all interrupts */
static void octep_vf_disable_interrupts_cnxk(struct octep_vf_device *oct)
{
	int num_rings, q;
	u64 reg_val;

	/* Disable PF to VF mbox interrupt by clearing the enable bit */
	if (oct->mbox)
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_MBOX_PF_VF_INT(0), 0x0);

	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
	for (q = 0; q < num_rings; q++) {
		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q));
		reg_val &= ~BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(q), reg_val);

		reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q));
		reg_val &= ~BIT_ULL_MASK(62);
		octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(q), reg_val);
	}
}

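/* The read-index update below relies on u32 wrap-around arithmetic:
 * even when the hardware instruction counter overflows, the u32
 * subtraction of the previous snapshot still yields the number of
 * instructions completed since the last call.
 */
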
/* Get new Octeon Read Index: index of descriptor that Octeon reads next. */
static u32 octep_vf_update_iq_read_index_cnxk(struct octep_vf_iq *iq)
{
	u32 pkt_in_done = readl(iq->inst_cnt_reg);
	u32 last_done, new_idx;

	last_done = pkt_in_done - iq->pkt_in_done;
	iq->pkt_in_done = pkt_in_done;

	new_idx = (iq->octep_vf_read_index + last_done) % iq->max_count;

	return new_idx;
}

/* Enable a hardware Tx Queue */
static void octep_vf_enable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
{
	u64 loop = HZ;
	u64 reg_val;

	/* Flush any pending doorbell count and wait for the clear to take
	 * effect (up to ~1 second)
	 */
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no), GENMASK_ULL(31, 0));

	while (octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INSTR_DBELL(iq_no)) &&
	       loop--) {
		schedule_timeout_interruptible(1);
	}

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no));
	reg_val |= BIT_ULL_MASK(62);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_INT_LEVELS(iq_no), reg_val);

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
	reg_val |= ULL(1);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}

/* Enable a hardware Rx Queue */
static void octep_vf_enable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
{
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no));
	reg_val |= BIT_ULL_MASK(62);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_INT_LEVELS(oq_no), reg_val);

	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_SLIST_DBELL(oq_no), GENMASK_ULL(31, 0));

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
	reg_val |= ULL(1);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}

/* Enable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_enable_io_queues_cnxk(struct octep_vf_device *oct)
{
	u8 q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		octep_vf_enable_iq_cnxk(oct, q);
		octep_vf_enable_oq_cnxk(oct, q);
	}
}

/* Disable a hardware Tx Queue assigned to VF */
static void octep_vf_disable_iq_cnxk(struct octep_vf_device *oct, int iq_no)
{
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no));
	reg_val &= ~ULL(1);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_IN_ENABLE(iq_no), reg_val);
}

/* Disable a hardware Rx Queue assigned to VF */
static void octep_vf_disable_oq_cnxk(struct octep_vf_device *oct, int oq_no)
{
	u64 reg_val;

	reg_val = octep_vf_read_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no));
	reg_val &= ~ULL(1);
	octep_vf_write_csr64(oct, CNXK_VF_SDP_R_OUT_ENABLE(oq_no), reg_val);
}

/* Disable all hardware Tx/Rx Queues assigned to VF */
static void octep_vf_disable_io_queues_cnxk(struct octep_vf_device *oct)
{
	int q;

	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
		octep_vf_disable_iq_cnxk(oct, q);
		octep_vf_disable_oq_cnxk(oct, q);
	}
}

/* Dump hardware registers (including Tx/Rx queues) for debugging. */
static void octep_vf_dump_registers_cnxk(struct octep_vf_device *oct)
{
	u8 num_rings, q;

	num_rings = CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf);
	for (q = 0; q < num_rings; q++)
		cnxk_vf_dump_q_regs(oct, q);
}

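/* Usage sketch (illustrative; caller names are assumptions based on
 * octep_vf_reinit_regs_cnxk() above): the probe path calls
 * octep_vf_device_setup_cnxk() once to populate oct->hw_ops, then
 * brings the rings up roughly as:
 *
 *	for (q = 0; q < CFG_GET_PORTS_ACTIVE_IO_RINGS(oct->conf); q++) {
 *		oct->hw_ops.setup_iq_regs(oct, q);
 *		oct->hw_ops.setup_oq_regs(oct, q);
 *	}
 *	oct->hw_ops.enable_interrupts(oct);
 *	oct->hw_ops.enable_io_queues(oct);
 */
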
/**
 * octep_vf_device_setup_cnxk() - Setup Octeon device.
 *
 * @oct: Octeon device private data structure.
 *
 * - initialize hardware operations.
 * - set initial configuration and max limits.
 */
void octep_vf_device_setup_cnxk(struct octep_vf_device *oct)
{
	oct->hw_ops.setup_iq_regs = octep_vf_setup_iq_regs_cnxk;
	oct->hw_ops.setup_oq_regs = octep_vf_setup_oq_regs_cnxk;
	oct->hw_ops.setup_mbox_regs = octep_vf_setup_mbox_regs_cnxk;

	oct->hw_ops.ioq_intr_handler = octep_vf_ioq_intr_handler_cnxk;
	oct->hw_ops.reinit_regs = octep_vf_reinit_regs_cnxk;

	oct->hw_ops.enable_interrupts = octep_vf_enable_interrupts_cnxk;
	oct->hw_ops.disable_interrupts = octep_vf_disable_interrupts_cnxk;

	oct->hw_ops.update_iq_read_idx = octep_vf_update_iq_read_index_cnxk;

	oct->hw_ops.enable_iq = octep_vf_enable_iq_cnxk;
	oct->hw_ops.enable_oq = octep_vf_enable_oq_cnxk;
	oct->hw_ops.enable_io_queues = octep_vf_enable_io_queues_cnxk;

	oct->hw_ops.disable_iq = octep_vf_disable_iq_cnxk;
	oct->hw_ops.disable_oq = octep_vf_disable_oq_cnxk;
	oct->hw_ops.disable_io_queues = octep_vf_disable_io_queues_cnxk;
	oct->hw_ops.reset_io_queues = octep_vf_reset_io_queues_cnxk;

	oct->hw_ops.dump_registers = octep_vf_dump_registers_cnxk;
	octep_vf_init_config_cnxk_vf(oct);
}