// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include "otx2_common.h"
#include "otx2_reg.h"
#include "otx2_struct.h"
#include "cn10k.h"

/* CN20K mbox AF => PFx irq handler */
irqreturn_t cn20k_pfaf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct otx2_nic *pf = pf_irq;
	struct mbox *mw = &pf->mbox;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 pf_trig_val;

	pf_trig_val = otx2_read64(pf, RVU_PF_INT) & 0x3ULL;
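	/* RVU_PF_INT[1:0]: bit 0 signals an up-message from AF, bit 1 signals
	 * a down-reply from AF; both cases are handled below.
	 */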

	/* Clear the IRQ */
	otx2_write64(pf, RVU_PF_INT, pf_trig_val);

	if (pf_trig_val & BIT_ULL(0)) {
		mbox = &mw->mbox_up;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_up_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "UP message from AF to PF",
					 BIT_ULL(0));
	}

	if (pf_trig_val & BIT_ULL(1)) {
		mbox = &mw->mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(pf->mbox_wq, &mw->mbox_wrk);

		trace_otx2_msg_interrupt(pf->pdev, "DOWN reply from AF to PF",
					 BIT_ULL(1));
	}

	return IRQ_HANDLED;
}

irqreturn_t cn20k_vfaf_mbox_intr_handler(int irq, void *vf_irq)
{
	struct otx2_nic *vf = vf_irq;
	struct otx2_mbox_dev *mdev;
	struct otx2_mbox *mbox;
	struct mbox_hdr *hdr;
	u64 vf_trig_val;

	vf_trig_val = otx2_read64(vf, RVU_VF_INT) & 0x3ULL;
	/* Clear the IRQ */
	otx2_write64(vf, RVU_VF_INT, vf_trig_val);

	/* Read latest mbox data */
	smp_rmb();

	if (vf_trig_val & BIT_ULL(1)) {
		/* Check for PF => VF response messages */
		mbox = &vf->mbox.mbox;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(vf->mbox_wq, &vf->mbox.mbox_wrk);

		trace_otx2_msg_interrupt(mbox->pdev, "DOWN reply from PF0 to VF",
					 BIT_ULL(1));
	}

	if (vf_trig_val & BIT_ULL(0)) {
		/* Check for PF => VF notification messages */
		mbox = &vf->mbox.mbox_up;
		mdev = &mbox->dev[0];
		otx2_sync_mbox_bbuf(mbox, 0);

		hdr = (struct mbox_hdr *)(mdev->mbase + mbox->rx_start);
		if (hdr->num_msgs)
			queue_work(vf->mbox_wq, &vf->mbox.mbox_up_wrk);

		trace_otx2_msg_interrupt(mbox->pdev, "UP message from PF0 to VF",
					 BIT_ULL(0));
	}

	return IRQ_HANDLED;
}

void cn20k_enable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	/* Clear PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);

	/* Enable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(numvfs));
	if (numvfs > 64) {
		numvfs -= 64;
		otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
		otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1),
			     INTR_MASK(numvfs));
	}
}

void cn20k_disable_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	int vector, intr_vec, vec = 0;

	/* Disable PF <=> VF mailbox IRQ */
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), ~0ull);

	otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(0), ~0ull);
	otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(0), ~0ull);

	if (numvfs > 64) {
		otx2_write64(pf, RVU_MBOX_PF_VFPF_INTX(1), ~0ull);
		otx2_write64(pf, RVU_MBOX_PF_VFPF1_INTX(1), ~0ull);
	}

	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
	     RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
		vector = pci_irq_vector(pf->pdev, intr_vec);
		free_irq(vector, pf->hw.pfvf_irq_devid[vec]);
	}
}

irqreturn_t cn20k_pfvf_mbox_intr_handler(int irq, void *pf_irq)
{
	struct pf_irq_data *irq_data = pf_irq;
	struct otx2_nic *pf = irq_data->pf;
	struct mbox *mbox;
	u64 intr;

	/* Sync with mbox memory region */
	rmb();

	/* Clear interrupts */
	intr = otx2_read64(pf, irq_data->intr_status);
	otx2_write64(pf, irq_data->intr_status, intr);
	mbox = pf->mbox_pfvf;

	if (intr)
		trace_otx2_msg_interrupt(pf->pdev, "VF(s) to PF", intr);

	irq_data->pf_queue_work_hdlr(mbox, pf->mbox_pfvf_wq, irq_data->start,
				     irq_data->mdevs, intr);

	return IRQ_HANDLED;
}

int cn20k_register_pfvf_mbox_intr(struct otx2_nic *pf, int numvfs)
{
	struct otx2_hw *hw = &pf->hw;
	struct pf_irq_data *irq_data;
	int intr_vec, ret, vec = 0;
	char *irq_name;

	/* irq data for 4 PF intr vectors */
	irq_data = devm_kcalloc(pf->dev, 4,
				sizeof(struct pf_irq_data), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

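	/* Each MBOX0 vector covers VFs 0-63 and each MBOX1 vector covers the
	 * VFs numbered 64 and above, for both the VFPF and VFPF1 register sets.
	 */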
	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
	     RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1; intr_vec++, vec++) {
		switch (intr_vec) {
		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF_INTX(0);
			irq_data[vec].start = 0;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF_INTX(1);
			irq_data[vec].start = 64;
			irq_data[vec].mdevs = 96;
			break;
		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(0);
			irq_data[vec].start = 0;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
			irq_data[vec].start = 64;
			irq_data[vec].mdevs = 96;
			break;
		}
		irq_data[vec].pf_queue_work_hdlr = otx2_queue_vf_work;
		irq_data[vec].vec_num = intr_vec;
		irq_data[vec].pf = pf;

		/* Register mailbox interrupt handler */
		irq_name = &hw->irq_name[intr_vec * NAME_SIZE];
		if (pf->pcifunc)
			snprintf(irq_name, NAME_SIZE, "RVUPF%d_VF%d Mbox%d",
				 rvu_get_pf(pf->pdev, pf->pcifunc),
				 vec / 2, vec % 2);
		else
			snprintf(irq_name, NAME_SIZE, "RVUPF_VF%d Mbox%d",
				 vec / 2, vec % 2);

		hw->pfvf_irq_devid[vec] = &irq_data[vec];
		ret = request_irq(pci_irq_vector(pf->pdev, intr_vec),
				  pf->hw_ops->pfvf_mbox_intr_handler, 0,
				  irq_name, &irq_data[vec]);
		if (ret) {
			dev_err(pf->dev,
				"RVUPF: IRQ registration failed for PFVF mbox irq\n");
			return ret;
		}
	}

	cn20k_enable_pfvf_mbox_intr(pf, numvfs);

	return 0;
}

#define RQ_BP_LVL_AURA (255 - ((85 * 256) / 100)) /* BP when 85% is full */
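/* (85 * 256) / 100 = 217, so RQ_BP_LVL_AURA evaluates to 255 - 217 = 38 on
 * the 8-bit level scale programmed into aq->aura.bp below.
 */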

static u8 cn20k_aura_bpid_idx(struct otx2_nic *pfvf, int aura_id)
{
#ifdef CONFIG_DCB
	return pfvf->queue_to_pfc_map[aura_id];
#else
	return 0;
#endif
}

static int cn20k_aura_aq_init(struct otx2_nic *pfvf, int aura_id,
			      int pool_id, int numptrs)
{
	struct npa_cn20k_aq_enq_req *aq;
	struct otx2_pool *pool;
	u8 bpid_idx;
	int err;

	pool = &pfvf->qset.pool[pool_id];

	/* Allocate memory for HW to update Aura count.
	 * Alloc one cache line, so that it fits all FC_STYPE modes.
	 */
	if (!pool->fc_addr) {
		err = qmem_alloc(pfvf->dev, &pool->fc_addr, 1, OTX2_ALIGN);
		if (err)
			return err;
	}

	/* Initialize this aura's context via AF */
	aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err)
			return err;
		aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
		if (!aq)
			return -ENOMEM;
	}

	aq->aura_id = aura_id;

	/* Will be filled by AF with correct pool context address */
	aq->aura.pool_addr = pool_id;
	aq->aura.pool_caching = 1;
	aq->aura.shift = ilog2(numptrs) - 8;
	aq->aura.count = numptrs;
	aq->aura.limit = numptrs;
	aq->aura.avg_level = 255;
	aq->aura.ena = 1;
	aq->aura.fc_ena = 1;
	aq->aura.fc_addr = pool->fc_addr->iova;
	aq->aura.fc_hyst_bits = 0; /* Store count on all updates */

	/* Enable backpressure for RQ aura */
	if (aura_id < pfvf->hw.rqpool_cnt && !is_otx2_lbkvf(pfvf->pdev)) {
		aq->aura.bp_ena = 0;
		/* If NIX1 LF is attached then specify NIX1_RX.
		 *
		 * NPA_AURA_S[BP_ENA] is set below according to the
		 * NPA_BPINTF_E enumeration, i.e. 0x0 + a*0x1 where 'a' is
		 * 0 for NIX0_RX and 1 for NIX1_RX, so:
		 * NIX0_RX is 0x0 + 0*0x1 = 0
		 * NIX1_RX is 0x0 + 1*0x1 = 1
		 * The HRM describes it as:
		 * "NPA_AURA_S[BP_ENA](w1[33:32]) - Enable aura backpressure to
		 * NIX-RX based on [BP] level. One bit per NIX-RX; index
		 * enumerated by NPA_BPINTF_E."
		 */
		if (pfvf->nix_blkaddr == BLKADDR_NIX1)
			aq->aura.bp_ena = 1;

		bpid_idx = cn20k_aura_bpid_idx(pfvf, aura_id);
		aq->aura.bpid = pfvf->bpid[bpid_idx];

		/* Set backpressure level for RQ's Aura */
		aq->aura.bp = RQ_BP_LVL_AURA;
	}

323
324 /* Fill AQ info */
325 aq->ctype = NPA_AQ_CTYPE_AURA;
326 aq->op = NPA_AQ_INSTOP_INIT;
327
328 return 0;
329 }
330
static int cn20k_pool_aq_init(struct otx2_nic *pfvf, u16 pool_id,
			      int stack_pages, int numptrs, int buf_size,
			      int type)
{
	struct page_pool_params pp_params = { 0 };
	struct npa_cn20k_aq_enq_req *aq;
	struct otx2_pool *pool;
	int err, sz;

	pool = &pfvf->qset.pool[pool_id];
	/* Alloc memory for stack which is used to store buffer pointers */
	err = qmem_alloc(pfvf->dev, &pool->stack,
			 stack_pages, pfvf->hw.stack_pg_bytes);
	if (err)
		return err;

	pool->rbsize = buf_size;

	/* Initialize this pool's context via AF */
	aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
	if (!aq) {
		/* Shared mbox memory buffer is full, flush it and retry */
		err = otx2_sync_mbox_msg(&pfvf->mbox);
		if (err) {
			qmem_free(pfvf->dev, pool->stack);
			return err;
		}
		aq = otx2_mbox_alloc_msg_npa_cn20k_aq_enq(&pfvf->mbox);
		if (!aq) {
			qmem_free(pfvf->dev, pool->stack);
			return -ENOMEM;
		}
	}

	aq->aura_id = pool_id;
	aq->pool.stack_base = pool->stack->iova;
	aq->pool.stack_caching = 1;
	aq->pool.ena = 1;
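	/* The pool context takes the buffer size in 128-byte units, hence
	 * the division below.
	 */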
	aq->pool.buf_size = buf_size / 128;
	aq->pool.stack_max_pages = stack_pages;
	aq->pool.shift = ilog2(numptrs) - 8;
	aq->pool.ptr_start = 0;
	aq->pool.ptr_end = ~0ULL;

	/* Fill AQ info */
	aq->ctype = NPA_AQ_CTYPE_POOL;
	aq->op = NPA_AQ_INSTOP_INIT;

	if (type != AURA_NIX_RQ) {
		pool->page_pool = NULL;
		return 0;
	}

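	/* For RQ pools, back the buffers with a page pool: round the buffer
	 * size up to whole pages and cap the pool size at OTX2_PAGE_POOL_SZ
	 * entries.
	 */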
	sz = ALIGN(ALIGN(SKB_DATA_ALIGN(buf_size), OTX2_ALIGN), PAGE_SIZE);
	pp_params.order = get_order(sz);
	pp_params.flags = PP_FLAG_DMA_MAP;
	pp_params.pool_size = min(OTX2_PAGE_POOL_SZ, numptrs);
	pp_params.nid = NUMA_NO_NODE;
	pp_params.dev = pfvf->dev;
	pp_params.dma_dir = DMA_FROM_DEVICE;
	pool->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(pool->page_pool)) {
		netdev_err(pfvf->netdev, "Creation of page pool failed\n");
		return PTR_ERR(pool->page_pool);
	}

	return 0;
}

static int cn20k_sq_aq_init(void *dev, u16 qidx, u8 chan_offset, u16 sqb_aura)
{
	struct nix_cn20k_aq_enq_req *aq;
	struct otx2_nic *pfvf = dev;

	/* Get memory to put this msg */
	aq = otx2_mbox_alloc_msg_nix_cn20k_aq_enq(&pfvf->mbox);
	if (!aq)
		return -ENOMEM;

	aq->sq.cq = pfvf->hw.rx_queues + qidx;
	aq->sq.max_sqe_size = NIX_MAXSQESZ_W16; /* 128 byte */
	aq->sq.cq_ena = 1;
	aq->sq.ena = 1;
	aq->sq.smq = otx2_get_smq_idx(pfvf, qidx);
	aq->sq.smq_rr_weight = mtu_to_dwrr_weight(pfvf, pfvf->tx_max_pktlen);
	aq->sq.default_chan = pfvf->hw.tx_chan_base + chan_offset;
	aq->sq.sqe_stype = NIX_STYPE_STF; /* Cache SQB */
	aq->sq.sqb_aura = sqb_aura;
	aq->sq.sq_int_ena = NIX_SQINT_BITS;
	aq->sq.qint_idx = 0;
	/* Due to pipelining, a minimum of 2000 unused CQEs must be
	 * maintained to avoid CQ overflow.
	 */
	aq->sq.cq_limit = (SEND_CQ_SKID * 256) / (pfvf->qset.sqe_cnt);
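	/* The factor of 256 suggests cq_limit is programmed as a fraction of
	 * the CQ depth in 1/256 steps; assuming the CQ depth equals sqe_cnt,
	 * this reserves roughly SEND_CQ_SKID CQEs of headroom.
	 */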

	/* Fill AQ info */
	aq->qidx = qidx;
	aq->ctype = NIX_AQ_CTYPE_SQ;
	aq->op = NIX_AQ_INSTOP_INIT;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

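/* CN20K hardware ops; the SQE flush and NPA buffer-pointer helpers are
 * shared with the CN10K implementation.
 */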
static struct dev_hw_ops cn20k_hw_ops = {
	.pfaf_mbox_intr_handler = cn20k_pfaf_mbox_intr_handler,
	.vfaf_mbox_intr_handler = cn20k_vfaf_mbox_intr_handler,
	.pfvf_mbox_intr_handler = cn20k_pfvf_mbox_intr_handler,
	.sq_aq_init = cn20k_sq_aq_init,
	.sqe_flush = cn10k_sqe_flush,
	.aura_freeptr = cn10k_aura_freeptr,
	.refill_pool_ptrs = cn10k_refill_pool_ptrs,
	.aura_aq_init = cn20k_aura_aq_init,
	.pool_aq_init = cn20k_pool_aq_init,
};

void cn20k_init(struct otx2_nic *pfvf)
{
	pfvf->hw_ops = &cn20k_hw_ops;
}
EXPORT_SYMBOL(cn20k_init);