xref: /linux/drivers/net/ethernet/marvell/octeontx2/af/cn20k/mbox_init.c (revision 8be4d31cb8aaeea27bde4b7ddb26e28a89062ebf)
// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Admin Function driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/interrupt.h>
#include <linux/irq.h>

#include "rvu_trace.h"
#include "mbox.h"
#include "reg.h"
#include "api.h"

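/* CN20K mbox VF(s) => AF irq handler */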
static irqreturn_t cn20k_afvf_mbox_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_irq_data *rvu_irq_data = rvu_irq;
	struct rvu *rvu = rvu_irq_data->rvu;
	u64 intr;

	/* Sync with mbox memory region */
	rmb();

	/* Clear interrupts */
	intr = rvupf_read64(rvu, rvu_irq_data->intr_status);
	rvupf_write64(rvu, rvu_irq_data->intr_status, intr);

	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "VF(s) to AF", intr);

	rvu_irq_data->afvf_queue_work_hdlr(&rvu->afvf_wq_info, rvu_irq_data->start,
					   rvu_irq_data->mdevs, intr);

	return IRQ_HANDLED;
}

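/* Request the four VF => AF (VFPF/VFPF1 mbox 0 and 1) interrupt vectors.
 * Each vector services a slice of up to 64 mbox devices, described by
 * the per-vector 'start' and 'mdevs' fields.
 */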
int cn20k_register_afvf_mbox_intr(struct rvu *rvu, int pf_vec_start)
{
	struct rvu_irq_data *irq_data;
	int intr_vec, offset, vec = 0;
	int err;

	/* irq data for 4 VFPF intr vectors */
	irq_data = devm_kcalloc(rvu->dev, 4,
				sizeof(struct rvu_irq_data), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	for (intr_vec = RVU_MBOX_PF_INT_VEC_VFPF_MBOX0; intr_vec <=
					RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1;
					intr_vec++, vec++) {
		switch (intr_vec) {
		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX0:
			irq_data[vec].intr_status =
						RVU_MBOX_PF_VFPF_INTX(0);
			irq_data[vec].start = 0;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_MBOX_PF_INT_VEC_VFPF_MBOX1:
			irq_data[vec].intr_status =
						RVU_MBOX_PF_VFPF_INTX(1);
			irq_data[vec].start = 64;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX0:
			irq_data[vec].intr_status =
						RVU_MBOX_PF_VFPF1_INTX(0);
			irq_data[vec].start = 0;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_MBOX_PF_INT_VEC_VFPF1_MBOX1:
			irq_data[vec].intr_status = RVU_MBOX_PF_VFPF1_INTX(1);
			irq_data[vec].start = 64;
			irq_data[vec].mdevs = 64;
			break;
		}
		irq_data[vec].afvf_queue_work_hdlr =
						rvu_queue_work;
		offset = pf_vec_start + intr_vec;
		irq_data[vec].vec_num = offset;
		irq_data[vec].rvu = rvu;

		sprintf(&rvu->irq_name[offset * NAME_SIZE], "RVUAF VFAF%d Mbox%d",
			vec / 2, vec % 2);
		err = request_irq(pci_irq_vector(rvu->pdev, offset),
				  rvu->ng_rvu->rvu_mbox_ops->afvf_intr_handler, 0,
				  &rvu->irq_name[offset * NAME_SIZE],
				  &irq_data[vec]);
		if (err) {
			dev_err(rvu->dev,
				"RVUAF: IRQ registration failed for AFVF mbox irq\n");
			return err;
		}
		rvu->irq_allocated[offset] = true;
	}

	return 0;
}

/* CN20K mbox PFx => AF irq handler */
static irqreturn_t cn20k_mbox_pf_common_intr_handler(int irq, void *rvu_irq)
{
	struct rvu_irq_data *rvu_irq_data = rvu_irq;
	struct rvu *rvu = rvu_irq_data->rvu;
	u64 intr;

	/* Clear interrupts */
	intr = rvu_read64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status);
	rvu_write64(rvu, BLKADDR_RVUM, rvu_irq_data->intr_status, intr);

	if (intr)
		trace_otx2_msg_interrupt(rvu->pdev, "PF(s) to AF", intr);

	/* Sync with mbox memory region */
	rmb();

	rvu_irq_data->rvu_queue_work_hdlr(&rvu->afpf_wq_info,
					  rvu_irq_data->start,
					  rvu_irq_data->mdevs, intr);

	return IRQ_HANDLED;
}

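/* Clear any stale PF => AF mbox interrupts and enable them for all PFs
 * except PF0, i.e. the AF itself.
 */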
void cn20k_rvu_enable_mbox_intr(struct rvu *rvu)
{
	struct rvu_hwinfo *hw = rvu->hw;

	/* Clear spurious irqs, if any */
	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_MBOX_AF_PFAF_INT(0), INTR_MASK(hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_MBOX_AF_PFAF_INT(1), INTR_MASK(hw->total_pfs - 64));

	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_MBOX_AF_PFAF1_INT(0), INTR_MASK(hw->total_pfs));

	rvu_write64(rvu, BLKADDR_RVUM,
		    RVU_MBOX_AF_PFAF1_INT(1), INTR_MASK(hw->total_pfs - 64));

	/* Enable mailbox interrupt for all PFs except PF0, i.e. the AF itself */
	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(0),
		    INTR_MASK(hw->total_pfs) & ~1ULL);

	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1S(1),
		    INTR_MASK(hw->total_pfs - 64));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(0),
		    INTR_MASK(hw->total_pfs) & ~1ULL);

	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1S(1),
		    INTR_MASK(hw->total_pfs - 64));
}

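/* Mask the PF => AF mbox interrupts enabled by cn20k_rvu_enable_mbox_intr(). */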
void cn20k_rvu_unregister_interrupts(struct rvu *rvu)
{
	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(0),
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF_INT_ENA_W1C(1),
		    INTR_MASK(rvu->hw->total_pfs - 64));

	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(0),
		    INTR_MASK(rvu->hw->total_pfs) & ~1ULL);

	rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFAF1_INT_ENA_W1C(1),
		    INTR_MASK(rvu->hw->total_pfs - 64));
}

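/* Request the four PF => AF (PFAF/PFAF1 mbox 0 and 1) interrupt vectors.
 * As with the AF-VF vectors, 'start' and 'mdevs' describe the slice of
 * mbox devices serviced by each vector.
 */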
int cn20k_register_afpf_mbox_intr(struct rvu *rvu)
{
	struct rvu_irq_data *irq_data;
	int intr_vec, ret, vec = 0;

	/* irq data for 4 PF intr vectors */
	irq_data = devm_kcalloc(rvu->dev, 4,
				sizeof(struct rvu_irq_data), GFP_KERNEL);
	if (!irq_data)
		return -ENOMEM;

	for (intr_vec = RVU_AF_CN20K_INT_VEC_PFAF_MBOX0; intr_vec <=
				RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1; intr_vec++,
				vec++) {
		switch (intr_vec) {
		case RVU_AF_CN20K_INT_VEC_PFAF_MBOX0:
			irq_data[vec].intr_status =
						RVU_MBOX_AF_PFAF_INT(0);
			irq_data[vec].start = 0;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_AF_CN20K_INT_VEC_PFAF_MBOX1:
			irq_data[vec].intr_status =
						RVU_MBOX_AF_PFAF_INT(1);
			irq_data[vec].start = 64;
			irq_data[vec].mdevs = 96;
			break;
		case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX0:
			irq_data[vec].intr_status =
						RVU_MBOX_AF_PFAF1_INT(0);
			irq_data[vec].start = 0;
			irq_data[vec].mdevs = 64;
			break;
		case RVU_AF_CN20K_INT_VEC_PFAF1_MBOX1:
			irq_data[vec].intr_status =
						RVU_MBOX_AF_PFAF1_INT(1);
			irq_data[vec].start = 64;
			irq_data[vec].mdevs = 96;
			break;
		}
		irq_data[vec].rvu_queue_work_hdlr = rvu_queue_work;
		irq_data[vec].vec_num = intr_vec;
		irq_data[vec].rvu = rvu;

		/* Register mailbox interrupt handler */
		sprintf(&rvu->irq_name[intr_vec * NAME_SIZE],
			"RVUAF PFAF%d Mbox%d",
			vec / 2, vec % 2);
		ret = request_irq(pci_irq_vector(rvu->pdev, intr_vec),
				  rvu->ng_rvu->rvu_mbox_ops->pf_intr_handler, 0,
				  &rvu->irq_name[intr_vec * NAME_SIZE],
				  &irq_data[vec]);
		if (ret)
			return ret;

		rvu->irq_allocated[intr_vec] = true;
	}

	return 0;
}

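/* Return the AF-side virtual address of each PF's (or VF's) mbox region.
 * On CN20K these regions live in memory allocated by the AF itself
 * (see rvu_alloc_mbox_memory()); the AF accesses them directly while the
 * PFs/VFs see the same memory aliased through BAR2.
 */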
int cn20k_rvu_get_mbox_regions(struct rvu *rvu, void **mbox_addr,
			       int num, int type, unsigned long *pf_bmap)
{
	int region;
	u64 bar;

	if (type == TYPE_AFVF) {
		for (region = 0; region < num; region++) {
			if (!test_bit(region, pf_bmap))
				continue;

			bar = (u64)phys_to_virt((u64)rvu->ng_rvu->vf_mbox_addr->base);
			bar += region * MBOX_SIZE;
			mbox_addr[region] = (void *)bar;

			if (!mbox_addr[region])
				return -ENOMEM;
		}
		return 0;
	}

	for (region = 0; region < num; region++) {
		if (!test_bit(region, pf_bmap))
			continue;

		bar = (u64)phys_to_virt((u64)rvu->ng_rvu->pf_mbox_addr->base);
		bar += region * MBOX_SIZE;

		mbox_addr[region] = (void *)bar;

		if (!mbox_addr[region])
			return -ENOMEM;
	}
	return 0;
}

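/* Allocate the shared mbox memory for AF <=> PFx or PF <=> VFx
 * communication and program its IOVA into the RVU mbox address registers.
 */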
static int rvu_alloc_mbox_memory(struct rvu *rvu, int type,
				 int ndevs, int mbox_size)
{
	struct qmem *mbox_addr;
	dma_addr_t iova;
	int pf, err;

	/* Allocate contiguous memory for mailbox communication,
	 * e.g. AF <=> PFx mbox memory.
	 * This allocated memory is split into chunks of MBOX_SIZE
	 * and set up for each of the RVU PFs. In HW this memory will
	 * get aliased to an offset within BAR2 of those PFs.
	 *
	 * AF will access mbox memory using direct physical addresses
	 * and PFs will access the same shared memory from BAR2.
	 *
	 * PF <=> VF mbox memory also works in the same fashion.
	 * Both AFPF and PFVF mailboxes require an IOVA to be used to
	 * maintain the mailbox msgs.
	 */

	err = qmem_alloc(rvu->dev, &mbox_addr, ndevs, mbox_size);
	if (err)
		return -ENOMEM;

	switch (type) {
	case TYPE_AFPF:
		rvu->ng_rvu->pf_mbox_addr = mbox_addr;
		iova = (u64)mbox_addr->iova;
		for (pf = 0; pf < ndevs; pf++) {
			rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_AF_PFX_ADDR(pf),
				    (u64)iova);
			iova += mbox_size;
		}
		break;
	case TYPE_AFVF:
		rvu->ng_rvu->vf_mbox_addr = mbox_addr;
		rvupf_write64(rvu, RVU_PF_VF_MBOX_ADDR, (u64)mbox_addr->iova);
		break;
	default:
		return 0;
	}

	return 0;
}

static struct mbox_ops cn20k_mbox_ops = {
	.pf_intr_handler = cn20k_mbox_pf_common_intr_handler,
	.afvf_intr_handler = cn20k_afvf_mbox_intr_handler,
};

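/* Set up CN20K mbox operations, program the mbox region size into the
 * RVU config registers and allocate the backing mbox memory.
 * Does nothing on silicon other than CN20K.
 */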
int cn20k_rvu_mbox_init(struct rvu *rvu, int type, int ndevs)
{
	int dev;

	if (!is_cn20k(rvu->pdev))
		return 0;

	rvu->ng_rvu->rvu_mbox_ops = &cn20k_mbox_ops;

	if (type == TYPE_AFVF) {
		rvu_write64(rvu, BLKADDR_RVUM, RVU_MBOX_PF_VF_CFG, ilog2(MBOX_SIZE));
	} else {
		for (dev = 0; dev < ndevs; dev++)
			rvu_write64(rvu, BLKADDR_RVUM,
				    RVU_MBOX_AF_PFX_CFG(dev), ilog2(MBOX_SIZE));
	}

	return rvu_alloc_mbox_memory(rvu, type, ndevs, MBOX_SIZE);
}

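/* Free the AF <=> PF and PF <=> VF mbox memory allocated by
 * cn20k_rvu_mbox_init().
 */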
void cn20k_free_mbox_memory(struct rvu *rvu)
{
	if (!is_cn20k(rvu->pdev))
		return;

	qmem_free(rvu->dev, rvu->ng_rvu->pf_mbox_addr);
	qmem_free(rvu->dev, rvu->ng_rvu->vf_mbox_addr);
}

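/* Mask VF => PF mbox, FLR and ME interrupts for up to 128 VFs. */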
void cn20k_rvu_disable_afvf_intr(struct rvu *rvu, int vfs)
{
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(0), INTR_MASK(vfs));

	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1CX(1), INTR_MASK(vfs - 64));
}

void cn20k_rvu_enable_afvf_intr(struct rvu *rvu, int vfs)
{
	/* Clear any pending interrupts and enable AF VF interrupts for
	 * the first 64 VFs.
	 */
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* FLR */
	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(0), INTR_MASK(vfs));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(0), INTR_MASK(vfs));

	/* Same for remaining VFs, if any. */
	if (vfs <= 64)
		return;

	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_MBOX_PF_VFPF1_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));

	rvupf_write64(rvu, RVU_PF_VFFLR_INTX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFFLR_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
	rvupf_write64(rvu, RVU_PF_VFME_INT_ENA_W1SX(1), INTR_MASK(vfs - 64));
}

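/* Allocate memory for a NIX LF's completion queue (CINT) and queue (QINT)
 * interrupt HW contexts and program their base IOVAs. Skipped on OcteonTx2
 * and CN20K silicon.
 */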
int rvu_alloc_cint_qint_mem(struct rvu *rvu, struct rvu_pfvf *pfvf,
			    int blkaddr, int nixlf)
{
	int qints, hwctx_size, err;
	u64 cfg, ctx_cfg;

	if (is_rvu_otx2(rvu) || is_cn20k(rvu->pdev))
		return 0;

	ctx_cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST3);
	/* Alloc memory for CQINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 24) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 24) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->cq_ints_ctx, qints, hwctx_size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_CINTS_BASE(nixlf),
		    (u64)pfvf->cq_ints_ctx->iova);

	/* Alloc memory for QINT's HW contexts */
	cfg = rvu_read64(rvu, blkaddr, NIX_AF_CONST2);
	qints = (cfg >> 12) & 0xFFF;
	hwctx_size = 1UL << ((ctx_cfg >> 20) & 0xF);
	err = qmem_alloc(rvu->dev, &pfvf->nix_qints_ctx, qints, hwctx_size);
	if (err)
		return -ENOMEM;

	rvu_write64(rvu, blkaddr, NIX_AF_LFX_QINTS_BASE(nixlf),
		    (u64)pfvf->nix_qints_ctx->iova);

	return 0;
}