// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Ethernet driver
 *
 * Copyright (C) 2024 Marvell.
 *
 */

#include <linux/bpf_trace.h>
#include <linux/stringify.h>
#include <net/xdp_sock_drv.h>
#include <net/xdp.h>

#include "otx2_common.h"
#include "otx2_xsk.h"

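/* Refill helper for AF_XDP zero-copy RQs: take one buffer from the XSK
 * pool, remember it in pool->xdp[] for later release and return its
 * aligned DMA address, adjusting xdp->data by the same alignment delta.
 */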
int otx2_xsk_pool_alloc_buf(struct otx2_nic *pfvf, struct otx2_pool *pool,
			    dma_addr_t *dma, int idx)
{
	struct xdp_buff *xdp;
	int delta;

	xdp = xsk_buff_alloc(pool->xsk_pool);
	if (!xdp)
		return -ENOMEM;

	pool->xdp[pool->xdp_top++] = xdp;
	*dma = OTX2_DATA_ALIGN(xsk_buff_xdp_get_dma(xdp));
	/* Adjust xdp->data for unaligned addresses */
	delta = *dma - xsk_buff_xdp_get_dma(xdp);
	xdp->data += delta;

	return 0;
}

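/* Disable the RQ context and its backing NPA aura and pool contexts
 * through the AF mailbox. On a mailbox allocation failure any partially
 * queued requests are dropped via otx2_mbox_reset().
 */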
static int otx2_xsk_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
{
	struct nix_cn10k_aq_enq_req *cn10k_rq_aq;
	struct npa_aq_enq_req *aura_aq;
	struct npa_aq_enq_req *pool_aq;
	struct nix_aq_enq_req *rq_aq;

	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_rq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_rq_aq)
			return -ENOMEM;
		cn10k_rq_aq->qidx = qidx;
		cn10k_rq_aq->rq.ena = 0;
		cn10k_rq_aq->rq_mask.ena = 1;
		cn10k_rq_aq->ctype = NIX_AQ_CTYPE_RQ;
		cn10k_rq_aq->op = NIX_AQ_INSTOP_WRITE;
	} else {
		rq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!rq_aq)
			return -ENOMEM;
		rq_aq->qidx = qidx;
		/* Context type is RQ, so clear the RQ enable bit */
		rq_aq->rq.ena = 0;
		rq_aq->rq_mask.ena = 1;
		rq_aq->ctype = NIX_AQ_CTYPE_RQ;
		rq_aq->op = NIX_AQ_INSTOP_WRITE;
	}

	aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aura_aq)
		goto fail;

	aura_aq->aura_id = aura_id;
	aura_aq->aura.ena = 0;
	aura_aq->aura_mask.ena = 1;
	aura_aq->ctype = NPA_AQ_CTYPE_AURA;
	aura_aq->op = NPA_AQ_INSTOP_WRITE;

	pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!pool_aq)
		goto fail;

	pool_aq->aura_id = aura_id;
	pool_aq->pool.ena = 0;
	pool_aq->pool_mask.ena = 1;

	pool_aq->ctype = NPA_AQ_CTYPE_POOL;
	pool_aq->op = NPA_AQ_INSTOP_WRITE;

	return otx2_sync_mbox_msg(&pfvf->mbox);

fail:
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	return -ENOMEM;
}

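/* Release all receive resources of 'qidx': drain pending CQEs, free the
 * buffers still sitting in the aura and disable the RQ/aura/pool contexts.
 */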
static void otx2_clean_up_rq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_cq_queue *cq;
	struct otx2_pool *pool;
	u64 iova;

	/* If the DOWN flag is set the queues are already freed */
	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return;

	cq = &qset->cq[qidx];
	if (cq)
		otx2_cleanup_rx_cqes(pfvf, cq, qidx);

	pool = &pfvf->qset.pool[qidx];
	iova = otx2_aura_allocptr(pfvf, qidx);
	while (iova) {
		iova -= OTX2_HEAD_ROOM;
		otx2_free_bufs(pfvf, pool, iova, pfvf->rbsize);
		iova = otx2_aura_allocptr(pfvf, qidx);
	}

	mutex_lock(&pfvf->mbox.lock);
	otx2_xsk_ctx_disable(pfvf, qidx, qidx);
	mutex_unlock(&pfvf->mbox.lock);
}

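/* Enable AF_XDP zero-copy on 'qidx': DMA map the UMEM, mark the queue as
 * zero-copy, drop its current buffers, take it out of the RSS table and
 * wake the NAPI context so RX can start on the new pool.
 */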
int otx2_xsk_pool_enable(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
{
	u16 rx_queues = pf->hw.rx_queues;
	u16 tx_queues = pf->hw.tx_queues;
	int err;

	if (qidx >= rx_queues || qidx >= tx_queues)
		return -EINVAL;

	err = xsk_pool_dma_map(pool, pf->dev, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	if (err)
		return err;

	set_bit(qidx, pf->af_xdp_zc_qidx);
	otx2_clean_up_rq(pf, qidx);
	/* Reconfigure RSS table as 'qidx' cannot be part of RSS now */
	otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);
	/* Kick start the NAPI context so that receiving will start */
	return otx2_xsk_wakeup(pf->netdev, qidx, XDP_WAKEUP_RX);
}

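/* Disable AF_XDP zero-copy on 'qidx': detach the pool from the XDP send
 * queue, release the queue's buffers, unmap the UMEM and add the queue
 * back to the RSS table.
 */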
int otx2_xsk_pool_disable(struct otx2_nic *pf, u16 qidx)
{
	struct net_device *netdev = pf->netdev;
	struct xsk_buff_pool *pool;
	struct otx2_snd_queue *sq;

	pool = xsk_get_pool_from_qid(netdev, qidx);
	if (!pool)
		return -EINVAL;

	sq = &pf->qset.sq[qidx + pf->hw.tx_queues];
	sq->xsk_pool = NULL;
	otx2_clean_up_rq(pf, qidx);
	clear_bit(qidx, pf->af_xdp_zc_qidx);
	xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	/* Reconfigure RSS table as 'qidx' has to be part of RSS again */
	otx2_set_rss_table(pf, DEFAULT_RSS_CONTEXT_GROUP);

	return 0;
}

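/* Enable or disable an AF_XDP buffer pool on 'qidx' depending on whether
 * 'pool' is provided.
 */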
int otx2_xsk_pool_setup(struct otx2_nic *pf, struct xsk_buff_pool *pool, u16 qidx)
{
	if (pool)
		return otx2_xsk_pool_enable(pf, pool, qidx);

	return otx2_xsk_pool_disable(pf, qidx);
}

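/* AF_XDP wakeup handler: if NAPI is not already scheduled for this queue,
 * raise its CQ interrupt so the poll routine runs and services the rings.
 */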
int otx2_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags)
{
	struct otx2_nic *pf = netdev_priv(dev);
	struct otx2_cq_poll *cq_poll = NULL;
	struct otx2_qset *qset = &pf->qset;

	if (pf->flags & OTX2_FLAG_INTF_DOWN)
		return -ENETDOWN;

	if (queue_id >= pf->hw.rx_queues || queue_id >= pf->hw.tx_queues)
		return -EINVAL;

	cq_poll = &qset->napi[queue_id];
	if (!cq_poll)
		return -EINVAL;

	/* Trigger interrupt */
	if (!napi_if_scheduled_mark_missed(&cq_poll->napi)) {
		otx2_write64(pf, NIX_LF_CINTX_ENA_W1S(cq_poll->cint_idx), BIT_ULL(0));
		otx2_write64(pf, NIX_LF_CINTX_INT_W1S(cq_poll->cint_idx), BIT_ULL(0));
	}

	return 0;
}

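/* Point the XDP send queue at the XSK pool when zero-copy is enabled
 * on 'qidx'.
 */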
void otx2_attach_xsk_buff(struct otx2_nic *pfvf, struct otx2_snd_queue *sq, int qidx)
{
	if (test_bit(qidx, pfvf->af_xdp_zc_qidx))
		sq->xsk_pool = xsk_get_pool_from_qid(pfvf->netdev, qidx);
}

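/* Zero-copy TX from NAPI context: pull up to 'budget' descriptors from
 * the XSK TX ring, append them to the XDP send queue and notify the
 * socket via xsk_tx_release() when any work was done.
 */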
void otx2_zc_napi_handler(struct otx2_nic *pfvf, struct xsk_buff_pool *pool,
			  int queue, int budget)
{
	struct xdp_desc *xdp_desc = pool->tx_descs;
	int err, i, work_done = 0, batch;

	budget = min(budget, otx2_read_free_sqe(pfvf, queue));
	batch = xsk_tx_peek_release_desc_batch(pool, budget);
	if (!batch)
		return;

	for (i = 0; i < batch; i++) {
		dma_addr_t dma_addr;

		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc[i].addr);
		err = otx2_xdp_sq_append_pkt(pfvf, NULL, dma_addr, xdp_desc[i].len,
					     queue, OTX2_AF_XDP_FRAME);
		/* Stop if the descriptor could not be appended to the send queue */
		if (!err) {
			netdev_err(pfvf->netdev, "AF_XDP: Unable to transfer packet\n");
			break;
		}
		work_done++;
	}

	if (work_done)
		xsk_tx_release(pool);
}