// SPDX-License-Identifier: GPL-2.0
/* Marvell RVU Physical Function ethernet driver
 *
 * Copyright (C) 2023 Marvell.
 *
 */

#include <linux/netdevice.h>
#include <net/tso.h>

#include "cn10k.h"
#include "otx2_reg.h"
#include "otx2_common.h"
#include "otx2_txrx.h"
#include "otx2_struct.h"

#define OTX2_QOS_MAX_LEAF_NODES 16

static void otx2_qos_aura_pool_free(struct otx2_nic *pfvf, int pool_id)
{
	struct otx2_pool *pool;

	if (!pfvf->qset.pool)
		return;

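	/* Release the SQB stack memory and the aura fill-count (fc_addr)
	 * buffer backing this pool.
	 */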
	pool = &pfvf->qset.pool[pool_id];
	qmem_free(pfvf->dev, pool->stack);
	qmem_free(pfvf->dev, pool->fc_addr);
	pool->stack = NULL;
	pool->fc_addr = NULL;
}

static int otx2_qos_sq_aura_pool_init(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	int pool_id, stack_pages, num_sqbs;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_pool *pool;
	dma_addr_t bufptr;
	int err, ptr;
	u64 iova, pa;

	/* Calculate the number of SQBs needed.
	 *
	 * For a 128-byte SQE and a 4K SQB, 31 SQEs fit in one SQB.
	 * The last SQE slot is reserved for pointing to the next SQB.
	 */
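	/* e.g. 4096-byte SQB / 128-byte SQE = 32 slots, minus the
	 * chaining slot = 31 usable SQEs per SQB.
	 */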
	num_sqbs = (hw->sqb_size / 128) - 1;
	num_sqbs = (qset->sqe_cnt + num_sqbs) / num_sqbs;

	/* Get the number of stack pages needed */
	stack_pages = (num_sqbs + hw->stack_pg_ptrs - 1) / hw->stack_pg_ptrs;

	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, qidx);
	pool = &pfvf->qset.pool[pool_id];

	/* Initialize aura context */
	err = otx2_aura_init(pfvf, pool_id, pool_id, num_sqbs);
	if (err)
		return err;

	/* Initialize pool context */
	err = otx2_pool_init(pfvf, pool_id, stack_pages,
			     num_sqbs, hw->sqb_size, AURA_NIX_SQ);
	if (err)
		goto aura_free;

	/* Flush accumulated messages */
	err = otx2_sync_mbox_msg(&pfvf->mbox);
	if (err)
		goto pool_free;

	/* Allocate pointers and free them to aura/pool */
	sq = &qset->sq[qidx];
	sq->sqb_count = 0;
	sq->sqb_ptrs = kcalloc(num_sqbs, sizeof(*sq->sqb_ptrs), GFP_KERNEL);
	if (!sq->sqb_ptrs) {
		err = -ENOMEM;
		goto pool_free;
	}

	for (ptr = 0; ptr < num_sqbs; ptr++) {
		err = otx2_alloc_rbuf(pfvf, pool, &bufptr, pool_id, ptr);
		if (err)
			goto sqb_free;
		pfvf->hw_ops->aura_freeptr(pfvf, pool_id, bufptr);
		sq->sqb_ptrs[sq->sqb_count++] = (u64)bufptr;
	}

	return 0;

sqb_free:
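	/* Undo the partial setup: unmap and free every SQB already pushed
	 * to the aura, and pop a pointer back from the aura for each so
	 * its count stays consistent.
	 */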
	while (ptr--) {
		if (!sq->sqb_ptrs[ptr])
			continue;
		iova = sq->sqb_ptrs[ptr];
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		put_page(virt_to_page(phys_to_virt(pa)));
		otx2_aura_allocptr(pfvf, pool_id);
	}
	sq->sqb_count = 0;
	kfree(sq->sqb_ptrs);
pool_free:
	qmem_free(pfvf->dev, pool->stack);
aura_free:
	qmem_free(pfvf->dev, pool->fc_addr);
	otx2_mbox_reset(&pfvf->mbox.mbox, 0);
	return err;
}

static void otx2_qos_sq_free_sqbs(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	u64 iova, pa;
	int sqb;

	sq = &qset->sq[qidx];
	if (!sq->sqb_ptrs)
		return;
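	/* Unmap and release every SQB buffer stashed for this queue */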
	for (sqb = 0; sqb < sq->sqb_count; sqb++) {
		if (!sq->sqb_ptrs[sqb])
			continue;
		iova = sq->sqb_ptrs[sqb];
		pa = otx2_iova_to_phys(pfvf->iommu_domain, iova);
		dma_unmap_page_attrs(pfvf->dev, iova, hw->sqb_size,
				     DMA_FROM_DEVICE,
				     DMA_ATTR_SKIP_CPU_SYNC);
		put_page(virt_to_page(phys_to_virt(pa)));
	}

	sq->sqb_count = 0;

	sq = &qset->sq[qidx];
	qmem_free(pfvf->dev, sq->sqe);
	qmem_free(pfvf->dev, sq->tso_hdrs);
	kfree(sq->sg);
	kfree(sq->sqb_ptrs);
	qmem_free(pfvf->dev, sq->timestamps);

	memset((void *)sq, 0, sizeof(*sq));
}

/* Give the HW a brief window to drain in-flight SQEs on this send queue */
static void otx2_qos_sqb_flush(struct otx2_nic *pfvf, int qidx)
{
	int sqe_tail, sqe_head;
	void __iomem *ptr;
	u64 incr, val;

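	/* An atomic add to NIX_LF_SQ_OP_STATUS, with the SQ index in the
	 * upper word of the operand, returns the queue's status word.
	 */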
	ptr = otx2_get_regaddr(pfvf, NIX_LF_SQ_OP_STATUS);
	incr = (u64)qidx << 32;
	val = otx2_atomic64_add(incr, ptr);
	sqe_head = (val >> 20) & 0x3F;
	sqe_tail = (val >> 28) & 0x3F;
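	/* Head != tail means SQEs are still in flight; give them a short
	 * time to drain.
	 */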
	if (sqe_head != sqe_tail)
		usleep_range(50, 60);
}

static int otx2_qos_ctx_disable(struct otx2_nic *pfvf, u16 qidx, int aura_id)
{
	struct nix_cn10k_aq_enq_req *cn10k_sq_aq;
	struct npa_aq_enq_req *aura_aq;
	struct npa_aq_enq_req *pool_aq;
	struct nix_aq_enq_req *sq_aq;

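	/* Disable the SQ context first; CN10K silicon uses its own AQ
	 * request format.
	 */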
	if (test_bit(CN10K_LMTST, &pfvf->hw.cap_flag)) {
		cn10k_sq_aq = otx2_mbox_alloc_msg_nix_cn10k_aq_enq(&pfvf->mbox);
		if (!cn10k_sq_aq)
			return -ENOMEM;
		cn10k_sq_aq->qidx = qidx;
		cn10k_sq_aq->sq.ena = 0;
		cn10k_sq_aq->sq_mask.ena = 1;
		cn10k_sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		cn10k_sq_aq->op = NIX_AQ_INSTOP_WRITE;
	} else {
		sq_aq = otx2_mbox_alloc_msg_nix_aq_enq(&pfvf->mbox);
		if (!sq_aq)
			return -ENOMEM;
		sq_aq->qidx = qidx;
		sq_aq->sq.ena = 0;
		sq_aq->sq_mask.ena = 1;
		sq_aq->ctype = NIX_AQ_CTYPE_SQ;
		sq_aq->op = NIX_AQ_INSTOP_WRITE;
	}

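	/* Then disable the aura that backs this SQ's SQB pool */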
	aura_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!aura_aq) {
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		return -ENOMEM;
	}

	aura_aq->aura_id = aura_id;
	aura_aq->aura.ena = 0;
	aura_aq->aura_mask.ena = 1;
	aura_aq->ctype = NPA_AQ_CTYPE_AURA;
	aura_aq->op = NPA_AQ_INSTOP_WRITE;

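	/* Finally disable the pool itself */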
	pool_aq = otx2_mbox_alloc_msg_npa_aq_enq(&pfvf->mbox);
	if (!pool_aq) {
		otx2_mbox_reset(&pfvf->mbox.mbox, 0);
		return -ENOMEM;
	}

	pool_aq->aura_id = aura_id;
	pool_aq->pool.ena = 0;
	pool_aq->pool_mask.ena = 1;

	pool_aq->ctype = NPA_AQ_CTYPE_POOL;
	pool_aq->op = NPA_AQ_INSTOP_WRITE;

	return otx2_sync_mbox_msg(&pfvf->mbox);
}

int otx2_qos_get_qid(struct otx2_nic *pfvf)
{
	int qidx;

	qidx = find_first_zero_bit(pfvf->qos.qos_sq_bmap,
				   pfvf->hw.tc_tx_queues);

	return qidx == pfvf->hw.tc_tx_queues ? -ENOSPC : qidx;
}

void otx2_qos_free_qid(struct otx2_nic *pfvf, int qidx)
{
	clear_bit(qidx, pfvf->qos.qos_sq_bmap);
}

int otx2_qos_enable_sq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_hw *hw = &pfvf->hw;
	int pool_id, sq_idx, err;

	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return -EPERM;

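	/* QoS send queues are indexed after the regular (non-QoS) TX queues */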
	sq_idx = hw->non_qos_queues + qidx;

	mutex_lock(&pfvf->mbox.lock);
	err = otx2_qos_sq_aura_pool_init(pfvf, sq_idx);
	if (err)
		goto out;

	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);
	err = otx2_sq_init(pfvf, sq_idx, pool_id);
	if (err)
		goto out;
out:
	mutex_unlock(&pfvf->mbox.lock);
	return err;
}

static int otx2_qos_nix_npa_ndc_sync(struct otx2_nic *pfvf)
{
	struct ndc_sync_op *req;
	int rc;

	mutex_lock(&pfvf->mbox.lock);

	req = otx2_mbox_alloc_msg_ndc_sync_op(&pfvf->mbox);
	if (!req) {
		mutex_unlock(&pfvf->mbox.lock);
		return -ENOMEM;
	}

	req->nix_lf_tx_sync = true;
	req->npa_lf_sync = true;
	rc = otx2_sync_mbox_msg(&pfvf->mbox);
	mutex_unlock(&pfvf->mbox.lock);
	return rc;
}

void otx2_qos_disable_sq(struct otx2_nic *pfvf, int qidx)
{
	struct otx2_qset *qset = &pfvf->qset;
	struct otx2_hw *hw = &pfvf->hw;
	struct otx2_snd_queue *sq;
	struct otx2_cq_queue *cq;
	int pool_id, sq_idx;

	sq_idx = hw->non_qos_queues + qidx;

	/* If the DOWN flag is set, the SQs are already freed */
	if (pfvf->flags & OTX2_FLAG_INTF_DOWN)
		return;

	sq = &pfvf->qset.sq[sq_idx];
	if (!sq->sqb_ptrs)
		return;

	if (sq_idx < hw->non_qos_queues ||
	    sq_idx >= otx2_get_total_tx_queues(pfvf)) {
		netdev_err(pfvf->netdev, "Send Queue is not a QoS queue\n");
		return;
	}

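	/* TX CQs are laid out after the RX CQs in the cq array */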
	cq = &qset->cq[pfvf->hw.rx_queues + sq_idx];
	pool_id = otx2_get_pool_idx(pfvf, AURA_NIX_SQ, sq_idx);

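	/* Drain the SQ and its SMQ, sync the HW caches and reap any
	 * pending TX CQEs before tearing the contexts down.
	 */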
	otx2_qos_sqb_flush(pfvf, sq_idx);
	otx2_smq_flush(pfvf, otx2_get_smq_idx(pfvf, sq_idx));
	/* NIX/NPA NDC sync */
	otx2_qos_nix_npa_ndc_sync(pfvf);
	otx2_cleanup_tx_cqes(pfvf, cq);

	mutex_lock(&pfvf->mbox.lock);
	otx2_qos_ctx_disable(pfvf, sq_idx, pool_id);
	mutex_unlock(&pfvf->mbox.lock);

	otx2_qos_sq_free_sqbs(pfvf, sq_idx);
	otx2_qos_aura_pool_free(pfvf, pool_id);
}