xref: /linux/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c (revision 37816488247ddddbc3de113c78c83572274b1e2e)
1 /*
2  * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3  *
4  * This software is available to you under a choice of one of two
5  * licenses.  You may choose to be licensed under the terms of the GNU
6  * General Public License (GPL) Version 2, available from the file
7  * COPYING in the main directory of this source tree, or the
8  * OpenIB.org BSD license below:
9  *
10  *     Redistribution and use in source and binary forms, with or
11  *     without modification, are permitted provided that the following
12  *     conditions are met:
13  *
14  *      - Redistributions of source code must retain the above
15  *        copyright notice, this list of conditions and the following
16  *        disclaimer.
17  *
18  *      - Redistributions in binary form must reproduce the above
19  *        copyright notice, this list of conditions and the following
20  *        disclaimer in the documentation and/or other materials
21  *        provided with the distribution.
22  *
23  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30  * SOFTWARE.
31  */
32 
33 #include <linux/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool/helpers.h>
40 #include <net/inet_ecn.h>
41 #include <net/gro.h>
42 #include <net/udp.h>
43 #include <net/tcp.h>
44 #include <net/xdp_sock_drv.h>
45 #include "en.h"
46 #include "en/txrx.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "en_rep.h"
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/ipsec_rxtx.h"
55 #include "en_accel/ktls_txrx.h"
56 #include "en/xdp.h"
57 #include "en/xsk/rx.h"
58 #include "en/health.h"
59 #include "en/params.h"
60 #include "devlink.h"
61 #include "en/devlink.h"
62 
63 static struct sk_buff *
64 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
65 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
66 				u32 page_idx);
67 static struct sk_buff *
68 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
69 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
70 				   u32 page_idx);
71 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
72 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 
75 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
76 	.handle_rx_cqe       = mlx5e_handle_rx_cqe,
77 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
78 	.handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
79 };
80 
81 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
82 				       u32 cqcc, void *data)
83 {
84 	u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
85 
86 	memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
87 }
88 
89 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
90 					   struct mlx5_cqe64 *cqe)
91 {
92 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
93 	struct mlx5_cqe64 *title = &cqd->title;
94 
95 	memcpy(title, cqe, sizeof(struct mlx5_cqe64));
96 
97 	if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
98 		return;
99 
100 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
101 		cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
102 			mpwrq_get_cqe_consumed_strides(title);
103 	else
104 		cqd->wqe_counter =
105 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
106 }
107 
108 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
109 					 struct mlx5_cqwq *wq,
110 					 u32 cqcc)
111 {
112 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
113 	struct mlx5_cqe64 *title = &cqd->title;
114 
115 	mlx5e_read_cqe_slot(wq, cqcc, title);
116 	cqd->left        = be32_to_cpu(title->byte_cnt);
117 	cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
118 	rq->stats->cqe_compress_blks++;
119 }
120 
121 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
122 					    struct mlx5e_cq_decomp *cqd,
123 					    u32 cqcc)
124 {
125 	mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
126 	cqd->mini_arr_idx = 0;
127 }
128 
129 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
130 {
131 	u32 cqcc   = wq->cc;
132 	u8  op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
133 	u32 ci     = mlx5_cqwq_ctr2ix(wq, cqcc);
134 	u32 wq_sz  = mlx5_cqwq_get_size(wq);
135 	u32 ci_top = min_t(u32, wq_sz, ci + n);
136 
137 	for (; ci < ci_top; ci++, n--) {
138 		struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
139 
140 		cqe->op_own = op_own;
141 	}
142 
143 	if (unlikely(ci == wq_sz)) {
144 		op_own = !op_own;
145 		for (ci = 0; ci < n; ci++) {
146 			struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
147 
148 			cqe->op_own = op_own;
149 		}
150 	}
151 }
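
/* Illustrative walk-through of mlx5e_cqes_update_owner() with hypothetical
 * sizes: for a CQ of wq_sz = 8 at cc = 6 on an even wrap (ownership bit 0)
 * and n = 4 consumed CQEs, the first loop stamps indices 6 and 7 with the
 * current ownership bit; ci then hits wq_sz, the bit is flipped, and the
 * remaining two CQEs at indices 0 and 1 are stamped with the new value, so
 * the validity check keeps working across the wrap after decompression.
 */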
152 
153 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
154 					struct mlx5_cqwq *wq,
155 					u32 cqcc)
156 {
157 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
158 	struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
159 	struct mlx5_cqe64 *title = &cqd->title;
160 
161 	title->byte_cnt     = mini_cqe->byte_cnt;
162 	title->check_sum    = mini_cqe->checksum;
163 	title->op_own      &= 0xf0;
164 	title->op_own      |= 0x01 & (cqcc >> wq->fbc.log_sz);
165 
166 	/* state bit set implies linked-list striding RQ wq type and
167 	 * HW stride index capability supported
168 	 */
169 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
170 		title->wqe_counter = mini_cqe->stridx;
171 		return;
172 	}
173 
174 	/* HW stride index capability not supported */
175 	title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
176 	if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
177 		cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
178 	else
179 		cqd->wqe_counter =
180 			mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
181 }
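
/* Note on the op_own update in mlx5e_decompress_cqe(): the opcode nibble of
 * the title CQE is preserved while the validity bit is recomputed from the
 * wrap count of cqcc. E.g. with a hypothetical log_sz of 3 (an 8-entry CQ),
 * cqcc = 13 is on wrap 1 (13 >> 3), so the validity bit is set to 1.
 */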
182 
183 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
184 						struct mlx5_cqwq *wq,
185 						u32 cqcc)
186 {
187 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
188 
189 	mlx5e_decompress_cqe(rq, wq, cqcc);
190 	cqd->title.rss_hash_type   = 0;
191 	cqd->title.rss_hash_result = 0;
192 }
193 
194 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
195 					 struct mlx5_cqwq *wq,
196 					 struct mlx5_cqe64 *cqe,
197 					 int budget_rem)
198 {
199 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
200 	u32 cqcc, left;
201 	u32 i;
202 
203 	left = get_cqe_enhanced_num_mini_cqes(cqe);
204 	/* Avoid breaking the CQE compression session in the middle when the
205 	 * budget is not sufficient to handle all of it. In this case we
206 	 * return work_done == budget_rem to give the 'busy' NAPI indication.
207 	 */
208 	if (unlikely(left > budget_rem))
209 		return budget_rem;
210 
211 	cqcc = wq->cc;
212 	cqd->mini_arr_idx = 0;
213 	memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
214 	for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
215 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
216 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
217 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
218 				rq, &cqd->title);
219 	}
220 	wq->cc = cqcc;
221 	rq->stats->cqe_compress_pkts += left;
222 
223 	return left;
224 }
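
/* In enhanced CQE compression the 64B CQE just read is itself the mini-CQE
 * array (hence the memcpy into cqd->mini_arr above), so the whole session
 * is expanded against the title captured earlier by
 * mlx5e_read_enhanced_title_slot(), up to the mini array capacity.
 */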
225 
226 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
227 					     struct mlx5_cqwq *wq,
228 					     int update_owner_only,
229 					     int budget_rem)
230 {
231 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
232 	u32 cqcc = wq->cc + update_owner_only;
233 	u32 cqe_count;
234 	u32 i;
235 
236 	cqe_count = min_t(u32, cqd->left, budget_rem);
237 
238 	for (i = update_owner_only; i < cqe_count;
239 	     i++, cqd->mini_arr_idx++, cqcc++) {
240 		if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
241 			mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
242 
243 		mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
244 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
245 				mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
246 				rq, &cqd->title);
247 	}
248 	mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
249 	wq->cc = cqcc;
250 	cqd->left -= cqe_count;
251 	rq->stats->cqe_compress_pkts += cqe_count;
252 
253 	return cqe_count;
254 }
255 
256 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
257 					      struct mlx5_cqwq *wq,
258 					      int budget_rem)
259 {
260 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
261 	u32 cc = wq->cc;
262 
263 	mlx5e_read_title_slot(rq, wq, cc);
264 	mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
265 	mlx5e_decompress_cqe(rq, wq, cc);
266 	INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
267 			mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
268 			rq, &cqd->title);
269 	cqd->mini_arr_idx++;
270 
271 	return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
272 }
273 
274 #define MLX5E_PAGECNT_BIAS_MAX (PAGE_SIZE / 64)
275 
276 static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
277 				       struct mlx5e_frag_page *frag_page)
278 {
279 	netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
280 
281 	if (unlikely(!netmem))
282 		return -ENOMEM;
283 
284 	page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
285 
286 	*frag_page = (struct mlx5e_frag_page) {
287 		.netmem	= netmem,
288 		.frags	= 0,
289 	};
290 
291 	return 0;
292 }
293 
294 static void mlx5e_page_release_fragmented(struct page_pool *pp,
295 					  struct mlx5e_frag_page *frag_page)
296 {
297 	u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
298 	netmem_ref netmem = frag_page->netmem;
299 
300 	if (page_pool_unref_netmem(netmem, drain_count) == 0)
301 		page_pool_put_unrefed_netmem(pp, netmem, -1, true);
302 }
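
/* Sketch of the pagecnt-bias scheme implemented by the two helpers above:
 * allocation pre-charges the netmem with MLX5E_PAGECNT_BIAS_MAX page pool
 * fragment references, and frag_page->frags counts how many were actually
 * handed out (e.g. to SKB frags). Release then drops the unused remainder
 * in one go (drain_count = MLX5E_PAGECNT_BIAS_MAX - frags), so a page
 * referenced by, say, 3 in-flight SKB fragments keeps exactly those 3
 * references and returns to the pool only once they are all put.
 */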
303 
304 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
305 				    struct mlx5e_wqe_frag_info *frag)
306 {
307 	int err = 0;
308 
309 	if (!frag->offset)
310 		/* Only the first frag (offset == 0) replenishes the page.
311 		 * Other frags that point to the same page (with a different
312 		 * offset) reuse the new one without replenishing it again
313 		 * themselves.
314 		 */
315 		err = mlx5e_page_alloc_fragmented(rq->page_pool,
316 						  frag->frag_page);
317 
318 	return err;
319 }
320 
321 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
322 {
323 #define CAN_RELEASE_MASK \
324 	(BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
325 
326 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
327 
328 	return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
329 }
330 
331 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
332 				     struct mlx5e_wqe_frag_info *frag)
333 {
334 	if (mlx5e_frag_can_release(frag))
335 		mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
336 }
337 
338 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
339 {
340 	return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
341 }
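
/* Hypothetical example of the indexing in get_frag(): with
 * log_num_frags = 2 (4 fragments per WQE), WQE index 5 maps to
 * &rq->wqe.frags[20], i.e. each WQE owns a stride of 1 << log_num_frags
 * consecutive entries in the frags array.
 */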
342 
343 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
344 			      u16 ix)
345 {
346 	struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
347 	int err;
348 	int i;
349 
350 	for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
351 		dma_addr_t addr;
352 		u16 headroom;
353 
354 		err = mlx5e_get_rx_frag(rq, frag);
355 		if (unlikely(err))
356 			goto free_frags;
357 
358 		frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
359 
360 		headroom = i == 0 ? rq->buff.headroom : 0;
361 		addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
362 		wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
363 	}
364 
365 	return 0;
366 
367 free_frags:
368 	while (--i >= 0)
369 		mlx5e_put_rx_frag(rq, --frag);
370 
371 	return err;
372 }
373 
374 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
375 				     struct mlx5e_wqe_frag_info *wi)
376 {
377 	int i;
378 
379 	for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
380 		mlx5e_put_rx_frag(rq, wi);
381 }
382 
383 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
384 {
385 	if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
386 		xsk_buff_free(*wi->xskp);
387 }
388 
389 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
390 {
391 	struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
392 
393 	if (rq->xsk_pool) {
394 		mlx5e_xsk_free_rx_wqe(wi);
395 	} else {
396 		mlx5e_free_rx_wqe(rq, wi);
397 
398 		/* Avoid a second release of the wqe pages: dealloc is called
399 		 * for the same missing wqes on regular RQ flush and on regular
400 		 * RQ close. This happens when XSK RQs come into play.
401 		 */
402 		for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
403 			wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
404 	}
405 }
406 
407 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
408 {
409 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
410 	int i;
411 
412 	for (i = 0; i < wqe_bulk; i++) {
413 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
414 		struct mlx5e_wqe_frag_info *wi;
415 
416 		wi = get_frag(rq, j);
417 		/* The page is always put into the Reuse Ring, because there
418 		 * is no way to return the page to userspace when the
419 		 * interface goes down.
420 		 */
421 		mlx5e_xsk_free_rx_wqe(wi);
422 	}
423 }
424 
425 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
426 {
427 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
428 	int i;
429 
430 	for (i = 0; i < wqe_bulk; i++) {
431 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
432 		struct mlx5e_wqe_frag_info *wi;
433 
434 		wi = get_frag(rq, j);
435 		mlx5e_free_rx_wqe(rq, wi);
436 	}
437 }
438 
439 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
440 {
441 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
442 	int i;
443 
444 	for (i = 0; i < wqe_bulk; i++) {
445 		int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
446 		struct mlx5e_rx_wqe_cyc *wqe;
447 
448 		wqe = mlx5_wq_cyc_get_wqe(wq, j);
449 
450 		if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
451 			break;
452 	}
453 
454 	return i;
455 }
456 
457 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
458 {
459 	int remaining = wqe_bulk;
460 	int total_alloc = 0;
461 	int refill_alloc;
462 	int refill;
463 
464 	/* The WQE bulk is split into smaller bulks that are sized
465 	 * according to the page pool cache refill size to avoid overflowing
466 	 * the page pool cache due to too many page releases at once.
467 	 */
468 	do {
469 		refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
470 
471 		mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
472 		refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
473 		if (unlikely(refill_alloc != refill))
474 			goto err_free;
475 
476 		total_alloc += refill_alloc;
477 		remaining -= refill;
478 	} while (remaining);
479 
480 	return total_alloc;
481 
482 err_free:
483 	mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
484 
485 	for (int i = 0; i < total_alloc + refill; i++) {
486 		int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
487 		struct mlx5e_wqe_frag_info *frag;
488 
489 		frag = get_frag(rq, j);
490 		for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
491 			frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
492 	}
493 
494 	return 0;
495 }
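
/* Worked example for the refill split above, with made-up numbers: for
 * wqe_bulk = 64 and rq->wqe.info.refill_unit = 16, the loop performs four
 * free+alloc rounds of 16 WQEs each, so at most one refill unit's worth of
 * pages is pushed back into the page pool cache at a time.
 */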
496 
497 static void
498 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
499 			       struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
500 			       u32 frag_offset, u32 len)
501 {
502 	netmem_ref netmem = frag_page->netmem;
503 	skb_frag_t *frag;
504 
505 	dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
506 
507 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
508 	if (!xdp_buff_has_frags(xdp)) {
509 		/* Init on the first fragment to avoid cold cache access
510 		 * when possible.
511 		 */
512 		sinfo->nr_frags = 0;
513 		sinfo->xdp_frags_size = 0;
514 		xdp_buff_set_frags_flag(xdp);
515 	}
516 
517 	frag = &sinfo->frags[sinfo->nr_frags++];
518 	skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
519 
520 	if (netmem_is_pfmemalloc(netmem))
521 		xdp_buff_set_frag_pfmemalloc(xdp);
522 	sinfo->xdp_frags_size += len;
523 }
524 
525 static inline void
526 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
527 		   struct mlx5e_frag_page *frag_page,
528 		   u32 frag_offset, u32 len,
529 		   unsigned int truesize)
530 {
531 	dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
532 	u8 next_frag = skb_shinfo(skb)->nr_frags;
533 	netmem_ref netmem = frag_page->netmem;
534 
535 	dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
536 				rq->buff.map_dir);
537 
538 	if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
539 		skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
540 		return;
541 	}
542 
543 	frag_page->frags++;
544 	skb_add_rx_frag_netmem(skb, next_frag, netmem,
545 			       frag_offset, len, truesize);
546 }
547 
548 static inline void
549 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
550 		      netmem_ref netmem, dma_addr_t addr,
551 		      int offset_from, int dma_offset, u32 headlen)
552 {
553 	const void *from = netmem_address(netmem) + offset_from;
554 	/* Aligning len to sizeof(long) optimizes memcpy performance */
555 	unsigned int len = ALIGN(headlen, sizeof(long));
556 
557 	dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
558 				rq->buff.map_dir);
559 	skb_copy_to_linear_data(skb, from, len);
560 }
561 
562 static void
563 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
564 {
565 	bool no_xdp_xmit;
566 	int i;
567 
568 	/* A common case for AF_XDP. */
569 	if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
570 		return;
571 
572 	no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
573 
574 	if (rq->xsk_pool) {
575 		struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
576 
577 		/* The page is always put into the Reuse Ring, because there
578 		 * is no way to return the page to userspace when the interface
579 		 * goes down.
580 		 */
581 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
582 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
583 				xsk_buff_free(xsk_buffs[i]);
584 	} else {
585 		for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
586 			if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
587 				struct mlx5e_frag_page *frag_page;
588 
589 				frag_page = &wi->alloc_units.frag_pages[i];
590 				mlx5e_page_release_fragmented(rq->page_pool,
591 							      frag_page);
592 			}
593 		}
594 	}
595 }
596 
597 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
598 {
599 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
600 
601 	do {
602 		u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
603 
604 		mlx5_wq_ll_push(wq, next_wqe_index);
605 	} while (--n);
606 
607 	/* ensure wqes are visible to device before updating doorbell record */
608 	dma_wmb();
609 
610 	mlx5_wq_ll_update_db_record(wq);
611 }
612 
613 /* This function returns the size of the contiguous free space inside a bitmap
614  * that starts at bit 'first', capped at 'len', accounting for circular wrap-around.
615  */
616 static int bitmap_find_window(unsigned long *bitmap, int len,
617 			      int bitmap_size, int first)
618 {
619 	int next_one, count;
620 
621 	next_one = find_next_bit(bitmap, bitmap_size, first);
622 	if (next_one == bitmap_size) {
623 		if (bitmap_size - first >= len)
624 			return len;
625 		next_one = find_next_bit(bitmap, bitmap_size, 0);
626 		count = next_one + bitmap_size - first;
627 	} else {
628 		count = next_one - first;
629 	}
630 
631 	return min(len, count);
632 }
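
/* Worked example with hypothetical values: bitmap_size = 64, first = 60,
 * len = 16, and only bit 4 set. find_next_bit() from 60 finds no set bit
 * (returns 64); since 64 - 60 = 4 < len, the search wraps and finds bit 4,
 * giving count = 4 + 64 - 60 = 8 free entries, and min(16, 8) = 8 is
 * returned.
 */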
633 
634 static void build_ksm_umr(struct mlx5e_icosq *sq, struct mlx5e_umr_wqe *umr_wqe,
635 			  __be32 key, u16 offset, u16 ksm_len)
636 {
637 	memset(umr_wqe, 0, offsetof(struct mlx5e_umr_wqe, inline_ksms));
638 	umr_wqe->hdr.ctrl.opmod_idx_opcode =
639 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
640 			     MLX5_OPCODE_UMR);
641 	umr_wqe->hdr.ctrl.umr_mkey = key;
642 	umr_wqe->hdr.ctrl.qpn_ds = cpu_to_be32((sq->sqn << MLX5_WQE_CTRL_QPN_SHIFT)
643 					    | MLX5E_KSM_UMR_DS_CNT(ksm_len));
644 	umr_wqe->hdr.uctrl.flags = MLX5_UMR_TRANSLATION_OFFSET_EN | MLX5_UMR_INLINE;
645 	umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
646 	umr_wqe->hdr.uctrl.xlt_octowords = cpu_to_be16(ksm_len);
647 	umr_wqe->hdr.uctrl.mkey_mask     = cpu_to_be64(MLX5_MKEY_MASK_FREE);
648 }
649 
650 static struct mlx5e_frag_page *mlx5e_shampo_hd_to_frag_page(struct mlx5e_rq *rq, int header_index)
651 {
652 	BUILD_BUG_ON(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE > PAGE_SHIFT);
653 
654 	return &rq->mpwqe.shampo->pages[header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE];
655 }
656 
657 static u64 mlx5e_shampo_hd_offset(int header_index)
658 {
659 	return (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) <<
660 		MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE;
661 }
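
/* Illustration of the two helpers above under a hypothetical geometry of
 * MLX5E_SHAMPO_WQ_HEADER_PER_PAGE = 64 and 64B header entries (log size 6):
 * header_index 70 lands in shampo->pages[70 >> 6] = pages[1], at offset
 * (70 & 63) << 6 = 384 bytes within that page.
 */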
662 
663 static void mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index);
664 
665 static int mlx5e_build_shampo_hd_umr(struct mlx5e_rq *rq,
666 				     struct mlx5e_icosq *sq,
667 				     u16 ksm_entries, u16 index)
668 {
669 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
670 	u16 pi, header_offset, wqe_bbs;
671 	u32 lkey = rq->mdev->mlx5e_res.hw_objs.mkey;
672 	struct mlx5e_umr_wqe *umr_wqe;
673 	int headroom, err, i = 0;
674 
675 	headroom = rq->buff.headroom;
676 	wqe_bbs = MLX5E_KSM_UMR_WQEBBS(ksm_entries);
677 	pi = mlx5e_icosq_get_next_pi(sq, wqe_bbs);
678 	umr_wqe = mlx5_wq_cyc_get_wqe(&sq->wq, pi);
679 	build_ksm_umr(sq, umr_wqe, shampo->mkey_be, index, ksm_entries);
680 
681 	WARN_ON_ONCE(ksm_entries & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1));
682 	while (i < ksm_entries) {
683 		struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
684 		u64 addr;
685 
686 		err = mlx5e_page_alloc_fragmented(rq->hd_page_pool, frag_page);
687 		if (unlikely(err))
688 			goto err_unmap;
689 
690 		addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
691 
692 		for (int j = 0; j < MLX5E_SHAMPO_WQ_HEADER_PER_PAGE; j++) {
693 			header_offset = mlx5e_shampo_hd_offset(index++);
694 
695 			umr_wqe->inline_ksms[i++] = (struct mlx5_ksm) {
696 				.key = cpu_to_be32(lkey),
697 				.va  = cpu_to_be64(addr + header_offset + headroom),
698 			};
699 		}
700 	}
701 
702 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
703 		.wqe_type	= MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR,
704 		.num_wqebbs	= wqe_bbs,
705 		.shampo.len	= ksm_entries,
706 	};
707 
708 	shampo->pi = (shampo->pi + ksm_entries) & (shampo->hd_per_wq - 1);
709 	sq->pc += wqe_bbs;
710 	sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
711 
712 	return 0;
713 
714 err_unmap:
715 	while (i--) {
716 		--index;
717 		header_offset = mlx5e_shampo_hd_offset(index);
718 		if (!header_offset) {
719 			struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, index);
720 
721 			mlx5e_page_release_fragmented(rq->hd_page_pool,
722 						      frag_page);
723 		}
724 	}
725 
726 	rq->stats->buff_alloc_err++;
727 	return err;
728 }
729 
730 static int mlx5e_alloc_rx_hd_mpwqe(struct mlx5e_rq *rq)
731 {
732 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
733 	u16 ksm_entries, num_wqe, index, entries_before;
734 	struct mlx5e_icosq *sq = rq->icosq;
735 	int i, err, max_ksm_entries, len;
736 
737 	max_ksm_entries = ALIGN_DOWN(MLX5E_MAX_KSM_PER_WQE(rq->mdev),
738 				     MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
739 	ksm_entries = bitmap_find_window(shampo->bitmap,
740 					 shampo->hd_per_wqe,
741 					 shampo->hd_per_wq, shampo->pi);
742 	ksm_entries = ALIGN_DOWN(ksm_entries, MLX5E_SHAMPO_WQ_HEADER_PER_PAGE);
743 	if (!ksm_entries)
744 		return 0;
745 
746 	/* pi is aligned to MLX5E_SHAMPO_WQ_HEADER_PER_PAGE */
747 	index = shampo->pi;
748 	entries_before = shampo->hd_per_wq - index;
749 
750 	if (unlikely(entries_before < ksm_entries))
751 		num_wqe = DIV_ROUND_UP(entries_before, max_ksm_entries) +
752 			  DIV_ROUND_UP(ksm_entries - entries_before, max_ksm_entries);
753 	else
754 		num_wqe = DIV_ROUND_UP(ksm_entries, max_ksm_entries);
755 
756 	for (i = 0; i < num_wqe; i++) {
757 		len = (ksm_entries > max_ksm_entries) ? max_ksm_entries :
758 							ksm_entries;
759 		if (unlikely(index + len > shampo->hd_per_wq))
760 			len = shampo->hd_per_wq - index;
761 		err = mlx5e_build_shampo_hd_umr(rq, sq, len, index);
762 		if (unlikely(err))
763 			return err;
764 		index = (index + len) & (rq->mpwqe.shampo->hd_per_wq - 1);
765 		ksm_entries -= len;
766 	}
767 
768 	return 0;
769 }
770 
771 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
772 {
773 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
774 	struct mlx5e_icosq *sq = rq->icosq;
775 	struct mlx5e_frag_page *frag_page;
776 	struct mlx5_wq_cyc *wq = &sq->wq;
777 	struct mlx5e_umr_wqe *umr_wqe;
778 	u32 offset; /* 17-bit value with MTT. */
779 	u16 pi;
780 	int err;
781 	int i;
782 
783 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state)) {
784 		err = mlx5e_alloc_rx_hd_mpwqe(rq);
785 		if (unlikely(err))
786 			goto err;
787 	}
788 
789 	pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
790 	umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
791 	memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
792 
793 	frag_page = &wi->alloc_units.frag_pages[0];
794 
795 	for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
796 		dma_addr_t addr;
797 
798 		err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
799 		if (unlikely(err))
800 			goto err_unmap;
801 
802 		addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
803 		umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
804 			.ptag = cpu_to_be64(addr | MLX5_EN_WR),
805 		};
806 	}
807 
808 	/* Pad if needed, in case the value set to ucseg->xlt_octowords
809 	 * in mlx5e_build_umr_wqe() needed alignment.
810 	 */
811 	if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
812 		int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
813 			rq->mpwqe.pages_per_wqe;
814 
815 		memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
816 		       sizeof(*umr_wqe->inline_mtts) * pad);
817 	}
818 
819 	bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
820 	wi->consumed_strides = 0;
821 
822 	umr_wqe->hdr.ctrl.opmod_idx_opcode =
823 		cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
824 			    MLX5_OPCODE_UMR);
825 
826 	offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
827 	umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
828 
829 	sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
830 		.wqe_type   = MLX5E_ICOSQ_WQE_UMR_RX,
831 		.num_wqebbs = rq->mpwqe.umr_wqebbs,
832 		.umr.rq     = rq,
833 	};
834 
835 	sq->pc += rq->mpwqe.umr_wqebbs;
836 
837 	sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
838 
839 	return 0;
840 
841 err_unmap:
842 	while (--i >= 0) {
843 		frag_page--;
844 		mlx5e_page_release_fragmented(rq->page_pool, frag_page);
845 	}
846 
847 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
848 
849 err:
850 	rq->stats->buff_alloc_err++;
851 
852 	return err;
853 }
854 
855 static void
856 mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index)
857 {
858 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
859 
860 	if (((header_index + 1) & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) == 0) {
861 		struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
862 
863 		mlx5e_page_release_fragmented(rq->hd_page_pool, frag_page);
864 	}
865 	clear_bit(header_index, shampo->bitmap);
866 }
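
/* A SHAMPO header page is released only when the last header entry on it
 * is freed: with the hypothetical 64-headers-per-page geometry, freeing
 * header_index 127 satisfies ((127 + 1) & 63) == 0 and releases pages[1],
 * while freeing any of indices 64..126 only clears the bitmap bit.
 */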
867 
868 void mlx5e_shampo_dealloc_hd(struct mlx5e_rq *rq)
869 {
870 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
871 	int i;
872 
873 	for_each_set_bit(i, shampo->bitmap, rq->mpwqe.shampo->hd_per_wq)
874 		mlx5e_free_rx_shampo_hd_entry(rq, i);
875 }
876 
877 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
878 {
879 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
880 	/* This function is called on rq/netdev close. */
881 	mlx5e_free_rx_mpwqe(rq, wi);
882 
883 	/* Avoid a second release of the wqe pages: dealloc is called also
884 	 * for missing wqes on an already flushed RQ.
885 	 */
886 	bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
887 }
888 
889 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
890 {
891 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
892 	int wqe_bulk, count;
893 	bool busy = false;
894 	u16 head;
895 
896 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
897 		return false;
898 
899 	if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
900 		return false;
901 
902 	if (rq->page_pool)
903 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
904 
905 	wqe_bulk = mlx5_wq_cyc_missing(wq);
906 	head = mlx5_wq_cyc_get_head(wq);
907 
908 	/* Don't allow any newly allocated WQEs to share the same page with old
909 	 * WQEs that aren't completed yet. Stop earlier.
910 	 */
911 	wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
912 
913 	if (!rq->xsk_pool) {
914 		count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
915 	} else if (likely(!dma_dev_need_sync(rq->pdev))) {
916 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
917 		count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
918 	} else {
919 		mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
920 		/* If dma_need_sync is true, it's more efficient to call
921 		 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
922 		 * because the latter does the same check and returns only one
923 		 * frame.
924 		 */
925 		count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
926 	}
927 
928 	mlx5_wq_cyc_push_n(wq, count);
929 	if (unlikely(count != wqe_bulk)) {
930 		rq->stats->buff_alloc_err++;
931 		busy = true;
932 	}
933 
934 	/* ensure wqes are visible to device before updating doorbell record */
935 	dma_wmb();
936 
937 	mlx5_wq_cyc_update_db_record(wq);
938 
939 	return busy;
940 }
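
/* Note on the wqe_bulk trimming above, with made-up numbers: for
 * wqe_index_mask = 3 (4 WQEs sharing a page), head = 1 and 10 missing
 * WQEs, (1 + 10) & 3 = 3 is subtracted so only 7 WQEs are posted and the
 * bulk ends exactly on a page boundary (index 8), keeping new WQEs off
 * pages still referenced by incomplete old ones.
 */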
941 
942 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
943 {
944 	u16 sqcc;
945 
946 	sqcc = sq->cc;
947 
948 	while (sqcc != sq->pc) {
949 		struct mlx5e_icosq_wqe_info *wi;
950 		u16 ci;
951 
952 		ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
953 		wi = &sq->db.wqe_info[ci];
954 		sqcc += wi->num_wqebbs;
955 #ifdef CONFIG_MLX5_EN_TLS
956 		switch (wi->wqe_type) {
957 		case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
958 			mlx5e_ktls_handle_ctx_completion(wi);
959 			break;
960 		case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
961 			mlx5e_ktls_handle_get_psv_completion(wi, sq);
962 			break;
963 		}
964 #endif
965 	}
966 	sq->cc = sqcc;
967 }
968 
969 void mlx5e_shampo_fill_umr(struct mlx5e_rq *rq, int len)
970 {
971 	struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
972 	int end, from, full_len = len;
973 
974 	end = shampo->hd_per_wq;
975 	from = shampo->ci;
976 	if (from + len > end) {
977 		len -= end - from;
978 		bitmap_set(shampo->bitmap, from, end - from);
979 		from = 0;
980 	}
981 
982 	bitmap_set(shampo->bitmap, from, len);
983 	shampo->ci = (shampo->ci + full_len) & (shampo->hd_per_wq - 1);
984 }
985 
986 static void mlx5e_handle_shampo_hd_umr(struct mlx5e_shampo_umr umr,
987 				       struct mlx5e_icosq *sq)
988 {
989 	struct mlx5e_channel *c = container_of(sq, struct mlx5e_channel, icosq);
990 	/* assume 1:1 relationship between RQ and icosq */
991 	struct mlx5e_rq *rq = &c->rq;
992 
993 	mlx5e_shampo_fill_umr(rq, umr.len);
994 }
995 
996 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
997 {
998 	struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
999 	struct mlx5_cqe64 *cqe;
1000 	u16 sqcc;
1001 	int i;
1002 
1003 	if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
1004 		return 0;
1005 
1006 	cqe = mlx5_cqwq_get_cqe(&cq->wq);
1007 	if (likely(!cqe))
1008 		return 0;
1009 
1010 	/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
1011 	 * otherwise a cq overrun may occur
1012 	 */
1013 	sqcc = sq->cc;
1014 
1015 	i = 0;
1016 	do {
1017 		u16 wqe_counter;
1018 		bool last_wqe;
1019 
1020 		mlx5_cqwq_pop(&cq->wq);
1021 
1022 		wqe_counter = be16_to_cpu(cqe->wqe_counter);
1023 
1024 		do {
1025 			struct mlx5e_icosq_wqe_info *wi;
1026 			u16 ci;
1027 
1028 			last_wqe = (sqcc == wqe_counter);
1029 
1030 			ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
1031 			wi = &sq->db.wqe_info[ci];
1032 			sqcc += wi->num_wqebbs;
1033 
1034 			if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
1035 				netdev_WARN_ONCE(cq->netdev,
1036 						 "Bad OP in ICOSQ CQE: 0x%x\n",
1037 						 get_cqe_opcode(cqe));
1038 				mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
1039 						     (struct mlx5_err_cqe *)cqe);
1040 				mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
1041 				if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
1042 					queue_work(cq->workqueue, &sq->recover_work);
1043 				break;
1044 			}
1045 
1046 			switch (wi->wqe_type) {
1047 			case MLX5E_ICOSQ_WQE_UMR_RX:
1048 				wi->umr.rq->mpwqe.umr_completed++;
1049 				break;
1050 			case MLX5E_ICOSQ_WQE_NOP:
1051 				break;
1052 			case MLX5E_ICOSQ_WQE_SHAMPO_HD_UMR:
1053 				mlx5e_handle_shampo_hd_umr(wi->shampo, sq);
1054 				break;
1055 #ifdef CONFIG_MLX5_EN_TLS
1056 			case MLX5E_ICOSQ_WQE_UMR_TLS:
1057 				break;
1058 			case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
1059 				mlx5e_ktls_handle_ctx_completion(wi);
1060 				break;
1061 			case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
1062 				mlx5e_ktls_handle_get_psv_completion(wi, sq);
1063 				break;
1064 #endif
1065 			default:
1066 				netdev_WARN_ONCE(cq->netdev,
1067 						 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
1068 						 wi->wqe_type);
1069 			}
1070 		} while (!last_wqe);
1071 	} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
1072 
1073 	sq->cc = sqcc;
1074 
1075 	mlx5_cqwq_update_db_record(&cq->wq);
1076 
1077 	return i;
1078 }
1079 
1080 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
1081 {
1082 	struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
1083 	u8  umr_completed = rq->mpwqe.umr_completed;
1084 	struct mlx5e_icosq *sq = rq->icosq;
1085 	int alloc_err = 0;
1086 	u8  missing, i;
1087 	u16 head;
1088 
1089 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
1090 		return false;
1091 
1092 	if (umr_completed) {
1093 		mlx5e_post_rx_mpwqe(rq, umr_completed);
1094 		rq->mpwqe.umr_in_progress -= umr_completed;
1095 		rq->mpwqe.umr_completed = 0;
1096 	}
1097 
1098 	missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
1099 
1100 	if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
1101 		rq->stats->congst_umr++;
1102 
1103 	if (likely(missing < rq->mpwqe.min_wqe_bulk))
1104 		return false;
1105 
1106 	if (rq->page_pool)
1107 		page_pool_nid_changed(rq->page_pool, numa_mem_id());
1108 	if (rq->hd_page_pool)
1109 		page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
1110 
1111 	head = rq->mpwqe.actual_wq_head;
1112 	i = missing;
1113 	do {
1114 		struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
1115 
1116 		/* Deferred free for better page pool cache usage. */
1117 		mlx5e_free_rx_mpwqe(rq, wi);
1118 
1119 		alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
1120 					   mlx5e_alloc_rx_mpwqe(rq, head);
1121 
1122 		if (unlikely(alloc_err))
1123 			break;
1124 		head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
1125 	} while (--i);
1126 
1127 	rq->mpwqe.umr_last_bulk    = missing - i;
1128 	if (sq->doorbell_cseg) {
1129 		mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
1130 		sq->doorbell_cseg = NULL;
1131 	}
1132 
1133 	rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
1134 	rq->mpwqe.actual_wq_head   = head;
1135 
1136 	/* If XSK Fill Ring doesn't have enough frames, report the error, so
1137 	 * that one of the actions can be performed:
1138 	 * 1. If need_wakeup is used, signal that the application has to kick
1139 	 * the driver when it refills the Fill Ring.
1140 	 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
1141 	 */
1142 	if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
1143 		return true;
1144 
1145 	return false;
1146 }
1147 
1148 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
1149 {
1150 	u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
1151 	u8 tcp_ack     = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
1152 			 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
1153 
1154 	tcp->check                      = 0;
1155 	tcp->psh                        = get_cqe_lro_tcppsh(cqe);
1156 
1157 	if (tcp_ack) {
1158 		tcp->ack                = 1;
1159 		tcp->ack_seq            = cqe->lro.ack_seq_num;
1160 		tcp->window             = cqe->lro.tcp_win;
1161 	}
1162 }
1163 
1164 static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
1165 					 struct mlx5_cqe64 *cqe,
1166 					 u32 cqe_bcnt)
1167 {
1168 	struct ethhdr	*eth = (struct ethhdr *)(skb->data);
1169 	struct tcphdr	*tcp;
1170 	int network_depth = 0;
1171 	__wsum check;
1172 	__be16 proto;
1173 	u16 tot_len;
1174 	void *ip_p;
1175 
1176 	proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1177 
1178 	tot_len = cqe_bcnt - network_depth;
1179 	ip_p = skb->data + network_depth;
1180 
1181 	if (proto == htons(ETH_P_IP)) {
1182 		struct iphdr *ipv4 = ip_p;
1183 
1184 		tcp = ip_p + sizeof(struct iphdr);
1185 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1186 
1187 		ipv4->ttl               = cqe->lro.min_ttl;
1188 		ipv4->tot_len           = cpu_to_be16(tot_len);
1189 		ipv4->check             = 0;
1190 		ipv4->check             = ip_fast_csum((unsigned char *)ipv4,
1191 						       ipv4->ihl);
1192 
1193 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1194 		check = csum_partial(tcp, tcp->doff * 4,
1195 				     csum_unfold((__force __sum16)cqe->check_sum));
1196 		/* Almost done, don't forget the pseudo header */
1197 		tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
1198 					  ipv4->saddr, ipv4->daddr, check);
1199 	} else {
1200 		u16 payload_len = tot_len - sizeof(struct ipv6hdr);
1201 		struct ipv6hdr *ipv6 = ip_p;
1202 
1203 		tcp = ip_p + sizeof(struct ipv6hdr);
1204 		skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1205 
1206 		ipv6->hop_limit         = cqe->lro.min_ttl;
1207 		ipv6->payload_len       = cpu_to_be16(payload_len);
1208 
1209 		mlx5e_lro_update_tcp_hdr(cqe, tcp);
1210 		check = csum_partial(tcp, tcp->doff * 4,
1211 				     csum_unfold((__force __sum16)cqe->check_sum));
1212 		/* Almost done, don't forget the pseudo header */
1213 		tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
1214 					  &ipv6->daddr, check);
1215 	}
1216 
1217 	return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
1218 }
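
/* The value returned above is the total header length (the offset of the
 * TCP payload), which the caller uses to derive gso_size. Rough example
 * with made-up numbers: cqe_bcnt = 7254, Eth + IPv4 + TCP headers of
 * 14 + 20 + 20 = 54 bytes and lro_num_seg = 5 give
 * gso_size = DIV_ROUND_UP(7254 - 54, 5) = 1440.
 */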
1219 
1220 static void *mlx5e_shampo_get_packet_hd(struct mlx5e_rq *rq, u16 header_index)
1221 {
1222 	struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
1223 	u16 head_offset = mlx5e_shampo_hd_offset(header_index) + rq->buff.headroom;
1224 
1225 	return netmem_address(frag_page->netmem) + head_offset;
1226 }
1227 
1228 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1229 {
1230 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1231 	struct sk_buff *skb = rq->hw_gro_data->skb;
1232 	struct udphdr *uh;
1233 
1234 	uh = (struct udphdr *)(skb->data + udp_off);
1235 	uh->len = htons(skb->len - udp_off);
1236 
1237 	if (uh->check)
1238 		uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
1239 					  ipv4->daddr, 0);
1240 
1241 	skb->csum_start = (unsigned char *)uh - skb->head;
1242 	skb->csum_offset = offsetof(struct udphdr, check);
1243 
1244 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1245 }
1246 
1247 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1248 {
1249 	int udp_off = rq->hw_gro_data->fk.control.thoff;
1250 	struct sk_buff *skb = rq->hw_gro_data->skb;
1251 	struct udphdr *uh;
1252 
1253 	uh = (struct udphdr *)(skb->data + udp_off);
1254 	uh->len = htons(skb->len - udp_off);
1255 
1256 	if (uh->check)
1257 		uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
1258 					  &ipv6->daddr, 0);
1259 
1260 	skb->csum_start = (unsigned char *)uh - skb->head;
1261 	skb->csum_offset = offsetof(struct udphdr, check);
1262 
1263 	skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1264 }
1265 
1266 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1267 					      struct tcphdr *skb_tcp_hd)
1268 {
1269 	u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1270 	struct tcphdr *last_tcp_hd;
1271 	void *last_hd_addr;
1272 
1273 	last_hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
1274 	last_tcp_hd =  last_hd_addr + ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1275 	tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
1276 }
1277 
1278 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1279 					     struct mlx5_cqe64 *cqe, bool match)
1280 {
1281 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1282 	struct sk_buff *skb = rq->hw_gro_data->skb;
1283 	struct tcphdr *tcp;
1284 
1285 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1286 	if (match)
1287 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1288 
1289 	tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
1290 				   ipv4->daddr, 0);
1291 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
1292 	if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id)
1293 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID;
1294 
1295 	skb->csum_start = (unsigned char *)tcp - skb->head;
1296 	skb->csum_offset = offsetof(struct tcphdr, check);
1297 
1298 	if (tcp->cwr)
1299 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1300 }
1301 
1302 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1303 					     struct mlx5_cqe64 *cqe, bool match)
1304 {
1305 	int tcp_off = rq->hw_gro_data->fk.control.thoff;
1306 	struct sk_buff *skb = rq->hw_gro_data->skb;
1307 	struct tcphdr *tcp;
1308 
1309 	tcp = (struct tcphdr *)(skb->data + tcp_off);
1310 	if (match)
1311 		mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1312 
1313 	tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
1314 				   &ipv6->daddr, 0);
1315 	skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
1316 	skb->csum_start = (unsigned char *)tcp - skb->head;
1317 	skb->csum_offset = offsetof(struct tcphdr, check);
1318 
1319 	if (tcp->cwr)
1320 		skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1321 }
1322 
1323 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1324 {
1325 	bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1326 	struct sk_buff *skb = rq->hw_gro_data->skb;
1327 
1328 	skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
1329 	skb->ip_summed = CHECKSUM_PARTIAL;
1330 
1331 	if (is_ipv4) {
1332 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1333 		struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
1334 		__be16 newlen = htons(skb->len - nhoff);
1335 
1336 		csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
1337 		ipv4->tot_len = newlen;
1338 
1339 		if (ipv4->protocol == IPPROTO_TCP)
1340 			mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1341 		else
1342 			mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1343 	} else {
1344 		int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1345 		struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
1346 
1347 		ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
1348 
1349 		if (ipv6->nexthdr == IPPROTO_TCP)
1350 			mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1351 		else
1352 			mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1353 	}
1354 }
1355 
1356 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
1357 				      struct sk_buff *skb)
1358 {
1359 	u8 cht = cqe->rss_hash_type;
1360 	int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
1361 		 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
1362 					    PKT_HASH_TYPE_NONE;
1363 	skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
1364 }
1365 
1366 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
1367 					__be16 *proto)
1368 {
1369 	*proto = ((struct ethhdr *)skb->data)->h_proto;
1370 	*proto = __vlan_get_protocol(skb, *proto, network_depth);
1371 
1372 	if (*proto == htons(ETH_P_IP))
1373 		return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
1374 
1375 	if (*proto == htons(ETH_P_IPV6))
1376 		return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
1377 
1378 	return false;
1379 }
1380 
1381 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1382 {
1383 	int network_depth = 0;
1384 	__be16 proto;
1385 	void *ip;
1386 	int rc;
1387 
1388 	if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
1389 		return;
1390 
1391 	ip = skb->data + network_depth;
1392 	rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
1393 					 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
1394 
1395 	rq->stats->ecn_mark += !!rc;
1396 }
1397 
1398 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
1399 {
1400 	void *ip_p = skb->data + network_depth;
1401 
1402 	return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
1403 					    ((struct ipv6hdr *)ip_p)->nexthdr;
1404 }
1405 
1406 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1407 
1408 #define MAX_PADDING 8
1409 
1410 static void
1411 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
1412 		       struct mlx5e_rq_stats *stats)
1413 {
1414 	stats->csum_complete_tail_slow++;
1415 	skb->csum = csum_block_add(skb->csum,
1416 				   skb_checksum(skb, offset, len, 0),
1417 				   offset);
1418 }
1419 
1420 static void
1421 tail_padding_csum(struct sk_buff *skb, int offset,
1422 		  struct mlx5e_rq_stats *stats)
1423 {
1424 	u8 tail_padding[MAX_PADDING];
1425 	int len = skb->len - offset;
1426 	void *tail;
1427 
1428 	if (unlikely(len > MAX_PADDING)) {
1429 		tail_padding_csum_slow(skb, offset, len, stats);
1430 		return;
1431 	}
1432 
1433 	tail = skb_header_pointer(skb, offset, len, tail_padding);
1434 	if (unlikely(!tail)) {
1435 		tail_padding_csum_slow(skb, offset, len, stats);
1436 		return;
1437 	}
1438 
1439 	stats->csum_complete_tail++;
1440 	skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
1441 }
1442 
1443 static void
1444 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1445 		     struct mlx5e_rq_stats *stats)
1446 {
1447 	struct ipv6hdr *ip6;
1448 	struct iphdr   *ip4;
1449 	int pkt_len;
1450 
1451 	/* Fixup vlan headers, if any */
1452 	if (network_depth > ETH_HLEN)
1453 		/* CQE csum is calculated from the IP header and does
1454 		 * not cover VLAN headers (if present). This will add
1455 		 * the checksum manually.
1456 		 */
1457 		skb->csum = csum_partial(skb->data + ETH_HLEN,
1458 					 network_depth - ETH_HLEN,
1459 					 skb->csum);
1460 
1461 	/* Fixup tail padding, if any */
1462 	switch (proto) {
1463 	case htons(ETH_P_IP):
1464 		ip4 = (struct iphdr *)(skb->data + network_depth);
1465 		pkt_len = network_depth + ntohs(ip4->tot_len);
1466 		break;
1467 	case htons(ETH_P_IPV6):
1468 		ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1469 		pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1470 		break;
1471 	default:
1472 		return;
1473 	}
1474 
1475 	if (likely(pkt_len >= skb->len))
1476 		return;
1477 
1478 	tail_padding_csum(skb, pkt_len, stats);
1479 }
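
/* Tail-padding example for the fixup above, with a hypothetical frame: a
 * 100-byte IPv4 datagram delivered with 4 trailing padding bytes gives
 * pkt_len = 14 + 100 = 114 < skb->len = 118, so the checksum of the 4
 * padding bytes at offset 114 is folded into skb->csum and
 * CHECKSUM_COMPLETE stays correct.
 */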
1480 
1481 static inline void mlx5e_handle_csum(struct net_device *netdev,
1482 				     struct mlx5_cqe64 *cqe,
1483 				     struct mlx5e_rq *rq,
1484 				     struct sk_buff *skb,
1485 				     bool   lro)
1486 {
1487 	struct mlx5e_rq_stats *stats = rq->stats;
1488 	int network_depth = 0;
1489 	__be16 proto;
1490 
1491 	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1492 		goto csum_none;
1493 
1494 	if (lro) {
1495 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1496 		stats->csum_unnecessary++;
1497 		return;
1498 	}
1499 
1500 	/* True when explicitly set via priv flag, or XDP prog is loaded */
1501 	if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1502 	    get_cqe_tls_offload(cqe))
1503 		goto csum_unnecessary;
1504 
1505 	/* CQE csum doesn't cover padding octets in short ethernet
1506 	 * frames. And the pad field is appended prior to calculating
1507 	 * and appending the FCS field.
1508 	 *
1509 	 * Detecting these padded frames requires verifying and parsing
1510 	 * IP headers, so we simply force all those small frames to be
1511 	 * CHECKSUM_UNNECESSARY even if they are not padded.
1512 	 */
1513 	if (short_frame(skb->len))
1514 		goto csum_unnecessary;
1515 
1516 	if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1517 		if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1518 			goto csum_unnecessary;
1519 
1520 		stats->csum_complete++;
1521 		skb->ip_summed = CHECKSUM_COMPLETE;
1522 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1523 
1524 		if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1525 			return; /* CQE csum covers all received bytes */
1526 
1527 		/* csum might need some fixups ...*/
1528 		mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1529 		return;
1530 	}
1531 
1532 csum_unnecessary:
1533 	if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1534 		   (cqe->hds_ip_ext & CQE_L4_OK))) {
1535 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1536 		if (cqe_is_tunneled(cqe)) {
1537 			skb->csum_level = 1;
1538 			skb->encapsulation = 1;
1539 			stats->csum_unnecessary_inner++;
1540 			return;
1541 		}
1542 		stats->csum_unnecessary++;
1543 		return;
1544 	}
1545 csum_none:
1546 	skb->ip_summed = CHECKSUM_NONE;
1547 	stats->csum_none++;
1548 }
1549 
1550 #define MLX5E_CE_BIT_MASK 0x80
1551 
1552 static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1553 				      u32 cqe_bcnt,
1554 				      struct mlx5e_rq *rq,
1555 				      struct sk_buff *skb)
1556 {
1557 	u8 lro_num_seg = be32_to_cpu(cqe->srqn) >> 24;
1558 	struct mlx5e_rq_stats *stats = rq->stats;
1559 	struct net_device *netdev = rq->netdev;
1560 
1561 	skb->mac_len = ETH_HLEN;
1562 
1563 	if (unlikely(get_cqe_tls_offload(cqe)))
1564 		mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1565 
1566 	if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1567 		mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
1568 						  be32_to_cpu(cqe->ft_metadata));
1569 
1570 	if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1571 		mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1572 
1573 	if (lro_num_seg > 1) {
1574 		unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1575 
1576 		skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
1577 		skb_shinfo(skb)->gso_segs = lro_num_seg;
1578 		/* Subtract one since we already counted this as one
1579 		 * "regular" packet in mlx5e_complete_rx_cqe()
1580 		 */
1581 		stats->packets += lro_num_seg - 1;
1582 		stats->lro_packets++;
1583 		stats->lro_bytes += cqe_bcnt;
1584 	}
1585 
1586 	if (unlikely(mlx5e_rx_hw_stamp(rq->tstamp)))
1587 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1588 								  rq->clock, get_cqe_ts(cqe));
1589 	skb_record_rx_queue(skb, rq->ix);
1590 
1591 	if (likely(netdev->features & NETIF_F_RXHASH))
1592 		mlx5e_skb_set_hash(cqe, skb);
1593 
1594 	if (cqe_has_vlan(cqe)) {
1595 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1596 				       be16_to_cpu(cqe->vlan_info));
1597 		stats->removed_vlan_packets++;
1598 	}
1599 
1600 	skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1601 
1602 	mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1603 	/* check the ECN CE bit in the CQE - the MSB of the ml_path field */
1604 	if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1605 		mlx5e_enable_ecn(rq, skb);
1606 
1607 	skb->protocol = eth_type_trans(skb, netdev);
1608 
1609 	if (unlikely(mlx5e_skb_is_multicast(skb)))
1610 		stats->mcast_packets++;
1611 }
1612 
1613 static void mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1614 					 struct mlx5_cqe64 *cqe,
1615 					 u32 cqe_bcnt,
1616 					 struct sk_buff *skb)
1617 {
1618 	struct mlx5e_rq_stats *stats = rq->stats;
1619 
1620 	stats->packets++;
1621 	stats->bytes += cqe_bcnt;
1622 	if (NAPI_GRO_CB(skb)->count != 1)
1623 		return;
1624 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1625 	skb_reset_network_header(skb);
1626 	if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1627 		napi_gro_receive(rq->cq.napi, skb);
1628 		rq->hw_gro_data->skb = NULL;
1629 	}
1630 }
1631 
1632 static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1633 					 struct mlx5_cqe64 *cqe,
1634 					 u32 cqe_bcnt,
1635 					 struct sk_buff *skb)
1636 {
1637 	struct mlx5e_rq_stats *stats = rq->stats;
1638 
1639 	stats->packets++;
1640 	stats->bytes += cqe_bcnt;
1641 	mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1642 }
1643 
1644 static inline
1645 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1646 				       u32 frag_size, u16 headroom,
1647 				       u32 cqe_bcnt, u32 metasize)
1648 {
1649 	struct sk_buff *skb = napi_build_skb(va, frag_size);
1650 
1651 	if (unlikely(!skb)) {
1652 		rq->stats->buff_alloc_err++;
1653 		return NULL;
1654 	}
1655 
1656 	skb_reserve(skb, headroom);
1657 	skb_put(skb, cqe_bcnt);
1658 
1659 	if (metasize)
1660 		skb_metadata_set(skb, metasize);
1661 
1662 	return skb;
1663 }
1664 
1665 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1666 			     void *va, u16 headroom, u32 frame_sz, u32 len,
1667 			     struct mlx5e_xdp_buff *mxbuf)
1668 {
1669 	xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1670 	xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1671 	mxbuf->cqe = cqe;
1672 	mxbuf->rq = rq;
1673 }
1674 
1675 static struct sk_buff *
1676 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1677 			  struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1678 {
1679 	struct mlx5e_frag_page *frag_page = wi->frag_page;
1680 	u16 rx_headroom = rq->buff.headroom;
1681 	struct bpf_prog *prog;
1682 	struct sk_buff *skb;
1683 	u32 metasize = 0;
1684 	void *va, *data;
1685 	dma_addr_t addr;
1686 	u32 frag_size;
1687 
1688 	va             = netmem_address(frag_page->netmem) + wi->offset;
1689 	data           = va + rx_headroom;
1690 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1691 
1692 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1693 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1694 				      frag_size, rq->buff.map_dir);
1695 	net_prefetch(data);
1696 
1697 	prog = rcu_dereference(rq->xdp_prog);
1698 	if (prog) {
1699 		struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1700 
1701 		net_prefetchw(va); /* xdp_frame data area */
1702 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1703 				 cqe_bcnt, mxbuf);
1704 		if (mlx5e_xdp_handle(rq, prog, mxbuf))
1705 			return NULL; /* page/packet was consumed by XDP */
1706 
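		/* The XDP program may have moved data/data_end, e.g. via
		 * bpf_xdp_adjust_head() or bpf_xdp_adjust_meta(); rederive
		 * headroom, metadata size and byte count from the xdp_buff.
		 */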
1707 		rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
1708 		metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
1709 		cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
1710 	}
1711 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1712 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1713 	if (unlikely(!skb))
1714 		return NULL;
1715 
1716 	/* queue up for recycling/reuse */
1717 	skb_mark_for_recycle(skb);
1718 	frag_page->frags++;
1719 
1720 	return skb;
1721 }
1722 
1723 static struct sk_buff *
1724 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1725 			     struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1726 {
1727 	struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1728 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1729 	struct mlx5e_wqe_frag_info *head_wi = wi;
1730 	u16 rx_headroom = rq->buff.headroom;
1731 	struct mlx5e_frag_page *frag_page;
1732 	struct skb_shared_info *sinfo;
1733 	u32 frag_consumed_bytes;
1734 	struct bpf_prog *prog;
1735 	struct sk_buff *skb;
1736 	dma_addr_t addr;
1737 	u32 truesize;
1738 	void *va;
1739 
1740 	frag_page = wi->frag_page;
1741 
1742 	va = netmem_address(frag_page->netmem) + wi->offset;
1743 	frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1744 
1745 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1746 	dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1747 				      rq->buff.frame0_sz, rq->buff.map_dir);
1748 	net_prefetchw(va); /* xdp_frame data area */
1749 	net_prefetch(va + rx_headroom);
1750 
1751 	mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1752 			 frag_consumed_bytes, mxbuf);
1753 	sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
1754 	truesize = 0;
1755 
1756 	cqe_bcnt -= frag_consumed_bytes;
1757 	frag_info++;
1758 	wi++;
1759 
1760 	while (cqe_bcnt) {
1761 		frag_page = wi->frag_page;
1762 
1763 		frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1764 
1765 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
1766 					       frag_page, wi->offset,
1767 					       frag_consumed_bytes);
1768 		truesize += frag_info->frag_stride;
1769 
1770 		cqe_bcnt -= frag_consumed_bytes;
1771 		frag_info++;
1772 		wi++;
1773 	}
1774 
1775 	prog = rcu_dereference(rq->xdp_prog);
1776 	if (prog && mlx5e_xdp_handle(rq, prog, mxbuf)) {
1777 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
1778 			struct mlx5e_wqe_frag_info *pwi;
1779 
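			/* XDP_TX/XDP_REDIRECT still holds the pages; take an
			 * extra fragment reference on each so they are not
			 * recycled before the transmit completes.
			 */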
1780 			for (pwi = head_wi; pwi < wi; pwi++)
1781 				pwi->frag_page->frags++;
1782 		}
1783 		return NULL; /* page/packet was consumed by XDP */
1784 	}
1785 
1786 	skb = mlx5e_build_linear_skb(
1787 		rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
1788 		mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
1789 		mxbuf->xdp.data_end - mxbuf->xdp.data,
1790 		mxbuf->xdp.data - mxbuf->xdp.data_meta);
1791 	if (unlikely(!skb))
1792 		return NULL;
1793 
1794 	skb_mark_for_recycle(skb);
1795 	head_wi->frag_page->frags++;
1796 
1797 	if (xdp_buff_has_frags(&mxbuf->xdp)) {
1798 		/* sinfo->nr_frags is reset by build_skb, calculate again. */
1799 		xdp_update_skb_shared_info(skb, wi - head_wi - 1,
1800 					   sinfo->xdp_frags_size, truesize,
1801 					   xdp_buff_is_frag_pfmemalloc(
1802 						&mxbuf->xdp));
1803 
1804 		for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1805 			pwi->frag_page->frags++;
1806 	}
1807 
1808 	return skb;
1809 }
1810 
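/* Dump the error CQE and kick the RQ recovery worker for syndromes that
 * indicate the queue cannot make progress on its own.
 */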
1811 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1812 {
1813 	struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1814 	struct mlx5e_priv *priv = rq->priv;
1815 
1816 	if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1817 	    !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1818 		mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1819 		queue_work(priv->wq, &rq->recover_work);
1820 	}
1821 }
1822 
1823 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1824 {
1825 	trigger_report(rq, cqe);
1826 	rq->stats->wqe_err++;
1827 }
1828 
1829 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1830 {
1831 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1832 	struct mlx5e_wqe_frag_info *wi;
1833 	struct sk_buff *skb;
1834 	u32 cqe_bcnt;
1835 	u16 ci;
1836 
1837 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1838 	wi       = get_frag(rq, ci);
1839 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1840 
1841 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1842 		mlx5e_handle_rx_err_cqe(rq, cqe);
1843 		goto wq_cyc_pop;
1844 	}
1845 
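	/* INDIRECT_CALL_3() compares against the likely targets so the fast
	 * path avoids a retpoline-expensive indirect call.
	 */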
1846 	skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1847 			      mlx5e_skb_from_cqe_linear,
1848 			      mlx5e_skb_from_cqe_nonlinear,
1849 			      mlx5e_xsk_skb_from_cqe_linear,
1850 			      rq, wi, cqe, cqe_bcnt);
1851 	if (!skb) {
1852 		/* no skb: the packet was probably consumed by XDP */
1853 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1854 			wi->frag_page->frags++;
1855 		goto wq_cyc_pop;
1856 	}
1857 
1858 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1859 
1860 	if (mlx5e_cqe_regb_chain(cqe))
1861 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1862 			dev_kfree_skb_any(skb);
1863 			goto wq_cyc_pop;
1864 		}
1865 
1866 	napi_gro_receive(rq->cq.napi, skb);
1867 
1868 wq_cyc_pop:
1869 	mlx5_wq_cyc_pop(wq);
1870 }
1871 
1872 #ifdef CONFIG_MLX5_ESWITCH
1873 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1874 {
1875 	struct net_device *netdev = rq->netdev;
1876 	struct mlx5e_priv *priv = netdev_priv(netdev);
1877 	struct mlx5e_rep_priv *rpriv  = priv->ppriv;
1878 	struct mlx5_eswitch_rep *rep = rpriv->rep;
1879 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1880 	struct mlx5e_wqe_frag_info *wi;
1881 	struct sk_buff *skb;
1882 	u32 cqe_bcnt;
1883 	u16 ci;
1884 
1885 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1886 	wi       = get_frag(rq, ci);
1887 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1888 
1889 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1890 		mlx5e_handle_rx_err_cqe(rq, cqe);
1891 		goto wq_cyc_pop;
1892 	}
1893 
1894 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1895 			      mlx5e_skb_from_cqe_linear,
1896 			      mlx5e_skb_from_cqe_nonlinear,
1897 			      rq, wi, cqe, cqe_bcnt);
1898 	if (!skb) {
1899 		/* no skb: the packet was probably consumed by XDP */
1900 		if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1901 			wi->frag_page->frags++;
1902 		goto wq_cyc_pop;
1903 	}
1904 
1905 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1906 
1907 	if (rep->vlan && skb_vlan_tag_present(skb))
1908 		skb_vlan_pop(skb);
1909 
1910 	mlx5e_rep_tc_receive(cqe, rq, skb);
1911 
1912 wq_cyc_pop:
1913 	mlx5_wq_cyc_pop(wq);
1914 }
1915 
1916 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1917 {
1918 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
1919 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
1920 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1921 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
1922 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
1923 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1924 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
1925 	struct mlx5e_rx_wqe_ll *wqe;
1926 	struct mlx5_wq_ll *wq;
1927 	struct sk_buff *skb;
1928 	u16 cqe_bcnt;
1929 
1930 	wi->consumed_strides += cstrides;
1931 
1932 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1933 		mlx5e_handle_rx_err_cqe(rq, cqe);
1934 		goto mpwrq_cqe_out;
1935 	}
1936 
1937 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1938 		struct mlx5e_rq_stats *stats = rq->stats;
1939 
1940 		stats->mpwqe_filler_cqes++;
1941 		stats->mpwqe_filler_strides += cstrides;
1942 		goto mpwrq_cqe_out;
1943 	}
1944 
1945 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1946 
1947 	skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1948 			      mlx5e_skb_from_cqe_mpwrq_linear,
1949 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
1950 			      rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1951 	if (!skb)
1952 		goto mpwrq_cqe_out;
1953 
1954 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
1955 
1956 	mlx5e_rep_tc_receive(cqe, rq, skb);
1957 
1958 mpwrq_cqe_out:
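	/* A multi-packet WQE is popped only once all of its strides have
	 * been consumed.
	 */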
1959 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1960 		return;
1961 
1962 	wq  = &rq->mpwqe.wq;
1963 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1964 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1965 }
1966 
1967 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1968 	.handle_rx_cqe       = mlx5e_handle_rx_cqe_rep,
1969 	.handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1970 };
1971 #endif
1972 
1973 static void
1974 mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1975 			   struct mlx5e_frag_page *frag_page,
1976 			   u32 data_bcnt, u32 data_offset)
1977 {
1978 	net_prefetchw(skb->data);
1979 
1980 	do {
1981 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1982 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - data_offset, data_bcnt);
1983 		unsigned int truesize = pg_consumed_bytes;
1984 
1985 		mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
1986 				   pg_consumed_bytes, truesize);
1987 
1988 		data_bcnt -= pg_consumed_bytes;
1989 		data_offset = 0;
1990 		frag_page++;
1991 	} while (data_bcnt);
1992 }
1993 
1994 static struct sk_buff *
1995 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1996 				   struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
1997 				   u32 page_idx)
1998 {
1999 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2000 	u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
2001 	struct mlx5e_frag_page *head_page = frag_page;
2002 	struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
2003 	u32 frag_offset    = head_offset;
2004 	u32 byte_cnt       = cqe_bcnt;
2005 	struct skb_shared_info *sinfo;
2006 	unsigned int truesize = 0;
2007 	struct bpf_prog *prog;
2008 	struct sk_buff *skb;
2009 	u32 linear_frame_sz;
2010 	u16 linear_data_len;
2011 	u16 linear_hr;
2012 	void *va;
2013 
2014 	prog = rcu_dereference(rq->xdp_prog);
2015 
2016 	if (prog) {
2017 		/* area for bpf_xdp_[store|load]_bytes */
2018 		net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
2019 		if (unlikely(mlx5e_page_alloc_fragmented(rq->page_pool,
2020 							 &wi->linear_page))) {
2021 			rq->stats->buff_alloc_err++;
2022 			return NULL;
2023 		}
2024 
2025 		va = netmem_address(wi->linear_page.netmem);
2026 		net_prefetchw(va); /* xdp_frame data area */
2027 		linear_hr = XDP_PACKET_HEADROOM;
2028 		linear_data_len = 0;
2029 		linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
2030 	} else {
2031 		skb = napi_alloc_skb(rq->cq.napi,
2032 				     ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
2033 		if (unlikely(!skb)) {
2034 			rq->stats->buff_alloc_err++;
2035 			return NULL;
2036 		}
2037 		skb_mark_for_recycle(skb);
2038 		va = skb->head;
2039 		net_prefetchw(va); /* xdp_frame data area */
2040 		net_prefetchw(skb->data);
2041 
2042 		frag_offset += headlen;
2043 		byte_cnt -= headlen;
2044 		linear_hr = skb_headroom(skb);
2045 		linear_data_len = headlen;
2046 		linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
2047 		if (unlikely(frag_offset >= PAGE_SIZE)) {
2048 			frag_page++;
2049 			frag_offset -= PAGE_SIZE;
2050 		}
2051 	}
2052 
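	/* mxbuf describes the linear part: with XDP it is the dedicated
	 * linear_page (all payload stays in fragments), without XDP it is
	 * the skb head, into which headlen bytes are copied further down.
	 */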
2053 	mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
2054 			 linear_data_len, mxbuf);
2055 
2056 	sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
2057 
2058 	while (byte_cnt) {
2059 		/* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
2060 		u32 pg_consumed_bytes = min_t(u32, PAGE_SIZE - frag_offset, byte_cnt);
2061 
2062 		if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
2063 			truesize += pg_consumed_bytes;
2064 		else
2065 			truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
2066 
2067 		mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
2068 					       frag_page, frag_offset,
2069 					       pg_consumed_bytes);
2070 		byte_cnt -= pg_consumed_bytes;
2071 		frag_offset = 0;
2072 		frag_page++;
2073 	}
2074 
2075 	if (prog) {
2076 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2077 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2078 				struct mlx5e_frag_page *pfp;
2079 
2080 				for (pfp = head_page; pfp < frag_page; pfp++)
2081 					pfp->frags++;
2082 
2083 				wi->linear_page.frags++;
2084 			}
2085 			mlx5e_page_release_fragmented(rq->page_pool,
2086 						      &wi->linear_page);
2087 			return NULL; /* page/packet was consumed by XDP */
2088 		}
2089 
2090 		skb = mlx5e_build_linear_skb(
2091 			rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
2092 			mxbuf->xdp.data - mxbuf->xdp.data_hard_start, 0,
2093 			mxbuf->xdp.data - mxbuf->xdp.data_meta);
2094 		if (unlikely(!skb)) {
2095 			mlx5e_page_release_fragmented(rq->page_pool,
2096 						      &wi->linear_page);
2097 			return NULL;
2098 		}
2099 
2100 		skb_mark_for_recycle(skb);
2101 		wi->linear_page.frags++;
2102 		mlx5e_page_release_fragmented(rq->page_pool, &wi->linear_page);
2103 
2104 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
2105 			struct mlx5e_frag_page *pagep;
2106 
2107 			/* sinfo->nr_frags is reset by build_skb, calculate again. */
2108 			xdp_update_skb_shared_info(skb, frag_page - head_page,
2109 						   sinfo->xdp_frags_size, truesize,
2110 						   xdp_buff_is_frag_pfmemalloc(
2111 							&mxbuf->xdp));
2112 
2113 			pagep = head_page;
2114 			do
2115 				pagep->frags++;
2116 			while (++pagep < frag_page);
2117 		}
2118 		__pskb_pull_tail(skb, headlen);
2119 	} else {
2120 		dma_addr_t addr;
2121 
2122 		if (xdp_buff_has_frags(&mxbuf->xdp)) {
2123 			struct mlx5e_frag_page *pagep;
2124 
2125 			xdp_update_skb_shared_info(skb, sinfo->nr_frags,
2126 						   sinfo->xdp_frags_size, truesize,
2127 						   xdp_buff_is_frag_pfmemalloc(
2128 							&mxbuf->xdp));
2129 
2130 			pagep = frag_page - sinfo->nr_frags;
2131 			do
2132 				pagep->frags++;
2133 			while (++pagep < frag_page);
2134 		}
2135 		/* copy header */
2136 		addr = page_pool_get_dma_addr_netmem(head_page->netmem);
2137 		mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
2138 				      head_offset, head_offset, headlen);
2139 		/* skb linear part was allocated with headlen and aligned to long */
2140 		skb->tail += headlen;
2141 		skb->len  += headlen;
2142 	}
2143 
2144 	return skb;
2145 }
2146 
2147 static struct sk_buff *
2148 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2149 				struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2150 				u32 page_idx)
2151 {
2152 	struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2153 	u16 rx_headroom = rq->buff.headroom;
2154 	struct bpf_prog *prog;
2155 	struct sk_buff *skb;
2156 	u32 metasize = 0;
2157 	void *va, *data;
2158 	dma_addr_t addr;
2159 	u32 frag_size;
2160 
2161 	/* Check packet size. Note LRO doesn't use linear SKB */
2162 	if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2163 		rq->stats->oversize_pkts_sw_drop++;
2164 		return NULL;
2165 	}
2166 
2167 	va             = netmem_address(frag_page->netmem) + head_offset;
2168 	data           = va + rx_headroom;
2169 	frag_size      = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2170 
2171 	addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
2172 	dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2173 				      frag_size, rq->buff.map_dir);
2174 	net_prefetch(data);
2175 
2176 	prog = rcu_dereference(rq->xdp_prog);
2177 	if (prog) {
2178 		struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
2179 
2180 		net_prefetchw(va); /* xdp_frame data area */
2181 		mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2182 				 cqe_bcnt, mxbuf);
2183 		if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2184 			if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2185 				frag_page->frags++;
2186 			return NULL; /* page/packet was consumed by XDP */
2187 		}
2188 
2189 		rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
2190 		metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
2191 		cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
2192 	}
2193 	frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2194 	skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2195 	if (unlikely(!skb))
2196 		return NULL;
2197 
2198 	/* queue up for recycling/reuse */
2199 	skb_mark_for_recycle(skb);
2200 	frag_page->frags++;
2201 
2202 	return skb;
2203 }
2204 
2205 static struct sk_buff *
2206 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2207 			  struct mlx5_cqe64 *cqe, u16 header_index)
2208 {
2209 	struct mlx5e_frag_page *frag_page = mlx5e_shampo_hd_to_frag_page(rq, header_index);
2210 	u16 head_offset = mlx5e_shampo_hd_offset(header_index);
2211 	u16 head_size = cqe->shampo.header_size;
2212 	u16 rx_headroom = rq->buff.headroom;
2213 	struct sk_buff *skb = NULL;
2214 	dma_addr_t page_dma_addr;
2215 	dma_addr_t dma_addr;
2216 	void *hdr, *data;
2217 	u32 frag_size;
2218 
2219 	page_dma_addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
2220 	dma_addr = page_dma_addr + head_offset;
2221 
2222 	hdr		= netmem_address(frag_page->netmem) + head_offset;
2223 	data		= hdr + rx_headroom;
2224 	frag_size	= MLX5_SKB_FRAG_SZ(rx_headroom + head_size);
2225 
2226 	if (likely(frag_size <= BIT(MLX5E_SHAMPO_LOG_MAX_HEADER_ENTRY_SIZE))) {
2227 		/* build SKB around header */
2228 		dma_sync_single_range_for_cpu(rq->pdev, dma_addr, 0, frag_size, rq->buff.map_dir);
2229 		net_prefetchw(hdr);
2230 		net_prefetch(data);
2231 		skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size, 0);
2232 		if (unlikely(!skb))
2233 			return NULL;
2234 
2235 		frag_page->frags++;
2236 	} else {
2237 		/* allocate SKB and copy header for large header */
2238 		rq->stats->gro_large_hds++;
2239 		skb = napi_alloc_skb(rq->cq.napi,
2240 				     ALIGN(head_size, sizeof(long)));
2241 		if (unlikely(!skb)) {
2242 			rq->stats->buff_alloc_err++;
2243 			return NULL;
2244 		}
2245 
2246 		net_prefetchw(skb->data);
2247 		mlx5e_copy_skb_header(rq, skb, frag_page->netmem, dma_addr,
2248 				      head_offset + rx_headroom,
2249 				      rx_headroom, head_size);
2250 		/* skb linear part was allocated with headlen and aligned to long */
2251 		skb->tail += head_size;
2252 		skb->len  += head_size;
2253 	}
2254 
2255 	/* queue up for recycling/reuse */
2256 	skb_mark_for_recycle(skb);
2257 
2258 	return skb;
2259 }
2260 
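/* Account the unused tail of the last fragment's stride in skb->truesize:
 * e.g. a 100-byte fragment in a 128-byte stride adds 28 truesize bytes.
 */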
2261 static void
2262 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2263 {
2264 	skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2265 	unsigned int frag_size = skb_frag_size(last_frag);
2266 	unsigned int frag_truesize;
2267 
2268 	frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2269 	skb->truesize += frag_truesize - frag_size;
2270 }
2271 
2272 static void
2273 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2274 {
2275 	struct sk_buff *skb = rq->hw_gro_data->skb;
2276 	struct mlx5e_rq_stats *stats = rq->stats;
2277 	u16 gro_count = NAPI_GRO_CB(skb)->count;
2278 
2279 	if (likely(skb_shinfo(skb)->nr_frags))
2280 		mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2281 	if (gro_count > 1) {
2282 		stats->gro_skbs++;
2283 		stats->gro_packets += gro_count;
2284 		stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
2285 
2286 		mlx5e_shampo_update_hdr(rq, cqe, match);
2287 	} else {
2288 		skb_shinfo(skb)->gso_size = 0;
2289 	}
2290 	napi_gro_receive(rq->cq.napi, skb);
2291 	rq->hw_gro_data->skb = NULL;
2292 }
2293 
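/* HW GRO must not grow an skb past GRO_LEGACY_MAX_SIZE (64KB); assume the
 * worst case of a full page consumed per existing fragment.
 */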
2294 static bool
2295 mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb, u16 data_bcnt)
2296 {
2297 	int nr_frags = skb_shinfo(skb)->nr_frags;
2298 
2299 	return PAGE_SIZE * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2300 }
2301 
2302 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2303 {
2304 	u16 data_bcnt		= mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2305 	u16 header_index	= mlx5e_shampo_get_cqe_header_index(rq, cqe);
2306 	u32 wqe_offset		= be32_to_cpu(cqe->shampo.data_offset);
2307 	u16 cstrides		= mpwrq_get_cqe_consumed_strides(cqe);
2308 	u32 data_offset		= wqe_offset & (PAGE_SIZE - 1);
2309 	u32 cqe_bcnt		= mpwrq_get_cqe_byte_cnt(cqe);
2310 	u16 wqe_id		= be16_to_cpu(cqe->wqe_id);
2311 	u32 page_idx		= wqe_offset >> PAGE_SHIFT;
2312 	u16 head_size		= cqe->shampo.header_size;
2313 	struct sk_buff **skb	= &rq->hw_gro_data->skb;
2314 	bool flush		= cqe->shampo.flush;
2315 	bool match		= cqe->shampo.match;
2316 	struct mlx5e_rq_stats *stats = rq->stats;
2317 	struct mlx5e_rx_wqe_ll *wqe;
2318 	struct mlx5e_mpw_info *wi;
2319 	struct mlx5_wq_ll *wq;
2320 
2321 	wi = mlx5e_get_mpw_info(rq, wqe_id);
2322 	wi->consumed_strides += cstrides;
2323 
2324 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2325 		mlx5e_handle_rx_err_cqe(rq, cqe);
2326 		goto mpwrq_cqe_out;
2327 	}
2328 
2329 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2330 		stats->mpwqe_filler_cqes++;
2331 		stats->mpwqe_filler_strides += cstrides;
2332 		goto mpwrq_cqe_out;
2333 	}
2334 
2335 	if (*skb && (!match || !(mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt)))) {
2336 		match = false;
2337 		mlx5e_shampo_flush_skb(rq, cqe, match);
2338 	}
2339 
2340 	if (!*skb) {
2341 		if (likely(head_size)) {
2342 			*skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2343 		} else {
2344 			struct mlx5e_frag_page *frag_page;
2345 
2346 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2347 			/* Drop packets with header in unreadable data area to
2348 			 * prevent the kernel from touching it.
2349 			 */
2350 			if (unlikely(netmem_is_net_iov(frag_page->netmem)))
2351 				goto free_hd_entry;
2352 			*skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
2353 								  cqe_bcnt,
2354 								  data_offset,
2355 								  page_idx);
2356 		}
2357 
2358 		if (unlikely(!*skb))
2359 			goto free_hd_entry;
2360 
2361 		NAPI_GRO_CB(*skb)->count = 1;
2362 		skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2363 	} else {
2364 		NAPI_GRO_CB(*skb)->count++;
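		/* On the second IPv4 segment, remember its IP ID so the
		 * header rewrite at flush time can tell fixed from
		 * incrementing IP IDs.
		 */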
2365 		if (NAPI_GRO_CB(*skb)->count == 2 &&
2366 		    rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2367 			void *hd_addr = mlx5e_shampo_get_packet_hd(rq, header_index);
2368 			int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff -
2369 				    sizeof(struct iphdr);
2370 			struct iphdr *iph = (struct iphdr *)(hd_addr + nhoff);
2371 
2372 			rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2373 		}
2374 	}
2375 
2376 	if (likely(head_size)) {
2377 		if (data_bcnt) {
2378 			struct mlx5e_frag_page *frag_page;
2379 
2380 			frag_page = &wi->alloc_units.frag_pages[page_idx];
2381 			mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2382 		} else {
2383 			stats->hds_nodata_packets++;
2384 			stats->hds_nodata_bytes += head_size;
2385 		}
2386 	} else {
2387 		stats->hds_nosplit_packets++;
2388 		stats->hds_nosplit_bytes += data_bcnt;
2389 	}
2390 
2391 	mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb);
2392 	if (flush && rq->hw_gro_data->skb)
2393 		mlx5e_shampo_flush_skb(rq, cqe, match);
2394 free_hd_entry:
2395 	if (likely(head_size))
2396 		mlx5e_free_rx_shampo_hd_entry(rq, header_index);
2397 mpwrq_cqe_out:
2398 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2399 		return;
2400 
2401 	if (unlikely(!cstrides))
2402 		return;
2403 
2404 	wq  = &rq->mpwqe.wq;
2405 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2406 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2407 }
2408 
2409 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2410 {
2411 	u16 cstrides       = mpwrq_get_cqe_consumed_strides(cqe);
2412 	u16 wqe_id         = be16_to_cpu(cqe->wqe_id);
2413 	struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2414 	u16 stride_ix      = mpwrq_get_cqe_stride_index(cqe);
2415 	u32 wqe_offset     = stride_ix << rq->mpwqe.log_stride_sz;
2416 	u32 head_offset    = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2417 	u32 page_idx       = wqe_offset >> rq->mpwqe.page_shift;
2418 	struct mlx5e_rx_wqe_ll *wqe;
2419 	struct mlx5_wq_ll *wq;
2420 	struct sk_buff *skb;
2421 	u16 cqe_bcnt;
2422 
2423 	wi->consumed_strides += cstrides;
2424 
2425 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2426 		mlx5e_handle_rx_err_cqe(rq, cqe);
2427 		goto mpwrq_cqe_out;
2428 	}
2429 
2430 	if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2431 		struct mlx5e_rq_stats *stats = rq->stats;
2432 
2433 		stats->mpwqe_filler_cqes++;
2434 		stats->mpwqe_filler_strides += cstrides;
2435 		goto mpwrq_cqe_out;
2436 	}
2437 
2438 	cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2439 
2440 	skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2441 			      mlx5e_skb_from_cqe_mpwrq_linear,
2442 			      mlx5e_skb_from_cqe_mpwrq_nonlinear,
2443 			      mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2444 			      rq, wi, cqe, cqe_bcnt, head_offset,
2445 			      page_idx);
2446 	if (!skb)
2447 		goto mpwrq_cqe_out;
2448 
2449 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2450 
2451 	if (mlx5e_cqe_regb_chain(cqe))
2452 		if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2453 			dev_kfree_skb_any(skb);
2454 			goto mpwrq_cqe_out;
2455 		}
2456 
2457 	napi_gro_receive(rq->cq.napi, skb);
2458 
2459 mpwrq_cqe_out:
2460 	if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2461 		return;
2462 
2463 	wq  = &rq->mpwqe.wq;
2464 	wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2465 	mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2466 }
2467 
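/* Enhanced CQE compression: a full "title" CQE is followed by compressed
 * mini-CQE blocks holding only per-packet deltas; the title must be saved
 * so each block can be expanded against it, possibly across NAPI polls.
 */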
2468 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2469 						 struct mlx5_cqwq *cqwq,
2470 						 int budget_rem)
2471 {
2472 	struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2473 	struct mlx5e_cq_decomp *cqd = &rq->cqd;
2474 	int work_done = 0;
2475 
2476 	cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq);
2477 	if (!cqe)
2478 		return work_done;
2479 
2480 	if (cqd->last_cqe_title &&
2481 	    (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2482 		rq->stats->cqe_compress_blks++;
2483 		cqd->last_cqe_title = false;
2484 	}
2485 
2486 	do {
2487 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2488 			if (title_cqe) {
2489 				mlx5e_read_enhanced_title_slot(rq, title_cqe);
2490 				title_cqe = NULL;
2491 				rq->stats->cqe_compress_blks++;
2492 			}
2493 			work_done +=
2494 				mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2495 							      budget_rem - work_done);
2496 			continue;
2497 		}
2498 		title_cqe = cqe;
2499 		mlx5_cqwq_pop(cqwq);
2500 
2501 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2502 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2503 				rq, cqe);
2504 		work_done++;
2505 	} while (work_done < budget_rem &&
2506 		 (cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)));
2507 
2508 	/* the last CQE might be the title of the next poll bulk */
2509 	if (title_cqe) {
2510 		mlx5e_read_enhanced_title_slot(rq, title_cqe);
2511 		cqd->last_cqe_title = true;
2512 	}
2513 
2514 	return work_done;
2515 }
2516 
2517 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2518 					      struct mlx5_cqwq *cqwq,
2519 					      int budget_rem)
2520 {
2521 	struct mlx5_cqe64 *cqe;
2522 	int work_done = 0;
2523 
2524 	if (rq->cqd.left)
2525 		work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2526 
2527 	while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2528 		if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2529 			work_done +=
2530 				mlx5e_decompress_cqes_start(rq, cqwq,
2531 							    budget_rem - work_done);
2532 			continue;
2533 		}
2534 
2535 		mlx5_cqwq_pop(cqwq);
2536 		INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2537 				mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2538 				rq, cqe);
2539 		work_done++;
2540 	}
2541 
2542 	return work_done;
2543 }
2544 
2545 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2546 {
2547 	struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2548 	struct mlx5_cqwq *cqwq = &cq->wq;
2549 	int work_done;
2550 
2551 	if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2552 		return 0;
2553 
2554 	if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2555 		work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2556 								  budget);
2557 	else
2558 		work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2559 							       budget);
2560 
2561 	if (work_done == 0)
2562 		return 0;
2563 
2564 	if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2565 		mlx5e_shampo_flush_skb(rq, NULL, false);
2566 
2567 	if (rcu_access_pointer(rq->xdp_prog))
2568 		mlx5e_xdp_rx_poll_complete(rq);
2569 
2570 	mlx5_cqwq_update_db_record(cqwq);
2571 
2572 	/* ensure cq space is freed before enabling more cqes */
2573 	wmb();
2574 
2575 	return work_done;
2576 }
2577 
2578 #ifdef CONFIG_MLX5_CORE_IPOIB
2579 
2580 #define MLX5_IB_GRH_SGID_OFFSET 8
2581 #define MLX5_IB_GRH_DGID_OFFSET 24
2582 #define MLX5_GID_SIZE           16
2583 
2584 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2585 					 struct mlx5_cqe64 *cqe,
2586 					 u32 cqe_bcnt,
2587 					 struct sk_buff *skb)
2588 {
2589 	struct hwtstamp_config *tstamp;
2590 	struct mlx5e_rq_stats *stats;
2591 	struct net_device *netdev;
2592 	struct mlx5e_priv *priv;
2593 	char *pseudo_header;
2594 	u32 flags_rqpn;
2595 	u32 qpn;
2596 	u8 *dgid;
2597 	u8 g;
2598 
2599 	qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2600 	netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2601 
2602 	/* No mapping present, cannot process SKB. This might happen if a child
2603 	 * interface is going down while there are unprocessed CQEs on the parent RQ.
2604 	 */
2605 	if (unlikely(!netdev)) {
2606 		/* TODO: add drop counters support */
2607 		skb->dev = NULL;
2608 		pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2609 		return;
2610 	}
2611 
2612 	priv = mlx5i_epriv(netdev);
2613 	tstamp = &priv->tstamp;
2614 	stats = &priv->channel_stats[rq->ix]->rq;
2615 
2616 	flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2617 	g = (flags_rqpn >> 28) & 3;
2618 	dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
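	/* Classify by GRH: no GRH or a non-multicast DGID -> host; the
	 * broadcast GID -> broadcast; any other multicast DGID (first
	 * octet 0xff) -> multicast.
	 */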
2619 	if ((!g) || dgid[0] != 0xff)
2620 		skb->pkt_type = PACKET_HOST;
2621 	else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2622 		skb->pkt_type = PACKET_BROADCAST;
2623 	else
2624 		skb->pkt_type = PACKET_MULTICAST;
2625 
2626 	/* Drop packets that this interface sent, i.e. multicast packets
2627 	 * that the HCA has replicated.
2628 	 */
2629 	if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2630 	    (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2631 		    MLX5_GID_SIZE) == 0)) {
2632 		skb->dev = NULL;
2633 		return;
2634 	}
2635 
2636 	skb_pull(skb, MLX5_IB_GRH_BYTES);
2637 
2638 	skb->protocol = *((__be16 *)(skb->data));
2639 
2640 	if (netdev->features & NETIF_F_RXCSUM) {
2641 		skb->ip_summed = CHECKSUM_COMPLETE;
2642 		skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2643 		stats->csum_complete++;
2644 	} else {
2645 		skb->ip_summed = CHECKSUM_NONE;
2646 		stats->csum_none++;
2647 	}
2648 
2649 	if (unlikely(mlx5e_rx_hw_stamp(tstamp)))
2650 		skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2651 								  rq->clock, get_cqe_ts(cqe));
2652 	skb_record_rx_queue(skb, rq->ix);
2653 
2654 	if (likely(netdev->features & NETIF_F_RXHASH))
2655 		mlx5e_skb_set_hash(cqe, skb);
2656 
2657 	/* 20 bytes of IPoIB pseudo header; the 4 bytes of encap already exist */
2658 	pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2659 	memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2660 	skb_reset_mac_header(skb);
2661 	skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2662 
2663 	skb->dev = netdev;
2664 
2665 	stats->packets++;
2666 	stats->bytes += cqe_bcnt;
2667 }
2668 
2669 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2670 {
2671 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2672 	struct mlx5e_wqe_frag_info *wi;
2673 	struct sk_buff *skb;
2674 	u32 cqe_bcnt;
2675 	u16 ci;
2676 
2677 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2678 	wi       = get_frag(rq, ci);
2679 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2680 
2681 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2682 		rq->stats->wqe_err++;
2683 		goto wq_cyc_pop;
2684 	}
2685 
2686 	skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2687 			      mlx5e_skb_from_cqe_linear,
2688 			      mlx5e_skb_from_cqe_nonlinear,
2689 			      rq, wi, cqe, cqe_bcnt);
2690 	if (!skb)
2691 		goto wq_cyc_pop;
2692 
2693 	mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2694 	if (unlikely(!skb->dev)) {
2695 		dev_kfree_skb_any(skb);
2696 		goto wq_cyc_pop;
2697 	}
2698 	napi_gro_receive(rq->cq.napi, skb);
2699 
2700 wq_cyc_pop:
2701 	mlx5_wq_cyc_pop(wq);
2702 }
2703 
2704 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2705 	.handle_rx_cqe       = mlx5i_handle_rx_cqe,
2706 	.handle_rx_cqe_mpwqe = NULL, /* Not supported */
2707 };
2708 #endif /* CONFIG_MLX5_CORE_IPOIB */
2709 
2710 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2711 {
2712 	struct net_device *netdev = rq->netdev;
2713 	struct mlx5_core_dev *mdev = rq->mdev;
2714 	struct mlx5e_priv *priv = rq->priv;
2715 
2716 	switch (rq->wq_type) {
2717 	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2718 		rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2719 			mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2720 			mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2721 				mlx5e_skb_from_cqe_mpwrq_linear :
2722 				mlx5e_skb_from_cqe_mpwrq_nonlinear;
2723 		rq->post_wqes = mlx5e_post_rx_mpwqes;
2724 		rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2725 
2726 		if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2727 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2728 			if (!rq->handle_rx_cqe) {
2729 				netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2730 				return -EINVAL;
2731 			}
2732 		} else {
2733 			rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2734 			if (!rq->handle_rx_cqe) {
2735 				netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2736 				return -EINVAL;
2737 			}
2738 		}
2739 
2740 		break;
2741 	default: /* MLX5_WQ_TYPE_CYCLIC */
2742 		rq->wqe.skb_from_cqe = xsk ?
2743 			mlx5e_xsk_skb_from_cqe_linear :
2744 			mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2745 				mlx5e_skb_from_cqe_linear :
2746 				mlx5e_skb_from_cqe_nonlinear;
2747 		rq->post_wqes = mlx5e_post_rx_wqes;
2748 		rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2749 		rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2750 		if (!rq->handle_rx_cqe) {
2751 			netdev_err(netdev, "RX handler of RQ is not set\n");
2752 			return -EINVAL;
2753 		}
2754 	}
2755 
2756 	return 0;
2757 }
2758 
2759 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2760 {
2761 	struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2762 	struct mlx5e_wqe_frag_info *wi;
2763 	struct sk_buff *skb;
2764 	u32 cqe_bcnt;
2765 	u16 trap_id;
2766 	u16 ci;
2767 
2768 	trap_id  = get_cqe_flow_tag(cqe);
2769 	ci       = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2770 	wi       = get_frag(rq, ci);
2771 	cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2772 
2773 	if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2774 		rq->stats->wqe_err++;
2775 		goto wq_cyc_pop;
2776 	}
2777 
2778 	skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2779 	if (!skb)
2780 		goto wq_cyc_pop;
2781 
2782 	mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
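	/* eth_type_trans() pulled the MAC header during CQE completion; push
	 * it back so devlink reports the complete frame.
	 */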
2783 	skb_push(skb, ETH_HLEN);
2784 
2785 	mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2786 				 rq->netdev->devlink_port);
2787 	dev_kfree_skb_any(skb);
2788 
2789 wq_cyc_pop:
2790 	mlx5_wq_cyc_pop(wq);
2791 }
2792 
2793 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2794 {
2795 	rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2796 			       mlx5e_skb_from_cqe_linear :
2797 			       mlx5e_skb_from_cqe_nonlinear;
2798 	rq->post_wqes = mlx5e_post_rx_wqes;
2799 	rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2800 	rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2801 }
2802