1 /*
2 * Copyright (c) 2015, Mellanox Technologies. All rights reserved.
3 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
33 #include <linux/ip.h>
34 #include <linux/ipv6.h>
35 #include <linux/tcp.h>
36 #include <linux/bitmap.h>
37 #include <linux/filter.h>
38 #include <net/ip6_checksum.h>
39 #include <net/page_pool/helpers.h>
40 #include <net/inet_ecn.h>
41 #include <net/gro.h>
42 #include <net/udp.h>
43 #include <net/tcp.h>
44 #include <net/xdp_sock_drv.h>
45 #include "en.h"
46 #include "en/txrx.h"
47 #include "en_tc.h"
48 #include "eswitch.h"
49 #include "en_rep.h"
50 #include "en/rep/tc.h"
51 #include "ipoib/ipoib.h"
52 #include "en_accel/ipsec.h"
53 #include "en_accel/macsec.h"
54 #include "en_accel/psp_rxtx.h"
55 #include "en_accel/ipsec_rxtx.h"
56 #include "en_accel/ktls_txrx.h"
57 #include "en/xdp.h"
58 #include "en/xsk/rx.h"
59 #include "en/health.h"
60 #include "en/params.h"
61 #include "devlink.h"
62 #include "en/devlink.h"
63
64 static struct sk_buff *
65 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
66 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
67 u32 page_idx);
68 static struct sk_buff *
69 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
70 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
71 u32 page_idx);
72 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
73 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
74 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe);
75
76 const struct mlx5e_rx_handlers mlx5e_rx_handlers_nic = {
77 .handle_rx_cqe = mlx5e_handle_rx_cqe,
78 .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq,
79 .handle_rx_cqe_mpwqe_shampo = mlx5e_handle_rx_cqe_mpwrq_shampo,
80 };
81
82 static inline void mlx5e_read_cqe_slot(struct mlx5_cqwq *wq,
83 u32 cqcc, void *data)
84 {
85 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
86
87 memcpy(data, mlx5_cqwq_get_wqe(wq, ci), sizeof(struct mlx5_cqe64));
88 }
89
90 static void mlx5e_read_enhanced_title_slot(struct mlx5e_rq *rq,
91 struct mlx5_cqe64 *cqe)
92 {
93 struct mlx5e_cq_decomp *cqd = &rq->cqd;
94 struct mlx5_cqe64 *title = &cqd->title;
95
96 memcpy(title, cqe, sizeof(struct mlx5_cqe64));
97
98 if (likely(test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)))
99 return;
100
101 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
102 cqd->wqe_counter = mpwrq_get_cqe_stride_index(title) +
103 mpwrq_get_cqe_consumed_strides(title);
104 else
105 cqd->wqe_counter =
106 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, be16_to_cpu(title->wqe_counter) + 1);
107 }
108
109 static inline void mlx5e_read_title_slot(struct mlx5e_rq *rq,
110 struct mlx5_cqwq *wq,
111 u32 cqcc)
112 {
113 struct mlx5e_cq_decomp *cqd = &rq->cqd;
114 struct mlx5_cqe64 *title = &cqd->title;
115
116 mlx5e_read_cqe_slot(wq, cqcc, title);
117 cqd->left = be32_to_cpu(title->byte_cnt);
118 cqd->wqe_counter = be16_to_cpu(title->wqe_counter);
119 rq->stats->cqe_compress_blks++;
120 }
121
122 static inline void mlx5e_read_mini_arr_slot(struct mlx5_cqwq *wq,
123 struct mlx5e_cq_decomp *cqd,
124 u32 cqcc)
125 {
126 mlx5e_read_cqe_slot(wq, cqcc, cqd->mini_arr);
127 cqd->mini_arr_idx = 0;
128 }
129
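/* Update the ownership (op_own) bit of the n CQE slots consumed by a CQE
 * compression session, taking the CQ wrap-around into account.
 */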
130 static inline void mlx5e_cqes_update_owner(struct mlx5_cqwq *wq, int n)
131 {
132 u32 cqcc = wq->cc;
133 u8 op_own = mlx5_cqwq_get_ctr_wrap_cnt(wq, cqcc) & 1;
134 u32 ci = mlx5_cqwq_ctr2ix(wq, cqcc);
135 u32 wq_sz = mlx5_cqwq_get_size(wq);
136 u32 ci_top = min_t(u32, wq_sz, ci + n);
137
138 for (; ci < ci_top; ci++, n--) {
139 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
140
141 cqe->op_own = op_own;
142 }
143
144 if (unlikely(ci == wq_sz)) {
145 op_own = !op_own;
146 for (ci = 0; ci < n; ci++) {
147 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(wq, ci);
148
149 cqe->op_own = op_own;
150 }
151 }
152 }
153
154 static inline void mlx5e_decompress_cqe(struct mlx5e_rq *rq,
155 struct mlx5_cqwq *wq,
156 u32 cqcc)
157 {
158 struct mlx5e_cq_decomp *cqd = &rq->cqd;
159 struct mlx5_mini_cqe8 *mini_cqe = &cqd->mini_arr[cqd->mini_arr_idx];
160 struct mlx5_cqe64 *title = &cqd->title;
161
162 title->byte_cnt = mini_cqe->byte_cnt;
163 title->check_sum = mini_cqe->checksum;
164 title->op_own &= 0xf0;
165 title->op_own |= 0x01 & (cqcc >> wq->fbc.log_sz);
166
167 /* state bit set implies linked-list striding RQ wq type and
168 * HW stride index capability supported
169 */
170 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_HW_STRIDX, &rq->state)) {
171 title->wqe_counter = mini_cqe->stridx;
172 return;
173 }
174
175 /* HW stride index capability not supported */
176 title->wqe_counter = cpu_to_be16(cqd->wqe_counter);
177 if (rq->wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ)
178 cqd->wqe_counter += mpwrq_get_cqe_consumed_strides(title);
179 else
180 cqd->wqe_counter =
181 mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, cqd->wqe_counter + 1);
182 }
183
184 static inline void mlx5e_decompress_cqe_no_hash(struct mlx5e_rq *rq,
185 struct mlx5_cqwq *wq,
186 u32 cqcc)
187 {
188 struct mlx5e_cq_decomp *cqd = &rq->cqd;
189
190 mlx5e_decompress_cqe(rq, wq, cqcc);
191 cqd->title.rss_hash_type = 0;
192 cqd->title.rss_hash_result = 0;
193 }
194
195 static u32 mlx5e_decompress_enhanced_cqe(struct mlx5e_rq *rq,
196 struct mlx5_cqwq *wq,
197 struct mlx5_cqe64 *cqe,
198 int budget_rem)
199 {
200 struct mlx5e_cq_decomp *cqd = &rq->cqd;
201 u32 cqcc, left;
202 u32 i;
203
204 left = get_cqe_enhanced_num_mini_cqes(cqe);
205 /* Here we avoid breaking the cqe compression session in the middle
206 * in case budget is not sufficient to handle all of it. In this case
207 * we return work_done == budget_rem to give 'busy' napi indication.
208 */
209 if (unlikely(left > budget_rem))
210 return budget_rem;
211
212 cqcc = wq->cc;
213 cqd->mini_arr_idx = 0;
214 memcpy(cqd->mini_arr, cqe, sizeof(struct mlx5_cqe64));
215 for (i = 0; i < left; i++, cqd->mini_arr_idx++, cqcc++) {
216 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
217 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
218 mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
219 rq, &cqd->title);
220 }
221 wq->cc = cqcc;
222 rq->stats->cqe_compress_pkts += left;
223
224 return left;
225 }
226
227 static inline u32 mlx5e_decompress_cqes_cont(struct mlx5e_rq *rq,
228 struct mlx5_cqwq *wq,
229 int update_owner_only,
230 int budget_rem)
231 {
232 struct mlx5e_cq_decomp *cqd = &rq->cqd;
233 u32 cqcc = wq->cc + update_owner_only;
234 u32 cqe_count;
235 u32 i;
236
237 cqe_count = min_t(u32, cqd->left, budget_rem);
238
239 for (i = update_owner_only; i < cqe_count;
240 i++, cqd->mini_arr_idx++, cqcc++) {
241 if (cqd->mini_arr_idx == MLX5_MINI_CQE_ARRAY_SIZE)
242 mlx5e_read_mini_arr_slot(wq, cqd, cqcc);
243
244 mlx5e_decompress_cqe_no_hash(rq, wq, cqcc);
245 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
246 mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
247 rq, &cqd->title);
248 }
249 mlx5e_cqes_update_owner(wq, cqcc - wq->cc);
250 wq->cc = cqcc;
251 cqd->left -= cqe_count;
252 rq->stats->cqe_compress_pkts += cqe_count;
253
254 return cqe_count;
255 }
256
257 static inline u32 mlx5e_decompress_cqes_start(struct mlx5e_rq *rq,
258 struct mlx5_cqwq *wq,
259 int budget_rem)
260 {
261 struct mlx5e_cq_decomp *cqd = &rq->cqd;
262 u32 cc = wq->cc;
263
264 mlx5e_read_title_slot(rq, wq, cc);
265 mlx5e_read_mini_arr_slot(wq, cqd, cc + 1);
266 mlx5e_decompress_cqe(rq, wq, cc);
267 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
268 mlx5e_handle_rx_cqe_mpwrq_shampo, mlx5e_handle_rx_cqe,
269 rq, &cqd->title);
270 cqd->mini_arr_idx++;
271
272 return mlx5e_decompress_cqes_cont(rq, wq, 1, budget_rem);
273 }
274
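/* Allocate a pool page and bias its fragment count to the maximum;
 * frag_page->frags counts references actually handed out, and the unused
 * bias is drained in mlx5e_page_release_fragmented().
 */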
275 static int mlx5e_page_alloc_fragmented(struct page_pool *pp,
276 struct mlx5e_frag_page *frag_page)
277 {
278 netmem_ref netmem = page_pool_dev_alloc_netmems(pp);
279
280 if (unlikely(!netmem))
281 return -ENOMEM;
282
283 page_pool_fragment_netmem(netmem, MLX5E_PAGECNT_BIAS_MAX);
284
285 *frag_page = (struct mlx5e_frag_page) {
286 .netmem = netmem,
287 .frags = 0,
288 };
289
290 return 0;
291 }
292
293 static void mlx5e_page_release_fragmented(struct page_pool *pp,
294 struct mlx5e_frag_page *frag_page)
295 {
296 u16 drain_count = MLX5E_PAGECNT_BIAS_MAX - frag_page->frags;
297 netmem_ref netmem = frag_page->netmem;
298
299 if (page_pool_unref_netmem(netmem, drain_count) == 0)
300 page_pool_put_unrefed_netmem(pp, netmem, -1, true);
301 }
302
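/* Replace the linear page once all of its fragments have been handed out. */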
303 static int mlx5e_mpwqe_linear_page_refill(struct mlx5e_rq *rq)
304 {
305 struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
306
307 if (likely(li->frag_page.frags < li->max_frags))
308 return 0;
309
310 if (likely(li->frag_page.netmem)) {
311 mlx5e_page_release_fragmented(rq->page_pool, &li->frag_page);
312 li->frag_page.netmem = 0;
313 }
314
315 return mlx5e_page_alloc_fragmented(rq->page_pool, &li->frag_page);
316 }
317
318 static void *mlx5e_mpwqe_get_linear_page_frag(struct mlx5e_rq *rq)
319 {
320 struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
321 u32 frag_offset;
322
323 if (unlikely(mlx5e_mpwqe_linear_page_refill(rq)))
324 return NULL;
325
326 frag_offset = li->frag_page.frags << MLX5E_XDP_LOG_MAX_LINEAR_SZ;
327 WARN_ON(frag_offset >= BIT(rq->mpwqe.page_shift));
328
329 return netmem_address(li->frag_page.netmem) + frag_offset;
330 }
331
332 static inline int mlx5e_get_rx_frag(struct mlx5e_rq *rq,
333 struct mlx5e_wqe_frag_info *frag)
334 {
335 int err = 0;
336
337 if (!frag->offset)
338 /* On first frag (offset == 0), replenish page.
339 * Other frags that point to the same page (with a different
340 * offset) should just use the new one without replenishing again
341 * by themselves.
342 */
343 err = mlx5e_page_alloc_fragmented(rq->page_pool,
344 frag->frag_page);
345
346 return err;
347 }
348
349 static bool mlx5e_frag_can_release(struct mlx5e_wqe_frag_info *frag)
350 {
351 #define CAN_RELEASE_MASK \
352 (BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE) | BIT(MLX5E_WQE_FRAG_SKIP_RELEASE))
353
354 #define CAN_RELEASE_VALUE BIT(MLX5E_WQE_FRAG_LAST_IN_PAGE)
355
356 return (frag->flags & CAN_RELEASE_MASK) == CAN_RELEASE_VALUE;
357 }
358
359 static inline void mlx5e_put_rx_frag(struct mlx5e_rq *rq,
360 struct mlx5e_wqe_frag_info *frag)
361 {
362 if (mlx5e_frag_can_release(frag))
363 mlx5e_page_release_fragmented(rq->page_pool, frag->frag_page);
364 }
365
366 static inline struct mlx5e_wqe_frag_info *get_frag(struct mlx5e_rq *rq, u16 ix)
367 {
368 return &rq->wqe.frags[ix << rq->wqe.info.log_num_frags];
369 }
370
371 static int mlx5e_alloc_rx_wqe(struct mlx5e_rq *rq, struct mlx5e_rx_wqe_cyc *wqe,
372 u16 ix)
373 {
374 struct mlx5e_wqe_frag_info *frag = get_frag(rq, ix);
375 int err;
376 int i;
377
378 for (i = 0; i < rq->wqe.info.num_frags; i++, frag++) {
379 dma_addr_t addr;
380 u16 headroom;
381
382 err = mlx5e_get_rx_frag(rq, frag);
383 if (unlikely(err))
384 goto free_frags;
385
386 frag->flags &= ~BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
387
388 headroom = i == 0 ? rq->buff.headroom : 0;
389 addr = page_pool_get_dma_addr_netmem(frag->frag_page->netmem);
390 wqe->data[i].addr = cpu_to_be64(addr + frag->offset + headroom);
391 }
392
393 return 0;
394
395 free_frags:
396 while (--i >= 0)
397 mlx5e_put_rx_frag(rq, --frag);
398
399 return err;
400 }
401
402 static inline void mlx5e_free_rx_wqe(struct mlx5e_rq *rq,
403 struct mlx5e_wqe_frag_info *wi)
404 {
405 int i;
406
407 for (i = 0; i < rq->wqe.info.num_frags; i++, wi++)
408 mlx5e_put_rx_frag(rq, wi);
409 }
410
411 static void mlx5e_xsk_free_rx_wqe(struct mlx5e_wqe_frag_info *wi)
412 {
413 if (!(wi->flags & BIT(MLX5E_WQE_FRAG_SKIP_RELEASE)))
414 xsk_buff_free(*wi->xskp);
415 }
416
417 static void mlx5e_dealloc_rx_wqe(struct mlx5e_rq *rq, u16 ix)
418 {
419 struct mlx5e_wqe_frag_info *wi = get_frag(rq, ix);
420
421 if (rq->xsk_pool) {
422 mlx5e_xsk_free_rx_wqe(wi);
423 } else {
424 mlx5e_free_rx_wqe(rq, wi);
425
426 /* Avoid a second release of the wqe pages: dealloc is called
427 * for the same missing wqes on regular RQ flush and on regular
428 * RQ close. This happens when XSK RQs come into play.
429 */
430 for (int i = 0; i < rq->wqe.info.num_frags; i++, wi++)
431 wi->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
432 }
433 }
434
435 static void mlx5e_xsk_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
436 {
437 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
438 int i;
439
440 for (i = 0; i < wqe_bulk; i++) {
441 int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
442 struct mlx5e_wqe_frag_info *wi;
443
444 wi = get_frag(rq, j);
445 /* The page is always put into the Reuse Ring, because there
446 * is no way to return the page to the userspace when the
447 * interface goes down.
448 */
449 mlx5e_xsk_free_rx_wqe(wi);
450 }
451 }
452
453 static void mlx5e_free_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
454 {
455 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
456 int i;
457
458 for (i = 0; i < wqe_bulk; i++) {
459 int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
460 struct mlx5e_wqe_frag_info *wi;
461
462 wi = get_frag(rq, j);
463 mlx5e_free_rx_wqe(rq, wi);
464 }
465 }
466
467 static int mlx5e_alloc_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
468 {
469 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
470 int i;
471
472 for (i = 0; i < wqe_bulk; i++) {
473 int j = mlx5_wq_cyc_ctr2ix(wq, ix + i);
474 struct mlx5e_rx_wqe_cyc *wqe;
475
476 wqe = mlx5_wq_cyc_get_wqe(wq, j);
477
478 if (unlikely(mlx5e_alloc_rx_wqe(rq, wqe, j)))
479 break;
480 }
481
482 return i;
483 }
484
485 static int mlx5e_refill_rx_wqes(struct mlx5e_rq *rq, u16 ix, int wqe_bulk)
486 {
487 int remaining = wqe_bulk;
488 int total_alloc = 0;
489 int refill_alloc;
490 int refill;
491
492 /* The WQE bulk is split into smaller bulks that are sized
493 * according to the page pool cache refill size to avoid overflowing
494 * the page pool cache due to too many page releases at once.
495 */
496 do {
497 refill = min_t(u16, rq->wqe.info.refill_unit, remaining);
498
499 mlx5e_free_rx_wqes(rq, ix + total_alloc, refill);
500 refill_alloc = mlx5e_alloc_rx_wqes(rq, ix + total_alloc, refill);
501 if (unlikely(refill_alloc != refill))
502 goto err_free;
503
504 total_alloc += refill_alloc;
505 remaining -= refill;
506 } while (remaining);
507
508 return total_alloc;
509
510 err_free:
511 mlx5e_free_rx_wqes(rq, ix, total_alloc + refill_alloc);
512
513 for (int i = 0; i < total_alloc + refill; i++) {
514 int j = mlx5_wq_cyc_ctr2ix(&rq->wqe.wq, ix + i);
515 struct mlx5e_wqe_frag_info *frag;
516
517 frag = get_frag(rq, j);
518 for (int k = 0; k < rq->wqe.info.num_frags; k++, frag++)
519 frag->flags |= BIT(MLX5E_WQE_FRAG_SKIP_RELEASE);
520 }
521
522 return 0;
523 }
524
525 static void
526 mlx5e_add_skb_shared_info_frag(struct mlx5e_rq *rq, struct skb_shared_info *sinfo,
527 struct xdp_buff *xdp, struct mlx5e_frag_page *frag_page,
528 u32 frag_offset, u32 len)
529 {
530 netmem_ref netmem = frag_page->netmem;
531 skb_frag_t *frag;
532
533 dma_addr_t addr = page_pool_get_dma_addr_netmem(netmem);
534
535 dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len, rq->buff.map_dir);
536 if (!xdp_buff_has_frags(xdp)) {
537 /* Init on the first fragment to avoid cold cache access
538 * when possible.
539 */
540 sinfo->nr_frags = 0;
541 sinfo->xdp_frags_size = 0;
542 xdp_buff_set_frags_flag(xdp);
543 }
544
545 frag = &sinfo->frags[sinfo->nr_frags++];
546 skb_frag_fill_netmem_desc(frag, netmem, frag_offset, len);
547
548 if (netmem_is_pfmemalloc(netmem))
549 xdp_buff_set_frag_pfmemalloc(xdp);
550 sinfo->xdp_frags_size += len;
551 }
552
553 static inline void
554 mlx5e_add_skb_frag(struct mlx5e_rq *rq, struct sk_buff *skb,
555 struct mlx5e_frag_page *frag_page,
556 u32 frag_offset, u32 len,
557 unsigned int truesize)
558 {
559 dma_addr_t addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
560 u8 next_frag = skb_shinfo(skb)->nr_frags;
561 netmem_ref netmem = frag_page->netmem;
562
563 dma_sync_single_for_cpu(rq->pdev, addr + frag_offset, len,
564 rq->buff.map_dir);
565
566 if (skb_can_coalesce_netmem(skb, next_frag, netmem, frag_offset)) {
567 skb_coalesce_rx_frag(skb, next_frag - 1, len, truesize);
568 return;
569 }
570
571 frag_page->frags++;
572 skb_add_rx_frag_netmem(skb, next_frag, netmem,
573 frag_offset, len, truesize);
574 }
575
576 static inline void
577 mlx5e_copy_skb_header(struct mlx5e_rq *rq, struct sk_buff *skb,
578 netmem_ref netmem, dma_addr_t addr,
579 int offset_from, int dma_offset, u32 headlen)
580 {
581 const void *from = netmem_address(netmem) + offset_from;
582 /* Aligning len to sizeof(long) optimizes memcpy performance */
583 unsigned int len = ALIGN(headlen, sizeof(long));
584
585 dma_sync_single_for_cpu(rq->pdev, addr + dma_offset, len,
586 rq->buff.map_dir);
587 skb_copy_to_linear_data(skb, from, len);
588 }
589
590 static void
591 mlx5e_free_rx_mpwqe(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi)
592 {
593 bool no_xdp_xmit;
594 int i;
595
596 /* A common case for AF_XDP. */
597 if (bitmap_full(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe))
598 return;
599
600 no_xdp_xmit = bitmap_empty(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
601
602 if (rq->xsk_pool) {
603 struct xdp_buff **xsk_buffs = wi->alloc_units.xsk_buffs;
604
605 /* The page is always put into the Reuse Ring, because there
606 * is no way to return the page to userspace when the interface
607 * goes down.
608 */
609 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++)
610 if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap))
611 xsk_buff_free(xsk_buffs[i]);
612 } else {
613 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++) {
614 if (no_xdp_xmit || !test_bit(i, wi->skip_release_bitmap)) {
615 struct mlx5e_frag_page *frag_page;
616
617 frag_page = &wi->alloc_units.frag_pages[i];
618 mlx5e_page_release_fragmented(rq->page_pool,
619 frag_page);
620 }
621 }
622 }
623 }
624
625 static void mlx5e_post_rx_mpwqe(struct mlx5e_rq *rq, u8 n)
626 {
627 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
628
629 do {
630 u16 next_wqe_index = mlx5_wq_ll_get_wqe_next_ix(wq, wq->head);
631
632 mlx5_wq_ll_push(wq, next_wqe_index);
633 } while (--n);
634
635 /* ensure wqes are visible to device before updating doorbell record */
636 dma_wmb();
637
638 mlx5_wq_ll_update_db_record(wq);
639 }
640
641 static int mlx5e_alloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
642 {
643 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
644 struct mlx5e_icosq *sq = rq->icosq;
645 struct mlx5e_frag_page *frag_page;
646 struct mlx5_wq_cyc *wq = &sq->wq;
647 struct mlx5e_umr_wqe *umr_wqe;
648 u32 offset; /* 17-bit value with MTT. */
649 bool sync_locked;
650 u16 pi;
651 int err;
652 int i;
653
654 sync_locked = mlx5e_icosq_sync_lock(sq);
655 pi = mlx5e_icosq_get_next_pi(sq, rq->mpwqe.umr_wqebbs);
656 umr_wqe = mlx5_wq_cyc_get_wqe(wq, pi);
657 memcpy(umr_wqe, &rq->mpwqe.umr_wqe, sizeof(struct mlx5e_umr_wqe));
658
659 frag_page = &wi->alloc_units.frag_pages[0];
660
661 for (i = 0; i < rq->mpwqe.pages_per_wqe; i++, frag_page++) {
662 dma_addr_t addr;
663
664 err = mlx5e_page_alloc_fragmented(rq->page_pool, frag_page);
665 if (unlikely(err))
666 goto err_unmap;
667
668 addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
669 umr_wqe->inline_mtts[i] = (struct mlx5_mtt) {
670 .ptag = cpu_to_be64(addr | MLX5_EN_WR),
671 };
672 }
673
674 /* Pad if needed, in case the value set to ucseg->xlt_octowords
675 * in mlx5e_build_umr_wqe() needed alignment.
676 */
677 if (rq->mpwqe.pages_per_wqe & (MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT - 1)) {
678 int pad = ALIGN(rq->mpwqe.pages_per_wqe, MLX5_UMR_MTT_NUM_ENTRIES_ALIGNMENT) -
679 rq->mpwqe.pages_per_wqe;
680
681 memset(&umr_wqe->inline_mtts[rq->mpwqe.pages_per_wqe], 0,
682 sizeof(*umr_wqe->inline_mtts) * pad);
683 }
684
685 bitmap_zero(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
686 wi->consumed_strides = 0;
687
688 umr_wqe->hdr.ctrl.opmod_idx_opcode =
689 cpu_to_be32((sq->pc << MLX5_WQE_CTRL_WQE_INDEX_SHIFT) |
690 MLX5_OPCODE_UMR);
691
692 offset = (ix * rq->mpwqe.mtts_per_wqe) * sizeof(struct mlx5_mtt) / MLX5_OCTWORD;
693 umr_wqe->hdr.uctrl.xlt_offset = cpu_to_be16(offset);
694
695 sq->db.wqe_info[pi] = (struct mlx5e_icosq_wqe_info) {
696 .wqe_type = MLX5E_ICOSQ_WQE_UMR_RX,
697 .num_wqebbs = rq->mpwqe.umr_wqebbs,
698 .umr.rq = rq,
699 };
700
701 sq->pc += rq->mpwqe.umr_wqebbs;
702 mlx5e_icosq_sync_unlock(sq, sync_locked);
703
704 sq->doorbell_cseg = &umr_wqe->hdr.ctrl;
705
706 return 0;
707
708 err_unmap:
709 mlx5e_icosq_sync_unlock(sq, sync_locked);
710 while (--i >= 0) {
711 frag_page--;
712 mlx5e_page_release_fragmented(rq->page_pool, frag_page);
713 }
714
715 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
716
717 rq->stats->buff_alloc_err++;
718
719 return err;
720 }
721
722 static void mlx5e_dealloc_rx_mpwqe(struct mlx5e_rq *rq, u16 ix)
723 {
724 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, ix);
725 /* This function is called on rq/netdev close. */
726 mlx5e_free_rx_mpwqe(rq, wi);
727
728 /* Avoid a second release of the wqe pages: dealloc is called also
729 * for missing wqes on an already flushed RQ.
730 */
731 bitmap_fill(wi->skip_release_bitmap, rq->mpwqe.pages_per_wqe);
732 }
733
734 void mlx5e_mpwqe_dealloc_linear_page(struct mlx5e_rq *rq)
735 {
736 struct mlx5e_mpw_linear_info *li = rq->mpwqe.linear_info;
737
738 if (!li || !li->frag_page.netmem)
739 return;
740
741 mlx5e_page_release_fragmented(rq->page_pool, &li->frag_page);
742
743 /* Recovery flow can call this function and then alloc again, so leave
744 * things in a good state for re-allocation.
745 */
746 li->frag_page.netmem = 0;
747 li->frag_page.frags = li->max_frags;
748 }
749
750 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_wqes(struct mlx5e_rq *rq)
751 {
752 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
753 int wqe_bulk, count;
754 bool busy = false;
755 u16 head;
756
757 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
758 return false;
759
760 if (mlx5_wq_cyc_missing(wq) < rq->wqe.info.wqe_bulk)
761 return false;
762
763 if (rq->page_pool)
764 page_pool_nid_changed(rq->page_pool, numa_mem_id());
765
766 wqe_bulk = mlx5_wq_cyc_missing(wq);
767 head = mlx5_wq_cyc_get_head(wq);
768
769 /* Don't allow any newly allocated WQEs to share the same page with old
770 * WQEs that aren't completed yet. Stop earlier.
771 */
772 wqe_bulk -= (head + wqe_bulk) & rq->wqe.info.wqe_index_mask;
773
774 if (!rq->xsk_pool) {
775 count = mlx5e_refill_rx_wqes(rq, head, wqe_bulk);
776 } else if (likely(!dma_dev_need_sync(rq->pdev))) {
777 mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
778 count = mlx5e_xsk_alloc_rx_wqes_batched(rq, head, wqe_bulk);
779 } else {
780 mlx5e_xsk_free_rx_wqes(rq, head, wqe_bulk);
781 /* If dma_need_sync is true, it's more efficient to call
782 * xsk_buff_alloc in a loop, rather than xsk_buff_alloc_batch,
783 * because the latter does the same check and returns only one
784 * frame.
785 */
786 count = mlx5e_xsk_alloc_rx_wqes(rq, head, wqe_bulk);
787 }
788
789 mlx5_wq_cyc_push_n(wq, count);
790 if (unlikely(count != wqe_bulk)) {
791 rq->stats->buff_alloc_err++;
792 busy = true;
793 }
794
795 /* ensure wqes are visible to device before updating doorbell record */
796 dma_wmb();
797
798 mlx5_wq_cyc_update_db_record(wq);
799
800 return busy;
801 }
802
803 void mlx5e_free_icosq_descs(struct mlx5e_icosq *sq)
804 {
805 u16 sqcc;
806
807 sqcc = sq->cc;
808
809 while (sqcc != sq->pc) {
810 struct mlx5e_icosq_wqe_info *wi;
811 u16 ci;
812
813 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
814 wi = &sq->db.wqe_info[ci];
815 sqcc += wi->num_wqebbs;
816 #ifdef CONFIG_MLX5_EN_TLS
817 switch (wi->wqe_type) {
818 case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
819 mlx5e_ktls_handle_ctx_completion(wi);
820 break;
821 case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
822 mlx5e_ktls_handle_get_psv_completion(wi, sq);
823 break;
824 }
825 #endif
826 }
827 sq->cc = sqcc;
828 }
829
830 int mlx5e_poll_ico_cq(struct mlx5e_cq *cq)
831 {
832 struct mlx5e_icosq *sq = container_of(cq, struct mlx5e_icosq, cq);
833 struct mlx5_cqe64 *cqe;
834 u16 sqcc;
835 int i;
836
837 if (unlikely(!test_bit(MLX5E_SQ_STATE_ENABLED, &sq->state)))
838 return 0;
839
840 cqe = mlx5_cqwq_get_cqe(&cq->wq);
841 if (likely(!cqe))
842 return 0;
843
844 /* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
845 * otherwise a cq overrun may occur
846 */
847 sqcc = sq->cc;
848
849 i = 0;
850 do {
851 u16 wqe_counter;
852 bool last_wqe;
853
854 mlx5_cqwq_pop(&cq->wq);
855
856 wqe_counter = be16_to_cpu(cqe->wqe_counter);
857
858 do {
859 struct mlx5e_icosq_wqe_info *wi;
860 u16 ci;
861
862 last_wqe = (sqcc == wqe_counter);
863
864 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
865 wi = &sq->db.wqe_info[ci];
866 sqcc += wi->num_wqebbs;
867
868 if (last_wqe && unlikely(get_cqe_opcode(cqe) != MLX5_CQE_REQ)) {
869 netdev_WARN_ONCE(cq->netdev,
870 "Bad OP in ICOSQ CQE: 0x%x\n",
871 get_cqe_opcode(cqe));
872 #ifdef CONFIG_MLX5_EN_TLS
873 if (wi->wqe_type == MLX5E_ICOSQ_WQE_GET_PSV_TLS)
874 mlx5e_ktls_rx_resync_async_request_cancel(wi);
875 #endif
876 mlx5e_dump_error_cqe(&sq->cq, sq->sqn,
877 (struct mlx5_err_cqe *)cqe);
878 mlx5_wq_cyc_wqe_dump(&sq->wq, ci, wi->num_wqebbs);
879 if (!test_and_set_bit(MLX5E_SQ_STATE_RECOVERING, &sq->state))
880 queue_work(cq->workqueue, &sq->recover_work);
881 break;
882 }
883
884 switch (wi->wqe_type) {
885 case MLX5E_ICOSQ_WQE_UMR_RX:
886 wi->umr.rq->mpwqe.umr_completed++;
887 break;
888 case MLX5E_ICOSQ_WQE_NOP:
889 break;
890 #ifdef CONFIG_MLX5_EN_TLS
891 case MLX5E_ICOSQ_WQE_UMR_TLS:
892 break;
893 case MLX5E_ICOSQ_WQE_SET_PSV_TLS:
894 mlx5e_ktls_handle_ctx_completion(wi);
895 break;
896 case MLX5E_ICOSQ_WQE_GET_PSV_TLS:
897 mlx5e_ktls_handle_get_psv_completion(wi, sq);
898 break;
899 #endif
900 default:
901 netdev_WARN_ONCE(cq->netdev,
902 "Bad WQE type in ICOSQ WQE info: 0x%x\n",
903 wi->wqe_type);
904 }
905 } while (!last_wqe);
906 } while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
907
908 sq->cc = sqcc;
909
910 mlx5_cqwq_update_db_record(&cq->wq);
911
912 return i;
913 }
914
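/* Release the pages still held by the 'reclaim' MPWQEs that follow 'head',
 * so that a failed MPWQE allocation can be retried.
 */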
915 static void mlx5e_reclaim_mpwqe_pages(struct mlx5e_rq *rq, int head,
916 int reclaim)
917 {
918 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
919
920 for (int i = 0; i < reclaim; i++) {
921 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
922
923 mlx5e_dealloc_rx_mpwqe(rq, head);
924 }
925 }
926
927 INDIRECT_CALLABLE_SCOPE bool mlx5e_post_rx_mpwqes(struct mlx5e_rq *rq)
928 {
929 struct mlx5_wq_ll *wq = &rq->mpwqe.wq;
930 u8 umr_completed = rq->mpwqe.umr_completed;
931 struct mlx5e_icosq *sq = rq->icosq;
932 bool reclaimed = false;
933 int alloc_err = 0;
934 u8 missing, i;
935 u16 head;
936
937 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
938 return false;
939
940 if (umr_completed) {
941 mlx5e_post_rx_mpwqe(rq, umr_completed);
942 rq->mpwqe.umr_in_progress -= umr_completed;
943 rq->mpwqe.umr_completed = 0;
944 }
945
946 missing = mlx5_wq_ll_missing(wq) - rq->mpwqe.umr_in_progress;
947
948 if (unlikely(rq->mpwqe.umr_in_progress > rq->mpwqe.umr_last_bulk))
949 rq->stats->congst_umr++;
950
951 if (likely(missing < rq->mpwqe.min_wqe_bulk))
952 return false;
953
954 if (rq->page_pool)
955 page_pool_nid_changed(rq->page_pool, numa_mem_id());
956 if (rq->hd_page_pool)
957 page_pool_nid_changed(rq->hd_page_pool, numa_mem_id());
958
959 head = rq->mpwqe.actual_wq_head;
960 i = missing;
961 do {
962 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, head);
963
964 /* Deferred free for better page pool cache usage. */
965 mlx5e_free_rx_mpwqe(rq, wi);
966
967 retry:
968 alloc_err = rq->xsk_pool ? mlx5e_xsk_alloc_rx_mpwqe(rq, head) :
969 mlx5e_alloc_rx_mpwqe(rq, head);
970 if (unlikely(alloc_err)) {
971 int reclaim = i - 1;
972
973 if (reclaimed || !reclaim)
974 break;
975
976 mlx5e_reclaim_mpwqe_pages(rq, head, reclaim);
977 reclaimed = true;
978
979 goto retry;
980 }
981 head = mlx5_wq_ll_get_wqe_next_ix(wq, head);
982 } while (--i);
983
984 rq->mpwqe.umr_last_bulk = missing - i;
985 if (sq->doorbell_cseg) {
986 mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, sq->doorbell_cseg);
987 sq->doorbell_cseg = NULL;
988 }
989
990 rq->mpwqe.umr_in_progress += rq->mpwqe.umr_last_bulk;
991 rq->mpwqe.actual_wq_head = head;
992
993 /* If XSK Fill Ring doesn't have enough frames, report the error, so
994 * that one of the actions can be performed:
995 * 1. If need_wakeup is used, signal that the application has to kick
996 * the driver when it refills the Fill Ring.
997 * 2. Otherwise, busy poll by rescheduling the NAPI poll.
998 */
999 if (unlikely(alloc_err == -ENOMEM && rq->xsk_pool))
1000 return true;
1001
1002 return false;
1003 }
1004
1005 static void mlx5e_lro_update_tcp_hdr(struct mlx5_cqe64 *cqe, struct tcphdr *tcp)
1006 {
1007 u8 l4_hdr_type = get_cqe_l4_hdr_type(cqe);
1008 u8 tcp_ack = (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_NO_DATA) ||
1009 (l4_hdr_type == CQE_L4_HDR_TYPE_TCP_ACK_AND_DATA);
1010
1011 tcp->check = 0;
1012 tcp->psh = get_cqe_lro_tcppsh(cqe);
1013
1014 if (tcp_ack) {
1015 tcp->ack = 1;
1016 tcp->ack_seq = cqe->lro.ack_seq_num;
1017 tcp->window = cqe->lro.tcp_win;
1018 }
1019 }
1020
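/* Rewrite the IP and TCP headers of an LRO-aggregated SKB (total length,
 * TTL/hop limit, checksums) to describe the merged packet. Returns the
 * total header length.
 */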
1021 static unsigned int mlx5e_lro_update_hdr(struct sk_buff *skb,
1022 struct mlx5_cqe64 *cqe,
1023 u32 cqe_bcnt)
1024 {
1025 struct ethhdr *eth = (struct ethhdr *)(skb->data);
1026 struct tcphdr *tcp;
1027 int network_depth = 0;
1028 __wsum check;
1029 __be16 proto;
1030 u16 tot_len;
1031 void *ip_p;
1032
1033 proto = __vlan_get_protocol(skb, eth->h_proto, &network_depth);
1034
1035 tot_len = cqe_bcnt - network_depth;
1036 ip_p = skb->data + network_depth;
1037
1038 if (proto == htons(ETH_P_IP)) {
1039 struct iphdr *ipv4 = ip_p;
1040
1041 tcp = ip_p + sizeof(struct iphdr);
1042 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1043
1044 ipv4->ttl = cqe->lro.min_ttl;
1045 ipv4->tot_len = cpu_to_be16(tot_len);
1046 ipv4->check = 0;
1047 ipv4->check = ip_fast_csum((unsigned char *)ipv4,
1048 ipv4->ihl);
1049
1050 mlx5e_lro_update_tcp_hdr(cqe, tcp);
1051 check = csum_partial(tcp, tcp->doff * 4,
1052 csum_unfold((__force __sum16)cqe->check_sum));
1053 /* Almost done, don't forget the pseudo header */
1054 tcp->check = tcp_v4_check(tot_len - sizeof(struct iphdr),
1055 ipv4->saddr, ipv4->daddr, check);
1056 } else {
1057 u16 payload_len = tot_len - sizeof(struct ipv6hdr);
1058 struct ipv6hdr *ipv6 = ip_p;
1059
1060 tcp = ip_p + sizeof(struct ipv6hdr);
1061 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1062
1063 ipv6->hop_limit = cqe->lro.min_ttl;
1064 ipv6->payload_len = cpu_to_be16(payload_len);
1065
1066 mlx5e_lro_update_tcp_hdr(cqe, tcp);
1067 check = csum_partial(tcp, tcp->doff * 4,
1068 csum_unfold((__force __sum16)cqe->check_sum));
1069 /* Almost done, don't forget the pseudo header */
1070 tcp->check = tcp_v6_check(payload_len, &ipv6->saddr,
1071 &ipv6->daddr, check);
1072 }
1073
1074 return (unsigned int)((unsigned char *)tcp + tcp->doff * 4 - skb->data);
1075 }
1076
1077 static void mlx5e_shampo_update_ipv4_udp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4)
1078 {
1079 int udp_off = rq->hw_gro_data->fk.control.thoff;
1080 struct sk_buff *skb = rq->hw_gro_data->skb;
1081 struct udphdr *uh;
1082
1083 uh = (struct udphdr *)(skb->data + udp_off);
1084 uh->len = htons(skb->len - udp_off);
1085
1086 if (uh->check)
1087 uh->check = ~udp_v4_check(skb->len - udp_off, ipv4->saddr,
1088 ipv4->daddr, 0);
1089
1090 skb->csum_start = (unsigned char *)uh - skb->head;
1091 skb->csum_offset = offsetof(struct udphdr, check);
1092
1093 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1094 }
1095
1096 static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6)
1097 {
1098 int udp_off = rq->hw_gro_data->fk.control.thoff;
1099 struct sk_buff *skb = rq->hw_gro_data->skb;
1100 struct udphdr *uh;
1101
1102 uh = (struct udphdr *)(skb->data + udp_off);
1103 uh->len = htons(skb->len - udp_off);
1104
1105 if (uh->check)
1106 uh->check = ~udp_v6_check(skb->len - udp_off, &ipv6->saddr,
1107 &ipv6->daddr, 0);
1108
1109 skb->csum_start = (unsigned char *)uh - skb->head;
1110 skb->csum_offset = offsetof(struct udphdr, check);
1111
1112 skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_L4;
1113 }
1114
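/* Resolve the header-buffer page and offset that hold the SHAMPO packet
 * header referenced by this CQE.
 */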
1115 static void mlx5e_shampo_get_hd_buf_info(struct mlx5e_rq *rq,
1116 struct mlx5_cqe64 *cqe,
1117 struct mlx5e_dma_info **di,
1118 u32 *head_offset)
1119 {
1120 u32 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
1121 struct mlx5e_shampo_hd *shampo = rq->mpwqe.shampo;
1122 u32 di_index;
1123
1124 di_index = header_index >> MLX5E_SHAMPO_LOG_WQ_HEADER_PER_PAGE;
1125 *di = &shampo->hd_buf_pages[di_index];
1126 *head_offset = (header_index & (MLX5E_SHAMPO_WQ_HEADER_PER_PAGE - 1)) *
1127 BIT(MLX5E_SHAMPO_LOG_HEADER_ENTRY_SIZE);
1128 }
1129
1130 static void *mlx5e_shampo_get_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1131 int len)
1132 {
1133 struct mlx5e_dma_info *di;
1134 u32 head_offset;
1135
1136 mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
1137
1138 dma_sync_single_range_for_cpu(rq->pdev, di->addr, head_offset,
1139 len, rq->buff.map_dir);
1140
1141 return page_address(di->page) + head_offset;
1142 }
1143
1144 static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1145 struct tcphdr *skb_tcp_hd)
1146 {
1147 int nhoff = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
1148 int len = nhoff + sizeof(struct tcphdr);
1149 struct tcphdr *last_tcp_hd;
1150 void *last_hd_addr;
1151
1152 last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
1153 last_tcp_hd = (struct tcphdr *)(last_hd_addr + nhoff);
1154
1155 tcp_flag_word(skb_tcp_hd) |= tcp_flag_word(last_tcp_hd) & (TCP_FLAG_FIN | TCP_FLAG_PSH);
1156 }
1157
1158 static void mlx5e_shampo_update_ipv4_tcp_hdr(struct mlx5e_rq *rq, struct iphdr *ipv4,
1159 struct mlx5_cqe64 *cqe, bool match)
1160 {
1161 int tcp_off = rq->hw_gro_data->fk.control.thoff;
1162 struct sk_buff *skb = rq->hw_gro_data->skb;
1163 struct tcphdr *tcp;
1164
1165 tcp = (struct tcphdr *)(skb->data + tcp_off);
1166 if (match)
1167 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1168
1169 tcp->check = ~tcp_v4_check(skb->len - tcp_off, ipv4->saddr,
1170 ipv4->daddr, 0);
1171 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
1172 if (ntohs(ipv4->id) == rq->hw_gro_data->second_ip_id) {
1173 bool encap = rq->hw_gro_data->fk.control.flags & FLOW_DIS_ENCAPSULATION;
1174
1175 skb_shinfo(skb)->gso_type |= encap ? SKB_GSO_TCP_FIXEDID_INNER :
1176 SKB_GSO_TCP_FIXEDID;
1177 }
1178
1179 skb->csum_start = (unsigned char *)tcp - skb->head;
1180 skb->csum_offset = offsetof(struct tcphdr, check);
1181
1182 if (tcp->cwr)
1183 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1184 }
1185
1186 static void mlx5e_shampo_update_ipv6_tcp_hdr(struct mlx5e_rq *rq, struct ipv6hdr *ipv6,
1187 struct mlx5_cqe64 *cqe, bool match)
1188 {
1189 int tcp_off = rq->hw_gro_data->fk.control.thoff;
1190 struct sk_buff *skb = rq->hw_gro_data->skb;
1191 struct tcphdr *tcp;
1192
1193 tcp = (struct tcphdr *)(skb->data + tcp_off);
1194 if (match)
1195 mlx5e_shampo_update_fin_psh_flags(rq, cqe, tcp);
1196
1197 tcp->check = ~tcp_v6_check(skb->len - tcp_off, &ipv6->saddr,
1198 &ipv6->daddr, 0);
1199 skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
1200 skb->csum_start = (unsigned char *)tcp - skb->head;
1201 skb->csum_offset = offsetof(struct tcphdr, check);
1202
1203 if (tcp->cwr)
1204 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;
1205 }
1206
1207 static void mlx5e_shampo_update_hdr(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
1208 {
1209 bool is_ipv4 = (rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP));
1210 struct sk_buff *skb = rq->hw_gro_data->skb;
1211
1212 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count;
1213 skb->ip_summed = CHECKSUM_PARTIAL;
1214
1215 if (is_ipv4) {
1216 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct iphdr);
1217 struct iphdr *ipv4 = (struct iphdr *)(skb->data + nhoff);
1218 __be16 newlen = htons(skb->len - nhoff);
1219
1220 csum_replace2(&ipv4->check, ipv4->tot_len, newlen);
1221 ipv4->tot_len = newlen;
1222
1223 if (ipv4->protocol == IPPROTO_TCP)
1224 mlx5e_shampo_update_ipv4_tcp_hdr(rq, ipv4, cqe, match);
1225 else
1226 mlx5e_shampo_update_ipv4_udp_hdr(rq, ipv4);
1227 } else {
1228 int nhoff = rq->hw_gro_data->fk.control.thoff - sizeof(struct ipv6hdr);
1229 struct ipv6hdr *ipv6 = (struct ipv6hdr *)(skb->data + nhoff);
1230
1231 ipv6->payload_len = htons(skb->len - nhoff - sizeof(*ipv6));
1232
1233 if (ipv6->nexthdr == IPPROTO_TCP)
1234 mlx5e_shampo_update_ipv6_tcp_hdr(rq, ipv6, cqe, match);
1235 else
1236 mlx5e_shampo_update_ipv6_udp_hdr(rq, ipv6);
1237 }
1238 }
1239
1240 static inline void mlx5e_skb_set_hash(struct mlx5_cqe64 *cqe,
1241 struct sk_buff *skb)
1242 {
1243 u8 cht = cqe->rss_hash_type;
1244 int ht = (cht & CQE_RSS_HTYPE_L4) ? PKT_HASH_TYPE_L4 :
1245 (cht & CQE_RSS_HTYPE_IP) ? PKT_HASH_TYPE_L3 :
1246 PKT_HASH_TYPE_NONE;
1247 skb_set_hash(skb, be32_to_cpu(cqe->rss_hash_result), ht);
1248 }
1249
1250 static inline bool is_last_ethertype_ip(struct sk_buff *skb, int *network_depth,
1251 __be16 *proto)
1252 {
1253 *proto = ((struct ethhdr *)skb->data)->h_proto;
1254 *proto = __vlan_get_protocol(skb, *proto, network_depth);
1255
1256 if (*proto == htons(ETH_P_IP))
1257 return pskb_may_pull(skb, *network_depth + sizeof(struct iphdr));
1258
1259 if (*proto == htons(ETH_P_IPV6))
1260 return pskb_may_pull(skb, *network_depth + sizeof(struct ipv6hdr));
1261
1262 return false;
1263 }
1264
1265 static inline void mlx5e_enable_ecn(struct mlx5e_rq *rq, struct sk_buff *skb)
1266 {
1267 int network_depth = 0;
1268 __be16 proto;
1269 void *ip;
1270 int rc;
1271
1272 if (unlikely(!is_last_ethertype_ip(skb, &network_depth, &proto)))
1273 return;
1274
1275 ip = skb->data + network_depth;
1276 rc = ((proto == htons(ETH_P_IP)) ? IP_ECN_set_ce((struct iphdr *)ip) :
1277 IP6_ECN_set_ce(skb, (struct ipv6hdr *)ip));
1278
1279 rq->stats->ecn_mark += !!rc;
1280 }
1281
1282 static u8 get_ip_proto(struct sk_buff *skb, int network_depth, __be16 proto)
1283 {
1284 void *ip_p = skb->data + network_depth;
1285
1286 return (proto == htons(ETH_P_IP)) ? ((struct iphdr *)ip_p)->protocol :
1287 ((struct ipv6hdr *)ip_p)->nexthdr;
1288 }
1289
1290 #define short_frame(size) ((size) <= ETH_ZLEN + ETH_FCS_LEN)
1291
1292 #define MAX_PADDING 8
1293
1294 static void
1295 tail_padding_csum_slow(struct sk_buff *skb, int offset, int len,
1296 struct mlx5e_rq_stats *stats)
1297 {
1298 stats->csum_complete_tail_slow++;
1299 skb->csum = csum_block_add(skb->csum,
1300 skb_checksum(skb, offset, len, 0),
1301 offset);
1302 }
1303
1304 static void
1305 tail_padding_csum(struct sk_buff *skb, int offset,
1306 struct mlx5e_rq_stats *stats)
1307 {
1308 u8 tail_padding[MAX_PADDING];
1309 int len = skb->len - offset;
1310 void *tail;
1311
1312 if (unlikely(len > MAX_PADDING)) {
1313 tail_padding_csum_slow(skb, offset, len, stats);
1314 return;
1315 }
1316
1317 tail = skb_header_pointer(skb, offset, len, tail_padding);
1318 if (unlikely(!tail)) {
1319 tail_padding_csum_slow(skb, offset, len, stats);
1320 return;
1321 }
1322
1323 stats->csum_complete_tail++;
1324 skb->csum = csum_block_add(skb->csum, csum_partial(tail, len, 0), offset);
1325 }
1326
1327 static void
1328 mlx5e_skb_csum_fixup(struct sk_buff *skb, int network_depth, __be16 proto,
1329 struct mlx5e_rq_stats *stats)
1330 {
1331 struct ipv6hdr *ip6;
1332 struct iphdr *ip4;
1333 int pkt_len;
1334
1335 /* Fixup vlan headers, if any */
1336 if (network_depth > ETH_HLEN)
1337 /* CQE csum is calculated from the IP header and does
1338 * not cover VLAN headers (if present). This will add
1339 * the checksum manually.
1340 */
1341 skb->csum = csum_partial(skb->data + ETH_HLEN,
1342 network_depth - ETH_HLEN,
1343 skb->csum);
1344
1345 /* Fixup tail padding, if any */
1346 switch (proto) {
1347 case htons(ETH_P_IP):
1348 ip4 = (struct iphdr *)(skb->data + network_depth);
1349 pkt_len = network_depth + ntohs(ip4->tot_len);
1350 break;
1351 case htons(ETH_P_IPV6):
1352 ip6 = (struct ipv6hdr *)(skb->data + network_depth);
1353 pkt_len = network_depth + sizeof(*ip6) + ntohs(ip6->payload_len);
1354 break;
1355 default:
1356 return;
1357 }
1358
1359 if (likely(pkt_len >= skb->len))
1360 return;
1361
1362 tail_padding_csum(skb, pkt_len, stats);
1363 }
1364
1365 static inline void mlx5e_handle_csum(struct net_device *netdev,
1366 struct mlx5_cqe64 *cqe,
1367 struct mlx5e_rq *rq,
1368 struct sk_buff *skb,
1369 bool lro)
1370 {
1371 struct mlx5e_rq_stats *stats = rq->stats;
1372 int network_depth = 0;
1373 __be16 proto;
1374
1375 if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
1376 goto csum_none;
1377
1378 if (lro) {
1379 skb->ip_summed = CHECKSUM_UNNECESSARY;
1380 stats->csum_unnecessary++;
1381 return;
1382 }
1383
1384 /* True when explicitly set via priv flag, or XDP prog is loaded */
1385 if (test_bit(MLX5E_RQ_STATE_NO_CSUM_COMPLETE, &rq->state) ||
1386 get_cqe_tls_offload(cqe))
1387 goto csum_unnecessary;
1388
1389 /* CQE csum doesn't cover padding octets in short ethernet
1390 * frames. And the pad field is appended prior to calculating
1391 * and appending the FCS field.
1392 *
1393 * Detecting these padded frames requires to verify and parse
1394 * IP headers, so we simply force all those small frames to be
1395 * CHECKSUM_UNNECESSARY even if they are not padded.
1396 */
1397 if (short_frame(skb->len))
1398 goto csum_unnecessary;
1399
1400 if (likely(is_last_ethertype_ip(skb, &network_depth, &proto))) {
1401 if (unlikely(get_ip_proto(skb, network_depth, proto) == IPPROTO_SCTP))
1402 goto csum_unnecessary;
1403
1404 stats->csum_complete++;
1405 skb->ip_summed = CHECKSUM_COMPLETE;
1406 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
1407
1408 if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
1409 /* TBD: PSP csum complete corrections; for now take the csum_unnecessary path */
1410 goto csum_unnecessary;
1411 }
1412
1413 if (test_bit(MLX5E_RQ_STATE_CSUM_FULL, &rq->state))
1414 return; /* CQE csum covers all received bytes */
1415
1416 /* csum might need some fixups ... */
1417 mlx5e_skb_csum_fixup(skb, network_depth, proto, stats);
1418 return;
1419 }
1420
1421 csum_unnecessary:
1422 if (likely((cqe->hds_ip_ext & CQE_L3_OK) &&
1423 (cqe->hds_ip_ext & CQE_L4_OK))) {
1424 skb->ip_summed = CHECKSUM_UNNECESSARY;
1425 if (cqe_is_tunneled(cqe)) {
1426 skb->csum_level = 1;
1427 skb->encapsulation = 1;
1428 stats->csum_unnecessary_inner++;
1429 return;
1430 }
1431 stats->csum_unnecessary++;
1432 return;
1433 }
1434 csum_none:
1435 skb->ip_summed = CHECKSUM_NONE;
1436 stats->csum_none++;
1437 }
1438
1439 #define MLX5E_CE_BIT_MASK 0x80
1440
1441 static inline bool mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe,
1442 u32 cqe_bcnt,
1443 struct mlx5e_rq *rq,
1444 struct sk_buff *skb)
1445 {
1446 u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
1447 struct mlx5e_rq_stats *stats = rq->stats;
1448 struct net_device *netdev = rq->netdev;
1449
1450 skb->mac_len = ETH_HLEN;
1451
1452 if (unlikely(get_cqe_tls_offload(cqe)))
1453 mlx5e_ktls_handle_rx_skb(rq, skb, cqe, &cqe_bcnt);
1454
1455 if (unlikely(mlx5e_psp_is_rx_flow(cqe))) {
1456 if (mlx5e_psp_offload_handle_rx_skb(netdev, skb, cqe))
1457 return true;
1458 }
1459
1460 if (unlikely(mlx5_ipsec_is_rx_flow(cqe)))
1461 mlx5e_ipsec_offload_handle_rx_skb(netdev, skb,
1462 be32_to_cpu(cqe->ft_metadata));
1463
1464 if (unlikely(mlx5e_macsec_is_rx_flow(cqe)))
1465 mlx5e_macsec_offload_handle_rx_skb(netdev, skb, cqe);
1466
1467 if (lro_num_seg > 1) {
1468 unsigned int hdrlen = mlx5e_lro_update_hdr(skb, cqe, cqe_bcnt);
1469
1470 skb_shinfo(skb)->gso_size = DIV_ROUND_UP(cqe_bcnt - hdrlen, lro_num_seg);
1471 skb_shinfo(skb)->gso_segs = lro_num_seg;
1472 /* Subtract one since we already counted this as one
1473 * "regular" packet in mlx5e_complete_rx_cqe()
1474 */
1475 stats->packets += lro_num_seg - 1;
1476 stats->lro_packets++;
1477 stats->lro_bytes += cqe_bcnt;
1478 }
1479
1480 if (unlikely(mlx5e_rx_hw_stamp(rq->hwtstamp_config)))
1481 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
1482 rq->clock, get_cqe_ts(cqe));
1483 skb_record_rx_queue(skb, rq->ix);
1484
1485 if (likely(netdev->features & NETIF_F_RXHASH))
1486 mlx5e_skb_set_hash(cqe, skb);
1487
1488 if (cqe_has_vlan(cqe)) {
1489 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
1490 be16_to_cpu(cqe->vlan_info));
1491 stats->removed_vlan_packets++;
1492 }
1493
1494 skb->mark = be32_to_cpu(cqe->sop_drop_qpn) & MLX5E_TC_FLOW_ID_MASK;
1495
1496 mlx5e_handle_csum(netdev, cqe, rq, skb, !!lro_num_seg);
1497 /* checking CE bit in cqe - MSB in ml_path field */
1498 if (unlikely(cqe->ml_path & MLX5E_CE_BIT_MASK))
1499 mlx5e_enable_ecn(rq, skb);
1500
1501 skb->protocol = eth_type_trans(skb, netdev);
1502
1503 if (unlikely(mlx5e_skb_is_multicast(skb)))
1504 stats->mcast_packets++;
1505
1506 return false;
1507 }
1508
1509 static bool mlx5e_shampo_complete_rx_cqe(struct mlx5e_rq *rq,
1510 struct mlx5_cqe64 *cqe,
1511 u32 cqe_bcnt,
1512 struct sk_buff *skb)
1513 {
1514 struct mlx5e_rq_stats *stats = rq->stats;
1515
1516 stats->packets++;
1517 stats->bytes += cqe_bcnt;
1518 if (NAPI_GRO_CB(skb)->count != 1)
1519 return false;
1520
1521 if (mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb))
1522 return true;
1523
1524 skb_reset_network_header(skb);
1525 if (!skb_flow_dissect_flow_keys(skb, &rq->hw_gro_data->fk, 0)) {
1526 napi_gro_receive(rq->cq.napi, skb);
1527 rq->hw_gro_data->skb = NULL;
1528 }
1529 return false;
1530 }
1531
1532 static inline bool mlx5e_complete_rx_cqe(struct mlx5e_rq *rq,
1533 struct mlx5_cqe64 *cqe,
1534 u32 cqe_bcnt,
1535 struct sk_buff *skb)
1536 {
1537 struct mlx5e_rq_stats *stats = rq->stats;
1538
1539 stats->packets++;
1540 stats->bytes += cqe_bcnt;
1541 return mlx5e_build_rx_skb(cqe, cqe_bcnt, rq, skb);
1542 }
1543
1544 static inline
1545 struct sk_buff *mlx5e_build_linear_skb(struct mlx5e_rq *rq, void *va,
1546 u32 frag_size, u16 headroom,
1547 u32 cqe_bcnt, u32 metasize)
1548 {
1549 struct sk_buff *skb = napi_build_skb(va, frag_size);
1550
1551 if (unlikely(!skb)) {
1552 rq->stats->buff_alloc_err++;
1553 return NULL;
1554 }
1555
1556 skb_reserve(skb, headroom);
1557 skb_put(skb, cqe_bcnt);
1558
1559 if (metasize)
1560 skb_metadata_set(skb, metasize);
1561
1562 return skb;
1563 }
1564
1565 static void mlx5e_fill_mxbuf(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe,
1566 void *va, u16 headroom, u32 frame_sz, u32 len,
1567 struct mlx5e_xdp_buff *mxbuf)
1568 {
1569 xdp_init_buff(&mxbuf->xdp, frame_sz, &rq->xdp_rxq);
1570 xdp_prepare_buff(&mxbuf->xdp, va, headroom, len, true);
1571 mxbuf->cqe = cqe;
1572 mxbuf->rq = rq;
1573 }
1574
1575 static struct sk_buff *
1576 mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1577 struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1578 {
1579 struct mlx5e_frag_page *frag_page = wi->frag_page;
1580 u16 rx_headroom = rq->buff.headroom;
1581 struct bpf_prog *prog;
1582 struct sk_buff *skb;
1583 u32 metasize = 0;
1584 void *va, *data;
1585 dma_addr_t addr;
1586 u32 frag_size;
1587
1588 va = netmem_address(frag_page->netmem) + wi->offset;
1589 data = va + rx_headroom;
1590 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1591
1592 addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1593 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1594 frag_size, rq->buff.map_dir);
1595 net_prefetch(data);
1596
1597 prog = rcu_dereference(rq->xdp_prog);
1598 if (prog) {
1599 struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1600
1601 net_prefetchw(va); /* xdp_frame data area */
1602 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1603 cqe_bcnt, mxbuf);
1604 if (mlx5e_xdp_handle(rq, prog, mxbuf))
1605 return NULL; /* page/packet was consumed by XDP */
1606
1607 rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
1608 metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
1609 cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
1610 }
1611 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
1612 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
1613 if (unlikely(!skb))
1614 return NULL;
1615
1616 /* queue up for recycling/reuse */
1617 skb_mark_for_recycle(skb);
1618 frag_page->frags++;
1619
1620 return skb;
1621 }
1622
1623 static struct sk_buff *
1624 mlx5e_skb_from_cqe_nonlinear(struct mlx5e_rq *rq, struct mlx5e_wqe_frag_info *wi,
1625 struct mlx5_cqe64 *cqe, u32 cqe_bcnt)
1626 {
1627 struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
1628 struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1629 struct mlx5e_wqe_frag_info *head_wi = wi;
1630 u16 rx_headroom = rq->buff.headroom;
1631 struct mlx5e_frag_page *frag_page;
1632 struct skb_shared_info *sinfo;
1633 u32 frag_consumed_bytes;
1634 struct bpf_prog *prog;
1635 u8 nr_frags_free = 0;
1636 struct sk_buff *skb;
1637 dma_addr_t addr;
1638 u32 truesize;
1639 void *va;
1640
1641 frag_page = wi->frag_page;
1642
1643 va = netmem_address(frag_page->netmem) + wi->offset;
1644 frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1645
1646 addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
1647 dma_sync_single_range_for_cpu(rq->pdev, addr, wi->offset,
1648 rq->buff.frame0_sz, rq->buff.map_dir);
1649 net_prefetchw(va); /* xdp_frame data area */
1650 net_prefetch(va + rx_headroom);
1651
1652 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
1653 frag_consumed_bytes, mxbuf);
1654 sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
1655 truesize = 0;
1656
1657 cqe_bcnt -= frag_consumed_bytes;
1658 frag_info++;
1659 wi++;
1660
1661 while (cqe_bcnt) {
1662 frag_page = wi->frag_page;
1663
1664 frag_consumed_bytes = min_t(u32, frag_info->frag_size, cqe_bcnt);
1665
1666 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
1667 frag_page, wi->offset,
1668 frag_consumed_bytes);
1669 truesize += frag_info->frag_stride;
1670
1671 cqe_bcnt -= frag_consumed_bytes;
1672 frag_info++;
1673 wi++;
1674 }
1675
1676 prog = rcu_dereference(rq->xdp_prog);
1677 if (prog) {
1678 u8 old_nr_frags = sinfo->nr_frags;
1679
1680 if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
1681 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT,
1682 rq->flags)) {
1683 struct mlx5e_wqe_frag_info *pwi;
1684
1685 for (pwi = head_wi; pwi < wi; pwi++)
1686 pwi->frag_page->frags++;
1687 }
1688 return NULL; /* page/packet was consumed by XDP */
1689 }
1690
1691 nr_frags_free = old_nr_frags - sinfo->nr_frags;
1692 if (unlikely(nr_frags_free))
1693 truesize -= nr_frags_free * frag_info->frag_stride;
1694 }
1695
1696 skb = mlx5e_build_linear_skb(
1697 rq, mxbuf->xdp.data_hard_start, rq->buff.frame0_sz,
1698 mxbuf->xdp.data - mxbuf->xdp.data_hard_start,
1699 mxbuf->xdp.data_end - mxbuf->xdp.data,
1700 mxbuf->xdp.data - mxbuf->xdp.data_meta);
1701 if (unlikely(!skb))
1702 return NULL;
1703
1704 skb_mark_for_recycle(skb);
1705 head_wi->frag_page->frags++;
1706
1707 if (xdp_buff_has_frags(&mxbuf->xdp)) {
1708 /* sinfo->nr_frags is reset by build_skb, calculate again. */
1709 xdp_update_skb_frags_info(skb, wi - head_wi - nr_frags_free - 1,
1710 sinfo->xdp_frags_size, truesize,
1711 xdp_buff_get_skb_flags(&mxbuf->xdp));
1712
1713 for (struct mlx5e_wqe_frag_info *pwi = head_wi + 1; pwi < wi; pwi++)
1714 pwi->frag_page->frags++;
1715 }
1716
1717 return skb;
1718 }
1719
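/* Kick RQ recovery at most once per error burst: only when the CQE error
 * syndrome is recoverable and the RQ is not already marked as recovering.
 */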
1720 static void trigger_report(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1721 {
1722 struct mlx5_err_cqe *err_cqe = (struct mlx5_err_cqe *)cqe;
1723 struct mlx5e_priv *priv = rq->priv;
1724
1725 if (cqe_syndrome_needs_recover(err_cqe->syndrome) &&
1726 !test_and_set_bit(MLX5E_RQ_STATE_RECOVERING, &rq->state)) {
1727 mlx5e_dump_error_cqe(&rq->cq, rq->rqn, err_cqe);
1728 queue_work(priv->wq, &rq->recover_work);
1729 }
1730 }
1731
1732 static void mlx5e_handle_rx_err_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1733 {
1734 trigger_report(rq, cqe);
1735 rq->stats->wqe_err++;
1736 }
1737
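/* Completion handler for the legacy (cyclic) RQ on the NIC profile: locate
 * the WQE fragment from the CQE counter, build the skb (linear, non-linear
 * or XSK variant), complete the RX offloads and hand the skb to GRO.
 */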
1738 static void mlx5e_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1739 {
1740 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1741 struct mlx5e_wqe_frag_info *wi;
1742 struct sk_buff *skb;
1743 u32 cqe_bcnt;
1744 u16 ci;
1745
1746 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1747 wi = get_frag(rq, ci);
1748 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1749
1750 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1751 mlx5e_handle_rx_err_cqe(rq, cqe);
1752 goto wq_cyc_pop;
1753 }
1754
1755 skb = INDIRECT_CALL_3(rq->wqe.skb_from_cqe,
1756 mlx5e_skb_from_cqe_linear,
1757 mlx5e_skb_from_cqe_nonlinear,
1758 mlx5e_xsk_skb_from_cqe_linear,
1759 rq, wi, cqe, cqe_bcnt);
1760 if (!skb) {
1761 /* probably for XDP */
1762 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1763 wi->frag_page->frags++;
1764 goto wq_cyc_pop;
1765 }
1766
1767 if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1768 goto wq_cyc_pop;
1769
1770 if (mlx5e_cqe_regb_chain(cqe))
1771 if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
1772 dev_kfree_skb_any(skb);
1773 goto wq_cyc_pop;
1774 }
1775
1776 napi_gro_receive(rq->cq.napi, skb);
1777
1778 wq_cyc_pop:
1779 mlx5_wq_cyc_pop(wq);
1780 }
1781
1782 #ifdef CONFIG_MLX5_ESWITCH
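/* Representor variant of the legacy RQ handler: skb construction is the
 * same, but a VLAN header is popped when the representor uses one and the
 * skb is delivered through the representor TC receive path instead of GRO.
 */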
1783 static void mlx5e_handle_rx_cqe_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1784 {
1785 struct net_device *netdev = rq->netdev;
1786 struct mlx5e_priv *priv = netdev_priv(netdev);
1787 struct mlx5e_rep_priv *rpriv = priv->ppriv;
1788 struct mlx5_eswitch_rep *rep = rpriv->rep;
1789 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
1790 struct mlx5e_wqe_frag_info *wi;
1791 struct sk_buff *skb;
1792 u32 cqe_bcnt;
1793 u16 ci;
1794
1795 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
1796 wi = get_frag(rq, ci);
1797 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
1798
1799 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1800 mlx5e_handle_rx_err_cqe(rq, cqe);
1801 goto wq_cyc_pop;
1802 }
1803
1804 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
1805 mlx5e_skb_from_cqe_linear,
1806 mlx5e_skb_from_cqe_nonlinear,
1807 rq, wi, cqe, cqe_bcnt);
1808 if (!skb) {
1809 /* probably for XDP */
1810 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
1811 wi->frag_page->frags++;
1812 goto wq_cyc_pop;
1813 }
1814
1815 if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1816 goto wq_cyc_pop;
1817
1818 if (rep->vlan && skb_vlan_tag_present(skb))
1819 skb_vlan_pop(skb);
1820
1821 mlx5e_rep_tc_receive(cqe, rq, skb);
1822
1823 wq_cyc_pop:
1824 mlx5_wq_cyc_pop(wq);
1825 }
1826
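/* Representor variant of the striding (MPWQE) RQ handler. The strides
 * consumed by this CQE are accounted against the multi-packet WQE, which is
 * popped from the work queue only once all of its strides are consumed.
 */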
1827 static void mlx5e_handle_rx_cqe_mpwrq_rep(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
1828 {
1829 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
1830 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
1831 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
1832 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
1833 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
1834 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
1835 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
1836 struct mlx5e_rx_wqe_ll *wqe;
1837 struct mlx5_wq_ll *wq;
1838 struct sk_buff *skb;
1839 u16 cqe_bcnt;
1840
1841 wi->consumed_strides += cstrides;
1842
1843 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
1844 mlx5e_handle_rx_err_cqe(rq, cqe);
1845 goto mpwrq_cqe_out;
1846 }
1847
1848 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
1849 struct mlx5e_rq_stats *stats = rq->stats;
1850
1851 stats->mpwqe_filler_cqes++;
1852 stats->mpwqe_filler_strides += cstrides;
1853 goto mpwrq_cqe_out;
1854 }
1855
1856 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
1857
1858 skb = INDIRECT_CALL_2(rq->mpwqe.skb_from_cqe_mpwrq,
1859 mlx5e_skb_from_cqe_mpwrq_linear,
1860 mlx5e_skb_from_cqe_mpwrq_nonlinear,
1861 rq, wi, cqe, cqe_bcnt, head_offset, page_idx);
1862 if (!skb)
1863 goto mpwrq_cqe_out;
1864
1865 if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
1866 goto mpwrq_cqe_out;
1867
1868 mlx5e_rep_tc_receive(cqe, rq, skb);
1869
1870 mpwrq_cqe_out:
1871 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
1872 return;
1873
1874 wq = &rq->mpwqe.wq;
1875 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
1876 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
1877 }
1878
1879 const struct mlx5e_rx_handlers mlx5e_rx_handlers_rep = {
1880 .handle_rx_cqe = mlx5e_handle_rx_cqe_rep,
1881 .handle_rx_cqe_mpwqe = mlx5e_handle_rx_cqe_mpwrq_rep,
1882 };
1883 #endif
1884
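/* Attach the data part of a header/data split (SHAMPO) packet to the skb as
 * page fragments, walking consecutive pages of the multi-packet WQE until
 * data_bcnt bytes have been added.
 */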
1885 static void
1886 mlx5e_shampo_fill_skb_data(struct sk_buff *skb, struct mlx5e_rq *rq,
1887 struct mlx5e_frag_page *frag_page,
1888 u32 data_bcnt, u32 data_offset)
1889 {
1890 u32 page_size = BIT(rq->mpwqe.page_shift);
1891
1892 net_prefetchw(skb->data);
1893
1894 do {
1895 /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1896 u32 pg_consumed_bytes = min_t(u32, page_size - data_offset,
1897 data_bcnt);
1898 unsigned int truesize = pg_consumed_bytes;
1899
1900 mlx5e_add_skb_frag(rq, skb, frag_page, data_offset,
1901 pg_consumed_bytes, truesize);
1902
1903 data_bcnt -= pg_consumed_bytes;
1904 data_offset = 0;
1905 frag_page++;
1906 } while (data_bcnt);
1907 }
1908
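/* Striding RQ, non-linear path: the packet may span several pages of the
 * multi-packet WQE. With XDP attached, a dedicated linear fragment (see
 * mlx5e_mpwqe_get_linear_page_frag()) serves as the xdp_buff head and the
 * payload pages become xdp frags; without XDP, the headers are copied into
 * a freshly allocated skb and the payload pages are attached as skb frags.
 */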
1909 static struct sk_buff *
1910 mlx5e_skb_from_cqe_mpwrq_nonlinear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
1911 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
1912 u32 page_idx)
1913 {
1914 struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
1915 u16 headlen = min_t(u16, MLX5E_RX_MAX_HEAD, cqe_bcnt);
1916 struct mlx5e_frag_page *head_page = frag_page;
1917 struct mlx5e_frag_page *linear_page = NULL;
1918 struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
1919 u32 page_size = BIT(rq->mpwqe.page_shift);
1920 u32 frag_offset = head_offset;
1921 u32 byte_cnt = cqe_bcnt;
1922 struct skb_shared_info *sinfo;
1923 unsigned int truesize = 0;
1924 u32 pg_consumed_bytes;
1925 struct bpf_prog *prog;
1926 struct sk_buff *skb;
1927 u32 linear_frame_sz;
1928 u16 linear_data_len;
1929 u16 linear_hr;
1930 void *va;
1931
1932 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
1933 u8 lro_num_seg = get_cqe_lro_num_seg(cqe);
1934
1935 if (lro_num_seg <= 1) {
1936 rq->stats->oversize_pkts_sw_drop++;
1937 return NULL;
1938 }
1939 }
1940
1941 prog = rcu_dereference(rq->xdp_prog);
1942
1943 if (prog) {
1944 /* area for bpf_xdp_[store|load]_bytes */
1945 net_prefetchw(netmem_address(frag_page->netmem) + frag_offset);
1946
1947 va = mlx5e_mpwqe_get_linear_page_frag(rq);
1948 if (!va) {
1949 rq->stats->buff_alloc_err++;
1950 return NULL;
1951 }
1952
1953 net_prefetchw(va); /* xdp_frame data area */
1954 linear_hr = XDP_PACKET_HEADROOM;
1955 linear_data_len = 0;
1956 linear_frame_sz = MLX5_SKB_FRAG_SZ(linear_hr + MLX5E_RX_MAX_HEAD);
1957 linear_page = &rq->mpwqe.linear_info->frag_page;
1958 } else {
1959 skb = napi_alloc_skb(rq->cq.napi,
1960 ALIGN(MLX5E_RX_MAX_HEAD, sizeof(long)));
1961 if (unlikely(!skb)) {
1962 rq->stats->buff_alloc_err++;
1963 return NULL;
1964 }
1965 skb_mark_for_recycle(skb);
1966 va = skb->head;
1967 net_prefetchw(va); /* xdp_frame data area */
1968 net_prefetchw(skb->data);
1969
1970 frag_offset += headlen;
1971 byte_cnt -= headlen;
1972 linear_hr = skb_headroom(skb);
1973 linear_data_len = headlen;
1974 linear_frame_sz = MLX5_SKB_FRAG_SZ(skb_end_offset(skb));
1975 if (unlikely(frag_offset >= page_size)) {
1976 frag_page++;
1977 frag_offset -= page_size;
1978 }
1979 }
1980
1981 mlx5e_fill_mxbuf(rq, cqe, va, linear_hr, linear_frame_sz,
1982 linear_data_len, mxbuf);
1983
1984 sinfo = xdp_get_shared_info_from_buff(&mxbuf->xdp);
1985
1986 while (byte_cnt) {
1987 /* Non-linear mode, hence non-XSK, which always uses PAGE_SIZE. */
1988 pg_consumed_bytes =
1989 min_t(u32, page_size - frag_offset, byte_cnt);
1990
1991 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state))
1992 truesize += pg_consumed_bytes;
1993 else
1994 truesize += ALIGN(pg_consumed_bytes, BIT(rq->mpwqe.log_stride_sz));
1995
1996 mlx5e_add_skb_shared_info_frag(rq, sinfo, &mxbuf->xdp,
1997 frag_page, frag_offset,
1998 pg_consumed_bytes);
1999 byte_cnt -= pg_consumed_bytes;
2000 frag_offset = 0;
2001 frag_page++;
2002 }
2003
2004 if (prog) {
2005 u8 nr_frags_free, old_nr_frags = sinfo->nr_frags;
2006 u8 new_nr_frags;
2007 u32 len;
2008
2009 if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2010 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) {
2011 struct mlx5e_frag_page *pfp;
2012
2013 for (pfp = head_page; pfp < frag_page; pfp++)
2014 pfp->frags++;
2015
2016 linear_page->frags++;
2017 }
2018 return NULL; /* page/packet was consumed by XDP */
2019 }
2020
2021 new_nr_frags = sinfo->nr_frags;
2022 nr_frags_free = old_nr_frags - new_nr_frags;
2023 if (unlikely(nr_frags_free))
2024 truesize -= (nr_frags_free - 1) * page_size +
2025 ALIGN(pg_consumed_bytes,
2026 BIT(rq->mpwqe.log_stride_sz));
2027
2028 len = mxbuf->xdp.data_end - mxbuf->xdp.data;
2029
2030 skb = mlx5e_build_linear_skb(
2031 rq, mxbuf->xdp.data_hard_start, linear_frame_sz,
2032 mxbuf->xdp.data - mxbuf->xdp.data_hard_start, len,
2033 mxbuf->xdp.data - mxbuf->xdp.data_meta);
2034 if (unlikely(!skb))
2035 return NULL;
2036
2037 skb_mark_for_recycle(skb);
2038 linear_page->frags++;
2039
2040 if (xdp_buff_has_frags(&mxbuf->xdp)) {
2041 struct mlx5e_frag_page *pagep;
2042
2043 /* sinfo->nr_frags is reset by build_skb, calculate again. */
2044 xdp_update_skb_frags_info(skb, new_nr_frags,
2045 sinfo->xdp_frags_size,
2046 truesize,
2047 xdp_buff_get_skb_flags(&mxbuf->xdp));
2048
2049 pagep = head_page;
2050 do
2051 pagep->frags++;
2052 while (++pagep < frag_page);
2053
2054 headlen = min_t(u16, MLX5E_RX_MAX_HEAD - len,
2055 skb->data_len);
2056 __pskb_pull_tail(skb, headlen);
2057 }
2058 } else {
2059 dma_addr_t addr;
2060
2061 if (xdp_buff_has_frags(&mxbuf->xdp)) {
2062 struct mlx5e_frag_page *pagep;
2063
2064 xdp_update_skb_frags_info(skb, sinfo->nr_frags,
2065 sinfo->xdp_frags_size,
2066 truesize,
2067 xdp_buff_get_skb_flags(&mxbuf->xdp));
2068
2069 pagep = frag_page - sinfo->nr_frags;
2070 do
2071 pagep->frags++;
2072 while (++pagep < frag_page);
2073 }
2074 /* copy header */
2075 addr = page_pool_get_dma_addr_netmem(head_page->netmem);
2076 mlx5e_copy_skb_header(rq, skb, head_page->netmem, addr,
2077 head_offset, head_offset, headlen);
2078 /* skb linear part was allocated with headlen and aligned to long */
2079 skb->tail += headlen;
2080 skb->len += headlen;
2081 }
2082
2083 return skb;
2084 }
2085
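/* Striding RQ, linear path: the whole packet fits in a single stride, so the
 * skb is built directly around the receive buffer (no copy), after letting
 * an attached XDP program run on it first.
 */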
2086 static struct sk_buff *
2087 mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2088 struct mlx5_cqe64 *cqe, u16 cqe_bcnt, u32 head_offset,
2089 u32 page_idx)
2090 {
2091 struct mlx5e_frag_page *frag_page = &wi->alloc_units.frag_pages[page_idx];
2092 u16 rx_headroom = rq->buff.headroom;
2093 struct bpf_prog *prog;
2094 struct sk_buff *skb;
2095 u32 metasize = 0;
2096 void *va, *data;
2097 dma_addr_t addr;
2098 u32 frag_size;
2099
2100 /* Check packet size. Note LRO doesn't use linear SKB */
2101 if (unlikely(cqe_bcnt > rq->hw_mtu)) {
2102 rq->stats->oversize_pkts_sw_drop++;
2103 return NULL;
2104 }
2105
2106 va = netmem_address(frag_page->netmem) + head_offset;
2107 data = va + rx_headroom;
2108 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2109
2110 addr = page_pool_get_dma_addr_netmem(frag_page->netmem);
2111 dma_sync_single_range_for_cpu(rq->pdev, addr, head_offset,
2112 frag_size, rq->buff.map_dir);
2113 net_prefetch(data);
2114
2115 prog = rcu_dereference(rq->xdp_prog);
2116 if (prog) {
2117 struct mlx5e_xdp_buff *mxbuf = &rq->mxbuf;
2118
2119 net_prefetchw(va); /* xdp_frame data area */
2120 mlx5e_fill_mxbuf(rq, cqe, va, rx_headroom, rq->buff.frame0_sz,
2121 cqe_bcnt, mxbuf);
2122 if (mlx5e_xdp_handle(rq, prog, mxbuf)) {
2123 if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))
2124 frag_page->frags++;
2125 return NULL; /* page/packet was consumed by XDP */
2126 }
2127
2128 rx_headroom = mxbuf->xdp.data - mxbuf->xdp.data_hard_start;
2129 metasize = mxbuf->xdp.data - mxbuf->xdp.data_meta;
2130 cqe_bcnt = mxbuf->xdp.data_end - mxbuf->xdp.data;
2131 }
2132 frag_size = MLX5_SKB_FRAG_SZ(rx_headroom + cqe_bcnt);
2133 skb = mlx5e_build_linear_skb(rq, va, frag_size, rx_headroom, cqe_bcnt, metasize);
2134 if (unlikely(!skb))
2135 return NULL;
2136
2137 /* queue up for recycling/reuse */
2138 skb_mark_for_recycle(skb);
2139 frag_page->frags++;
2140
2141 return skb;
2142 }
2143
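/* SHAMPO: allocate a small skb for the packet headers and copy them from the
 * header buffer resolved via mlx5e_shampo_get_hd_buf_info(); the payload is
 * attached later by mlx5e_shampo_fill_skb_data().
 */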
2144 static struct sk_buff *
2145 mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi,
2146 struct mlx5_cqe64 *cqe, u16 header_index)
2147 {
2148 u16 head_size = cqe->shampo.header_size;
2149 struct mlx5e_dma_info *di;
2150 struct sk_buff *skb;
2151 u32 head_offset;
2152 int len;
2153
2154 len = ALIGN(head_size, sizeof(long));
2155 skb = napi_alloc_skb(rq->cq.napi, len);
2156 if (unlikely(!skb)) {
2157 rq->stats->buff_alloc_err++;
2158 return NULL;
2159 }
2160
2161 net_prefetchw(skb->data);
2162
2163 mlx5e_shampo_get_hd_buf_info(rq, cqe, &di, &head_offset);
2164 mlx5e_copy_skb_header(rq, skb, page_to_netmem(di->page), di->addr,
2165 head_offset, head_offset, len);
2166 __skb_put(skb, head_size);
2167
2168 /* queue up for recycling/reuse */
2169 skb_mark_for_recycle(skb);
2170
2171 return skb;
2172 }
2173
2174 static void
2175 mlx5e_shampo_align_fragment(struct sk_buff *skb, u8 log_stride_sz)
2176 {
2177 skb_frag_t *last_frag = &skb_shinfo(skb)->frags[skb_shinfo(skb)->nr_frags - 1];
2178 unsigned int frag_size = skb_frag_size(last_frag);
2179 unsigned int frag_truesize;
2180
2181 frag_truesize = ALIGN(frag_size, BIT(log_stride_sz));
2182 skb->truesize += frag_truesize - frag_size;
2183 }
2184
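/* Flush the skb currently being aggregated by HW GRO (SHAMPO): fix up the
 * truesize of the last fragment, update the aggregated headers when more
 * than one segment was merged, and pass the skb to the GRO engine.
 */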
2185 static void
2186 mlx5e_shampo_flush_skb(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, bool match)
2187 {
2188 struct sk_buff *skb = rq->hw_gro_data->skb;
2189 struct mlx5e_rq_stats *stats = rq->stats;
2190 u16 gro_count = NAPI_GRO_CB(skb)->count;
2191
2192 if (likely(skb_shinfo(skb)->nr_frags))
2193 mlx5e_shampo_align_fragment(skb, rq->mpwqe.log_stride_sz);
2194 if (gro_count > 1) {
2195 stats->gro_skbs++;
2196 stats->gro_packets += gro_count;
2197 stats->gro_bytes += skb->data_len + skb_headlen(skb) * gro_count;
2198
2199 mlx5e_shampo_update_hdr(rq, cqe, match);
2200 } else {
2201 skb_shinfo(skb)->gso_size = 0;
2202 }
2203 napi_gro_receive(rq->cq.napi, skb);
2204 rq->hw_gro_data->skb = NULL;
2205 }
2206
2207 static bool mlx5e_hw_gro_skb_has_enough_space(struct sk_buff *skb,
2208 u16 data_bcnt,
2209 u32 page_size)
2210 {
2211 int nr_frags = skb_shinfo(skb)->nr_frags;
2212
2213 if (page_size >= GRO_LEGACY_MAX_SIZE)
2214 return skb->len + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2215 else
2216 return page_size * nr_frags + data_bcnt <= GRO_LEGACY_MAX_SIZE;
2217 }
2218
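/* SHAMPO (HW GRO) completion handler: each CQE carries a header part and a
 * data part. The header either opens a new aggregated skb or extends the one
 * in progress, the data is attached as fragments, and the aggregate is
 * flushed to GRO when the hardware requests it or when it cannot grow any
 * further.
 */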
2219 static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2220 {
2221 u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size;
2222 u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe);
2223 u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset);
2224 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
2225 u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2226 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
2227 u16 head_size = cqe->shampo.header_size;
2228 struct sk_buff **skb = &rq->hw_gro_data->skb;
2229 bool flush = cqe->shampo.flush;
2230 bool match = cqe->shampo.match;
2231 u32 page_size = BIT(rq->mpwqe.page_shift);
2232 struct mlx5e_rq_stats *stats = rq->stats;
2233 struct mlx5e_rx_wqe_ll *wqe;
2234 struct mlx5e_mpw_info *wi;
2235 struct mlx5_wq_ll *wq;
2236 u32 data_offset;
2237 u32 page_idx;
2238
2239 wi = mlx5e_get_mpw_info(rq, wqe_id);
2240 wi->consumed_strides += cstrides;
2241
2242 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2243 mlx5e_handle_rx_err_cqe(rq, cqe);
2244 goto mpwrq_cqe_out;
2245 }
2246
2247 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2248 stats->mpwqe_filler_cqes++;
2249 stats->mpwqe_filler_strides += cstrides;
2250 goto mpwrq_cqe_out;
2251 }
2252
2253 data_offset = wqe_offset & (page_size - 1);
2254 page_idx = wqe_offset >> rq->mpwqe.page_shift;
2255 if (*skb &&
2256 !(match && mlx5e_hw_gro_skb_has_enough_space(*skb, data_bcnt,
2257 page_size))) {
2258 match = false;
2259 mlx5e_shampo_flush_skb(rq, cqe, match);
2260 }
2261
2262 if (!*skb) {
2263 if (likely(head_size)) {
2264 *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index);
2265 } else {
2266 struct mlx5e_frag_page *frag_page;
2267
2268 frag_page = &wi->alloc_units.frag_pages[page_idx];
2269 /* Drop packets with header in unreadable data area to
2270 * prevent the kernel from touching it.
2271 */
2272 if (unlikely(netmem_is_net_iov(frag_page->netmem)))
2273 goto mpwrq_cqe_out;
2274 *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe,
2275 cqe_bcnt,
2276 data_offset,
2277 page_idx);
2278 }
2279
2280 if (unlikely(!*skb))
2281 goto mpwrq_cqe_out;
2282
2283 NAPI_GRO_CB(*skb)->count = 1;
2284 skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size;
2285 } else {
2286 NAPI_GRO_CB(*skb)->count++;
2287
2288 if (NAPI_GRO_CB(*skb)->count == 2 &&
2289 rq->hw_gro_data->fk.basic.n_proto == htons(ETH_P_IP)) {
2290 int len = ETH_HLEN + rq->hw_gro_data->fk.control.thoff;
2291 int nhoff = len - sizeof(struct iphdr);
2292 void *last_hd_addr;
2293 struct iphdr *iph;
2294
2295 last_hd_addr = mlx5e_shampo_get_hdr(rq, cqe, len);
2296 iph = (struct iphdr *)(last_hd_addr + nhoff);
2297 rq->hw_gro_data->second_ip_id = ntohs(iph->id);
2298 }
2299 }
2300
2301 if (likely(head_size)) {
2302 if (data_bcnt) {
2303 struct mlx5e_frag_page *frag_page;
2304
2305 frag_page = &wi->alloc_units.frag_pages[page_idx];
2306 mlx5e_shampo_fill_skb_data(*skb, rq, frag_page, data_bcnt, data_offset);
2307 } else {
2308 stats->hds_nodata_packets++;
2309 stats->hds_nodata_bytes += head_size;
2310 }
2311 } else {
2312 stats->hds_nosplit_packets++;
2313 stats->hds_nosplit_bytes += data_bcnt;
2314 }
2315
2316 if (mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb)) {
2317 *skb = NULL;
2318 goto mpwrq_cqe_out;
2319 }
2320 if (flush && rq->hw_gro_data->skb)
2321 mlx5e_shampo_flush_skb(rq, cqe, match);
2322 mpwrq_cqe_out:
2323 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2324 return;
2325
2326 if (unlikely(!cstrides))
2327 return;
2328
2329 wq = &rq->mpwqe.wq;
2330 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2331 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2332 }
2333
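/* Completion handler for the striding (MPWQE) RQ on the NIC profile: build
 * the skb (linear, non-linear or XSK variant), complete the RX offloads, run
 * TC chain restore when needed and hand the skb to GRO. The multi-packet WQE
 * is popped only once all of its strides have been consumed.
 */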
2334 static void mlx5e_handle_rx_cqe_mpwrq(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2335 {
2336 u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe);
2337 u16 wqe_id = be16_to_cpu(cqe->wqe_id);
2338 struct mlx5e_mpw_info *wi = mlx5e_get_mpw_info(rq, wqe_id);
2339 u16 stride_ix = mpwrq_get_cqe_stride_index(cqe);
2340 u32 wqe_offset = stride_ix << rq->mpwqe.log_stride_sz;
2341 u32 head_offset = wqe_offset & ((1 << rq->mpwqe.page_shift) - 1);
2342 u32 page_idx = wqe_offset >> rq->mpwqe.page_shift;
2343 struct mlx5e_rx_wqe_ll *wqe;
2344 struct mlx5_wq_ll *wq;
2345 struct sk_buff *skb;
2346 u16 cqe_bcnt;
2347
2348 wi->consumed_strides += cstrides;
2349
2350 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2351 mlx5e_handle_rx_err_cqe(rq, cqe);
2352 goto mpwrq_cqe_out;
2353 }
2354
2355 if (unlikely(mpwrq_is_filler_cqe(cqe))) {
2356 struct mlx5e_rq_stats *stats = rq->stats;
2357
2358 stats->mpwqe_filler_cqes++;
2359 stats->mpwqe_filler_strides += cstrides;
2360 goto mpwrq_cqe_out;
2361 }
2362
2363 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe);
2364
2365 skb = INDIRECT_CALL_3(rq->mpwqe.skb_from_cqe_mpwrq,
2366 mlx5e_skb_from_cqe_mpwrq_linear,
2367 mlx5e_skb_from_cqe_mpwrq_nonlinear,
2368 mlx5e_xsk_skb_from_cqe_mpwrq_linear,
2369 rq, wi, cqe, cqe_bcnt, head_offset,
2370 page_idx);
2371 if (!skb)
2372 goto mpwrq_cqe_out;
2373
2374 if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
2375 goto mpwrq_cqe_out;
2376
2377 if (mlx5e_cqe_regb_chain(cqe))
2378 if (!mlx5e_tc_update_skb_nic(cqe, skb)) {
2379 dev_kfree_skb_any(skb);
2380 goto mpwrq_cqe_out;
2381 }
2382
2383 napi_gro_receive(rq->cq.napi, skb);
2384
2385 mpwrq_cqe_out:
2386 if (likely(wi->consumed_strides < rq->mpwqe.num_strides))
2387 return;
2388
2389 wq = &rq->mpwqe.wq;
2390 wqe = mlx5_wq_ll_get_wqe(wq, wqe_id);
2391 mlx5_wq_ll_pop(wq, cqe->wqe_id, &wqe->next.next_wqe_index);
2392 }
2393
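/* Poll loop for enhanced CQE compression: compressed blocks are expanded
 * against the most recent title CQE, while regular CQEs are dispatched to
 * the RQ handler. The last title CQE may only be referenced by a compressed
 * block in the next poll cycle, so it is remembered across invocations.
 */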
2394 static int mlx5e_rx_cq_process_enhanced_cqe_comp(struct mlx5e_rq *rq,
2395 struct mlx5_cqwq *cqwq,
2396 int budget_rem)
2397 {
2398 struct mlx5_cqe64 *cqe, *title_cqe = NULL;
2399 struct mlx5e_cq_decomp *cqd = &rq->cqd;
2400 int work_done = 0;
2401
2402 cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq);
2403 if (!cqe)
2404 return work_done;
2405
2406 if (cqd->last_cqe_title &&
2407 (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED)) {
2408 rq->stats->cqe_compress_blks++;
2409 cqd->last_cqe_title = false;
2410 }
2411
2412 do {
2413 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2414 if (title_cqe) {
2415 mlx5e_read_enhanced_title_slot(rq, title_cqe);
2416 title_cqe = NULL;
2417 rq->stats->cqe_compress_blks++;
2418 }
2419 work_done +=
2420 mlx5e_decompress_enhanced_cqe(rq, cqwq, cqe,
2421 budget_rem - work_done);
2422 continue;
2423 }
2424 title_cqe = cqe;
2425 mlx5_cqwq_pop(cqwq);
2426
2427 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2428 mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2429 rq, cqe);
2430 work_done++;
2431 } while (work_done < budget_rem &&
2432 (cqe = mlx5_cqwq_get_cqe_enhanced_comp(cqwq)));
2433
2434 /* last cqe might be title on next poll bulk */
2435 if (title_cqe) {
2436 mlx5e_read_enhanced_title_slot(rq, title_cqe);
2437 cqd->last_cqe_title = true;
2438 }
2439
2440 return work_done;
2441 }
2442
2443 static int mlx5e_rx_cq_process_basic_cqe_comp(struct mlx5e_rq *rq,
2444 struct mlx5_cqwq *cqwq,
2445 int budget_rem)
2446 {
2447 struct mlx5_cqe64 *cqe;
2448 int work_done = 0;
2449
2450 if (rq->cqd.left)
2451 work_done += mlx5e_decompress_cqes_cont(rq, cqwq, 0, budget_rem);
2452
2453 while (work_done < budget_rem && (cqe = mlx5_cqwq_get_cqe(cqwq))) {
2454 if (mlx5_get_cqe_format(cqe) == MLX5_COMPRESSED) {
2455 work_done +=
2456 mlx5e_decompress_cqes_start(rq, cqwq,
2457 budget_rem - work_done);
2458 continue;
2459 }
2460
2461 mlx5_cqwq_pop(cqwq);
2462 INDIRECT_CALL_3(rq->handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq,
2463 mlx5e_handle_rx_cqe, mlx5e_handle_rx_cqe_mpwrq_shampo,
2464 rq, cqe);
2465 work_done++;
2466 }
2467
2468 return work_done;
2469 }
2470
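/* NAPI RX poll for one CQ: process up to @budget completions using the basic
 * or enhanced CQE compression scheme, flush any pending HW GRO skb, complete
 * outstanding XDP work and update the CQ doorbell record.
 */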
2471 int mlx5e_poll_rx_cq(struct mlx5e_cq *cq, int budget)
2472 {
2473 struct mlx5e_rq *rq = container_of(cq, struct mlx5e_rq, cq);
2474 struct mlx5_cqwq *cqwq = &cq->wq;
2475 int work_done;
2476
2477 if (unlikely(!test_bit(MLX5E_RQ_STATE_ENABLED, &rq->state)))
2478 return 0;
2479
2480 if (test_bit(MLX5E_RQ_STATE_MINI_CQE_ENHANCED, &rq->state))
2481 work_done = mlx5e_rx_cq_process_enhanced_cqe_comp(rq, cqwq,
2482 budget);
2483 else
2484 work_done = mlx5e_rx_cq_process_basic_cqe_comp(rq, cqwq,
2485 budget);
2486
2487 if (work_done == 0)
2488 return 0;
2489
2490 if (test_bit(MLX5E_RQ_STATE_SHAMPO, &rq->state) && rq->hw_gro_data->skb)
2491 mlx5e_shampo_flush_skb(rq, NULL, false);
2492
2493 if (rcu_access_pointer(rq->xdp_prog))
2494 mlx5e_xdp_rx_poll_complete(rq);
2495
2496 mlx5_cqwq_update_db_record(cqwq);
2497
2498 /* ensure cq space is freed before enabling more cqes */
2499 wmb();
2500
2501 return work_done;
2502 }
2503
2504 #ifdef CONFIG_MLX5_CORE_IPOIB
2505
2506 #define MLX5_IB_GRH_SGID_OFFSET 8
2507 #define MLX5_IB_GRH_DGID_OFFSET 24
2508 #define MLX5_GID_SIZE 16
2509
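/* IPoIB RX completion: resolve the (possibly child) netdev from the QPN in
 * the CQE, classify the packet from the GRH, drop self-originated multicast
 * that the HCA replicated back to us, then strip the GRH and synthesize the
 * pseudo IPoIB header expected by the stack.
 */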
2510 static inline void mlx5i_complete_rx_cqe(struct mlx5e_rq *rq,
2511 struct mlx5_cqe64 *cqe,
2512 u32 cqe_bcnt,
2513 struct sk_buff *skb)
2514 {
2515 struct mlx5e_rq_stats *stats;
2516 struct net_device *netdev;
2517 struct mlx5e_priv *priv;
2518 char *pseudo_header;
2519 u32 flags_rqpn;
2520 u32 qpn;
2521 u8 *dgid;
2522 u8 g;
2523
2524 qpn = be32_to_cpu(cqe->sop_drop_qpn) & 0xffffff;
2525 netdev = mlx5i_pkey_get_netdev(rq->netdev, qpn);
2526
2527 /* No mapping present, cannot process SKB. This might happen if a child
2528 * interface is going down while having unprocessed CQEs on parent RQ
2529 */
2530 if (unlikely(!netdev)) {
2531 /* TODO: add drop counters support */
2532 skb->dev = NULL;
2533 pr_warn_once("Unable to map QPN %u to dev - dropping skb\n", qpn);
2534 return;
2535 }
2536
2537 priv = mlx5i_epriv(netdev);
2538 stats = &priv->channel_stats[rq->ix]->rq;
2539
2540 flags_rqpn = be32_to_cpu(cqe->flags_rqpn);
2541 g = (flags_rqpn >> 28) & 3;
2542 dgid = skb->data + MLX5_IB_GRH_DGID_OFFSET;
2543 if ((!g) || dgid[0] != 0xff)
2544 skb->pkt_type = PACKET_HOST;
2545 else if (memcmp(dgid, netdev->broadcast + 4, MLX5_GID_SIZE) == 0)
2546 skb->pkt_type = PACKET_BROADCAST;
2547 else
2548 skb->pkt_type = PACKET_MULTICAST;
2549
2550 /* Drop packets that this interface sent, ie multicast packets
2551 * that the HCA has replicated.
2552 */
2553 if (g && (qpn == (flags_rqpn & 0xffffff)) &&
2554 (memcmp(netdev->dev_addr + 4, skb->data + MLX5_IB_GRH_SGID_OFFSET,
2555 MLX5_GID_SIZE) == 0)) {
2556 skb->dev = NULL;
2557 return;
2558 }
2559
2560 skb_pull(skb, MLX5_IB_GRH_BYTES);
2561
2562 skb->protocol = *((__be16 *)(skb->data));
2563
2564 if (netdev->features & NETIF_F_RXCSUM) {
2565 skb->ip_summed = CHECKSUM_COMPLETE;
2566 skb->csum = csum_unfold((__force __sum16)cqe->check_sum);
2567 stats->csum_complete++;
2568 } else {
2569 skb->ip_summed = CHECKSUM_NONE;
2570 stats->csum_none++;
2571 }
2572
2573 if (unlikely(mlx5e_rx_hw_stamp(&priv->hwtstamp_config)))
2574 skb_hwtstamps(skb)->hwtstamp = mlx5e_cqe_ts_to_ns(rq->ptp_cyc2time,
2575 rq->clock, get_cqe_ts(cqe));
2576 skb_record_rx_queue(skb, rq->ix);
2577
2578 if (likely(netdev->features & NETIF_F_RXHASH))
2579 mlx5e_skb_set_hash(cqe, skb);
2580
2581 /* 20 bytes of ipoib header and 4 for encap existing */
2582 pseudo_header = skb_push(skb, MLX5_IPOIB_PSEUDO_LEN);
2583 memset(pseudo_header, 0, MLX5_IPOIB_PSEUDO_LEN);
2584 skb_reset_mac_header(skb);
2585 skb_pull(skb, MLX5_IPOIB_HARD_LEN);
2586
2587 skb->dev = netdev;
2588
2589 stats->packets++;
2590 stats->bytes += cqe_bcnt;
2591 }
2592
2593 static void mlx5i_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2594 {
2595 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2596 struct mlx5e_wqe_frag_info *wi;
2597 struct sk_buff *skb;
2598 u32 cqe_bcnt;
2599 u16 ci;
2600
2601 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2602 wi = get_frag(rq, ci);
2603 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2604
2605 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2606 rq->stats->wqe_err++;
2607 goto wq_cyc_pop;
2608 }
2609
2610 skb = INDIRECT_CALL_2(rq->wqe.skb_from_cqe,
2611 mlx5e_skb_from_cqe_linear,
2612 mlx5e_skb_from_cqe_nonlinear,
2613 rq, wi, cqe, cqe_bcnt);
2614 if (!skb)
2615 goto wq_cyc_pop;
2616
2617 mlx5i_complete_rx_cqe(rq, cqe, cqe_bcnt, skb);
2618 if (unlikely(!skb->dev)) {
2619 dev_kfree_skb_any(skb);
2620 goto wq_cyc_pop;
2621 }
2622 napi_gro_receive(rq->cq.napi, skb);
2623
2624 wq_cyc_pop:
2625 mlx5_wq_cyc_pop(wq);
2626 }
2627
2628 const struct mlx5e_rx_handlers mlx5i_rx_handlers = {
2629 .handle_rx_cqe = mlx5i_handle_rx_cqe,
2630 .handle_rx_cqe_mpwqe = NULL, /* Not supported */
2631 };
2632 #endif /* CONFIG_MLX5_CORE_IPOIB */
2633
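/* Select the RX datapath callbacks for an RQ based on its work queue type
 * (striding vs. cyclic), whether XSK is in use, whether SHAMPO packet merge
 * is enabled and whether the packet fits a linear skb.
 */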
2634 int mlx5e_rq_set_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params, bool xsk)
2635 {
2636 struct net_device *netdev = rq->netdev;
2637 struct mlx5_core_dev *mdev = rq->mdev;
2638 struct mlx5e_priv *priv = rq->priv;
2639
2640 switch (rq->wq_type) {
2641 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
2642 rq->mpwqe.skb_from_cqe_mpwrq = xsk ?
2643 mlx5e_xsk_skb_from_cqe_mpwrq_linear :
2644 mlx5e_rx_mpwqe_is_linear_skb(mdev, params, NULL) ?
2645 mlx5e_skb_from_cqe_mpwrq_linear :
2646 mlx5e_skb_from_cqe_mpwrq_nonlinear;
2647 rq->post_wqes = mlx5e_post_rx_mpwqes;
2648 rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;
2649
2650 if (params->packet_merge.type == MLX5E_PACKET_MERGE_SHAMPO) {
2651 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe_shampo;
2652 if (!rq->handle_rx_cqe) {
2653 netdev_err(netdev, "RX handler of SHAMPO MPWQE RQ is not set\n");
2654 return -EINVAL;
2655 }
2656 } else {
2657 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe_mpwqe;
2658 if (!rq->handle_rx_cqe) {
2659 netdev_err(netdev, "RX handler of MPWQE RQ is not set\n");
2660 return -EINVAL;
2661 }
2662 }
2663
2664 break;
2665 default: /* MLX5_WQ_TYPE_CYCLIC */
2666 rq->wqe.skb_from_cqe = xsk ?
2667 mlx5e_xsk_skb_from_cqe_linear :
2668 mlx5e_rx_is_linear_skb(mdev, params, NULL) ?
2669 mlx5e_skb_from_cqe_linear :
2670 mlx5e_skb_from_cqe_nonlinear;
2671 rq->post_wqes = mlx5e_post_rx_wqes;
2672 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2673 rq->handle_rx_cqe = priv->profile->rx_handlers->handle_rx_cqe;
2674 if (!rq->handle_rx_cqe) {
2675 netdev_err(netdev, "RX handler of RQ is not set\n");
2676 return -EINVAL;
2677 }
2678 }
2679
2680 return 0;
2681 }
2682
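/* Devlink trap RQ handler: the received packet is reported to devlink with
 * the trap id taken from the CQE flow tag and then freed; it is never
 * delivered to the stack.
 */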
2683 static void mlx5e_trap_handle_rx_cqe(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe)
2684 {
2685 struct mlx5_wq_cyc *wq = &rq->wqe.wq;
2686 struct mlx5e_wqe_frag_info *wi;
2687 struct sk_buff *skb;
2688 u32 cqe_bcnt;
2689 u16 trap_id;
2690 u16 ci;
2691
2692 trap_id = get_cqe_flow_tag(cqe);
2693 ci = mlx5_wq_cyc_ctr2ix(wq, be16_to_cpu(cqe->wqe_counter));
2694 wi = get_frag(rq, ci);
2695 cqe_bcnt = be32_to_cpu(cqe->byte_cnt);
2696
2697 if (unlikely(MLX5E_RX_ERR_CQE(cqe))) {
2698 rq->stats->wqe_err++;
2699 goto wq_cyc_pop;
2700 }
2701
2702 skb = mlx5e_skb_from_cqe_nonlinear(rq, wi, cqe, cqe_bcnt);
2703 if (!skb)
2704 goto wq_cyc_pop;
2705
2706 if (mlx5e_complete_rx_cqe(rq, cqe, cqe_bcnt, skb))
2707 goto wq_cyc_pop;
2708 skb_push(skb, ETH_HLEN);
2709
2710 mlx5_devlink_trap_report(rq->mdev, trap_id, skb,
2711 rq->netdev->devlink_port);
2712 dev_kfree_skb_any(skb);
2713
2714 wq_cyc_pop:
2715 mlx5_wq_cyc_pop(wq);
2716 }
2717
2718 void mlx5e_rq_set_trap_handlers(struct mlx5e_rq *rq, struct mlx5e_params *params)
2719 {
2720 rq->wqe.skb_from_cqe = mlx5e_rx_is_linear_skb(rq->mdev, params, NULL) ?
2721 mlx5e_skb_from_cqe_linear :
2722 mlx5e_skb_from_cqe_nonlinear;
2723 rq->post_wqes = mlx5e_post_rx_wqes;
2724 rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;
2725 rq->handle_rx_cqe = mlx5e_trap_handle_rx_cqe;
2726 }
2727