// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2025 Broadcom.

#include <asm/byteorder.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/if.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/gro.h>
#include <linux/skbuff.h>
#include <net/page_pool/helpers.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/dst_metadata.h>
#include <net/netdev_queues.h>

#include "bnge.h"
#include "bnge_hwrm.h"
#include "bnge_hwrm_lib.h"
#include "bnge_netdev.h"
#include "bnge_rmem.h"
#include "bnge_txrx.h"

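/* MSI-X interrupt handler: prefetch the next completion descriptor and
 * hand the rest of the work to NAPI.
 */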
irqreturn_t bnge_msix(int irq, void *dev_instance)
{
	struct bnge_napi *bnapi = dev_instance;
	struct bnge_nq_ring_info *nqr;
	struct bnge_net *bn;
	u32 cons;

	bn = bnapi->bn;
	nqr = &bnapi->nq_ring;
	cons = RING_CMP(bn, nqr->nq_raw_cons);

	prefetch(&nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)]);
	napi_schedule(&bnapi->napi);
	return IRQ_HANDLED;
}

static struct rx_agg_cmp *bnge_get_tpa_agg(struct bnge_net *bn,
					   struct bnge_rx_ring_info *rxr,
					   u16 agg_id, u16 curr)
{
	struct bnge_tpa_info *tpa_info = &rxr->rx_tpa[agg_id];

	return &tpa_info->agg_arr[curr];
}

static struct rx_agg_cmp *bnge_get_agg(struct bnge_net *bn,
				       struct bnge_cp_ring_info *cpr,
				       u16 cp_cons, u16 curr)
{
	struct rx_agg_cmp *agg;

	cp_cons = RING_CMP(bn, ADV_RAW_CMP(cp_cons, curr));
	agg = (struct rx_agg_cmp *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];
	return agg;
}

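/* Return RX aggregation buffers that were taken off the ring back to the
 * producer side so they can be reused.  Called when a packet that consumed
 * agg buffers has to be dropped or aborted.
 */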
static void bnge_reuse_rx_agg_bufs(struct bnge_cp_ring_info *cpr, u16 idx,
				   u16 start, u32 agg_bufs, bool tpa)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct bnge_net *bn = bnapi->bn;
	struct bnge_rx_ring_info *rxr;
	u16 prod, sw_prod;
	u32 i;

	rxr = bnapi->rx_ring;
	sw_prod = rxr->rx_sw_agg_prod;
	prod = rxr->rx_agg_prod;

	for (i = 0; i < agg_bufs; i++) {
		struct bnge_sw_rx_agg_bd *cons_rx_buf, *prod_rx_buf;
		struct rx_agg_cmp *agg;
		struct rx_bd *prod_bd;
		netmem_ref netmem;
		u16 cons;

		if (tpa)
			agg = bnge_get_tpa_agg(bn, rxr, idx, start + i);
		else
			agg = bnge_get_agg(bn, cpr, idx, start + i);
		cons = agg->rx_agg_cmp_opaque;
		__clear_bit(cons, rxr->rx_agg_bmap);

		if (unlikely(test_bit(sw_prod, rxr->rx_agg_bmap)))
			sw_prod = bnge_find_next_agg_idx(rxr, sw_prod);

		__set_bit(sw_prod, rxr->rx_agg_bmap);
		prod_rx_buf = &rxr->rx_agg_buf_ring[sw_prod];
		cons_rx_buf = &rxr->rx_agg_buf_ring[cons];

		/* It is possible for sw_prod to be equal to cons, so
		 * set cons_rx_buf->netmem to 0 first.
		 */
		netmem = cons_rx_buf->netmem;
		cons_rx_buf->netmem = 0;
		prod_rx_buf->netmem = netmem;
		prod_rx_buf->offset = cons_rx_buf->offset;

		prod_rx_buf->mapping = cons_rx_buf->mapping;

		prod_bd = &rxr->rx_agg_desc_ring[RX_AGG_RING(bn, prod)]
						[RX_IDX(prod)];

		prod_bd->rx_bd_haddr = cpu_to_le64(cons_rx_buf->mapping);
		prod_bd->rx_bd_opaque = sw_prod;

		prod = NEXT_RX_AGG(prod);
		sw_prod = RING_RX_AGG(bn, NEXT_RX_AGG(sw_prod));
	}
	rxr->rx_agg_prod = prod;
	rxr->rx_sw_agg_prod = sw_prod;
}

static int bnge_agg_bufs_valid(struct bnge_net *bn,
			       struct bnge_cp_ring_info *cpr,
			       u8 agg_bufs, u32 *raw_cons)
{
	struct rx_agg_cmp *agg;
	u16 last;

	*raw_cons = ADV_RAW_CMP(*raw_cons, agg_bufs);
	last = RING_CMP(bn, *raw_cons);
	agg = (struct rx_agg_cmp *)
		&cpr->desc_ring[CP_RING(last)][CP_IDX(last)];
	return RX_AGG_CMP_VALID(bn, agg, *raw_cons);
}

static int bnge_discard_rx(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
			   u32 *raw_cons, void *cmp)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp *rxcmp = cmp;
	u8 cmp_type, agg_bufs = 0;

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		agg_bufs = (le32_to_cpu(rxcmp->rx_cmp_misc_v1) &
			    RX_CMP_AGG_BUFS) >>
			   RX_CMP_AGG_BUFS_SHIFT;
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		return 0;
	}

	if (agg_bufs) {
		if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;
	}
	*raw_cons = tmp_raw_cons;
	return 0;
}

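/* Walk the aggregation completions for a packet and attach each agg buffer
 * to @skb as a page frag.  Returns the total fragment length added, or 0 if
 * a replacement buffer could not be allocated and the agg buffers were
 * returned to the ring instead.
 */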
static u32 __bnge_rx_agg_netmems(struct bnge_net *bn,
				 struct bnge_cp_ring_info *cpr,
				 u16 idx, u32 agg_bufs, bool tpa,
				 struct sk_buff *skb)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct skb_shared_info *shinfo;
	struct bnge_rx_ring_info *rxr;
	u32 i, total_frag_len = 0;
	u16 prod;

	rxr = bnapi->rx_ring;
	prod = rxr->rx_agg_prod;
	shinfo = skb_shinfo(skb);

	for (i = 0; i < agg_bufs; i++) {
		struct bnge_sw_rx_agg_bd *cons_rx_buf;
		struct rx_agg_cmp *agg;
		u16 cons, frag_len;
		netmem_ref netmem;

		if (tpa)
			agg = bnge_get_tpa_agg(bn, rxr, idx, i);
		else
			agg = bnge_get_agg(bn, cpr, idx, i);
		cons = agg->rx_agg_cmp_opaque;
		frag_len = (le32_to_cpu(agg->rx_agg_cmp_len_flags_type) &
			    RX_AGG_CMP_LEN) >> RX_AGG_CMP_LEN_SHIFT;

		cons_rx_buf = &rxr->rx_agg_buf_ring[cons];
		skb_add_rx_frag_netmem(skb, i, cons_rx_buf->netmem,
				       cons_rx_buf->offset,
				       frag_len, BNGE_RX_PAGE_SIZE);
		__clear_bit(cons, rxr->rx_agg_bmap);

		/* It is possible for bnge_alloc_rx_netmem() to allocate
		 * a sw_prod index that equals the cons index, so we
		 * need to clear the cons entry now.
		 */
		netmem = cons_rx_buf->netmem;
		cons_rx_buf->netmem = 0;

		if (bnge_alloc_rx_netmem(bn, rxr, prod, GFP_ATOMIC) != 0) {
			skb->len -= frag_len;
			skb->data_len -= frag_len;
			skb->truesize -= BNGE_RX_PAGE_SIZE;

			--shinfo->nr_frags;
			cons_rx_buf->netmem = netmem;

			/* Update prod since possibly some netmems have been
			 * allocated already.
			 */
			rxr->rx_agg_prod = prod;
			bnge_reuse_rx_agg_bufs(cpr, idx, i, agg_bufs - i, tpa);
			return 0;
		}

		page_pool_dma_sync_netmem_for_cpu(rxr->page_pool, netmem, 0,
						  BNGE_RX_PAGE_SIZE);

		total_frag_len += frag_len;
		prod = NEXT_RX_AGG(prod);
	}
	rxr->rx_agg_prod = prod;
	return total_frag_len;
}

static struct sk_buff *bnge_rx_agg_netmems_skb(struct bnge_net *bn,
					       struct bnge_cp_ring_info *cpr,
					       struct sk_buff *skb, u16 idx,
					       u32 agg_bufs, bool tpa)
{
	u32 total_frag_len;

	total_frag_len = __bnge_rx_agg_netmems(bn, cpr, idx, agg_bufs,
					       tpa, skb);
	if (!total_frag_len) {
		skb_mark_for_recycle(skb);
		dev_kfree_skb(skb);
		return NULL;
	}

	return skb;
}

static void bnge_sched_reset_rxr(struct bnge_net *bn,
				 struct bnge_rx_ring_info *rxr)
{
	if (!rxr->bnapi->in_reset) {
		rxr->bnapi->in_reset = true;

		/* TODO: Initiate reset task */
	}
	rxr->rx_next_cons = 0xffff;
}

static void bnge_sched_reset_txr(struct bnge_net *bn,
				 struct bnge_tx_ring_info *txr,
				 u16 curr)
{
	struct bnge_napi *bnapi = txr->bnapi;

	if (bnapi->tx_fault)
		return;

	netdev_err(bn->netdev, "Invalid Tx completion (ring:%d tx_hw_cons:%u cons:%u prod:%u curr:%u)",
		   txr->txq_index, txr->tx_hw_cons,
		   txr->tx_cons, txr->tx_prod, curr);
	WARN_ON_ONCE(1);
	bnapi->tx_fault = 1;
	/* TODO: Initiate reset task */
}

static u16 bnge_tpa_alloc_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
{
	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;
	u16 idx = agg_id & MAX_TPA_MASK;

	if (test_bit(idx, map->agg_idx_bmap)) {
		idx = find_first_zero_bit(map->agg_idx_bmap, MAX_TPA);
		if (idx >= MAX_TPA)
			return INVALID_HW_RING_ID;
	}
	__set_bit(idx, map->agg_idx_bmap);
	map->agg_id_tbl[agg_id] = idx;
	return idx;
}

static void bnge_free_agg_idx(struct bnge_rx_ring_info *rxr, u16 idx)
{
	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	__clear_bit(idx, map->agg_idx_bmap);
}

static u16 bnge_lookup_agg_idx(struct bnge_rx_ring_info *rxr, u16 agg_id)
{
	struct bnge_tpa_idx_map *map = rxr->rx_tpa_idx_map;

	return map->agg_id_tbl[agg_id];
}

static void bnge_tpa_metadata(struct bnge_tpa_info *tpa_info,
			      struct rx_tpa_start_cmp *tpa_start,
			      struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->cfa_code_valid = 1;
	tpa_info->cfa_code = TPA_START_CFA_CODE(tpa_start1);
	tpa_info->vlan_valid = 0;
	if (tpa_info->flags2 & RX_CMP_FLAGS2_META_FORMAT_VLAN) {
		tpa_info->vlan_valid = 1;
		tpa_info->metadata =
			le32_to_cpu(tpa_start1->rx_tpa_start_cmp_metadata);
	}
}

static void bnge_tpa_metadata_v2(struct bnge_tpa_info *tpa_info,
				 struct rx_tpa_start_cmp *tpa_start,
				 struct rx_tpa_start_cmp_ext *tpa_start1)
{
	tpa_info->vlan_valid = 0;
	if (TPA_START_VLAN_VALID(tpa_start)) {
		u32 tpid_sel = TPA_START_VLAN_TPID_SEL(tpa_start);
		u32 vlan_proto = ETH_P_8021Q;

		tpa_info->vlan_valid = 1;
		if (tpid_sel == RX_TPA_START_METADATA1_TPID_8021AD)
			vlan_proto = ETH_P_8021AD;
		tpa_info->metadata = vlan_proto << 16 |
				     TPA_START_METADATA0_TCI(tpa_start1);
	}
}

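/* Handle a TPA_START completion: reserve a TPA slot, stash the current RX
 * buffer in the per-aggregation tpa_info, and recycle the descriptor so the
 * ring keeps a full complement of buffers while the aggregation is open.
 */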
static void bnge_tpa_start(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
			   u8 cmp_type, struct rx_tpa_start_cmp *tpa_start,
			   struct rx_tpa_start_cmp_ext *tpa_start1)
{
	struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnge_tpa_info *tpa_info;
	u16 cons, prod, agg_id;
	struct rx_bd *prod_bd;
	dma_addr_t mapping;

	agg_id = TPA_START_AGG_ID(tpa_start);
	agg_id = bnge_tpa_alloc_agg_idx(rxr, agg_id);
	if (unlikely(agg_id == INVALID_HW_RING_ID)) {
		netdev_warn(bn->netdev, "Unable to allocate agg ID for ring %d, agg 0x%lx\n",
			    rxr->bnapi->index, TPA_START_AGG_ID(tpa_start));
		bnge_sched_reset_rxr(bn, rxr);
		return;
	}
	cons = tpa_start->rx_tpa_start_cmp_opaque;
	prod = rxr->rx_prod;
	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(cons != rxr->rx_next_cons ||
		     TPA_START_ERROR(tpa_start))) {
		netdev_warn(bn->netdev, "TPA cons %x, expected cons %x, error code %lx\n",
			    cons, rxr->rx_next_cons,
			    TPA_START_ERROR_CODE(tpa_start1));
		bnge_sched_reset_rxr(bn, rxr);
		return;
	}
	prod_rx_buf->data = tpa_info->data;
	prod_rx_buf->data_ptr = tpa_info->data_ptr;

	mapping = tpa_info->mapping;
	prod_rx_buf->mapping = mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];

	prod_bd->rx_bd_haddr = cpu_to_le64(mapping);

	tpa_info->data = cons_rx_buf->data;
	tpa_info->data_ptr = cons_rx_buf->data_ptr;
	cons_rx_buf->data = NULL;
	tpa_info->mapping = cons_rx_buf->mapping;

	tpa_info->len =
		le32_to_cpu(tpa_start->rx_tpa_start_cmp_len_flags_type) >>
		RX_TPA_START_CMP_LEN_SHIFT;
	if (likely(TPA_START_HASH_VALID(tpa_start))) {
		tpa_info->hash_type = PKT_HASH_TYPE_L4;
		if (TPA_START_IS_IPV6(tpa_start1))
			tpa_info->gso_type = SKB_GSO_TCPV6;
		else
			tpa_info->gso_type = SKB_GSO_TCPV4;
		tpa_info->rss_hash =
			le32_to_cpu(tpa_start->rx_tpa_start_cmp_rss_hash);
	} else {
		tpa_info->hash_type = PKT_HASH_TYPE_NONE;
		tpa_info->gso_type = 0;
		netif_warn(bn, rx_err, bn->netdev, "TPA packet without valid hash\n");
	}
	tpa_info->flags2 = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_flags2);
	tpa_info->hdr_info = le32_to_cpu(tpa_start1->rx_tpa_start_cmp_hdr_info);
	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP)
		bnge_tpa_metadata(tpa_info, tpa_start, tpa_start1);
	else
		bnge_tpa_metadata_v2(tpa_info, tpa_start, tpa_start1);
	tpa_info->agg_count = 0;

	rxr->rx_prod = NEXT_RX(prod);
	cons = RING_RX(bn, NEXT_RX(cons));
	rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	bnge_reuse_rx_data(rxr, cons, cons_rx_buf->data);
	rxr->rx_prod = NEXT_RX(rxr->rx_prod);
	cons_rx_buf->data = NULL;
}

static void bnge_abort_tpa(struct bnge_cp_ring_info *cpr, u16 idx, u32 agg_bufs)
{
	if (agg_bufs)
		bnge_reuse_rx_agg_bufs(cpr, idx, 0, agg_bufs, true);
}

static void bnge_tpa_agg(struct bnge_net *bn, struct bnge_rx_ring_info *rxr,
			 struct rx_agg_cmp *rx_agg)
{
	u16 agg_id = TPA_AGG_AGG_ID(rx_agg);
	struct bnge_tpa_info *tpa_info;

	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
	tpa_info = &rxr->rx_tpa[agg_id];

	if (unlikely(tpa_info->agg_count >= MAX_SKB_FRAGS)) {
		netdev_warn(bn->netdev,
			    "TPA completion count %d exceeds limit for ring %d\n",
			    tpa_info->agg_count, rxr->bnapi->index);

		bnge_sched_reset_rxr(bn, rxr);
		return;
	}

	tpa_info->agg_arr[tpa_info->agg_count++] = *rx_agg;
}

void bnge_reuse_rx_data(struct bnge_rx_ring_info *rxr, u16 cons, void *data)
{
	struct bnge_sw_rx_bd *cons_rx_buf, *prod_rx_buf;
	struct bnge_net *bn = rxr->bnapi->bn;
	struct rx_bd *cons_bd, *prod_bd;
	u16 prod = rxr->rx_prod;

	prod_rx_buf = &rxr->rx_buf_ring[RING_RX(bn, prod)];
	cons_rx_buf = &rxr->rx_buf_ring[cons];

	prod_rx_buf->data = data;
	prod_rx_buf->data_ptr = cons_rx_buf->data_ptr;

	prod_rx_buf->mapping = cons_rx_buf->mapping;

	prod_bd = &rxr->rx_desc_ring[RX_RING(bn, prod)][RX_IDX(prod)];
	cons_bd = &rxr->rx_desc_ring[RX_RING(bn, cons)][RX_IDX(cons)];

	prod_bd->rx_bd_haddr = cons_bd->rx_bd_haddr;
}

static void bnge_deliver_skb(struct bnge_net *bn, struct bnge_napi *bnapi,
			     struct sk_buff *skb)
{
	skb_mark_for_recycle(skb);
	skb_record_rx_queue(skb, bnapi->index);
	napi_gro_receive(&bnapi->napi, skb);
}

static struct sk_buff *bnge_copy_skb(struct bnge_napi *bnapi, u8 *data,
				     unsigned int len, dma_addr_t mapping)
{
	struct bnge_net *bn = bnapi->bn;
	struct bnge_dev *bd = bn->bd;
	struct sk_buff *skb;

	skb = napi_alloc_skb(&bnapi->napi, len);
	if (!skb)
		return NULL;

	dma_sync_single_for_cpu(bd->dev, mapping, len, bn->rx_dir);

	memcpy(skb->data - NET_IP_ALIGN, data - NET_IP_ALIGN,
	       len + NET_IP_ALIGN);

	dma_sync_single_for_device(bd->dev, mapping, len, bn->rx_dir);

	skb_put(skb, len);

	return skb;
}

#ifdef CONFIG_INET
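/* If the aggregated packet is tunneled, mark the UDP tunnel GSO type on the
 * skb so that tcp_gro_complete() and the stack treat the inner headers
 * correctly.
 */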
static void bnge_gro_tunnel(struct sk_buff *skb, __be16 ip_proto)
{
	struct udphdr *uh = NULL;

	if (ip_proto == htons(ETH_P_IP)) {
		struct iphdr *iph = (struct iphdr *)skb->data;

		if (iph->protocol == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	} else {
		struct ipv6hdr *iph = (struct ipv6hdr *)skb->data;

		if (iph->nexthdr == IPPROTO_UDP)
			uh = (struct udphdr *)(iph + 1);
	}
	if (uh) {
		if (uh->check)
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL_CSUM;
		else
			skb_shinfo(skb)->gso_type |= SKB_GSO_UDP_TUNNEL;
	}
}

static struct sk_buff *bnge_gro_func(struct bnge_tpa_info *tpa_info,
				     int payload_off, int tcp_ts,
				     struct sk_buff *skb)
{
	u16 outer_ip_off, inner_ip_off, inner_mac_off;
	u32 hdr_info = tpa_info->hdr_info;
	int iphdr_len, nw_off;

	inner_ip_off = BNGE_TPA_INNER_L3_OFF(hdr_info);
	inner_mac_off = BNGE_TPA_INNER_L2_OFF(hdr_info);
	outer_ip_off = BNGE_TPA_OUTER_L3_OFF(hdr_info);

	nw_off = inner_ip_off - ETH_HLEN;
	skb_set_network_header(skb, nw_off);
	iphdr_len = (tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_IP_TYPE) ?
		    sizeof(struct ipv6hdr) : sizeof(struct iphdr);
	skb_set_transport_header(skb, nw_off + iphdr_len);

	if (inner_mac_off) { /* tunnel */
		__be16 proto = *((__be16 *)(skb->data + outer_ip_off -
					    ETH_HLEN - 2));

		bnge_gro_tunnel(skb, proto);
	}

	return skb;
}

static struct sk_buff *bnge_gro_skb(struct bnge_net *bn,
				    struct bnge_tpa_info *tpa_info,
				    struct rx_tpa_end_cmp *tpa_end,
				    struct rx_tpa_end_cmp_ext *tpa_end1,
				    struct sk_buff *skb)
{
	int payload_off;
	u16 segs;

	segs = TPA_END_TPA_SEGS(tpa_end);
	if (segs == 1)
		return skb;

	NAPI_GRO_CB(skb)->count = segs;
	skb_shinfo(skb)->gso_size =
		le32_to_cpu(tpa_end1->rx_tpa_end_cmp_seg_len);
	skb_shinfo(skb)->gso_type = tpa_info->gso_type;
	payload_off = TPA_END_PAYLOAD_OFF(tpa_end1);
	skb = bnge_gro_func(tpa_info, payload_off,
			    TPA_END_GRO_TS(tpa_end), skb);
	if (likely(skb))
		tcp_gro_complete(skb);

	return skb;
}
#endif

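/* Handle a TPA_END completion: build an skb for the aggregated packet from
 * the head buffer plus any agg buffers, set hash/VLAN/checksum metadata and
 * return it, or NULL if the packet had to be dropped.  ERR_PTR(-EBUSY) means
 * the completion ring does not yet hold all the agg entries.
 */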
static struct sk_buff *bnge_tpa_end(struct bnge_net *bn,
				    struct bnge_cp_ring_info *cpr,
				    u32 *raw_cons,
				    struct rx_tpa_end_cmp *tpa_end,
				    struct rx_tpa_end_cmp_ext *tpa_end1,
				    u8 *event)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct net_device *dev = bn->netdev;
	struct bnge_tpa_info *tpa_info;
	struct bnge_rx_ring_info *rxr;
	u8 *data_ptr, agg_bufs;
	struct sk_buff *skb;
	u16 idx = 0, agg_id;
	dma_addr_t mapping;
	unsigned int len;
	void *data;

	if (unlikely(bnapi->in_reset)) {
		int rc = bnge_discard_rx(bn, cpr, raw_cons, tpa_end);

		if (rc < 0)
			return ERR_PTR(-EBUSY);
		return NULL;
	}

	rxr = bnapi->rx_ring;
	agg_id = TPA_END_AGG_ID(tpa_end);
	agg_id = bnge_lookup_agg_idx(rxr, agg_id);
	agg_bufs = TPA_END_AGG_BUFS(tpa_end1);
	tpa_info = &rxr->rx_tpa[agg_id];
	if (unlikely(agg_bufs != tpa_info->agg_count)) {
		netdev_warn(bn->netdev, "TPA end agg_buf %d != expected agg_bufs %d\n",
			    agg_bufs, tpa_info->agg_count);
		agg_bufs = tpa_info->agg_count;
	}
	tpa_info->agg_count = 0;
	*event |= BNGE_AGG_EVENT;
	bnge_free_agg_idx(rxr, agg_id);
	idx = agg_id;
	data = tpa_info->data;
	data_ptr = tpa_info->data_ptr;
	prefetch(data_ptr);
	len = tpa_info->len;
	mapping = tpa_info->mapping;

	if (unlikely(agg_bufs > MAX_SKB_FRAGS || TPA_END_ERRORS(tpa_end1))) {
		bnge_abort_tpa(cpr, idx, agg_bufs);
		if (agg_bufs > MAX_SKB_FRAGS)
			netdev_warn(bn->netdev, "TPA frags %d exceeded MAX_SKB_FRAGS %d\n",
				    agg_bufs, (int)MAX_SKB_FRAGS);
		return NULL;
	}

	if (len <= bn->rx_copybreak) {
		skb = bnge_copy_skb(bnapi, data_ptr, len, mapping);
		if (!skb) {
			bnge_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
	} else {
		dma_addr_t new_mapping;
		u8 *new_data;

		new_data = __bnge_alloc_rx_frag(bn, &new_mapping, rxr,
						GFP_ATOMIC);
		if (!new_data) {
			bnge_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}

		tpa_info->data = new_data;
		tpa_info->data_ptr = new_data + bn->rx_offset;
		tpa_info->mapping = new_mapping;

		skb = napi_build_skb(data, bn->rx_buf_size);
		dma_sync_single_for_cpu(bn->bd->dev, mapping,
					bn->rx_buf_use_size, bn->rx_dir);

		if (!skb) {
			page_pool_free_va(rxr->head_pool, data, true);
			bnge_abort_tpa(cpr, idx, agg_bufs);
			return NULL;
		}
		skb_mark_for_recycle(skb);
		skb_reserve(skb, bn->rx_offset);
		skb_put(skb, len);
	}

	if (agg_bufs) {
		skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, idx, agg_bufs,
					      true);
		/* Page reuse already handled by bnge_rx_agg_netmems_skb(). */
		if (!skb)
			return NULL;
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (tpa_info->hash_type != PKT_HASH_TYPE_NONE)
		skb_set_hash(skb, tpa_info->rss_hash, tpa_info->hash_type);

	if (tpa_info->vlan_valid &&
	    (dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX)) {
		__be16 vlan_proto = htons(tpa_info->metadata >>
					  RX_CMP_FLAGS2_METADATA_TPID_SFT);
		u16 vtag = tpa_info->metadata & RX_CMP_FLAGS2_METADATA_TCI_MASK;

		if (eth_type_vlan(vlan_proto)) {
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		} else {
			dev_kfree_skb(skb);
			return NULL;
		}
	}

	skb_checksum_none_assert(skb);
	if (likely(tpa_info->flags2 & RX_TPA_START_CMP_FLAGS2_L4_CS_CALC)) {
		skb->ip_summed = CHECKSUM_UNNECESSARY;
		skb->csum_level =
			(tpa_info->flags2 & RX_CMP_FLAGS2_T_L4_CS_CALC) >> 3;
	}

#ifdef CONFIG_INET
	if (bn->priv_flags & BNGE_NET_EN_GRO)
		skb = bnge_gro_skb(bn, tpa_info, tpa_end, tpa_end1, skb);
#endif

	return skb;
}

static enum pkt_hash_types bnge_rss_ext_op(struct bnge_net *bn,
					   struct rx_cmp *rxcmp)
{
	u8 ext_op = RX_CMP_V3_HASH_TYPE(bn->bd, rxcmp);

	switch (ext_op) {
	case EXT_OP_INNER_4:
	case EXT_OP_OUTER_4:
	case EXT_OP_INNFL_3:
	case EXT_OP_OUTFL_3:
		return PKT_HASH_TYPE_L4;
	default:
		return PKT_HASH_TYPE_L3;
	}
}

static struct sk_buff *bnge_rx_vlan(struct sk_buff *skb, u8 cmp_type,
				    struct rx_cmp *rxcmp,
				    struct rx_cmp_ext *rxcmp1)
{
	__be16 vlan_proto;
	u16 vtag;

	if (cmp_type == CMP_TYPE_RX_L2_CMP) {
		__le32 flags2 = rxcmp1->rx_cmp_flags2;
		u32 meta_data;

		if (!(flags2 & cpu_to_le32(RX_CMP_FLAGS2_META_FORMAT_VLAN)))
			return skb;

		meta_data = le32_to_cpu(rxcmp1->rx_cmp_meta_data);
		vtag = meta_data & RX_CMP_FLAGS2_METADATA_TCI_MASK;
		vlan_proto =
			htons(meta_data >> RX_CMP_FLAGS2_METADATA_TPID_SFT);
		if (eth_type_vlan(vlan_proto))
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		else
			goto vlan_err;
	} else if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
		if (RX_CMP_VLAN_VALID(rxcmp)) {
			u32 tpid_sel = RX_CMP_VLAN_TPID_SEL(rxcmp);

			if (tpid_sel == RX_CMP_METADATA1_TPID_8021Q)
				vlan_proto = htons(ETH_P_8021Q);
			else if (tpid_sel == RX_CMP_METADATA1_TPID_8021AD)
				vlan_proto = htons(ETH_P_8021AD);
			else
				goto vlan_err;
			vtag = RX_CMP_METADATA0_TCI(rxcmp1);
			__vlan_hwaccel_put_tag(skb, vlan_proto, vtag);
		}
	}
	return skb;

vlan_err:
	skb_mark_for_recycle(skb);
	dev_kfree_skb(skb);
	return NULL;
}

static struct sk_buff *bnge_rx_skb(struct bnge_net *bn,
				   struct bnge_rx_ring_info *rxr, u16 cons,
				   void *data, u8 *data_ptr,
				   dma_addr_t dma_addr,
				   unsigned int len)
{
	struct bnge_dev *bd = bn->bd;
	u16 prod = rxr->rx_prod;
	struct sk_buff *skb;
	int err;

	err = bnge_alloc_rx_data(bn, rxr, prod, GFP_ATOMIC);
	if (unlikely(err)) {
		bnge_reuse_rx_data(rxr, cons, data);
		return NULL;
	}

	dma_sync_single_for_cpu(bd->dev, dma_addr, len, bn->rx_dir);
	skb = napi_build_skb(data, bn->rx_buf_size);
	if (!skb) {
		page_pool_free_va(rxr->head_pool, data, true);
		return NULL;
	}

	skb_mark_for_recycle(skb);
	skb_reserve(skb, bn->rx_offset);
	skb_put(skb, len);
	return skb;
}

/* returns the following:
 * 1       - 1 packet successfully received
 * 0       - successful TPA_START, packet not completed yet
 * -EBUSY  - completion ring does not have all the agg buffers yet
 * -ENOMEM - packet aborted due to out of memory
 * -EIO    - packet aborted due to hw error indicated in BD
 */
static int bnge_rx_pkt(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
		       u32 *raw_cons, u8 *event)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	struct net_device *dev = bn->netdev;
	struct bnge_rx_ring_info *rxr;
	u32 tmp_raw_cons, flags, misc;
	struct bnge_sw_rx_bd *rx_buf;
	struct rx_cmp_ext *rxcmp1;
	u16 cons, prod, cp_cons;
	u8 *data_ptr, cmp_type;
	struct rx_cmp *rxcmp;
	dma_addr_t dma_addr;
	struct sk_buff *skb;
	unsigned int len;
	u8 agg_bufs;
	void *data;
	int rc = 0;

	rxr = bnapi->rx_ring;

	tmp_raw_cons = *raw_cons;
	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	cmp_type = RX_CMP_TYPE(rxcmp);

	if (cmp_type == CMP_TYPE_RX_TPA_AGG_CMP) {
		bnge_tpa_agg(bn, rxr, (struct rx_agg_cmp *)rxcmp);
		goto next_rx_no_prod_no_len;
	}

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
		return -EBUSY;

	/* The valid test of the entry must be done first before
	 * reading any further.
	 */
	dma_rmb();
	prod = rxr->rx_prod;

	if (cmp_type == CMP_TYPE_RX_L2_TPA_START_CMP ||
	    cmp_type == CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
		bnge_tpa_start(bn, rxr, cmp_type,
			       (struct rx_tpa_start_cmp *)rxcmp,
			       (struct rx_tpa_start_cmp_ext *)rxcmp1);

		*event |= BNGE_RX_EVENT;
		goto next_rx_no_prod_no_len;

	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		skb = bnge_tpa_end(bn, cpr, &tmp_raw_cons,
				   (struct rx_tpa_end_cmp *)rxcmp,
				   (struct rx_tpa_end_cmp_ext *)rxcmp1, event);
		if (IS_ERR(skb))
			return -EBUSY;

		rc = -ENOMEM;
		if (likely(skb)) {
			bnge_deliver_skb(bn, bnapi, skb);
			rc = 1;
		}
		*event |= BNGE_RX_EVENT;
		goto next_rx_no_prod_no_len;
	}

	cons = rxcmp->rx_cmp_opaque;
	if (unlikely(cons != rxr->rx_next_cons)) {
		int rc1 = bnge_discard_rx(bn, cpr, &tmp_raw_cons, rxcmp);

		/* 0xffff is forced error, don't print it */
		if (rxr->rx_next_cons != 0xffff)
			netdev_warn(bn->netdev, "RX cons %x != expected cons %x\n",
				    cons, rxr->rx_next_cons);
		bnge_sched_reset_rxr(bn, rxr);
		if (rc1)
			return rc1;
		goto next_rx_no_prod_no_len;
	}
	rx_buf = &rxr->rx_buf_ring[cons];
	data = rx_buf->data;
	data_ptr = rx_buf->data_ptr;
	prefetch(data_ptr);

	misc = le32_to_cpu(rxcmp->rx_cmp_misc_v1);
	agg_bufs = (misc & RX_CMP_AGG_BUFS) >> RX_CMP_AGG_BUFS_SHIFT;

	if (agg_bufs) {
		if (!bnge_agg_bufs_valid(bn, cpr, agg_bufs, &tmp_raw_cons))
			return -EBUSY;

		cp_cons = NEXT_CMP(bn, cp_cons);
		*event |= BNGE_AGG_EVENT;
	}
	*event |= BNGE_RX_EVENT;

	rx_buf->data = NULL;
	if (rxcmp1->rx_cmp_cfa_code_errors_v2 & RX_CMP_L2_ERRORS) {
		bnge_reuse_rx_data(rxr, cons, data);
		if (agg_bufs)
			bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0, agg_bufs,
					       false);
		rc = -EIO;
		goto next_rx_no_len;
	}

	flags = le32_to_cpu(rxcmp->rx_cmp_len_flags_type);
	len = flags >> RX_CMP_LEN_SHIFT;
	dma_addr = rx_buf->mapping;

	if (len <= bn->rx_copybreak) {
		skb = bnge_copy_skb(bnapi, data_ptr, len, dma_addr);
		bnge_reuse_rx_data(rxr, cons, data);
	} else {
		skb = bnge_rx_skb(bn, rxr, cons, data, data_ptr, dma_addr, len);
	}

	if (!skb) {
		if (agg_bufs)
			bnge_reuse_rx_agg_bufs(cpr, cp_cons, 0,
					       agg_bufs, false);
		goto oom_next_rx;
	}

	if (agg_bufs) {
		skb = bnge_rx_agg_netmems_skb(bn, cpr, skb, cp_cons,
					      agg_bufs, false);
		if (!skb)
			goto oom_next_rx;
	}

	if (RX_CMP_HASH_VALID(rxcmp)) {
		enum pkt_hash_types type;

		if (cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
			type = bnge_rss_ext_op(bn, rxcmp);
		} else {
			u32 itypes = RX_CMP_ITYPES(rxcmp);

			if (itypes == RX_CMP_FLAGS_ITYPE_TCP ||
			    itypes == RX_CMP_FLAGS_ITYPE_UDP)
				type = PKT_HASH_TYPE_L4;
			else
				type = PKT_HASH_TYPE_L3;
		}
		skb_set_hash(skb, le32_to_cpu(rxcmp->rx_cmp_rss_hash), type);
	}

	skb->protocol = eth_type_trans(skb, dev);

	if (skb->dev->features & BNGE_HW_FEATURE_VLAN_ALL_RX) {
		skb = bnge_rx_vlan(skb, cmp_type, rxcmp, rxcmp1);
		if (!skb)
			goto next_rx;
	}

	skb_checksum_none_assert(skb);
	if (RX_CMP_L4_CS_OK(rxcmp1)) {
		if (dev->features & NETIF_F_RXCSUM) {
			skb->ip_summed = CHECKSUM_UNNECESSARY;
			skb->csum_level = RX_CMP_ENCAP(rxcmp1);
		}
	}

	bnge_deliver_skb(bn, bnapi, skb);
	rc = 1;

next_rx:
	/* Update Stats */
next_rx_no_len:
	rxr->rx_prod = NEXT_RX(prod);
	rxr->rx_next_cons = RING_RX(bn, NEXT_RX(cons));

next_rx_no_prod_no_len:
	*raw_cons = tmp_raw_cons;
	return rc;

oom_next_rx:
	rc = -ENOMEM;
	goto next_rx;
}

/* In netpoll mode, if we are using a combined completion ring, we need to
 * discard the rx packets and recycle the buffers.
 */
static int bnge_force_rx_discard(struct bnge_net *bn,
				 struct bnge_cp_ring_info *cpr,
				 u32 *raw_cons, u8 *event)
{
	u32 tmp_raw_cons = *raw_cons;
	struct rx_cmp_ext *rxcmp1;
	struct rx_cmp *rxcmp;
	u16 cp_cons;
	u8 cmp_type;
	int rc;

	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp = (struct rx_cmp *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	tmp_raw_cons = NEXT_RAW_CMP(tmp_raw_cons);
	cp_cons = RING_CMP(bn, tmp_raw_cons);
	rxcmp1 = (struct rx_cmp_ext *)
		&cpr->desc_ring[CP_RING(cp_cons)][CP_IDX(cp_cons)];

	if (!RX_CMP_VALID(bn, rxcmp1, tmp_raw_cons))
		return -EBUSY;

	/* The valid test of the entry must be done first before
	 * reading any further.
	 */
	dma_rmb();
	cmp_type = RX_CMP_TYPE(rxcmp);
	if (cmp_type == CMP_TYPE_RX_L2_CMP ||
	    cmp_type == CMP_TYPE_RX_L2_V3_CMP) {
		rxcmp1->rx_cmp_cfa_code_errors_v2 |=
			cpu_to_le32(RX_CMPL_ERRORS_CRC_ERROR);
	} else if (cmp_type == CMP_TYPE_RX_L2_TPA_END_CMP) {
		struct rx_tpa_end_cmp_ext *tpa_end1;

		tpa_end1 = (struct rx_tpa_end_cmp_ext *)rxcmp1;
		tpa_end1->rx_tpa_end_cmp_errors_v2 |=
			cpu_to_le32(RX_TPA_END_CMP_ERRORS);
	}
	rc = bnge_rx_pkt(bn, cpr, raw_cons, event);
	return rc;
}

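/* Reclaim TX descriptors up to the hardware consumer index: unmap the head
 * and fragment buffers, free the skbs against the NAPI budget, and wake the
 * queue once enough ring space has been freed.
 */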
static void __bnge_tx_int(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
			  int budget)
{
	u16 hw_cons = txr->tx_hw_cons;
	struct bnge_dev *bd = bn->bd;
	unsigned int tx_bytes = 0;
	unsigned int tx_pkts = 0;
	struct netdev_queue *txq;
	u16 cons = txr->tx_cons;
	skb_frag_t *frag;

	txq = netdev_get_tx_queue(bn->netdev, txr->txq_index);

	while (SW_TX_RING(bn, cons) != hw_cons) {
		struct bnge_sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int j, last;

		tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
		skb = tx_buf->skb;
		if (unlikely(!skb)) {
			bnge_sched_reset_txr(bn, txr, cons);
			return;
		}

		cons = NEXT_TX(cons);
		tx_pkts++;
		tx_bytes += skb->len;
		tx_buf->skb = NULL;

		dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), DMA_TO_DEVICE);
		last = tx_buf->nr_frags;

		for (j = 0; j < last; j++) {
			frag = &skb_shinfo(skb)->frags[j];
			cons = NEXT_TX(cons);
			tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, cons)];
			netmem_dma_unmap_page_attrs(bd->dev,
						    dma_unmap_addr(tx_buf,
								   mapping),
						    skb_frag_size(frag),
						    DMA_TO_DEVICE, 0);
		}

		cons = NEXT_TX(cons);

		napi_consume_skb(skb, budget);
	}

	WRITE_ONCE(txr->tx_cons, cons);

	__netif_txq_completed_wake(txq, tx_pkts, tx_bytes,
				   bnge_tx_avail(bn, txr), bn->tx_wake_thresh,
				   (READ_ONCE(txr->dev_state) ==
				    BNGE_DEV_STATE_CLOSING));
}

static void bnge_tx_int(struct bnge_net *bn, struct bnge_napi *bnapi,
			int budget)
{
	struct bnge_tx_ring_info *txr;
	int i;

	bnge_for_each_napi_tx(i, bnapi, txr) {
		if (txr->tx_hw_cons != SW_TX_RING(bn, txr->tx_cons))
			__bnge_tx_int(bn, txr, budget);
	}

	bnapi->events &= ~BNGE_TX_CMP_EVENT;
}

static void __bnge_poll_work_done(struct bnge_net *bn, struct bnge_napi *bnapi,
				  int budget)
{
	struct bnge_rx_ring_info *rxr = bnapi->rx_ring;

	if ((bnapi->events & BNGE_TX_CMP_EVENT) && !bnapi->tx_fault)
		bnge_tx_int(bn, bnapi, budget);

	if ((bnapi->events & BNGE_RX_EVENT)) {
		bnge_db_write(bn->bd, &rxr->rx_db, rxr->rx_prod);
		bnapi->events &= ~BNGE_RX_EVENT;
	}

	if (bnapi->events & BNGE_AGG_EVENT) {
		bnge_db_write(bn->bd, &rxr->rx_agg_db, rxr->rx_agg_prod);
		bnapi->events &= ~BNGE_AGG_EVENT;
	}
}

static void
bnge_hwrm_update_token(struct bnge_dev *bd, u16 seq_id,
		       enum bnge_hwrm_wait_state state)
{
	struct bnge_hwrm_wait_token *token;

	rcu_read_lock();
	hlist_for_each_entry_rcu(token, &bd->hwrm_pending_list, node) {
		if (token->seq_id == seq_id) {
			WRITE_ONCE(token->state, state);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	dev_err(bd->dev, "Invalid hwrm seq id %d\n", seq_id);
}

static int bnge_hwrm_handler(struct bnge_dev *bd, struct tx_cmp *txcmp)
{
	struct hwrm_cmpl *h_cmpl = (struct hwrm_cmpl *)txcmp;
	u16 cmpl_type = TX_CMP_TYPE(txcmp), seq_id;

	switch (cmpl_type) {
	case CMPL_BASE_TYPE_HWRM_DONE:
		seq_id = le16_to_cpu(h_cmpl->sequence_id);
		bnge_hwrm_update_token(bd, seq_id, BNGE_HWRM_COMPLETE);
		break;

	case CMPL_BASE_TYPE_HWRM_ASYNC_EVENT:
	default:
		break;
	}

	return 0;
}

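/* Process one completion ring: handle TX completions, RX packets and HWRM
 * completions until the ring runs dry or the NAPI budget is exhausted.
 * Returns the number of RX packets processed.
 */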
static int __bnge_poll_work(struct bnge_net *bn, struct bnge_cp_ring_info *cpr,
			    int budget)
{
	struct bnge_napi *bnapi = cpr->bnapi;
	u32 raw_cons = cpr->cp_raw_cons;
	struct tx_cmp *txcmp;
	int rx_pkts = 0;
	u8 event = 0;
	u32 cons;

	cpr->has_more_work = 0;
	cpr->had_work_done = 1;
	while (1) {
		u8 cmp_type;
		int rc;

		cons = RING_CMP(bn, raw_cons);
		txcmp = &cpr->desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!TX_CMP_VALID(bn, txcmp, raw_cons))
			break;

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();
		cmp_type = TX_CMP_TYPE(txcmp);
		if (cmp_type == CMP_TYPE_TX_L2_CMP ||
		    cmp_type == CMP_TYPE_TX_L2_COAL_CMP) {
			u32 opaque = txcmp->tx_cmp_opaque;
			struct bnge_tx_ring_info *txr;
			u16 tx_freed;

			txr = bnapi->tx_ring[TX_OPAQUE_RING(opaque)];
			event |= BNGE_TX_CMP_EVENT;
			if (cmp_type == CMP_TYPE_TX_L2_COAL_CMP)
				txr->tx_hw_cons = TX_CMP_SQ_CONS_IDX(txcmp);
			else
				txr->tx_hw_cons = TX_OPAQUE_PROD(bn, opaque);
			tx_freed = ((txr->tx_hw_cons - txr->tx_cons) &
				    bn->tx_ring_mask);
			/* return full budget so NAPI will complete. */
			if (unlikely(tx_freed >= bn->tx_wake_thresh)) {
				rx_pkts = budget;
				raw_cons = NEXT_RAW_CMP(raw_cons);
				if (budget)
					cpr->has_more_work = 1;
				break;
			}
		} else if (cmp_type >= CMP_TYPE_RX_L2_CMP &&
			   cmp_type <= CMP_TYPE_RX_L2_TPA_START_V3_CMP) {
			if (likely(budget))
				rc = bnge_rx_pkt(bn, cpr, &raw_cons, &event);
			else
				rc = bnge_force_rx_discard(bn, cpr, &raw_cons,
							   &event);
			if (likely(rc >= 0))
				rx_pkts += rc;
			/* Increment rx_pkts when rc is -ENOMEM to count towards
			 * the NAPI budget. Otherwise, we may potentially loop
			 * here forever if we consistently cannot allocate
			 * buffers.
			 */
			else if (rc == -ENOMEM && budget)
				rx_pkts++;
			else if (rc == -EBUSY)	/* partial completion */
				break;
		} else if (unlikely(cmp_type == CMPL_BASE_TYPE_HWRM_DONE ||
				    cmp_type == CMPL_BASE_TYPE_HWRM_FWD_REQ ||
				    cmp_type == CMPL_BA_TY_HWRM_ASY_EVT)) {
			bnge_hwrm_handler(bn->bd, txcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);

		if (rx_pkts && rx_pkts == budget) {
			cpr->has_more_work = 1;
			break;
		}
	}

	cpr->cp_raw_cons = raw_cons;
	bnapi->events |= event;
	return rx_pkts;
}

static void __bnge_poll_cqs_done(struct bnge_net *bn, struct bnge_napi *bnapi,
				 u64 dbr_type, int budget)
{
	struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
	int i;

	for (i = 0; i < nqr->cp_ring_count; i++) {
		struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];
		struct bnge_db_info *db;

		if (cpr->had_work_done) {
			u32 tgl = 0;

			if (dbr_type == DBR_TYPE_CQ_ARMALL) {
				cpr->had_nqe_notify = 0;
				tgl = cpr->toggle;
			}
			db = &cpr->cp_db;
			bnge_writeq(bn->bd,
				    db->db_key64 | dbr_type | DB_TOGGLE(tgl) |
				    DB_RING_IDX(db, cpr->cp_raw_cons),
				    db->doorbell);
			cpr->had_work_done = 0;
		}
	}
	__bnge_poll_work_done(bn, bnapi, budget);
}

static int __bnge_poll_cqs(struct bnge_net *bn, struct bnge_napi *bnapi,
			   int budget)
{
	struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
	int i, work_done = 0;

	for (i = 0; i < nqr->cp_ring_count; i++) {
		struct bnge_cp_ring_info *cpr = &nqr->cp_ring_arr[i];

		if (cpr->had_nqe_notify) {
			work_done += __bnge_poll_work(bn, cpr,
						      budget - work_done);
			nqr->has_more_work |= cpr->has_more_work;
		}
	}
	return work_done;
}

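/* NAPI poll handler: drain the notification queue, polling each completion
 * ring that has posted a CQ notification, then re-arm the NQ doorbell once
 * all outstanding work fits within the budget.
 */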
int bnge_napi_poll(struct napi_struct *napi, int budget)
{
	struct bnge_napi *bnapi = container_of(napi, struct bnge_napi, napi);
	struct bnge_nq_ring_info *nqr = &bnapi->nq_ring;
	u32 raw_cons = nqr->nq_raw_cons;
	struct bnge_net *bn = bnapi->bn;
	struct bnge_dev *bd = bn->bd;
	struct nqe_cn *nqcmp;
	int work_done = 0;
	u32 cons;

	if (nqr->has_more_work) {
		nqr->has_more_work = 0;
		work_done = __bnge_poll_cqs(bn, bnapi, budget);
	}

	while (1) {
		u16 type;

		cons = RING_CMP(bn, raw_cons);
		nqcmp = &nqr->desc_ring[CP_RING(cons)][CP_IDX(cons)];

		if (!NQ_CMP_VALID(bn, nqcmp, raw_cons)) {
			if (nqr->has_more_work)
				break;

			__bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ_ARMALL,
					     budget);
			nqr->nq_raw_cons = raw_cons;
			if (napi_complete_done(napi, work_done))
				BNGE_DB_NQ_ARM(bd, &nqr->nq_db,
					       nqr->nq_raw_cons);
			goto poll_done;
		}

		/* The valid test of the entry must be done first before
		 * reading any further.
		 */
		dma_rmb();

		type = le16_to_cpu(nqcmp->type);
		if (NQE_CN_TYPE(type) == NQ_CN_TYPE_CQ_NOTIFICATION) {
			u32 idx = le32_to_cpu(nqcmp->cq_handle_low);
			u32 cq_type = BNGE_NQ_HDL_TYPE(idx);
			struct bnge_cp_ring_info *cpr;

			/* No more budget for RX work */
			if (budget && work_done >= budget &&
			    cq_type == BNGE_NQ_HDL_TYPE_RX)
				break;

			idx = BNGE_NQ_HDL_IDX(idx);
			cpr = &nqr->cp_ring_arr[idx];
			cpr->had_nqe_notify = 1;
			cpr->toggle = NQE_CN_TOGGLE(type);
			work_done += __bnge_poll_work(bn, cpr,
						      budget - work_done);
			nqr->has_more_work |= cpr->has_more_work;
		} else {
			bnge_hwrm_handler(bn->bd, (struct tx_cmp *)nqcmp);
		}
		raw_cons = NEXT_RAW_CMP(raw_cons);
	}

	__bnge_poll_cqs_done(bn, bnapi, DBR_TYPE_CQ, budget);
	if (raw_cons != nqr->nq_raw_cons) {
		nqr->nq_raw_cons = raw_cons;
		BNGE_DB_NQ(bd, &nqr->nq_db, raw_cons);
	}
poll_done:
	return work_done;
}

static u16 bnge_xmit_get_cfa_action(struct sk_buff *skb)
{
	struct metadata_dst *md_dst = skb_metadata_dst(skb);

	if (!md_dst || md_dst->type != METADATA_HW_PORT_MUX)
		return 0;

	return md_dst->u.port_info.port_id;
}

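/* Map (packet length >> 9) to the TX BD length hint flags; entries beyond
 * 2K all use the same "2048 and larger" hint.
 */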
static const u16 bnge_lhint_arr[] = {
	TX_BD_FLAGS_LHINT_512_AND_SMALLER,
	TX_BD_FLAGS_LHINT_512_TO_1023,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_1024_TO_2047,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
	TX_BD_FLAGS_LHINT_2048_AND_LARGER,
};

static void bnge_txr_db_kick(struct bnge_net *bn, struct bnge_tx_ring_info *txr,
			     u16 prod)
{
	/* Sync BD data before updating doorbell */
	wmb();
	bnge_db_write(bn->bd, &txr->tx_db, prod);
	txr->kick_pending = 0;
}

static u32 bnge_get_gso_hdr_len(struct sk_buff *skb)
{
	bool udp_gso = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4);
	u32 hdr_len;

	if (skb->encapsulation) {
		if (udp_gso)
			hdr_len = skb_inner_transport_offset(skb) +
				  sizeof(struct udphdr);
		else
			hdr_len = skb_inner_tcp_all_headers(skb);
	} else if (udp_gso) {
		hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
	} else {
		hdr_len = skb_tcp_all_headers(skb);
	}

	return hdr_len;
}

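/* Main transmit routine: map the skb head and fragments, fill in the long
 * TX BD plus the extended BD with VLAN/LSO/checksum metadata, then ring the
 * doorbell (or defer the kick when xmit_more is set).
 */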
netdev_tx_t bnge_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 len, free_size, vlan_tag_flags, cfa_action, flags;
	struct bnge_net *bn = netdev_priv(dev);
	struct bnge_tx_ring_info *txr;
	struct bnge_dev *bd = bn->bd;
	struct bnge_sw_tx_bd *tx_buf;
	struct tx_bd *txbd, *txbd0;
	struct netdev_queue *txq;
	struct tx_bd_ext *txbd1;
	u16 prod, last_frag;
	unsigned int length;
	dma_addr_t mapping;
	__le32 lflags = 0;
	skb_frag_t *frag;
	int i;

	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	txr = &bn->tx_ring[bn->tx_ring_map[i]];
	prod = txr->tx_prod;

	free_size = bnge_tx_avail(bn, txr);
	if (unlikely(free_size < skb_shinfo(skb)->nr_frags + 2)) {
		/* We must have raced with NAPI cleanup */
		if (net_ratelimit() && txr->kick_pending)
			netif_warn(bn, tx_err, dev,
				   "bnge: ring busy w/ flush pending!\n");
		if (!netif_txq_try_stop(txq, bnge_tx_avail(bn, txr),
					bn->tx_wake_thresh))
			return NETDEV_TX_BUSY;
	}

	last_frag = skb_shinfo(skb)->nr_frags;

	txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];

	tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
	tx_buf->skb = skb;
	tx_buf->nr_frags = last_frag;

	vlan_tag_flags = 0;
	cfa_action = bnge_xmit_get_cfa_action(skb);
	if (skb_vlan_tag_present(skb)) {
		vlan_tag_flags = TX_BD_CFA_META_KEY_VLAN |
				 skb_vlan_tag_get(skb);
		/* Currently supports 8021Q, 8021AD vlan offloads
		 * QINQ1, QINQ2, QINQ3 vlan headers are deprecated
		 */
		if (skb->vlan_proto == htons(ETH_P_8021Q))
			vlan_tag_flags |= 1 << TX_BD_CFA_META_TPID_SHIFT;
	}

	if (unlikely(skb->no_fcs))
		lflags |= cpu_to_le32(TX_BD_FLAGS_NO_CRC);

	if (eth_skb_pad(skb))
		goto tx_kick_pending;

	len = skb_headlen(skb);

	mapping = dma_map_single(bd->dev, skb->data, len, DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(bd->dev, mapping)))
		goto tx_free;

	dma_unmap_addr_set(tx_buf, mapping, mapping);
	flags = (len << TX_BD_LEN_SHIFT) | TX_BD_TYPE_LONG_TX_BD |
		TX_BD_CNT(last_frag + 2);

	txbd->tx_bd_haddr = cpu_to_le64(mapping);
	txbd->tx_bd_opaque = SET_TX_OPAQUE(bn, txr, prod, 2 + last_frag);

	prod = NEXT_TX(prod);
	txbd1 = (struct tx_bd_ext *)
		&txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];

	if (skb_is_gso(skb)) {
		u32 hdr_len = bnge_get_gso_hdr_len(skb);

		lflags |= cpu_to_le32(TX_BD_FLAGS_LSO | TX_BD_FLAGS_T_IPID |
				      (hdr_len << (TX_BD_HSIZE_SHIFT - 1)));
		length = skb_shinfo(skb)->gso_size;
		txbd1->tx_bd_mss = cpu_to_le32(length);
		length += hdr_len;
	} else {
		length = skb->len;
		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			lflags |= cpu_to_le32(TX_BD_FLAGS_TCP_UDP_CHKSUM);
			txbd1->tx_bd_mss = 0;
		}
	}

	flags |= bnge_lhint_arr[length >> 9];

	txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	txbd1->tx_bd_hsize_lflags = lflags;
	txbd1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
	txbd1->tx_bd_cfa_action =
		cpu_to_le32(cfa_action << TX_BD_CFA_ACTION_SHIFT);
	txbd0 = txbd;
	for (i = 0; i < last_frag; i++) {
		frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX(prod);
		txbd = &txr->tx_desc_ring[TX_RING(bn, prod)][TX_IDX(prod)];

		len = skb_frag_size(frag);
		mapping = skb_frag_dma_map(bd->dev, frag, 0, len,
					   DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(bd->dev, mapping)))
			goto tx_dma_error;

		tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
		netmem_dma_unmap_addr_set(skb_frag_netmem(frag), tx_buf,
					  mapping, mapping);

		txbd->tx_bd_haddr = cpu_to_le64(mapping);

		flags = len << TX_BD_LEN_SHIFT;
		txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
	}

	flags &= ~TX_BD_LEN;
	txbd->tx_bd_len_flags_type =
		cpu_to_le32(((len) << TX_BD_LEN_SHIFT) | flags |
			    TX_BD_FLAGS_PACKET_END);

	netdev_tx_sent_queue(txq, skb->len);

	prod = NEXT_TX(prod);
	WRITE_ONCE(txr->tx_prod, prod);

	if (!netdev_xmit_more() || netif_xmit_stopped(txq)) {
		bnge_txr_db_kick(bn, txr, prod);
	} else {
		if (free_size >= bn->tx_wake_thresh)
			txbd0->tx_bd_len_flags_type |=
				cpu_to_le32(TX_BD_FLAGS_NO_CMPL);
		txr->kick_pending = 1;
	}

	if (unlikely(bnge_tx_avail(bn, txr) <= MAX_SKB_FRAGS + 1)) {
		if (netdev_xmit_more()) {
			txbd0->tx_bd_len_flags_type &=
				cpu_to_le32(~TX_BD_FLAGS_NO_CMPL);
			bnge_txr_db_kick(bn, txr, prod);
		}

		netif_txq_try_stop(txq, bnge_tx_avail(bn, txr),
				   bn->tx_wake_thresh);
	}
	return NETDEV_TX_OK;

tx_dma_error:
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
	dma_unmap_single(bd->dev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), DMA_TO_DEVICE);
	prod = NEXT_TX(prod);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX(prod);
		tx_buf = &txr->tx_buf_ring[SW_TX_RING(bn, prod)];
		frag = &skb_shinfo(skb)->frags[i];
		netmem_dma_unmap_page_attrs(bd->dev,
					    dma_unmap_addr(tx_buf, mapping),
					    skb_frag_size(frag),
					    DMA_TO_DEVICE, 0);
	}

tx_free:
	dev_kfree_skb_any(skb);

tx_kick_pending:
	if (txr->kick_pending)
		bnge_txr_db_kick(bn, txr, txr->tx_prod);
	txr->tx_buf_ring[SW_TX_RING(bn, txr->tx_prod)].skb = NULL;
	dev_core_stats_tx_dropped_inc(dev);
	return NETDEV_TX_OK;
}

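/* ndo_features_check: drop SG when an skb carries more fragments than the
 * TX BD chain supports, and drop checksum/GSO offloads when the packet (or
 * GSO segment) is larger than the biggest length hint we can describe.
 */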
netdev_features_t bnge_features_check(struct sk_buff *skb,
				      struct net_device *dev,
				      netdev_features_t features)
{
	u32 len;

	features = vlan_features_check(skb, features);
#if (MAX_SKB_FRAGS > TX_MAX_FRAGS)
	if (skb_shinfo(skb)->nr_frags > TX_MAX_FRAGS)
		features &= ~NETIF_F_SG;
#endif

	if (skb_is_gso(skb))
		len = bnge_get_gso_hdr_len(skb) + skb_shinfo(skb)->gso_size;
	else
		len = skb->len;

	len >>= 9;
	if (unlikely(len >= ARRAY_SIZE(bnge_lhint_arr)))
		features &= ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);

	return features;
}