1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2021, Microsoft Corporation. */
3
4 #include <uapi/linux/bpf.h>
5
6 #include <linux/debugfs.h>
7 #include <linux/inetdevice.h>
8 #include <linux/etherdevice.h>
9 #include <linux/ethtool.h>
10 #include <linux/filter.h>
11 #include <linux/mm.h>
12 #include <linux/pci.h>
13 #include <linux/export.h>
14 #include <linux/skbuff.h>
15
16 #include <net/checksum.h>
17 #include <net/ip6_checksum.h>
18 #include <net/netdev_lock.h>
19 #include <net/page_pool/helpers.h>
20 #include <net/xdp.h>
21
22 #include <net/mana/mana.h>
23 #include <net/mana/mana_auxiliary.h>
24 #include <net/mana/hw_channel.h>
25
26 static DEFINE_IDA(mana_adev_ida);
27
static int mana_adev_idx_alloc(void)
29 {
30 return ida_alloc(&mana_adev_ida, GFP_KERNEL);
31 }
32
static void mana_adev_idx_free(int idx)
34 {
35 ida_free(&mana_adev_ida, idx);
36 }
37
static ssize_t mana_dbg_q_read(struct file *filp, char __user *buf, size_t count,
39 loff_t *pos)
40 {
41 struct gdma_queue *gdma_q = filp->private_data;
42
43 return simple_read_from_buffer(buf, count, pos, gdma_q->queue_mem_ptr,
44 gdma_q->queue_size);
45 }
46
47 static const struct file_operations mana_dbg_q_fops = {
48 .owner = THIS_MODULE,
49 .open = simple_open,
50 .read = mana_dbg_q_read,
51 };
52
static bool mana_en_need_log(struct mana_port_context *apc, int err)
54 {
55 if (apc && apc->ac && apc->ac->gdma_dev &&
56 apc->ac->gdma_dev->gdma_context)
57 return mana_need_log(apc->ac->gdma_dev->gdma_context, err);
58 else
59 return true;
60 }
61
static void mana_put_rx_page(struct mana_rxq *rxq, struct page *page,
63 bool from_pool)
64 {
65 if (from_pool)
66 page_pool_put_full_page(rxq->page_pool, page, false);
67 else
68 put_page(page);
69 }
70
71 /* Microsoft Azure Network Adapter (MANA) functions */
72
static int mana_open(struct net_device *ndev)
74 {
75 struct mana_port_context *apc = netdev_priv(ndev);
int err;

err = mana_alloc_queues(ndev);
if (err) {
80 netdev_err(ndev, "%s failed to allocate queues: %d\n", __func__, err);
81 return err;
82 }
83
84 apc->port_is_up = true;
85
86 /* Ensure port state updated before txq state */
87 smp_wmb();
88
89 netif_tx_wake_all_queues(ndev);
90 netdev_dbg(ndev, "%s successful\n", __func__);
91 return 0;
92 }
93
static int mana_close(struct net_device *ndev)
95 {
96 struct mana_port_context *apc = netdev_priv(ndev);
97
98 if (!apc->port_is_up)
99 return 0;
100
101 return mana_detach(ndev, true);
102 }
103
static void mana_link_state_handle(struct work_struct *w)
105 {
106 struct mana_context *ac;
107 struct net_device *ndev;
108 u32 link_event;
109 bool link_up;
110 int i;
111
112 ac = container_of(w, struct mana_context, link_change_work);
113
114 rtnl_lock();
115
116 link_event = READ_ONCE(ac->link_event);
117
118 if (link_event == HWC_DATA_HW_LINK_CONNECT)
119 link_up = true;
120 else if (link_event == HWC_DATA_HW_LINK_DISCONNECT)
121 link_up = false;
122 else
123 goto out;
124
125 /* Process all ports */
126 for (i = 0; i < ac->num_ports; i++) {
127 ndev = ac->ports[i];
128 if (!ndev)
129 continue;
130
131 if (link_up) {
132 netif_carrier_on(ndev);
133
134 __netdev_notify_peers(ndev);
135 } else {
136 netif_carrier_off(ndev);
137 }
138 }
139
140 out:
141 rtnl_unlock();
142 }
143
static bool mana_can_tx(struct gdma_queue *wq)
145 {
146 return mana_gd_wq_avail_space(wq) >= MAX_TX_WQE_SIZE;
147 }
148
static unsigned int mana_checksum_info(struct sk_buff *skb)
150 {
151 if (skb->protocol == htons(ETH_P_IP)) {
152 struct iphdr *ip = ip_hdr(skb);
153
154 if (ip->protocol == IPPROTO_TCP)
155 return IPPROTO_TCP;
156
157 if (ip->protocol == IPPROTO_UDP)
158 return IPPROTO_UDP;
159 } else if (skb->protocol == htons(ETH_P_IPV6)) {
160 struct ipv6hdr *ip6 = ipv6_hdr(skb);
161
162 if (ip6->nexthdr == IPPROTO_TCP)
163 return IPPROTO_TCP;
164
165 if (ip6->nexthdr == IPPROTO_UDP)
166 return IPPROTO_UDP;
167 }
168
169 /* No csum offloading */
170 return 0;
171 }
172
static void mana_add_sge(struct mana_tx_package *tp, struct mana_skb_head *ash,
174 int sg_i, dma_addr_t da, int sge_len, u32 gpa_mkey)
175 {
176 ash->dma_handle[sg_i] = da;
177 ash->size[sg_i] = sge_len;
178
179 tp->wqe_req.sgl[sg_i].address = da;
180 tp->wqe_req.sgl[sg_i].mem_key = gpa_mkey;
181 tp->wqe_req.sgl[sg_i].size = sge_len;
182 }
183
static int mana_map_skb(struct sk_buff *skb, struct mana_port_context *apc,
185 struct mana_tx_package *tp, int gso_hs)
186 {
187 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
188 int hsg = 1; /* num of SGEs of linear part */
189 struct gdma_dev *gd = apc->ac->gdma_dev;
190 int skb_hlen = skb_headlen(skb);
191 int sge0_len, sge1_len = 0;
192 struct gdma_context *gc;
193 struct device *dev;
194 skb_frag_t *frag;
195 dma_addr_t da;
196 int sg_i;
197 int i;
198
199 gc = gd->gdma_context;
200 dev = gc->dev;
201
202 if (gso_hs && gso_hs < skb_hlen) {
203 sge0_len = gso_hs;
204 sge1_len = skb_hlen - gso_hs;
205 } else {
206 sge0_len = skb_hlen;
207 }
208
209 da = dma_map_single(dev, skb->data, sge0_len, DMA_TO_DEVICE);
210 if (dma_mapping_error(dev, da))
211 return -ENOMEM;
212
213 mana_add_sge(tp, ash, 0, da, sge0_len, gd->gpa_mkey);
214
215 if (sge1_len) {
216 sg_i = 1;
217 da = dma_map_single(dev, skb->data + sge0_len, sge1_len,
218 DMA_TO_DEVICE);
219 if (dma_mapping_error(dev, da))
220 goto frag_err;
221
222 mana_add_sge(tp, ash, sg_i, da, sge1_len, gd->gpa_mkey);
223 hsg = 2;
224 }
225
226 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
227 sg_i = hsg + i;
228
229 frag = &skb_shinfo(skb)->frags[i];
230 da = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
231 DMA_TO_DEVICE);
232 if (dma_mapping_error(dev, da))
233 goto frag_err;
234
235 mana_add_sge(tp, ash, sg_i, da, skb_frag_size(frag),
236 gd->gpa_mkey);
237 }
238
239 return 0;
240
241 frag_err:
242 if (net_ratelimit())
243 netdev_err(apc->ndev, "Failed to map skb of size %u to DMA\n",
244 skb->len);
245 for (i = sg_i - 1; i >= hsg; i--)
246 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
247 DMA_TO_DEVICE);
248
249 for (i = hsg - 1; i >= 0; i--)
250 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
251 DMA_TO_DEVICE);
252
253 return -ENOMEM;
254 }
255
256 /* Handle the case when GSO SKB linear length is too large.
257 * MANA NIC requires GSO packets to put only the packet header to SGE0.
258 * So, we need 2 SGEs for the skb linear part which contains more than the
259 * header.
260 * Return a positive value for the number of SGEs, or a negative value
261 * for an error.
262 */
static int mana_fix_skb_head(struct net_device *ndev, struct sk_buff *skb,
264 int gso_hs)
265 {
266 int num_sge = 1 + skb_shinfo(skb)->nr_frags;
267 int skb_hlen = skb_headlen(skb);
268
269 if (gso_hs < skb_hlen) {
270 num_sge++;
271 } else if (gso_hs > skb_hlen) {
272 if (net_ratelimit())
273 netdev_err(ndev,
274 "TX nonlinear head: hs:%d, skb_hlen:%d\n",
275 gso_hs, skb_hlen);
276
277 return -EINVAL;
278 }
279
280 return num_sge;
281 }
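
/* Illustrative SGE accounting for the function above (hypothetical values):
 * with gso_hs = 54, skb_headlen() = 200 and nr_frags = 3, the linear part
 * holds more than the headers, so it is split into two SGEs (header plus the
 * remaining 146 bytes) and num_sge = 1 + 3 + 1 = 5. If gso_hs equals
 * skb_headlen(), the linear part stays in one SGE and num_sge = 1 + 3 = 4.
 */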
282
283 /* Get the GSO packet's header size */
static int mana_get_gso_hs(struct sk_buff *skb)
285 {
286 int gso_hs;
287
288 if (skb->encapsulation) {
289 gso_hs = skb_inner_tcp_all_headers(skb);
290 } else {
291 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
292 gso_hs = skb_transport_offset(skb) +
293 sizeof(struct udphdr);
294 } else {
295 gso_hs = skb_tcp_all_headers(skb);
296 }
297 }
298
299 return gso_hs;
300 }
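
/* Illustrative values for the helper above: for a plain TCP GSO skb with no
 * options, gso_hs is the MAC + IP + TCP header length, e.g. 14 + 20 + 20 = 54
 * bytes; for UDP GSO (SKB_GSO_UDP_L4) it is the transport offset plus the
 * 8-byte UDP header.
 */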
301
static void mana_per_port_queue_reset_work_handler(struct work_struct *work)
303 {
304 struct mana_port_context *apc = container_of(work,
305 struct mana_port_context,
306 queue_reset_work);
307 struct net_device *ndev = apc->ndev;
308 int err;
309
310 rtnl_lock();
311
312 /* Pre-allocate buffers to prevent failure in mana_attach later */
313 err = mana_pre_alloc_rxbufs(apc, ndev->mtu, apc->num_queues);
314 if (err) {
315 netdev_err(ndev, "Insufficient memory for reset post tx stall detection\n");
316 goto out;
317 }
318
319 err = mana_detach(ndev, false);
320 if (err) {
321 netdev_err(ndev, "mana_detach failed: %d\n", err);
322 goto dealloc_pre_rxbufs;
323 }
324
325 err = mana_attach(ndev);
326 if (err)
327 netdev_err(ndev, "mana_attach failed: %d\n", err);
328
329 dealloc_pre_rxbufs:
330 mana_pre_dealloc_rxbufs(apc);
331 out:
332 rtnl_unlock();
333 }
334
netdev_tx_t mana_start_xmit(struct sk_buff *skb, struct net_device *ndev)
336 {
337 enum mana_tx_pkt_format pkt_fmt = MANA_SHORT_PKT_FMT;
338 struct mana_port_context *apc = netdev_priv(ndev);
339 int gso_hs = 0; /* zero for non-GSO pkts */
340 u16 txq_idx = skb_get_queue_mapping(skb);
341 struct gdma_dev *gd = apc->ac->gdma_dev;
342 bool ipv4 = false, ipv6 = false;
343 struct mana_tx_package pkg = {};
344 struct netdev_queue *net_txq;
345 struct mana_stats_tx *tx_stats;
346 struct gdma_queue *gdma_sq;
347 int err, len, num_gso_seg;
348 unsigned int csum_type;
349 struct mana_txq *txq;
350 struct mana_cq *cq;
351
352 if (unlikely(!apc->port_is_up))
353 goto tx_drop;
354
355 if (skb_cow_head(skb, MANA_HEADROOM))
356 goto tx_drop_count;
357
358 txq = &apc->tx_qp[txq_idx].txq;
359 gdma_sq = txq->gdma_sq;
360 cq = &apc->tx_qp[txq_idx].tx_cq;
361 tx_stats = &txq->stats;
362
363 BUILD_BUG_ON(MAX_TX_WQE_SGL_ENTRIES != MANA_MAX_TX_WQE_SGL_ENTRIES);
364 if (MAX_SKB_FRAGS + 2 > MAX_TX_WQE_SGL_ENTRIES &&
365 skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
/* A GSO skb exceeding the hardware SGE limit is not expected here,
 * as such skbs are handled in the mana_features_check() callback.
 */
369 if (skb_linearize(skb)) {
370 netdev_warn_once(ndev, "Failed to linearize skb with nr_frags=%d and is_gso=%d\n",
371 skb_shinfo(skb)->nr_frags,
372 skb_is_gso(skb));
373 goto tx_drop_count;
374 }
375 apc->eth_stats.tx_linear_pkt_cnt++;
376 }
377
378 pkg.tx_oob.s_oob.vcq_num = cq->gdma_id;
379 pkg.tx_oob.s_oob.vsq_frame = txq->vsq_frame;
380
381 if (txq->vp_offset > MANA_SHORT_VPORT_OFFSET_MAX) {
382 pkg.tx_oob.l_oob.long_vp_offset = txq->vp_offset;
383 pkt_fmt = MANA_LONG_PKT_FMT;
384 } else {
385 pkg.tx_oob.s_oob.short_vp_offset = txq->vp_offset;
386 }
387
388 if (skb_vlan_tag_present(skb)) {
389 pkt_fmt = MANA_LONG_PKT_FMT;
390 pkg.tx_oob.l_oob.inject_vlan_pri_tag = 1;
391 pkg.tx_oob.l_oob.pcp = skb_vlan_tag_get_prio(skb);
392 pkg.tx_oob.l_oob.dei = skb_vlan_tag_get_cfi(skb);
393 pkg.tx_oob.l_oob.vlan_id = skb_vlan_tag_get_id(skb);
394 }
395
396 pkg.tx_oob.s_oob.pkt_fmt = pkt_fmt;
397
398 if (pkt_fmt == MANA_SHORT_PKT_FMT) {
399 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_short_oob);
400 u64_stats_update_begin(&tx_stats->syncp);
401 tx_stats->short_pkt_fmt++;
402 u64_stats_update_end(&tx_stats->syncp);
403 } else {
404 pkg.wqe_req.inline_oob_size = sizeof(struct mana_tx_oob);
405 u64_stats_update_begin(&tx_stats->syncp);
406 tx_stats->long_pkt_fmt++;
407 u64_stats_update_end(&tx_stats->syncp);
408 }
409
410 pkg.wqe_req.inline_oob_data = &pkg.tx_oob;
411 pkg.wqe_req.flags = 0;
412 pkg.wqe_req.client_data_unit = 0;
413
414 pkg.wqe_req.num_sge = 1 + skb_shinfo(skb)->nr_frags;
415
416 if (skb->protocol == htons(ETH_P_IP))
417 ipv4 = true;
418 else if (skb->protocol == htons(ETH_P_IPV6))
419 ipv6 = true;
420
421 if (skb_is_gso(skb)) {
422 int num_sge;
423
424 gso_hs = mana_get_gso_hs(skb);
425
426 num_sge = mana_fix_skb_head(ndev, skb, gso_hs);
427 if (num_sge > 0)
428 pkg.wqe_req.num_sge = num_sge;
429 else
430 goto tx_drop_count;
431
432 u64_stats_update_begin(&tx_stats->syncp);
433 if (skb->encapsulation) {
434 tx_stats->tso_inner_packets++;
435 tx_stats->tso_inner_bytes += skb->len - gso_hs;
436 } else {
437 tx_stats->tso_packets++;
438 tx_stats->tso_bytes += skb->len - gso_hs;
439 }
440 u64_stats_update_end(&tx_stats->syncp);
441
442 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
443 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
444
445 pkg.tx_oob.s_oob.comp_iphdr_csum = 1;
446 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
447 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
448
449 pkg.wqe_req.client_data_unit = skb_shinfo(skb)->gso_size;
450 pkg.wqe_req.flags = GDMA_WR_OOB_IN_SGL | GDMA_WR_PAD_BY_SGE0;
451 if (ipv4) {
452 ip_hdr(skb)->tot_len = 0;
453 ip_hdr(skb)->check = 0;
454 tcp_hdr(skb)->check =
455 ~csum_tcpudp_magic(ip_hdr(skb)->saddr,
456 ip_hdr(skb)->daddr, 0,
457 IPPROTO_TCP, 0);
458 } else {
459 ipv6_hdr(skb)->payload_len = 0;
460 tcp_hdr(skb)->check =
461 ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
462 &ipv6_hdr(skb)->daddr, 0,
463 IPPROTO_TCP, 0);
464 }
465 } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
466 csum_type = mana_checksum_info(skb);
467
468 u64_stats_update_begin(&tx_stats->syncp);
469 tx_stats->csum_partial++;
470 u64_stats_update_end(&tx_stats->syncp);
471
472 if (csum_type == IPPROTO_TCP) {
473 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
474 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
475
476 pkg.tx_oob.s_oob.comp_tcp_csum = 1;
477 pkg.tx_oob.s_oob.trans_off = skb_transport_offset(skb);
478
479 } else if (csum_type == IPPROTO_UDP) {
480 pkg.tx_oob.s_oob.is_outer_ipv4 = ipv4;
481 pkg.tx_oob.s_oob.is_outer_ipv6 = ipv6;
482
483 pkg.tx_oob.s_oob.comp_udp_csum = 1;
484 } else {
485 /* Can't do offload of this type of checksum */
486 if (skb_checksum_help(skb))
487 goto tx_drop_count;
488 }
489 }
490
491 if (pkg.wqe_req.num_sge <= ARRAY_SIZE(pkg.sgl_array)) {
492 pkg.wqe_req.sgl = pkg.sgl_array;
493 } else {
pkg.sgl_ptr = kmalloc_array(pkg.wqe_req.num_sge, sizeof(struct gdma_sge), GFP_ATOMIC);
496 if (!pkg.sgl_ptr)
497 goto tx_drop_count;
498
499 pkg.wqe_req.sgl = pkg.sgl_ptr;
500 }
501
502 if (mana_map_skb(skb, apc, &pkg, gso_hs)) {
503 u64_stats_update_begin(&tx_stats->syncp);
504 tx_stats->mana_map_err++;
505 u64_stats_update_end(&tx_stats->syncp);
506 goto free_sgl_ptr;
507 }
508
509 skb_queue_tail(&txq->pending_skbs, skb);
510
511 len = skb->len;
512 num_gso_seg = skb_is_gso(skb) ? skb_shinfo(skb)->gso_segs : 1;
513 net_txq = netdev_get_tx_queue(ndev, txq_idx);
514
515 err = mana_gd_post_work_request(gdma_sq, &pkg.wqe_req,
516 (struct gdma_posted_wqe_info *)skb->cb);
517 if (!mana_can_tx(gdma_sq)) {
518 netif_tx_stop_queue(net_txq);
519 apc->eth_stats.stop_queue++;
520 }
521
522 if (err) {
523 (void)skb_dequeue_tail(&txq->pending_skbs);
524 mana_unmap_skb(skb, apc);
525 netdev_warn(ndev, "Failed to post TX OOB: %d\n", err);
526 goto free_sgl_ptr;
527 }
528
529 err = NETDEV_TX_OK;
530 atomic_inc(&txq->pending_sends);
531
532 mana_gd_wq_ring_doorbell(gd->gdma_context, gdma_sq);
533
534 /* skb may be freed after mana_gd_post_work_request. Do not use it. */
535 skb = NULL;
536
/* Populate the packet and bytes counters based on post-GSO packet
 * calculations.
 */
540 tx_stats = &txq->stats;
541 u64_stats_update_begin(&tx_stats->syncp);
542 tx_stats->packets += num_gso_seg;
543 tx_stats->bytes += len + ((num_gso_seg - 1) * gso_hs);
544 u64_stats_update_end(&tx_stats->syncp);
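/* Worked example of the accounting above (hypothetical numbers): a GSO skb
 * with len = 4254, gso_hs = 54 and gso_segs = 3 is counted as
 * 4254 + 2 * 54 = 4362 bytes, since each extra segment carries its own copy
 * of the headers on the wire.
 */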
545
546 if (netif_tx_queue_stopped(net_txq) && mana_can_tx(gdma_sq)) {
547 netif_tx_wake_queue(net_txq);
548 apc->eth_stats.wake_queue++;
549 }
550
551 kfree(pkg.sgl_ptr);
552 return err;
553
554 free_sgl_ptr:
555 kfree(pkg.sgl_ptr);
556 tx_drop_count:
557 ndev->stats.tx_dropped++;
558 tx_drop:
559 dev_kfree_skb_any(skb);
560 return NETDEV_TX_OK;
561 }
562
563 #if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
static netdev_features_t mana_features_check(struct sk_buff *skb,
565 struct net_device *ndev,
566 netdev_features_t features)
567 {
568 if (skb_shinfo(skb)->nr_frags + 2 > MAX_TX_WQE_SGL_ENTRIES) {
569 /* Exceeds HW SGE limit.
570 * GSO case:
571 * Disable GSO so the stack will software-segment the skb
572 * into smaller skbs that fit the SGE budget.
573 * Non-GSO case:
574 * The xmit path will attempt skb_linearize() as a fallback.
575 */
576 features &= ~NETIF_F_GSO_MASK;
577 }
578 return features;
579 }
580 #endif
581
static void mana_get_stats64(struct net_device *ndev,
583 struct rtnl_link_stats64 *st)
584 {
585 struct mana_port_context *apc = netdev_priv(ndev);
586 unsigned int num_queues = apc->num_queues;
587 struct mana_stats_rx *rx_stats;
588 struct mana_stats_tx *tx_stats;
589 unsigned int start;
590 u64 packets, bytes;
591 int q;
592
593 if (!apc->port_is_up)
594 return;
595
596 netdev_stats_to_stats64(st, &ndev->stats);
597
598 if (apc->ac->hwc_timeout_occurred)
599 netdev_warn_once(ndev, "HWC timeout occurred\n");
600
601 st->rx_missed_errors = apc->ac->hc_stats.hc_rx_discards_no_wqe;
602
603 for (q = 0; q < num_queues; q++) {
604 rx_stats = &apc->rxqs[q]->stats;
605
606 do {
607 start = u64_stats_fetch_begin(&rx_stats->syncp);
608 packets = rx_stats->packets;
609 bytes = rx_stats->bytes;
610 } while (u64_stats_fetch_retry(&rx_stats->syncp, start));
611
612 st->rx_packets += packets;
613 st->rx_bytes += bytes;
614 }
615
616 for (q = 0; q < num_queues; q++) {
617 tx_stats = &apc->tx_qp[q].txq.stats;
618
619 do {
620 start = u64_stats_fetch_begin(&tx_stats->syncp);
621 packets = tx_stats->packets;
622 bytes = tx_stats->bytes;
623 } while (u64_stats_fetch_retry(&tx_stats->syncp, start));
624
625 st->tx_packets += packets;
626 st->tx_bytes += bytes;
627 }
628 }
629
static int mana_get_tx_queue(struct net_device *ndev, struct sk_buff *skb,
631 int old_q)
632 {
633 struct mana_port_context *apc = netdev_priv(ndev);
634 u32 hash = skb_get_hash(skb);
635 struct sock *sk = skb->sk;
636 int txq;
637
638 txq = apc->indir_table[hash & (apc->indir_table_sz - 1)];
639
640 if (txq != old_q && sk && sk_fullsock(sk) &&
641 rcu_access_pointer(sk->sk_dst_cache))
642 sk_tx_queue_set(sk, txq);
643
644 return txq;
645 }
646
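/* Queue selection below: prefer the TX queue already cached on the socket;
 * otherwise mirror the recorded RX queue; otherwise fall back to the RSS
 * indirection table via mana_get_tx_queue().
 */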
static u16 mana_select_queue(struct net_device *ndev, struct sk_buff *skb,
648 struct net_device *sb_dev)
649 {
650 int txq;
651
652 if (ndev->real_num_tx_queues == 1)
653 return 0;
654
655 txq = sk_tx_queue_get(skb->sk);
656
657 if (txq < 0 || skb->ooo_okay || txq >= ndev->real_num_tx_queues) {
658 if (skb_rx_queue_recorded(skb))
659 txq = skb_get_rx_queue(skb);
660 else
661 txq = mana_get_tx_queue(ndev, skb, txq);
662 }
663
664 return txq;
665 }
666
667 /* Release pre-allocated RX buffers */
void mana_pre_dealloc_rxbufs(struct mana_port_context *mpc)
669 {
670 struct device *dev;
671 int i;
672
673 dev = mpc->ac->gdma_dev->gdma_context->dev;
674
675 if (!mpc->rxbufs_pre)
676 goto out1;
677
678 if (!mpc->das_pre)
679 goto out2;
680
681 while (mpc->rxbpre_total) {
682 i = --mpc->rxbpre_total;
683 dma_unmap_single(dev, mpc->das_pre[i], mpc->rxbpre_datasize,
684 DMA_FROM_DEVICE);
685 put_page(virt_to_head_page(mpc->rxbufs_pre[i]));
686 }
687
688 kfree(mpc->das_pre);
689 mpc->das_pre = NULL;
690
691 out2:
692 kfree(mpc->rxbufs_pre);
693 mpc->rxbufs_pre = NULL;
694
695 out1:
696 mpc->rxbpre_datasize = 0;
697 mpc->rxbpre_alloc_size = 0;
698 mpc->rxbpre_headroom = 0;
699 }
700
701 /* Get a buffer from the pre-allocated RX buffers */
static void *mana_get_rxbuf_pre(struct mana_rxq *rxq, dma_addr_t *da)
703 {
704 struct net_device *ndev = rxq->ndev;
705 struct mana_port_context *mpc;
706 void *va;
707
708 mpc = netdev_priv(ndev);
709
710 if (!mpc->rxbufs_pre || !mpc->das_pre || !mpc->rxbpre_total) {
711 netdev_err(ndev, "No RX pre-allocated bufs\n");
712 return NULL;
713 }
714
715 /* Check sizes to catch unexpected coding error */
716 if (mpc->rxbpre_datasize != rxq->datasize) {
717 netdev_err(ndev, "rxbpre_datasize mismatch: %u: %u\n",
718 mpc->rxbpre_datasize, rxq->datasize);
719 return NULL;
720 }
721
722 if (mpc->rxbpre_alloc_size != rxq->alloc_size) {
723 netdev_err(ndev, "rxbpre_alloc_size mismatch: %u: %u\n",
724 mpc->rxbpre_alloc_size, rxq->alloc_size);
725 return NULL;
726 }
727
728 if (mpc->rxbpre_headroom != rxq->headroom) {
729 netdev_err(ndev, "rxbpre_headroom mismatch: %u: %u\n",
730 mpc->rxbpre_headroom, rxq->headroom);
731 return NULL;
732 }
733
734 mpc->rxbpre_total--;
735
736 *da = mpc->das_pre[mpc->rxbpre_total];
737 va = mpc->rxbufs_pre[mpc->rxbpre_total];
738 mpc->rxbufs_pre[mpc->rxbpre_total] = NULL;
739
740 /* Deallocate the array after all buffers are gone */
741 if (!mpc->rxbpre_total)
742 mana_pre_dealloc_rxbufs(mpc);
743
744 return va;
745 }
746
747 /* Get RX buffer's data size, alloc size, XDP headroom based on MTU */
static void mana_get_rxbuf_cfg(struct mana_port_context *apc,
749 int mtu, u32 *datasize, u32 *alloc_size,
750 u32 *headroom, u32 *frag_count)
751 {
752 u32 len, buf_size;
753
754 /* Calculate datasize first (consistent across all cases) */
755 *datasize = mtu + ETH_HLEN;
756
757 /* For xdp and jumbo frames make sure only one packet fits per page */
758 if (mtu + MANA_RXBUF_PAD > PAGE_SIZE / 2 || mana_xdp_get(apc)) {
759 if (mana_xdp_get(apc)) {
760 *headroom = XDP_PACKET_HEADROOM;
761 *alloc_size = PAGE_SIZE;
762 } else {
763 *headroom = 0; /* no support for XDP */
764 *alloc_size = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD +
765 *headroom);
766 }
767
768 *frag_count = 1;
769
770 /* In the single-buffer path, napi_build_skb() must see the
771 * actual backing allocation size so skb->truesize reflects
772 * the full page (or higher-order page), not just the usable
773 * packet area.
774 */
775 *alloc_size = PAGE_SIZE << get_order(*alloc_size);
776 return;
777 }
778
779 /* Standard MTU case - optimize for multiple packets per page */
780 *headroom = 0;
781
782 /* Calculate base buffer size needed */
783 len = SKB_DATA_ALIGN(mtu + MANA_RXBUF_PAD + *headroom);
784 buf_size = ALIGN(len, MANA_RX_FRAG_ALIGNMENT);
785
786 /* Calculate how many packets can fit in a page */
787 *frag_count = PAGE_SIZE / buf_size;
788 *alloc_size = buf_size;
789 }
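
/* Rough worked example for the standard-MTU branch above (illustrative,
 * assuming a 4K PAGE_SIZE): with a 1500-byte MTU, datasize is 1514 and
 * buf_size comes out a little under 2K after the alignment steps, so
 * frag_count is 2 and two receive buffers share each page.
 */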
790
int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu, int num_queues)
792 {
793 struct device *dev;
794 struct page *page;
795 dma_addr_t da;
796 int num_rxb;
797 void *va;
798 int i;
799
800 mana_get_rxbuf_cfg(mpc, new_mtu, &mpc->rxbpre_datasize,
801 &mpc->rxbpre_alloc_size, &mpc->rxbpre_headroom,
802 &mpc->rxbpre_frag_count);
803
804 dev = mpc->ac->gdma_dev->gdma_context->dev;
805
806 num_rxb = num_queues * mpc->rx_queue_size;
807
808 WARN(mpc->rxbufs_pre, "mana rxbufs_pre exists\n");
809 mpc->rxbufs_pre = kmalloc_array(num_rxb, sizeof(void *), GFP_KERNEL);
810 if (!mpc->rxbufs_pre)
811 goto error;
812
mpc->das_pre = kmalloc_array(num_rxb, sizeof(dma_addr_t), GFP_KERNEL);
814 if (!mpc->das_pre)
815 goto error;
816
817 mpc->rxbpre_total = 0;
818
819 for (i = 0; i < num_rxb; i++) {
820 page = dev_alloc_pages(get_order(mpc->rxbpre_alloc_size));
821 if (!page)
822 goto error;
823
824 va = page_to_virt(page);
825
826 da = dma_map_single(dev, va + mpc->rxbpre_headroom,
827 mpc->rxbpre_datasize, DMA_FROM_DEVICE);
828 if (dma_mapping_error(dev, da)) {
829 put_page(page);
830 goto error;
831 }
832
833 mpc->rxbufs_pre[i] = va;
834 mpc->das_pre[i] = da;
835 mpc->rxbpre_total = i + 1;
836 }
837
838 return 0;
839
840 error:
841 netdev_err(mpc->ndev, "Failed to pre-allocate RX buffers for %d queues\n", num_queues);
842 mana_pre_dealloc_rxbufs(mpc);
843 return -ENOMEM;
844 }
845
static int mana_change_mtu(struct net_device *ndev, int new_mtu)
847 {
848 struct mana_port_context *mpc = netdev_priv(ndev);
849 unsigned int old_mtu = ndev->mtu;
850 int err;
851
852 /* Pre-allocate buffers to prevent failure in mana_attach later */
853 err = mana_pre_alloc_rxbufs(mpc, new_mtu, mpc->num_queues);
854 if (err) {
855 netdev_err(ndev, "Insufficient memory for new MTU\n");
856 return err;
857 }
858
859 err = mana_detach(ndev, false);
860 if (err) {
861 netdev_err(ndev, "mana_detach failed: %d\n", err);
862 goto out;
863 }
864
865 WRITE_ONCE(ndev->mtu, new_mtu);
866
867 err = mana_attach(ndev);
868 if (err) {
869 netdev_err(ndev, "mana_attach failed: %d\n", err);
870 WRITE_ONCE(ndev->mtu, old_mtu);
871 }
872
873 out:
874 mana_pre_dealloc_rxbufs(mpc);
875 return err;
876 }
877
static void mana_tx_timeout(struct net_device *netdev, unsigned int txqueue)
879 {
880 struct mana_port_context *apc = netdev_priv(netdev);
881 struct mana_context *ac = apc->ac;
882 struct gdma_context *gc = ac->gdma_dev->gdma_context;
883
/* Already in service, hence tx queue reset is not required. */
885 if (test_bit(GC_IN_SERVICE, &gc->flags))
886 return;
887
/* Note: if queue reset work is already pending for this port (apc),
 * subsequent requests queued from here are ignored, because we use the
 * same work instance per port (apc).
 */
892 queue_work(ac->per_port_queue_reset_wq, &apc->queue_reset_work);
893 }
894
static int mana_shaper_set(struct net_shaper_binding *binding,
896 const struct net_shaper *shaper,
897 struct netlink_ext_ack *extack)
898 {
899 struct mana_port_context *apc = netdev_priv(binding->netdev);
900 u32 old_speed, rate;
901 int err;
902
903 if (shaper->handle.scope != NET_SHAPER_SCOPE_NETDEV) {
904 NL_SET_ERR_MSG_MOD(extack, "net shaper scope should be netdev");
905 return -EINVAL;
906 }
907
908 if (apc->handle.id && shaper->handle.id != apc->handle.id) {
909 NL_SET_ERR_MSG_MOD(extack, "Cannot create multiple shapers");
910 return -EOPNOTSUPP;
911 }
912
913 if (!shaper->bw_max || (shaper->bw_max % 100000000)) {
914 NL_SET_ERR_MSG_MOD(extack, "Please use multiples of 100Mbps for bandwidth");
915 return -EINVAL;
916 }
917
918 rate = div_u64(shaper->bw_max, 1000); /* Convert bps to Kbps */
919 rate = div_u64(rate, 1000); /* Convert Kbps to Mbps */
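/* e.g. a bw_max of 200000000 bps (200 Mbps) yields rate = 200 */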
920
921 /* Get current speed */
922 err = mana_query_link_cfg(apc);
923 old_speed = (err) ? SPEED_UNKNOWN : apc->speed;
924
925 if (!err) {
926 err = mana_set_bw_clamp(apc, rate, TRI_STATE_TRUE);
927 apc->speed = (err) ? old_speed : rate;
928 apc->handle = (err) ? apc->handle : shaper->handle;
929 }
930
931 return err;
932 }
933
static int mana_shaper_del(struct net_shaper_binding *binding,
935 const struct net_shaper_handle *handle,
936 struct netlink_ext_ack *extack)
937 {
938 struct mana_port_context *apc = netdev_priv(binding->netdev);
939 int err;
940
941 err = mana_set_bw_clamp(apc, 0, TRI_STATE_FALSE);
942
943 if (!err) {
944 /* Reset mana port context parameters */
945 apc->handle.id = 0;
946 apc->handle.scope = NET_SHAPER_SCOPE_UNSPEC;
947 apc->speed = apc->max_speed;
948 }
949
950 return err;
951 }
952
static void mana_shaper_cap(struct net_shaper_binding *binding,
954 enum net_shaper_scope scope,
955 unsigned long *flags)
956 {
957 *flags = BIT(NET_SHAPER_A_CAPS_SUPPORT_BW_MAX) |
958 BIT(NET_SHAPER_A_CAPS_SUPPORT_METRIC_BPS);
959 }
960
961 static const struct net_shaper_ops mana_shaper_ops = {
962 .set = mana_shaper_set,
963 .delete = mana_shaper_del,
964 .capabilities = mana_shaper_cap,
965 };
966
967 static const struct net_device_ops mana_devops = {
968 .ndo_open = mana_open,
969 .ndo_stop = mana_close,
970 .ndo_select_queue = mana_select_queue,
971 #if (MAX_SKB_FRAGS + 2 > MANA_MAX_TX_WQE_SGL_ENTRIES)
972 .ndo_features_check = mana_features_check,
973 #endif
974 .ndo_start_xmit = mana_start_xmit,
975 .ndo_validate_addr = eth_validate_addr,
976 .ndo_get_stats64 = mana_get_stats64,
977 .ndo_bpf = mana_bpf,
978 .ndo_xdp_xmit = mana_xdp_xmit,
979 .ndo_change_mtu = mana_change_mtu,
980 .ndo_tx_timeout = mana_tx_timeout,
981 .net_shaper_ops = &mana_shaper_ops,
982 };
983
static void mana_cleanup_port_context(struct mana_port_context *apc)
985 {
/* Make sure subsequent cleanup attempts don't end up removing an
 * already-cleaned dentry pointer.
 */
990 debugfs_remove(apc->mana_port_debugfs);
991 apc->mana_port_debugfs = NULL;
992 kfree(apc->rxqs);
993 apc->rxqs = NULL;
994 }
995
static void mana_cleanup_indir_table(struct mana_port_context *apc)
997 {
998 apc->indir_table_sz = 0;
999 kfree(apc->indir_table);
1000 kfree(apc->rxobj_table);
1001 }
1002
static int mana_init_port_context(struct mana_port_context *apc)
1004 {
apc->rxqs = kcalloc(apc->num_queues, sizeof(struct mana_rxq *), GFP_KERNEL);
1006
1007 return !apc->rxqs ? -ENOMEM : 0;
1008 }
1009
static int mana_send_request(struct mana_context *ac, void *in_buf,
1011 u32 in_len, void *out_buf, u32 out_len)
1012 {
1013 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1014 struct gdma_resp_hdr *resp = out_buf;
1015 struct gdma_req_hdr *req = in_buf;
1016 struct device *dev = gc->dev;
1017 static atomic_t activity_id;
1018 int err;
1019
1020 req->dev_id = gc->mana.dev_id;
1021 req->activity_id = atomic_inc_return(&activity_id);
1022
1023 err = mana_gd_send_request(gc, in_len, in_buf, out_len,
1024 out_buf);
1025 if (err || resp->status) {
1026 if (err == -EOPNOTSUPP)
1027 return err;
1028
1029 if (req->req.msg_type != MANA_QUERY_PHY_STAT &&
1030 mana_need_log(gc, err))
1031 dev_err(dev, "Command 0x%x failed with status: 0x%x, err: %d\n",
1032 req->req.msg_type, resp->status, err);
1033 return err ? err : -EPROTO;
1034 }
1035
1036 if (req->dev_id.as_uint32 != resp->dev_id.as_uint32 ||
1037 req->activity_id != resp->activity_id) {
1038 dev_err(dev, "Unexpected mana message response: %x,%x,%x,%x\n",
1039 req->dev_id.as_uint32, resp->dev_id.as_uint32,
1040 req->activity_id, resp->activity_id);
1041 return -EPROTO;
1042 }
1043
1044 return 0;
1045 }
1046
static int mana_verify_resp_hdr(const struct gdma_resp_hdr *resp_hdr,
1048 const enum mana_command_code expected_code,
1049 const u32 min_size)
1050 {
1051 if (resp_hdr->response.msg_type != expected_code)
1052 return -EPROTO;
1053
1054 if (resp_hdr->response.msg_version < GDMA_MESSAGE_V1)
1055 return -EPROTO;
1056
1057 if (resp_hdr->response.msg_size < min_size)
1058 return -EPROTO;
1059
1060 return 0;
1061 }
1062
static int mana_pf_register_hw_vport(struct mana_port_context *apc)
1064 {
1065 struct mana_register_hw_vport_resp resp = {};
1066 struct mana_register_hw_vport_req req = {};
1067 int err;
1068
1069 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_HW_PORT,
1070 sizeof(req), sizeof(resp));
1071 req.attached_gfid = 1;
1072 req.is_pf_default_vport = 1;
1073 req.allow_all_ether_types = 1;
1074
1075 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1076 sizeof(resp));
1077 if (err) {
1078 netdev_err(apc->ndev, "Failed to register hw vPort: %d\n", err);
1079 return err;
1080 }
1081
1082 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_HW_PORT,
1083 sizeof(resp));
1084 if (err || resp.hdr.status) {
1085 netdev_err(apc->ndev, "Failed to register hw vPort: %d, 0x%x\n",
1086 err, resp.hdr.status);
1087 return err ? err : -EPROTO;
1088 }
1089
1090 apc->port_handle = resp.hw_vport_handle;
1091 return 0;
1092 }
1093
static void mana_pf_deregister_hw_vport(struct mana_port_context *apc)
1095 {
1096 struct mana_deregister_hw_vport_resp resp = {};
1097 struct mana_deregister_hw_vport_req req = {};
1098 int err;
1099
1100 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_HW_PORT,
1101 sizeof(req), sizeof(resp));
1102 req.hw_vport_handle = apc->port_handle;
1103
1104 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1105 sizeof(resp));
1106 if (err) {
1107 if (mana_en_need_log(apc, err))
1108 netdev_err(apc->ndev, "Failed to unregister hw vPort: %d\n",
1109 err);
1110
1111 return;
1112 }
1113
1114 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_HW_PORT,
1115 sizeof(resp));
1116 if (err || resp.hdr.status)
1117 netdev_err(apc->ndev,
1118 "Failed to deregister hw vPort: %d, 0x%x\n",
1119 err, resp.hdr.status);
1120 }
1121
static int mana_pf_register_filter(struct mana_port_context *apc)
1123 {
1124 struct mana_register_filter_resp resp = {};
1125 struct mana_register_filter_req req = {};
1126 int err;
1127
1128 mana_gd_init_req_hdr(&req.hdr, MANA_REGISTER_FILTER,
1129 sizeof(req), sizeof(resp));
1130 req.vport = apc->port_handle;
1131 memcpy(req.mac_addr, apc->mac_addr, ETH_ALEN);
1132
1133 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1134 sizeof(resp));
1135 if (err) {
1136 netdev_err(apc->ndev, "Failed to register filter: %d\n", err);
1137 return err;
1138 }
1139
1140 err = mana_verify_resp_hdr(&resp.hdr, MANA_REGISTER_FILTER,
1141 sizeof(resp));
1142 if (err || resp.hdr.status) {
1143 netdev_err(apc->ndev, "Failed to register filter: %d, 0x%x\n",
1144 err, resp.hdr.status);
1145 return err ? err : -EPROTO;
1146 }
1147
1148 apc->pf_filter_handle = resp.filter_handle;
1149 return 0;
1150 }
1151
static void mana_pf_deregister_filter(struct mana_port_context *apc)
1153 {
1154 struct mana_deregister_filter_resp resp = {};
1155 struct mana_deregister_filter_req req = {};
1156 int err;
1157
1158 mana_gd_init_req_hdr(&req.hdr, MANA_DEREGISTER_FILTER,
1159 sizeof(req), sizeof(resp));
1160 req.filter_handle = apc->pf_filter_handle;
1161
1162 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1163 sizeof(resp));
1164 if (err) {
1165 if (mana_en_need_log(apc, err))
1166 netdev_err(apc->ndev, "Failed to unregister filter: %d\n",
1167 err);
1168
1169 return;
1170 }
1171
1172 err = mana_verify_resp_hdr(&resp.hdr, MANA_DEREGISTER_FILTER,
1173 sizeof(resp));
1174 if (err || resp.hdr.status)
1175 netdev_err(apc->ndev,
1176 "Failed to deregister filter: %d, 0x%x\n",
1177 err, resp.hdr.status);
1178 }
1179
static int mana_query_device_cfg(struct mana_context *ac, u32 proto_major_ver,
1181 u32 proto_minor_ver, u32 proto_micro_ver,
1182 u16 *max_num_vports, u8 *bm_hostmode)
1183 {
1184 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1185 struct mana_query_device_cfg_resp resp = {};
1186 struct mana_query_device_cfg_req req = {};
1187 struct device *dev = gc->dev;
1188 int err = 0;
1189
1190 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_DEV_CONFIG,
1191 sizeof(req), sizeof(resp));
1192
1193 req.hdr.resp.msg_version = GDMA_MESSAGE_V3;
1194
1195 req.proto_major_ver = proto_major_ver;
1196 req.proto_minor_ver = proto_minor_ver;
1197 req.proto_micro_ver = proto_micro_ver;
1198
1199 err = mana_send_request(ac, &req, sizeof(req), &resp, sizeof(resp));
1200 if (err) {
1201 dev_err(dev, "Failed to query config: %d", err);
1202 return err;
1203 }
1204
1205 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_DEV_CONFIG,
1206 sizeof(resp));
1207 if (err || resp.hdr.status) {
1208 dev_err(dev, "Invalid query result: %d, 0x%x\n", err,
1209 resp.hdr.status);
1210 if (!err)
1211 err = -EPROTO;
1212 return err;
1213 }
1214
1215 *max_num_vports = resp.max_num_vports;
1216
1217 if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2) {
1218 if (resp.adapter_mtu < ETH_MIN_MTU + ETH_HLEN) {
1219 dev_err(dev, "Adapter MTU too small: %u\n",
1220 resp.adapter_mtu);
1221 return -EPROTO;
1222 }
1223 gc->adapter_mtu = resp.adapter_mtu;
1224 } else {
1225 gc->adapter_mtu = ETH_FRAME_LEN;
1226 }
1227
1228 if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V3)
1229 *bm_hostmode = resp.bm_hostmode;
1230 else
1231 *bm_hostmode = 0;
1232
1233 debugfs_create_u16("adapter-MTU", 0400, gc->mana_pci_debugfs, &gc->adapter_mtu);
1234
1235 return 0;
1236 }
1237
static int mana_query_vport_cfg(struct mana_port_context *apc, u32 vport_index,
1239 u32 *max_sq, u32 *max_rq, u32 *num_indir_entry)
1240 {
1241 struct mana_query_vport_cfg_resp resp = {};
1242 struct mana_query_vport_cfg_req req = {};
1243 int err;
1244
1245 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_VPORT_CONFIG,
1246 sizeof(req), sizeof(resp));
1247
1248 req.vport_index = vport_index;
1249
1250 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1251 sizeof(resp));
1252 if (err)
1253 return err;
1254
1255 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_VPORT_CONFIG,
1256 sizeof(resp));
1257 if (err)
1258 return err;
1259
1260 if (resp.hdr.status)
1261 return -EPROTO;
1262
1263 *max_sq = resp.max_num_sq;
1264 *max_rq = resp.max_num_rq;
1265 if (resp.num_indirection_ent > 0 &&
1266 resp.num_indirection_ent <= MANA_INDIRECT_TABLE_MAX_SIZE &&
1267 is_power_of_2(resp.num_indirection_ent)) {
1268 *num_indir_entry = resp.num_indirection_ent;
1269 } else {
1270 netdev_warn(apc->ndev,
1271 "Setting indirection table size to default %d for vPort %d\n",
1272 MANA_INDIRECT_TABLE_DEF_SIZE, apc->port_idx);
1273 *num_indir_entry = MANA_INDIRECT_TABLE_DEF_SIZE;
1274 }
1275
1276 apc->port_handle = resp.vport;
1277 ether_addr_copy(apc->mac_addr, resp.mac_addr);
1278
1279 return 0;
1280 }
1281
void mana_uncfg_vport(struct mana_port_context *apc)
1283 {
1284 mutex_lock(&apc->vport_mutex);
1285 apc->vport_use_count--;
1286 WARN_ON(apc->vport_use_count < 0);
1287 mutex_unlock(&apc->vport_mutex);
1288 }
1289 EXPORT_SYMBOL_NS(mana_uncfg_vport, "NET_MANA");
1290
int mana_cfg_vport(struct mana_port_context *apc, u32 protection_dom_id,
1292 u32 doorbell_pg_id)
1293 {
1294 struct mana_config_vport_resp resp = {};
1295 struct mana_config_vport_req req = {};
1296 int err;
1297
1298 /* This function is used to program the Ethernet port in the hardware
1299 * table. It can be called from the Ethernet driver or the RDMA driver.
1300 *
1301 * For Ethernet usage, the hardware supports only one active user on a
1302 * physical port. The driver checks on the port usage before programming
1303 * the hardware when creating the RAW QP (RDMA driver) or exposing the
1304 * device to kernel NET layer (Ethernet driver).
1305 *
1306 * Because the RDMA driver doesn't know in advance which QP type the
1307 * user will create, it exposes the device with all its ports. The user
* may not be able to create a RAW QP on a port if this port is already
* in use by the Ethernet driver from the kernel.
1310 *
1311 * This physical port limitation only applies to the RAW QP. For RC QP,
1312 * the hardware doesn't have this limitation. The user can create RC
1313 * QPs on a physical port up to the hardware limits independent of the
1314 * Ethernet usage on the same port.
1315 */
1316 mutex_lock(&apc->vport_mutex);
1317 if (apc->vport_use_count > 0) {
1318 mutex_unlock(&apc->vport_mutex);
1319 return -EBUSY;
1320 }
1321 apc->vport_use_count++;
1322 mutex_unlock(&apc->vport_mutex);
1323
1324 mana_gd_init_req_hdr(&req.hdr, MANA_CONFIG_VPORT_TX,
1325 sizeof(req), sizeof(resp));
1326 req.vport = apc->port_handle;
1327 req.pdid = protection_dom_id;
1328 req.doorbell_pageid = doorbell_pg_id;
1329
1330 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1331 sizeof(resp));
1332 if (err) {
1333 netdev_err(apc->ndev, "Failed to configure vPort: %d\n", err);
1334 goto out;
1335 }
1336
1337 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_TX,
1338 sizeof(resp));
1339 if (err || resp.hdr.status) {
1340 netdev_err(apc->ndev, "Failed to configure vPort: %d, 0x%x\n",
1341 err, resp.hdr.status);
1342 if (!err)
1343 err = -EPROTO;
1344
1345 goto out;
1346 }
1347
1348 apc->tx_shortform_allowed = resp.short_form_allowed;
1349 apc->tx_vp_offset = resp.tx_vport_offset;
1350
1351 netdev_info(apc->ndev, "Enabled vPort %llu PD %u DB %u MAC %pM\n",
1352 apc->port_handle, protection_dom_id, doorbell_pg_id, apc->mac_addr);
1353 out:
1354 if (err)
1355 mana_uncfg_vport(apc);
1356
1357 return err;
1358 }
1359 EXPORT_SYMBOL_NS(mana_cfg_vport, "NET_MANA");
1360
static int mana_cfg_vport_steering(struct mana_port_context *apc,
1362 enum TRI_STATE rx,
1363 bool update_default_rxobj, bool update_key,
1364 bool update_tab)
1365 {
1366 struct mana_cfg_rx_steer_req_v2 *req;
1367 struct mana_cfg_rx_steer_resp resp = {};
1368 struct net_device *ndev = apc->ndev;
1369 u32 req_buf_size;
1370 int err;
1371
1372 req_buf_size = struct_size(req, indir_tab, apc->indir_table_sz);
1373 req = kzalloc(req_buf_size, GFP_KERNEL);
1374 if (!req)
1375 return -ENOMEM;
1376
1377 mana_gd_init_req_hdr(&req->hdr, MANA_CONFIG_VPORT_RX, req_buf_size,
1378 sizeof(resp));
1379
1380 req->hdr.req.msg_version = GDMA_MESSAGE_V2;
1381 req->hdr.resp.msg_version = GDMA_MESSAGE_V2;
1382
1383 req->vport = apc->port_handle;
1384 req->num_indir_entries = apc->indir_table_sz;
1385 req->indir_tab_offset = offsetof(struct mana_cfg_rx_steer_req_v2,
1386 indir_tab);
1387 req->rx_enable = rx;
1388 req->rss_enable = apc->rss_state;
1389 req->update_default_rxobj = update_default_rxobj;
1390 req->update_hashkey = update_key;
1391 req->update_indir_tab = update_tab;
1392 req->default_rxobj = apc->default_rxobj;
1393
1394 if (rx != TRI_STATE_FALSE)
1395 req->cqe_coalescing_enable = apc->cqe_coalescing_enable;
1396
1397 if (update_key)
1398 memcpy(&req->hashkey, apc->hashkey, MANA_HASH_KEY_SIZE);
1399
1400 if (update_tab)
1401 memcpy(req->indir_tab, apc->rxobj_table,
1402 flex_array_size(req, indir_tab, req->num_indir_entries));
1403
1404 err = mana_send_request(apc->ac, req, req_buf_size, &resp,
1405 sizeof(resp));
1406 if (err) {
1407 if (mana_en_need_log(apc, err))
1408 netdev_err(ndev, "Failed to configure vPort RX: %d\n", err);
1409
1410 goto out;
1411 }
1412
1413 err = mana_verify_resp_hdr(&resp.hdr, MANA_CONFIG_VPORT_RX,
1414 sizeof(resp));
1415 if (err) {
1416 netdev_err(ndev, "vPort RX configuration failed: %d\n", err);
1417 goto out;
1418 }
1419
1420 if (resp.hdr.status) {
1421 netdev_err(ndev, "vPort RX configuration failed: 0x%x\n",
1422 resp.hdr.status);
1423 err = -EPROTO;
1424 goto out;
1425 }
1426
1427 if (resp.hdr.response.msg_version >= GDMA_MESSAGE_V2)
1428 apc->cqe_coalescing_timeout_ns =
1429 resp.cqe_coalescing_timeout_ns;
1430
1431 netdev_info(ndev, "Configured steering vPort %llu entries %u\n",
1432 apc->port_handle, apc->indir_table_sz);
1433 out:
1434 kfree(req);
1435 return err;
1436 }
1437
int mana_query_link_cfg(struct mana_port_context *apc)
1439 {
struct mana_query_link_config_resp resp = {};
struct mana_query_link_config_req req = {};
struct net_device *ndev = apc->ndev;
1443 int err;
1444
1445 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_LINK_CONFIG,
1446 sizeof(req), sizeof(resp));
1447
1448 req.vport = apc->port_handle;
1449 req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
1450
1451 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1452 sizeof(resp));
1453
1454 if (err) {
1455 if (err == -EOPNOTSUPP) {
1456 netdev_info_once(ndev, "MANA_QUERY_LINK_CONFIG not supported\n");
1457 return err;
1458 }
1459 netdev_err(ndev, "Failed to query link config: %d\n", err);
1460 return err;
1461 }
1462
1463 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_LINK_CONFIG,
1464 sizeof(resp));
1465
1466 if (err || resp.hdr.status) {
1467 netdev_err(ndev, "Failed to query link config: %d, 0x%x\n", err,
1468 resp.hdr.status);
1469 if (!err)
1470 err = -EOPNOTSUPP;
1471 return err;
1472 }
1473
1474 if (resp.qos_unconfigured) {
1475 err = -EINVAL;
1476 return err;
1477 }
1478 apc->speed = resp.link_speed_mbps;
1479 apc->max_speed = resp.qos_speed_mbps;
1480 return 0;
1481 }
1482
int mana_set_bw_clamp(struct mana_port_context *apc, u32 speed,
1484 int enable_clamping)
1485 {
1486 struct mana_set_bw_clamp_resp resp = {};
1487 struct mana_set_bw_clamp_req req = {};
1488 struct net_device *ndev = apc->ndev;
1489 int err;
1490
1491 mana_gd_init_req_hdr(&req.hdr, MANA_SET_BW_CLAMP,
1492 sizeof(req), sizeof(resp));
1493 req.vport = apc->port_handle;
1494 req.link_speed_mbps = speed;
1495 req.enable_clamping = enable_clamping;
1496
1497 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1498 sizeof(resp));
1499
1500 if (err) {
1501 if (err == -EOPNOTSUPP) {
1502 netdev_info_once(ndev, "MANA_SET_BW_CLAMP not supported\n");
1503 return err;
1504 }
1505 netdev_err(ndev, "Failed to set bandwidth clamp for speed %u, err = %d",
1506 speed, err);
1507 return err;
1508 }
1509
1510 err = mana_verify_resp_hdr(&resp.hdr, MANA_SET_BW_CLAMP,
1511 sizeof(resp));
1512
1513 if (err || resp.hdr.status) {
1514 netdev_err(ndev, "Failed to set bandwidth clamp: %d, 0x%x\n", err,
1515 resp.hdr.status);
1516 if (!err)
1517 err = -EOPNOTSUPP;
1518 return err;
1519 }
1520
1521 if (resp.qos_unconfigured)
1522 netdev_info(ndev, "QoS is unconfigured\n");
1523
1524 return 0;
1525 }
1526
int mana_create_wq_obj(struct mana_port_context *apc,
1528 mana_handle_t vport,
1529 u32 wq_type, struct mana_obj_spec *wq_spec,
1530 struct mana_obj_spec *cq_spec,
1531 mana_handle_t *wq_obj)
1532 {
1533 struct mana_create_wqobj_resp resp = {};
1534 struct mana_create_wqobj_req req = {};
1535 struct net_device *ndev = apc->ndev;
1536 int err;
1537
1538 mana_gd_init_req_hdr(&req.hdr, MANA_CREATE_WQ_OBJ,
1539 sizeof(req), sizeof(resp));
1540 req.vport = vport;
1541 req.wq_type = wq_type;
1542 req.wq_gdma_region = wq_spec->gdma_region;
1543 req.cq_gdma_region = cq_spec->gdma_region;
1544 req.wq_size = wq_spec->queue_size;
1545 req.cq_size = cq_spec->queue_size;
1546 req.cq_moderation_ctx_id = cq_spec->modr_ctx_id;
1547 req.cq_parent_qid = cq_spec->attached_eq;
1548
1549 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1550 sizeof(resp));
1551 if (err) {
1552 netdev_err(ndev, "Failed to create WQ object: %d\n", err);
1553 goto out;
1554 }
1555
1556 err = mana_verify_resp_hdr(&resp.hdr, MANA_CREATE_WQ_OBJ,
1557 sizeof(resp));
1558 if (err || resp.hdr.status) {
1559 netdev_err(ndev, "Failed to create WQ object: %d, 0x%x\n", err,
1560 resp.hdr.status);
1561 if (!err)
1562 err = -EPROTO;
1563 goto out;
1564 }
1565
1566 if (resp.wq_obj == INVALID_MANA_HANDLE) {
1567 netdev_err(ndev, "Got an invalid WQ object handle\n");
1568 err = -EPROTO;
1569 goto out;
1570 }
1571
1572 *wq_obj = resp.wq_obj;
1573 wq_spec->queue_index = resp.wq_id;
1574 cq_spec->queue_index = resp.cq_id;
1575
1576 return 0;
1577 out:
1578 return err;
1579 }
1580 EXPORT_SYMBOL_NS(mana_create_wq_obj, "NET_MANA");
1581
void mana_destroy_wq_obj(struct mana_port_context *apc, u32 wq_type,
1583 mana_handle_t wq_obj)
1584 {
1585 struct mana_destroy_wqobj_resp resp = {};
1586 struct mana_destroy_wqobj_req req = {};
1587 struct net_device *ndev = apc->ndev;
1588 int err;
1589
1590 mana_gd_init_req_hdr(&req.hdr, MANA_DESTROY_WQ_OBJ,
1591 sizeof(req), sizeof(resp));
1592 req.wq_type = wq_type;
1593 req.wq_obj_handle = wq_obj;
1594
1595 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1596 sizeof(resp));
1597 if (err) {
1598 if (mana_en_need_log(apc, err))
1599 netdev_err(ndev, "Failed to destroy WQ object: %d\n", err);
1600
1601 return;
1602 }
1603
1604 err = mana_verify_resp_hdr(&resp.hdr, MANA_DESTROY_WQ_OBJ,
1605 sizeof(resp));
1606 if (err || resp.hdr.status)
1607 netdev_err(ndev, "Failed to destroy WQ object: %d, 0x%x\n", err,
1608 resp.hdr.status);
1609 }
1610 EXPORT_SYMBOL_NS(mana_destroy_wq_obj, "NET_MANA");
1611
static void mana_destroy_eq(struct mana_context *ac)
1613 {
1614 struct gdma_context *gc = ac->gdma_dev->gdma_context;
1615 struct gdma_queue *eq;
1616 int i;
1617
1618 if (!ac->eqs)
1619 return;
1620
1621 debugfs_remove_recursive(ac->mana_eqs_debugfs);
1622 ac->mana_eqs_debugfs = NULL;
1623
1624 for (i = 0; i < gc->max_num_queues; i++) {
1625 eq = ac->eqs[i].eq;
1626 if (!eq)
1627 continue;
1628
1629 mana_gd_destroy_queue(gc, eq);
1630 }
1631
1632 kfree(ac->eqs);
1633 ac->eqs = NULL;
1634 }
1635
static void mana_create_eq_debugfs(struct mana_context *ac, int i)
1637 {
1638 struct mana_eq eq = ac->eqs[i];
1639 char eqnum[32];
1640
1641 sprintf(eqnum, "eq%d", i);
1642 eq.mana_eq_debugfs = debugfs_create_dir(eqnum, ac->mana_eqs_debugfs);
1643 debugfs_create_u32("head", 0400, eq.mana_eq_debugfs, &eq.eq->head);
1644 debugfs_create_u32("tail", 0400, eq.mana_eq_debugfs, &eq.eq->tail);
1645 debugfs_create_file("eq_dump", 0400, eq.mana_eq_debugfs, eq.eq, &mana_dbg_q_fops);
1646 }
1647
static int mana_create_eq(struct mana_context *ac)
1649 {
1650 struct gdma_dev *gd = ac->gdma_dev;
1651 struct gdma_context *gc = gd->gdma_context;
1652 struct gdma_queue_spec spec = {};
1653 int err;
1654 int i;
1655
ac->eqs = kcalloc(gc->max_num_queues, sizeof(struct mana_eq), GFP_KERNEL);
1657 if (!ac->eqs)
1658 return -ENOMEM;
1659
1660 spec.type = GDMA_EQ;
1661 spec.monitor_avl_buf = false;
1662 spec.queue_size = EQ_SIZE;
1663 spec.eq.callback = NULL;
1664 spec.eq.context = ac->eqs;
1665 spec.eq.log2_throttle_limit = LOG2_EQ_THROTTLE;
1666
1667 ac->mana_eqs_debugfs = debugfs_create_dir("EQs", gc->mana_pci_debugfs);
1668
1669 for (i = 0; i < gc->max_num_queues; i++) {
1670 spec.eq.msix_index = (i + 1) % gc->num_msix_usable;
1671 err = mana_gd_create_mana_eq(gd, &spec, &ac->eqs[i].eq);
1672 if (err) {
1673 dev_err(gc->dev, "Failed to create EQ %d : %d\n", i, err);
1674 goto out;
1675 }
1676 mana_create_eq_debugfs(ac, i);
1677 }
1678
1679 return 0;
1680 out:
1681 mana_destroy_eq(ac);
1682 return err;
1683 }
1684
static int mana_fence_rq(struct mana_port_context *apc, struct mana_rxq *rxq)
1686 {
1687 struct mana_fence_rq_resp resp = {};
1688 struct mana_fence_rq_req req = {};
1689 int err;
1690
1691 init_completion(&rxq->fence_event);
1692
1693 mana_gd_init_req_hdr(&req.hdr, MANA_FENCE_RQ,
1694 sizeof(req), sizeof(resp));
1695 req.wq_obj_handle = rxq->rxobj;
1696
1697 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
1698 sizeof(resp));
1699 if (err) {
1700 netdev_err(apc->ndev, "Failed to fence RQ %u: %d\n",
1701 rxq->rxq_idx, err);
1702 return err;
1703 }
1704
1705 err = mana_verify_resp_hdr(&resp.hdr, MANA_FENCE_RQ, sizeof(resp));
1706 if (err || resp.hdr.status) {
1707 netdev_err(apc->ndev, "Failed to fence RQ %u: %d, 0x%x\n",
1708 rxq->rxq_idx, err, resp.hdr.status);
1709 if (!err)
1710 err = -EPROTO;
1711
1712 return err;
1713 }
1714
1715 if (wait_for_completion_timeout(&rxq->fence_event, 10 * HZ) == 0) {
1716 netdev_err(apc->ndev, "Failed to fence RQ %u: timed out\n",
1717 rxq->rxq_idx);
1718 return -ETIMEDOUT;
1719 }
1720
1721 return 0;
1722 }
1723
static void mana_fence_rqs(struct mana_port_context *apc)
1725 {
1726 unsigned int rxq_idx;
1727 struct mana_rxq *rxq;
1728 int err;
1729
1730 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
1731 rxq = apc->rxqs[rxq_idx];
1732 err = mana_fence_rq(apc, rxq);
1733
1734 /* In case of any error, use sleep instead. */
1735 if (err)
1736 msleep(100);
1737 }
1738 }
1739
static int mana_move_wq_tail(struct gdma_queue *wq, u32 num_units)
1741 {
1742 u32 used_space_old;
1743 u32 used_space_new;
1744
1745 used_space_old = wq->head - wq->tail;
1746 used_space_new = wq->head - (wq->tail + num_units);
1747
1748 if (WARN_ON_ONCE(used_space_new > used_space_old))
1749 return -ERANGE;
1750
1751 wq->tail += num_units;
1752 return 0;
1753 }
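
/* The u32 subtractions above stay correct across wraparound. Illustrative
 * example: with head = 5 and tail = 0xfffffffe, used_space_old is
 * 5 - 0xfffffffe = 7 in u32 arithmetic, and consuming num_units = 3 gives
 * used_space_new = 4, which passes the sanity check.
 */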
1754
void mana_unmap_skb(struct sk_buff *skb, struct mana_port_context *apc)
1756 {
1757 struct mana_skb_head *ash = (struct mana_skb_head *)skb->head;
1758 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
1759 struct device *dev = gc->dev;
1760 int hsg, i;
1761
1762 /* Number of SGEs of linear part */
1763 hsg = (skb_is_gso(skb) && skb_headlen(skb) > ash->size[0]) ? 2 : 1;
1764
1765 for (i = 0; i < hsg; i++)
1766 dma_unmap_single(dev, ash->dma_handle[i], ash->size[i],
1767 DMA_TO_DEVICE);
1768
1769 for (i = hsg; i < skb_shinfo(skb)->nr_frags + hsg; i++)
1770 dma_unmap_page(dev, ash->dma_handle[i], ash->size[i],
1771 DMA_TO_DEVICE);
1772 }
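
/* The hsg computation in mana_unmap_skb() mirrors the split done in
 * mana_map_skb(): a GSO skb whose linear part is larger than its headers is
 * mapped with two linear SGEs, and ash->size[0] then holds only the header
 * length, so skb_headlen() > ash->size[0] identifies that case.
 */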
1773
static void mana_poll_tx_cq(struct mana_cq *cq)
1775 {
1776 struct gdma_comp *completions = cq->gdma_comp_buf;
1777 struct gdma_posted_wqe_info *wqe_info;
1778 unsigned int pkt_transmitted = 0;
1779 unsigned int wqe_unit_cnt = 0;
1780 struct mana_txq *txq = cq->txq;
1781 struct mana_port_context *apc;
1782 struct netdev_queue *net_txq;
1783 struct gdma_queue *gdma_wq;
1784 unsigned int avail_space;
1785 struct net_device *ndev;
1786 struct sk_buff *skb;
1787 bool txq_stopped;
1788 int comp_read;
1789 int i;
1790
1791 ndev = txq->ndev;
1792 apc = netdev_priv(ndev);
1793
1794 /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
1795 * doorbell can be rung in time for the hardware's requirement
1796 * of at least one doorbell ring every 8 wraparounds.
1797 */
1798 comp_read = mana_gd_poll_cq(cq->gdma_cq, completions,
1799 min((cq->gdma_cq->queue_size /
1800 COMP_ENTRY_SIZE) * 4,
1801 CQE_POLLING_BUFFER));
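/* Illustrative sizing (hypothetical values): for a 64KB CQ with 64-byte
 * completion entries, queue_size / COMP_ENTRY_SIZE * 4 is 4096, so the
 * number of CQEs polled per call is effectively capped by
 * CQE_POLLING_BUFFER instead.
 */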
1802
1803 if (comp_read < 1)
1804 return;
1805
1806 for (i = 0; i < comp_read; i++) {
1807 struct mana_tx_comp_oob *cqe_oob;
1808
1809 if (WARN_ON_ONCE(!completions[i].is_sq))
1810 return;
1811
1812 cqe_oob = (struct mana_tx_comp_oob *)completions[i].cqe_data;
1813 if (WARN_ON_ONCE(cqe_oob->cqe_hdr.client_type !=
1814 MANA_CQE_COMPLETION))
1815 return;
1816
1817 switch (cqe_oob->cqe_hdr.cqe_type) {
1818 case CQE_TX_OKAY:
1819 break;
1820
1821 case CQE_TX_SA_DROP:
1822 case CQE_TX_MTU_DROP:
1823 case CQE_TX_INVALID_OOB:
1824 case CQE_TX_INVALID_ETH_TYPE:
1825 case CQE_TX_HDR_PROCESSING_ERROR:
1826 case CQE_TX_VF_DISABLED:
1827 case CQE_TX_VPORT_IDX_OUT_OF_RANGE:
1828 case CQE_TX_VPORT_DISABLED:
1829 case CQE_TX_VLAN_TAGGING_VIOLATION:
1830 if (net_ratelimit())
1831 netdev_err(ndev, "TX: CQE error %d\n",
1832 cqe_oob->cqe_hdr.cqe_type);
1833
1834 apc->eth_stats.tx_cqe_err++;
1835 break;
1836
1837 default:
1838 /* If the CQE type is unknown, log an error,
1839 * and still free the SKB, update tail, etc.
1840 */
1841 if (net_ratelimit())
1842 netdev_err(ndev, "TX: unknown CQE type %d\n",
1843 cqe_oob->cqe_hdr.cqe_type);
1844
1845 apc->eth_stats.tx_cqe_unknown_type++;
1846 break;
1847 }
1848
1849 if (WARN_ON_ONCE(txq->gdma_txq_id != completions[i].wq_num))
1850 return;
1851
1852 skb = skb_dequeue(&txq->pending_skbs);
1853 if (WARN_ON_ONCE(!skb))
1854 return;
1855
1856 wqe_info = (struct gdma_posted_wqe_info *)skb->cb;
1857 wqe_unit_cnt += wqe_info->wqe_size_in_bu;
1858
1859 mana_unmap_skb(skb, apc);
1860
1861 napi_consume_skb(skb, cq->budget);
1862
1863 pkt_transmitted++;
1864 }
1865
1866 if (WARN_ON_ONCE(wqe_unit_cnt == 0))
1867 return;
1868
1869 mana_move_wq_tail(txq->gdma_sq, wqe_unit_cnt);
1870
1871 gdma_wq = txq->gdma_sq;
1872 avail_space = mana_gd_wq_avail_space(gdma_wq);
1873
1874 /* Ensure tail updated before checking q stop */
1875 smp_mb();
1876
1877 net_txq = txq->net_txq;
1878 txq_stopped = netif_tx_queue_stopped(net_txq);
1879
1880 /* Ensure checking txq_stopped before apc->port_is_up. */
1881 smp_rmb();
1882
1883 if (txq_stopped && apc->port_is_up && avail_space >= MAX_TX_WQE_SIZE) {
1884 netif_tx_wake_queue(net_txq);
1885 apc->eth_stats.wake_queue++;
1886 }
1887
1888 if (atomic_sub_return(pkt_transmitted, &txq->pending_sends) < 0)
1889 WARN_ON_ONCE(1);
1890
1891 cq->work_done = pkt_transmitted;
1892 }
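
/* Illustrative arithmetic for the polling cap used above (restated from
 * the code, not from the hardware spec). One wraparound of the CQ is
 * queue_size / COMP_ENTRY_SIZE entries, so each poll is capped at four
 * wraparounds' worth of CQEs (or CQE_POLLING_BUFFER, whichever is
 * smaller), leaving margin before the hardware's limit of one doorbell
 * ring per eight wraparounds:
 *
 *	entries_per_wrap = cq->gdma_cq->queue_size / COMP_ENTRY_SIZE;
 *	max_poll = min(entries_per_wrap * 4, CQE_POLLING_BUFFER);
 */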
1893
mana_post_pkt_rxq(struct mana_rxq * rxq)1894 static void mana_post_pkt_rxq(struct mana_rxq *rxq)
1895 {
1896 struct mana_recv_buf_oob *recv_buf_oob;
1897 u32 curr_index;
1898 int err;
1899
1900 curr_index = rxq->buf_index++;
1901 if (rxq->buf_index == rxq->num_rx_buf)
1902 rxq->buf_index = 0;
1903
1904 recv_buf_oob = &rxq->rx_oobs[curr_index];
1905
1906 err = mana_gd_post_work_request(rxq->gdma_rq, &recv_buf_oob->wqe_req,
1907 &recv_buf_oob->wqe_inf);
1908 if (WARN_ON_ONCE(err))
1909 return;
1910
1911 WARN_ON_ONCE(recv_buf_oob->wqe_inf.wqe_size_in_bu != 1);
1912 }
1913
mana_build_skb(struct mana_rxq * rxq,void * buf_va,uint pkt_len,struct xdp_buff * xdp)1914 static struct sk_buff *mana_build_skb(struct mana_rxq *rxq, void *buf_va,
1915 uint pkt_len, struct xdp_buff *xdp)
1916 {
1917 struct sk_buff *skb = napi_build_skb(buf_va, rxq->alloc_size);
1918
1919 if (!skb)
1920 return NULL;
1921
1922 if (xdp->data_hard_start) {
1923 u32 metasize = xdp->data - xdp->data_meta;
1924
1925 skb_reserve(skb, xdp->data - xdp->data_hard_start);
1926 skb_put(skb, xdp->data_end - xdp->data);
1927 if (metasize)
1928 skb_metadata_set(skb, metasize);
1929 return skb;
1930 }
1931
1932 skb_reserve(skb, rxq->headroom);
1933 skb_put(skb, pkt_len);
1934
1935 return skb;
1936 }
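
/* Illustrative sketch of the RX buffer layout assumed above, not part of
 * the driver logic. When an XDP program ran, it may have adjusted
 * xdp->data, so the skb is rebuilt from the xdp_buff; otherwise the
 * fixed rxq->headroom is reserved:
 *
 *	XDP case:	skb_reserve(skb, xdp->data - xdp->data_hard_start);
 *			skb_put(skb, xdp->data_end - xdp->data);
 *			skb_metadata_set(skb, metasize);	(if nonzero)
 *	non-XDP case:	skb_reserve(skb, rxq->headroom);
 *			skb_put(skb, pkt_len);
 */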
1937
mana_rx_skb(void * buf_va,bool from_pool,struct mana_rxcomp_oob * cqe,struct mana_rxq * rxq,int i)1938 static void mana_rx_skb(void *buf_va, bool from_pool,
1939 struct mana_rxcomp_oob *cqe, struct mana_rxq *rxq,
1940 int i)
1941 {
1942 struct mana_stats_rx *rx_stats = &rxq->stats;
1943 struct net_device *ndev = rxq->ndev;
1944 uint pkt_len = cqe->ppi[i].pkt_len;
1945 u16 rxq_idx = rxq->rxq_idx;
1946 struct napi_struct *napi;
1947 struct xdp_buff xdp = {};
1948 struct sk_buff *skb;
1949 u32 hash_value;
1950 u32 act;
1951
1952 rxq->rx_cq.work_done++;
1953 napi = &rxq->rx_cq.napi;
1954
1955 if (!buf_va) {
1956 ++ndev->stats.rx_dropped;
1957 return;
1958 }
1959
1960 act = mana_run_xdp(ndev, rxq, &xdp, buf_va, pkt_len);
1961
1962 if (act == XDP_REDIRECT && !rxq->xdp_rc)
1963 return;
1964
1965 if (act != XDP_PASS && act != XDP_TX)
1966 goto drop_xdp;
1967
1968 skb = mana_build_skb(rxq, buf_va, pkt_len, &xdp);
1969
1970 if (!skb)
1971 goto drop;
1972
1973 if (from_pool)
1974 skb_mark_for_recycle(skb);
1975
1976 skb->dev = napi->dev;
1977
1978 skb->protocol = eth_type_trans(skb, ndev);
1979 skb_checksum_none_assert(skb);
1980 skb_record_rx_queue(skb, rxq_idx);
1981
1982 if ((ndev->features & NETIF_F_RXCSUM) && cqe->rx_iphdr_csum_succeed) {
1983 if (cqe->rx_tcp_csum_succeed || cqe->rx_udp_csum_succeed)
1984 skb->ip_summed = CHECKSUM_UNNECESSARY;
1985 }
1986
1987 if (cqe->rx_hashtype != 0 && (ndev->features & NETIF_F_RXHASH)) {
1988 hash_value = cqe->ppi[i].pkt_hash;
1989
1990 if (cqe->rx_hashtype & MANA_HASH_L4)
1991 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L4);
1992 else
1993 skb_set_hash(skb, hash_value, PKT_HASH_TYPE_L3);
1994 }
1995
1996 if (cqe->rx_vlantag_present) {
1997 u16 vlan_tci = cqe->rx_vlan_id;
1998
1999 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
2000 }
2001
2002 u64_stats_update_begin(&rx_stats->syncp);
2003 rx_stats->packets++;
2004 rx_stats->bytes += pkt_len;
2005
2006 if (act == XDP_TX)
2007 rx_stats->xdp_tx++;
2008 u64_stats_update_end(&rx_stats->syncp);
2009
2010 if (act == XDP_TX) {
2011 skb_set_queue_mapping(skb, rxq_idx);
2012 mana_xdp_tx(skb, ndev);
2013 return;
2014 }
2015
2016 napi_gro_receive(napi, skb);
2017
2018 return;
2019
2020 drop_xdp:
2021 u64_stats_update_begin(&rx_stats->syncp);
2022 rx_stats->xdp_drop++;
2023 u64_stats_update_end(&rx_stats->syncp);
2024
2025 drop:
2026 if (from_pool) {
2027 if (rxq->frag_count == 1)
2028 page_pool_recycle_direct(rxq->page_pool,
2029 virt_to_head_page(buf_va));
2030 else
2031 page_pool_free_va(rxq->page_pool, buf_va, true);
2032 } else {
2033 WARN_ON_ONCE(rxq->xdp_save_va);
2034 /* Save for reuse */
2035 rxq->xdp_save_va = buf_va;
2036 }
2037
2038 ++ndev->stats.rx_dropped;
2039
2040 return;
2041 }
2042
mana_get_rxfrag(struct mana_rxq * rxq,struct device * dev,dma_addr_t * da,bool * from_pool)2043 static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev,
2044 dma_addr_t *da, bool *from_pool)
2045 {
2046 struct page *page;
2047 u32 offset;
2048 void *va;
2049 *from_pool = false;
2050
2051 	/* Don't use page fragments for jumbo frames or XDP; in those
2052 	 * cases each page holds a single RX buffer.
2053 	 */
2054 if (rxq->frag_count == 1) {
2055 /* Reuse XDP dropped page if available */
2056 if (rxq->xdp_save_va) {
2057 va = rxq->xdp_save_va;
2058 page = virt_to_head_page(va);
2059 rxq->xdp_save_va = NULL;
2060 } else {
2061 page = page_pool_dev_alloc_pages(rxq->page_pool);
2062 if (!page)
2063 return NULL;
2064
2065 *from_pool = true;
2066 va = page_to_virt(page);
2067 }
2068
2069 *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize,
2070 DMA_FROM_DEVICE);
2071 if (dma_mapping_error(dev, *da)) {
2072 mana_put_rx_page(rxq, page, *from_pool);
2073 return NULL;
2074 }
2075
2076 return va;
2077 }
2078
2079 page = page_pool_dev_alloc_frag(rxq->page_pool, &offset,
2080 rxq->alloc_size);
2081 if (!page)
2082 return NULL;
2083
2084 va = page_to_virt(page) + offset;
2085 *da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
2086 *from_pool = true;
2087
2088 return va;
2089 }
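
/* Illustrative address arithmetic for the fragment path above (values
 * are examples, not part of the driver). For frag_count > 1 the page
 * pool already holds the DMA mapping of the whole page, so only offsets
 * are computed here:
 *
 *	va  = page_to_virt(page) + offset;	CPU view of the fragment
 *	*da = page_pool_get_dma_addr(page) + offset + rxq->headroom;
 *
 * i.e. the device starts writing rxq->headroom bytes into the fragment,
 * leaving room for XDP/skb headroom in front of the packet data.
 */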
2090
2091 /* Allocate a new frag for the RX buffer, and return the old buffer */
mana_refill_rx_oob(struct device * dev,struct mana_rxq * rxq,struct mana_recv_buf_oob * rxoob,void ** old_buf,bool * old_fp)2092 static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq,
2093 struct mana_recv_buf_oob *rxoob, void **old_buf,
2094 bool *old_fp)
2095 {
2096 bool from_pool;
2097 dma_addr_t da;
2098 void *va;
2099
2100 va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
2101 if (!va)
2102 return;
2103 if (!rxoob->from_pool || rxq->frag_count == 1)
2104 dma_unmap_single(dev, rxoob->sgl[0].address, rxq->datasize,
2105 DMA_FROM_DEVICE);
2106 *old_buf = rxoob->buf_va;
2107 *old_fp = rxoob->from_pool;
2108
2109 rxoob->buf_va = va;
2110 rxoob->sgl[0].address = da;
2111 rxoob->from_pool = from_pool;
2112 }
2113
mana_process_rx_cqe(struct mana_rxq * rxq,struct mana_cq * cq,struct gdma_comp * cqe)2114 static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq,
2115 struct gdma_comp *cqe)
2116 {
2117 struct mana_rxcomp_oob *oob = (struct mana_rxcomp_oob *)cqe->cqe_data;
2118 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
2119 struct net_device *ndev = rxq->ndev;
2120 struct mana_recv_buf_oob *rxbuf_oob;
2121 struct mana_port_context *apc;
2122 struct device *dev = gc->dev;
2123 bool coalesced = false;
2124 void *old_buf = NULL;
2125 u32 curr, pktlen;
2126 bool old_fp;
2127 int i;
2128
2129 apc = netdev_priv(ndev);
2130
2131 switch (oob->cqe_hdr.cqe_type) {
2132 case CQE_RX_OKAY:
2133 break;
2134
2135 case CQE_RX_TRUNCATED:
2136 ++ndev->stats.rx_dropped;
2137 rxbuf_oob = &rxq->rx_oobs[rxq->buf_index];
2138 netdev_warn_once(ndev, "Dropped a truncated packet\n");
2139
2140 mana_move_wq_tail(rxq->gdma_rq,
2141 rxbuf_oob->wqe_inf.wqe_size_in_bu);
2142 mana_post_pkt_rxq(rxq);
2143 return;
2144
2145 case CQE_RX_COALESCED_4:
2146 coalesced = true;
2147 break;
2148
2149 case CQE_RX_OBJECT_FENCE:
2150 complete(&rxq->fence_event);
2151 return;
2152
2153 default:
2154 netdev_err(ndev, "Unknown RX CQE type = %d\n",
2155 oob->cqe_hdr.cqe_type);
2156 apc->eth_stats.rx_cqe_unknown_type++;
2157 return;
2158 }
2159
2160 for (i = 0; i < MANA_RXCOMP_OOB_NUM_PPI; i++) {
2161 old_buf = NULL;
2162 pktlen = oob->ppi[i].pkt_len;
2163 if (pktlen == 0)
2164 break;
2165
2166 curr = rxq->buf_index;
2167 rxbuf_oob = &rxq->rx_oobs[curr];
2168 WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1);
2169
2170 mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf, &old_fp);
2171
2172 /* Unsuccessful refill will have old_buf == NULL.
2173 * In this case, mana_rx_skb() will drop the packet.
2174 */
2175 mana_rx_skb(old_buf, old_fp, oob, rxq, i);
2176
2177 mana_move_wq_tail(rxq->gdma_rq,
2178 rxbuf_oob->wqe_inf.wqe_size_in_bu);
2179
2180 mana_post_pkt_rxq(rxq);
2181
2182 if (!coalesced)
2183 break;
2184 }
2185
2186 	/* Update the coalesced CQE counter based on packets processed.
2187 	 * Coalesced CQEs carry at least 2 packets, so the index is i - 2.
2188 	 */
2189 if (i > 1) {
2190 u64_stats_update_begin(&rxq->stats.syncp);
2191 rxq->stats.coalesced_cqe[i - 2]++;
2192 u64_stats_update_end(&rxq->stats.syncp);
2193 } else if (!i && !pktlen) {
2194 u64_stats_update_begin(&rxq->stats.syncp);
2195 rxq->stats.pkt_len0_err++;
2196 u64_stats_update_end(&rxq->stats.syncp);
2197 netdev_err_once(ndev,
2198 "RX pkt len=0, rq=%u, cq=%u, rxobj=0x%llx\n",
2199 rxq->gdma_id, cq->gdma_id, rxq->rxobj);
2200 }
2201 }
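
/* A worked example of the coalesced-CQE accounting above; the packet
 * counts are illustrative. A coalesced CQE that carried three packets
 * ends the loop at the zero-length PPI with i == 3 and bumps
 * rxq->stats.coalesced_cqe[3 - 2] == coalesced_cqe[1]; a plain
 * CQE_RX_OKAY breaks out after its single packet with i == 0, so no
 * coalesced counter is touched.
 */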
2202
mana_poll_rx_cq(struct mana_cq * cq)2203 static void mana_poll_rx_cq(struct mana_cq *cq)
2204 {
2205 struct gdma_comp *comp = cq->gdma_comp_buf;
2206 struct mana_rxq *rxq = cq->rxq;
2207 int comp_read, i;
2208
2209 /* Limit CQEs polled to 4 wraparounds of the CQ to ensure the
2210 * doorbell can be rung in time for the hardware's requirement
2211 * of at least one doorbell ring every 8 wraparounds.
2212 */
2213 comp_read = mana_gd_poll_cq(cq->gdma_cq, comp,
2214 min((cq->gdma_cq->queue_size /
2215 COMP_ENTRY_SIZE) * 4,
2216 CQE_POLLING_BUFFER));
2217 WARN_ON_ONCE(comp_read > CQE_POLLING_BUFFER);
2218
2219 rxq->xdp_flush = false;
2220
2221 for (i = 0; i < comp_read; i++) {
2222 if (WARN_ON_ONCE(comp[i].is_sq))
2223 return;
2224
2225 /* verify recv cqe references the right rxq */
2226 if (WARN_ON_ONCE(comp[i].wq_num != cq->rxq->gdma_id))
2227 return;
2228
2229 mana_process_rx_cqe(rxq, cq, &comp[i]);
2230 }
2231
2232 if (comp_read > 0) {
2233 struct gdma_context *gc = rxq->gdma_rq->gdma_dev->gdma_context;
2234
2235 mana_gd_wq_ring_doorbell(gc, rxq->gdma_rq);
2236 }
2237
2238 if (rxq->xdp_flush)
2239 xdp_do_flush();
2240 }
2241
mana_cq_handler(void * context,struct gdma_queue * gdma_queue)2242 static int mana_cq_handler(void *context, struct gdma_queue *gdma_queue)
2243 {
2244 struct mana_cq *cq = context;
2245 int w;
2246
2247 WARN_ON_ONCE(cq->gdma_cq != gdma_queue);
2248
2249 if (cq->type == MANA_CQ_TYPE_RX)
2250 mana_poll_rx_cq(cq);
2251 else
2252 mana_poll_tx_cq(cq);
2253
2254 w = cq->work_done;
2255 cq->work_done_since_doorbell += w;
2256
2257 if (w < cq->budget) {
2258 mana_gd_ring_cq(gdma_queue, SET_ARM_BIT);
2259 cq->work_done_since_doorbell = 0;
2260 napi_complete_done(&cq->napi, w);
2261 } else if (cq->work_done_since_doorbell >=
2262 (cq->gdma_cq->queue_size / COMP_ENTRY_SIZE) * 4) {
2263 /* MANA hardware requires at least one doorbell ring every 8
2264 * wraparounds of CQ even if there is no need to arm the CQ.
2265 * This driver rings the doorbell as soon as it has processed
2266 * 4 wraparounds.
2267 */
2268 mana_gd_ring_cq(gdma_queue, 0);
2269 cq->work_done_since_doorbell = 0;
2270 }
2271
2272 return w;
2273 }
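
/* A sketch of the doorbell policy implemented above (thresholds restated
 * from the code, not from the hardware spec):
 *
 *	if (work_done < budget)
 *		ring the doorbell with SET_ARM_BIT and complete NAPI;
 *	else if (work_done_since_doorbell >= 4 wraparounds of CQEs)
 *		ring the doorbell without arming, so the hardware's
 *		"one ring per 8 wraparounds" requirement is met even
 *		while NAPI keeps polling at full budget.
 */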
2274
mana_poll(struct napi_struct * napi,int budget)2275 static int mana_poll(struct napi_struct *napi, int budget)
2276 {
2277 struct mana_cq *cq = container_of(napi, struct mana_cq, napi);
2278 int w;
2279
2280 cq->work_done = 0;
2281 cq->budget = budget;
2282
2283 w = mana_cq_handler(cq, cq->gdma_cq);
2284
2285 return min(w, budget);
2286 }
2287
mana_schedule_napi(void * context,struct gdma_queue * gdma_queue)2288 static void mana_schedule_napi(void *context, struct gdma_queue *gdma_queue)
2289 {
2290 struct mana_cq *cq = context;
2291
2292 napi_schedule_irqoff(&cq->napi);
2293 }
2294
mana_deinit_cq(struct mana_port_context * apc,struct mana_cq * cq)2295 static void mana_deinit_cq(struct mana_port_context *apc, struct mana_cq *cq)
2296 {
2297 struct gdma_dev *gd = apc->ac->gdma_dev;
2298
2299 if (!cq->gdma_cq)
2300 return;
2301
2302 mana_gd_destroy_queue(gd->gdma_context, cq->gdma_cq);
2303 }
2304
mana_deinit_txq(struct mana_port_context * apc,struct mana_txq * txq)2305 static void mana_deinit_txq(struct mana_port_context *apc, struct mana_txq *txq)
2306 {
2307 struct gdma_dev *gd = apc->ac->gdma_dev;
2308
2309 if (!txq->gdma_sq)
2310 return;
2311
2312 mana_gd_destroy_queue(gd->gdma_context, txq->gdma_sq);
2313 }
2314
mana_destroy_txq(struct mana_port_context * apc)2315 static void mana_destroy_txq(struct mana_port_context *apc)
2316 {
2317 struct napi_struct *napi;
2318 int i;
2319
2320 if (!apc->tx_qp)
2321 return;
2322
2323 for (i = 0; i < apc->num_queues; i++) {
2324 debugfs_remove_recursive(apc->tx_qp[i].mana_tx_debugfs);
2325 apc->tx_qp[i].mana_tx_debugfs = NULL;
2326
2327 napi = &apc->tx_qp[i].tx_cq.napi;
2328 if (apc->tx_qp[i].txq.napi_initialized) {
2329 napi_synchronize(napi);
2330 napi_disable_locked(napi);
2331 netif_napi_del_locked(napi);
2332 apc->tx_qp[i].txq.napi_initialized = false;
2333 }
2334 mana_destroy_wq_obj(apc, GDMA_SQ, apc->tx_qp[i].tx_object);
2335
2336 mana_deinit_cq(apc, &apc->tx_qp[i].tx_cq);
2337
2338 mana_deinit_txq(apc, &apc->tx_qp[i].txq);
2339 }
2340
2341 kfree(apc->tx_qp);
2342 apc->tx_qp = NULL;
2343 }
2344
mana_create_txq_debugfs(struct mana_port_context * apc,int idx)2345 static void mana_create_txq_debugfs(struct mana_port_context *apc, int idx)
2346 {
2347 struct mana_tx_qp *tx_qp = &apc->tx_qp[idx];
2348 char qnum[32];
2349
2350 sprintf(qnum, "TX-%d", idx);
2351 tx_qp->mana_tx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2352 debugfs_create_u32("sq_head", 0400, tx_qp->mana_tx_debugfs,
2353 &tx_qp->txq.gdma_sq->head);
2354 debugfs_create_u32("sq_tail", 0400, tx_qp->mana_tx_debugfs,
2355 &tx_qp->txq.gdma_sq->tail);
2356 debugfs_create_u32("sq_pend_skb_qlen", 0400, tx_qp->mana_tx_debugfs,
2357 &tx_qp->txq.pending_skbs.qlen);
2358 debugfs_create_u32("cq_head", 0400, tx_qp->mana_tx_debugfs,
2359 &tx_qp->tx_cq.gdma_cq->head);
2360 debugfs_create_u32("cq_tail", 0400, tx_qp->mana_tx_debugfs,
2361 &tx_qp->tx_cq.gdma_cq->tail);
2362 debugfs_create_u32("cq_budget", 0400, tx_qp->mana_tx_debugfs,
2363 &tx_qp->tx_cq.budget);
2364 debugfs_create_file("txq_dump", 0400, tx_qp->mana_tx_debugfs,
2365 tx_qp->txq.gdma_sq, &mana_dbg_q_fops);
2366 debugfs_create_file("cq_dump", 0400, tx_qp->mana_tx_debugfs,
2367 tx_qp->tx_cq.gdma_cq, &mana_dbg_q_fops);
2368 }
2369
mana_create_txq(struct mana_port_context * apc,struct net_device * net)2370 static int mana_create_txq(struct mana_port_context *apc,
2371 struct net_device *net)
2372 {
2373 struct mana_context *ac = apc->ac;
2374 struct gdma_dev *gd = ac->gdma_dev;
2375 struct mana_obj_spec wq_spec;
2376 struct mana_obj_spec cq_spec;
2377 struct gdma_queue_spec spec;
2378 struct gdma_context *gc;
2379 struct mana_txq *txq;
2380 struct mana_cq *cq;
2381 u32 txq_size;
2382 u32 cq_size;
2383 int err;
2384 int i;
2385
2386 apc->tx_qp = kzalloc_objs(struct mana_tx_qp, apc->num_queues);
2387 if (!apc->tx_qp)
2388 return -ENOMEM;
2389
2390 	/* The minimum size of a WQE is 32 bytes, hence
2391 	 * apc->tx_queue_size represents the maximum number of WQEs
2392 	 * the SQ can store. This value is then used to size other queues
2393 	 * to prevent overflow.
2394 	 * Also note that txq_size is always MANA_PAGE_ALIGNED: the minimum
2395 	 * value of apc->tx_queue_size is 128, which makes txq_size
2396 	 * 128 * 32 = 4096, and all larger values of apc->tx_queue_size
2397 	 * are powers of two.
2398 	 */
2399 txq_size = apc->tx_queue_size * 32;
2400
2401 cq_size = apc->tx_queue_size * COMP_ENTRY_SIZE;
2402
2403 gc = gd->gdma_context;
2404
2405 for (i = 0; i < apc->num_queues; i++) {
2406 apc->tx_qp[i].tx_object = INVALID_MANA_HANDLE;
2407
2408 /* Create SQ */
2409 txq = &apc->tx_qp[i].txq;
2410
2411 u64_stats_init(&txq->stats.syncp);
2412 txq->ndev = net;
2413 txq->net_txq = netdev_get_tx_queue(net, i);
2414 txq->vp_offset = apc->tx_vp_offset;
2415 txq->napi_initialized = false;
2416 skb_queue_head_init(&txq->pending_skbs);
2417
2418 memset(&spec, 0, sizeof(spec));
2419 spec.type = GDMA_SQ;
2420 spec.monitor_avl_buf = true;
2421 spec.queue_size = txq_size;
2422 err = mana_gd_create_mana_wq_cq(gd, &spec, &txq->gdma_sq);
2423 if (err)
2424 goto out;
2425
2426 /* Create SQ's CQ */
2427 cq = &apc->tx_qp[i].tx_cq;
2428 cq->type = MANA_CQ_TYPE_TX;
2429
2430 cq->txq = txq;
2431
2432 memset(&spec, 0, sizeof(spec));
2433 spec.type = GDMA_CQ;
2434 spec.monitor_avl_buf = false;
2435 spec.queue_size = cq_size;
2436 spec.cq.callback = mana_schedule_napi;
2437 spec.cq.parent_eq = ac->eqs[i].eq;
2438 spec.cq.context = cq;
2439 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2440 if (err)
2441 goto out;
2442
2443 memset(&wq_spec, 0, sizeof(wq_spec));
2444 memset(&cq_spec, 0, sizeof(cq_spec));
2445
2446 wq_spec.gdma_region = txq->gdma_sq->mem_info.dma_region_handle;
2447 wq_spec.queue_size = txq->gdma_sq->queue_size;
2448
2449 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2450 cq_spec.queue_size = cq->gdma_cq->queue_size;
2451 cq_spec.modr_ctx_id = 0;
2452 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2453
2454 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_SQ,
2455 &wq_spec, &cq_spec,
2456 &apc->tx_qp[i].tx_object);
2457
2458 if (err)
2459 goto out;
2460
2461 txq->gdma_sq->id = wq_spec.queue_index;
2462 cq->gdma_cq->id = cq_spec.queue_index;
2463
2464 txq->gdma_sq->mem_info.dma_region_handle =
2465 GDMA_INVALID_DMA_REGION;
2466 cq->gdma_cq->mem_info.dma_region_handle =
2467 GDMA_INVALID_DMA_REGION;
2468
2469 txq->gdma_txq_id = txq->gdma_sq->id;
2470
2471 cq->gdma_id = cq->gdma_cq->id;
2472
2473 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2474 err = -EINVAL;
2475 goto out;
2476 }
2477
2478 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2479
2480 mana_create_txq_debugfs(apc, i);
2481
2482 set_bit(NAPI_STATE_NO_BUSY_POLL, &cq->napi.state);
2483 netif_napi_add_locked(net, &cq->napi, mana_poll);
2484 napi_enable_locked(&cq->napi);
2485 txq->napi_initialized = true;
2486
2487 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2488 }
2489
2490 return 0;
2491 out:
2492 netdev_err(net, "Failed to create %d TX queues, %d\n",
2493 apc->num_queues, err);
2494 mana_destroy_txq(apc);
2495 return err;
2496 }
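
/* Worked sizing arithmetic for the SQ/CQ created above, assuming a
 * MANA_PAGE_SIZE of 4 KiB. With the minimum WQE size of 32 bytes and
 * the minimum apc->tx_queue_size of 128:
 *
 *	txq_size = 128 * 32 = 4096	(one MANA page)
 *	cq_size  = 128 * COMP_ENTRY_SIZE
 *
 * Larger tx_queue_size values are powers of two, so txq_size remains
 * MANA_PAGE_ALIGNED without an explicit alignment step.
 */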
2497
mana_destroy_rxq(struct mana_port_context * apc,struct mana_rxq * rxq,bool napi_initialized)2498 static void mana_destroy_rxq(struct mana_port_context *apc,
2499 struct mana_rxq *rxq, bool napi_initialized)
2500
2501 {
2502 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2503 struct mana_recv_buf_oob *rx_oob;
2504 struct device *dev = gc->dev;
2505 struct napi_struct *napi;
2506 struct page *page;
2507 int i;
2508
2509 if (!rxq)
2510 return;
2511
2512 debugfs_remove_recursive(rxq->mana_rx_debugfs);
2513 rxq->mana_rx_debugfs = NULL;
2514
2515 napi = &rxq->rx_cq.napi;
2516
2517 if (napi_initialized) {
2518 napi_synchronize(napi);
2519
2520 napi_disable_locked(napi);
2521 netif_napi_del_locked(napi);
2522 }
2523 xdp_rxq_info_unreg(&rxq->xdp_rxq);
2524
2525 mana_destroy_wq_obj(apc, GDMA_RQ, rxq->rxobj);
2526
2527 mana_deinit_cq(apc, &rxq->rx_cq);
2528
2529 if (rxq->xdp_save_va)
2530 put_page(virt_to_head_page(rxq->xdp_save_va));
2531
2532 for (i = 0; i < rxq->num_rx_buf; i++) {
2533 rx_oob = &rxq->rx_oobs[i];
2534
2535 if (!rx_oob->buf_va)
2536 continue;
2537
2538 page = virt_to_head_page(rx_oob->buf_va);
2539
2540 if (rxq->frag_count == 1 || !rx_oob->from_pool) {
2541 dma_unmap_single(dev, rx_oob->sgl[0].address,
2542 rx_oob->sgl[0].size, DMA_FROM_DEVICE);
2543 mana_put_rx_page(rxq, page, rx_oob->from_pool);
2544 } else {
2545 page_pool_free_va(rxq->page_pool, rx_oob->buf_va, true);
2546 }
2547
2548 rx_oob->buf_va = NULL;
2549 }
2550
2551 page_pool_destroy(rxq->page_pool);
2552
2553 if (rxq->gdma_rq)
2554 mana_gd_destroy_queue(gc, rxq->gdma_rq);
2555
2556 kfree(rxq);
2557 }
2558
mana_fill_rx_oob(struct mana_recv_buf_oob * rx_oob,u32 mem_key,struct mana_rxq * rxq,struct device * dev)2559 static int mana_fill_rx_oob(struct mana_recv_buf_oob *rx_oob, u32 mem_key,
2560 struct mana_rxq *rxq, struct device *dev)
2561 {
2562 struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2563 bool from_pool = false;
2564 dma_addr_t da;
2565 void *va;
2566
2567 if (mpc->rxbufs_pre)
2568 va = mana_get_rxbuf_pre(rxq, &da);
2569 else
2570 va = mana_get_rxfrag(rxq, dev, &da, &from_pool);
2571
2572 if (!va)
2573 return -ENOMEM;
2574
2575 rx_oob->buf_va = va;
2576 rx_oob->from_pool = from_pool;
2577
2578 rx_oob->sgl[0].address = da;
2579 rx_oob->sgl[0].size = rxq->datasize;
2580 rx_oob->sgl[0].mem_key = mem_key;
2581
2582 return 0;
2583 }
2584
2585 #define MANA_WQE_HEADER_SIZE 16
2586 #define MANA_WQE_SGE_SIZE 16
2587
mana_alloc_rx_wqe(struct mana_port_context * apc,struct mana_rxq * rxq,u32 * rxq_size,u32 * cq_size)2588 static int mana_alloc_rx_wqe(struct mana_port_context *apc,
2589 struct mana_rxq *rxq, u32 *rxq_size, u32 *cq_size)
2590 {
2591 struct gdma_context *gc = apc->ac->gdma_dev->gdma_context;
2592 struct mana_recv_buf_oob *rx_oob;
2593 struct device *dev = gc->dev;
2594 u32 buf_idx;
2595 int ret;
2596
2597 WARN_ON(rxq->datasize == 0);
2598
2599 *rxq_size = 0;
2600 *cq_size = 0;
2601
2602 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2603 rx_oob = &rxq->rx_oobs[buf_idx];
2604 memset(rx_oob, 0, sizeof(*rx_oob));
2605
2606 rx_oob->num_sge = 1;
2607
2608 ret = mana_fill_rx_oob(rx_oob, apc->ac->gdma_dev->gpa_mkey, rxq,
2609 dev);
2610 if (ret)
2611 return ret;
2612
2613 rx_oob->wqe_req.sgl = rx_oob->sgl;
2614 rx_oob->wqe_req.num_sge = rx_oob->num_sge;
2615 rx_oob->wqe_req.inline_oob_size = 0;
2616 rx_oob->wqe_req.inline_oob_data = NULL;
2617 rx_oob->wqe_req.flags = 0;
2618 rx_oob->wqe_req.client_data_unit = 0;
2619
2620 *rxq_size += ALIGN(MANA_WQE_HEADER_SIZE +
2621 MANA_WQE_SGE_SIZE * rx_oob->num_sge, 32);
2622 *cq_size += COMP_ENTRY_SIZE;
2623 }
2624
2625 return 0;
2626 }
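
/* Worked arithmetic for the per-buffer sizing above. Each RX WQE uses
 * one SGE, so it occupies
 *
 *	ALIGN(MANA_WQE_HEADER_SIZE + MANA_WQE_SGE_SIZE * 1, 32)
 *	  = ALIGN(16 + 16, 32) = 32 bytes
 *
 * of the RQ, and contributes one COMP_ENTRY_SIZE entry to the CQ, so
 * *rxq_size and *cq_size grow linearly with rxq->num_rx_buf.
 */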
2627
mana_push_wqe(struct mana_rxq * rxq)2628 static int mana_push_wqe(struct mana_rxq *rxq)
2629 {
2630 struct mana_recv_buf_oob *rx_oob;
2631 u32 buf_idx;
2632 int err;
2633
2634 for (buf_idx = 0; buf_idx < rxq->num_rx_buf; buf_idx++) {
2635 rx_oob = &rxq->rx_oobs[buf_idx];
2636
2637 err = mana_gd_post_and_ring(rxq->gdma_rq, &rx_oob->wqe_req,
2638 &rx_oob->wqe_inf);
2639 if (err)
2640 return -ENOSPC;
2641 }
2642
2643 return 0;
2644 }
2645
mana_create_page_pool(struct mana_rxq * rxq,struct gdma_context * gc)2646 static int mana_create_page_pool(struct mana_rxq *rxq, struct gdma_context *gc)
2647 {
2648 struct mana_port_context *mpc = netdev_priv(rxq->ndev);
2649 struct page_pool_params pprm = {};
2650 int ret;
2651
2652 pprm.pool_size = mpc->rx_queue_size / rxq->frag_count + 1;
2653 pprm.nid = gc->numa_node;
2654 pprm.napi = &rxq->rx_cq.napi;
2655 pprm.netdev = rxq->ndev;
2656 pprm.order = get_order(rxq->alloc_size);
2657 pprm.queue_idx = rxq->rxq_idx;
2658 pprm.dev = gc->dev;
2659
2660 	/* Let the page pool do the DMA mapping when page sharing with
2661 	 * multiple fragments is enabled for RX buffers.
2662 	 */
2663 if (rxq->frag_count > 1) {
2664 pprm.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2665 pprm.max_len = PAGE_SIZE;
2666 pprm.dma_dir = DMA_FROM_DEVICE;
2667 }
2668
2669 rxq->page_pool = page_pool_create(&pprm);
2670
2671 if (IS_ERR(rxq->page_pool)) {
2672 ret = PTR_ERR(rxq->page_pool);
2673 rxq->page_pool = NULL;
2674 return ret;
2675 }
2676
2677 return 0;
2678 }
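
/* Illustrative pool sizing for the helper above; the queue depth and
 * fragment count below are example values. Since each page is shared by
 * rxq->frag_count buffers, the pool only needs
 *
 *	pool_size = rx_queue_size / frag_count + 1
 *
 * pages, e.g. 512 RX buffers with 4 fragments per page -> 129 pages.
 */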
2679
mana_create_rxq(struct mana_port_context * apc,u32 rxq_idx,struct mana_eq * eq,struct net_device * ndev)2680 static struct mana_rxq *mana_create_rxq(struct mana_port_context *apc,
2681 u32 rxq_idx, struct mana_eq *eq,
2682 struct net_device *ndev)
2683 {
2684 struct gdma_dev *gd = apc->ac->gdma_dev;
2685 struct mana_obj_spec wq_spec;
2686 struct mana_obj_spec cq_spec;
2687 struct gdma_queue_spec spec;
2688 struct mana_cq *cq = NULL;
2689 struct gdma_context *gc;
2690 u32 cq_size, rq_size;
2691 struct mana_rxq *rxq;
2692 int err;
2693
2694 gc = gd->gdma_context;
2695
2696 rxq = kzalloc_flex(*rxq, rx_oobs, apc->rx_queue_size);
2697 if (!rxq)
2698 return NULL;
2699
2700 rxq->ndev = ndev;
2701 rxq->num_rx_buf = apc->rx_queue_size;
2702 rxq->rxq_idx = rxq_idx;
2703 rxq->rxobj = INVALID_MANA_HANDLE;
2704
2705 mana_get_rxbuf_cfg(apc, ndev->mtu, &rxq->datasize, &rxq->alloc_size,
2706 &rxq->headroom, &rxq->frag_count);
2707 /* Create page pool for RX queue */
2708 err = mana_create_page_pool(rxq, gc);
2709 if (err) {
2710 netdev_err(ndev, "Create page pool err:%d\n", err);
2711 goto out;
2712 }
2713
2714 err = mana_alloc_rx_wqe(apc, rxq, &rq_size, &cq_size);
2715 if (err)
2716 goto out;
2717
2718 rq_size = MANA_PAGE_ALIGN(rq_size);
2719 cq_size = MANA_PAGE_ALIGN(cq_size);
2720
2721 /* Create RQ */
2722 memset(&spec, 0, sizeof(spec));
2723 spec.type = GDMA_RQ;
2724 spec.monitor_avl_buf = true;
2725 spec.queue_size = rq_size;
2726 err = mana_gd_create_mana_wq_cq(gd, &spec, &rxq->gdma_rq);
2727 if (err)
2728 goto out;
2729
2730 /* Create RQ's CQ */
2731 cq = &rxq->rx_cq;
2732 cq->type = MANA_CQ_TYPE_RX;
2733 cq->rxq = rxq;
2734
2735 memset(&spec, 0, sizeof(spec));
2736 spec.type = GDMA_CQ;
2737 spec.monitor_avl_buf = false;
2738 spec.queue_size = cq_size;
2739 spec.cq.callback = mana_schedule_napi;
2740 spec.cq.parent_eq = eq->eq;
2741 spec.cq.context = cq;
2742 err = mana_gd_create_mana_wq_cq(gd, &spec, &cq->gdma_cq);
2743 if (err)
2744 goto out;
2745
2746 memset(&wq_spec, 0, sizeof(wq_spec));
2747 memset(&cq_spec, 0, sizeof(cq_spec));
2748 wq_spec.gdma_region = rxq->gdma_rq->mem_info.dma_region_handle;
2749 wq_spec.queue_size = rxq->gdma_rq->queue_size;
2750
2751 cq_spec.gdma_region = cq->gdma_cq->mem_info.dma_region_handle;
2752 cq_spec.queue_size = cq->gdma_cq->queue_size;
2753 cq_spec.modr_ctx_id = 0;
2754 cq_spec.attached_eq = cq->gdma_cq->cq.parent->id;
2755
2756 err = mana_create_wq_obj(apc, apc->port_handle, GDMA_RQ,
2757 &wq_spec, &cq_spec, &rxq->rxobj);
2758 if (err)
2759 goto out;
2760
2761 rxq->gdma_rq->id = wq_spec.queue_index;
2762 cq->gdma_cq->id = cq_spec.queue_index;
2763
2764 rxq->gdma_rq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2765 cq->gdma_cq->mem_info.dma_region_handle = GDMA_INVALID_DMA_REGION;
2766
2767 rxq->gdma_id = rxq->gdma_rq->id;
2768 cq->gdma_id = cq->gdma_cq->id;
2769
2770 err = mana_push_wqe(rxq);
2771 if (err)
2772 goto out;
2773
2774 if (WARN_ON(cq->gdma_id >= gc->max_num_cqs)) {
2775 err = -EINVAL;
2776 goto out;
2777 }
2778
2779 gc->cq_table[cq->gdma_id] = cq->gdma_cq;
2780
2781 netif_napi_add_weight_locked(ndev, &cq->napi, mana_poll, 1);
2782
2783 WARN_ON(xdp_rxq_info_reg(&rxq->xdp_rxq, ndev, rxq_idx,
2784 cq->napi.napi_id));
2785 WARN_ON(xdp_rxq_info_reg_mem_model(&rxq->xdp_rxq, MEM_TYPE_PAGE_POOL,
2786 rxq->page_pool));
2787
2788 napi_enable_locked(&cq->napi);
2789
2790 mana_gd_ring_cq(cq->gdma_cq, SET_ARM_BIT);
2791 out:
2792 if (!err)
2793 return rxq;
2794
2795 netdev_err(ndev, "Failed to create RXQ: err = %d\n", err);
2796
2797 mana_destroy_rxq(apc, rxq, false);
2798
2799 if (cq)
2800 mana_deinit_cq(apc, cq);
2801
2802 return NULL;
2803 }
2804
mana_create_rxq_debugfs(struct mana_port_context * apc,int idx)2805 static void mana_create_rxq_debugfs(struct mana_port_context *apc, int idx)
2806 {
2807 struct mana_rxq *rxq;
2808 char qnum[32];
2809
2810 rxq = apc->rxqs[idx];
2811
2812 sprintf(qnum, "RX-%d", idx);
2813 rxq->mana_rx_debugfs = debugfs_create_dir(qnum, apc->mana_port_debugfs);
2814 debugfs_create_u32("rq_head", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->head);
2815 debugfs_create_u32("rq_tail", 0400, rxq->mana_rx_debugfs, &rxq->gdma_rq->tail);
2816 debugfs_create_u32("rq_nbuf", 0400, rxq->mana_rx_debugfs, &rxq->num_rx_buf);
2817 debugfs_create_u32("cq_head", 0400, rxq->mana_rx_debugfs,
2818 &rxq->rx_cq.gdma_cq->head);
2819 debugfs_create_u32("cq_tail", 0400, rxq->mana_rx_debugfs,
2820 &rxq->rx_cq.gdma_cq->tail);
2821 debugfs_create_u32("cq_budget", 0400, rxq->mana_rx_debugfs, &rxq->rx_cq.budget);
2822 debugfs_create_file("rxq_dump", 0400, rxq->mana_rx_debugfs, rxq->gdma_rq, &mana_dbg_q_fops);
2823 debugfs_create_file("cq_dump", 0400, rxq->mana_rx_debugfs, rxq->rx_cq.gdma_cq,
2824 &mana_dbg_q_fops);
2825 }
2826
mana_add_rx_queues(struct mana_port_context * apc,struct net_device * ndev)2827 static int mana_add_rx_queues(struct mana_port_context *apc,
2828 struct net_device *ndev)
2829 {
2830 struct mana_context *ac = apc->ac;
2831 struct mana_rxq *rxq;
2832 int err = 0;
2833 int i;
2834
2835 for (i = 0; i < apc->num_queues; i++) {
2836 rxq = mana_create_rxq(apc, i, &ac->eqs[i], ndev);
2837 if (!rxq) {
2838 err = -ENOMEM;
2839 netdev_err(ndev, "Failed to create rxq %d : %d\n", i, err);
2840 goto out;
2841 }
2842
2843 u64_stats_init(&rxq->stats.syncp);
2844
2845 apc->rxqs[i] = rxq;
2846
2847 mana_create_rxq_debugfs(apc, i);
2848 }
2849
2850 apc->default_rxobj = apc->rxqs[0]->rxobj;
2851 out:
2852 return err;
2853 }
2854
mana_destroy_vport(struct mana_port_context * apc)2855 static void mana_destroy_vport(struct mana_port_context *apc)
2856 {
2857 struct gdma_dev *gd = apc->ac->gdma_dev;
2858 struct mana_rxq *rxq;
2859 u32 rxq_idx;
2860
2861 for (rxq_idx = 0; rxq_idx < apc->num_queues; rxq_idx++) {
2862 rxq = apc->rxqs[rxq_idx];
2863 if (!rxq)
2864 continue;
2865
2866 mana_destroy_rxq(apc, rxq, true);
2867 apc->rxqs[rxq_idx] = NULL;
2868 }
2869
2870 mana_destroy_txq(apc);
2871 mana_uncfg_vport(apc);
2872
2873 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
2874 mana_pf_deregister_hw_vport(apc);
2875 }
2876
mana_create_vport(struct mana_port_context * apc,struct net_device * net)2877 static int mana_create_vport(struct mana_port_context *apc,
2878 struct net_device *net)
2879 {
2880 struct gdma_dev *gd = apc->ac->gdma_dev;
2881 int err;
2882
2883 apc->default_rxobj = INVALID_MANA_HANDLE;
2884
2885 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
2886 err = mana_pf_register_hw_vport(apc);
2887 if (err)
2888 return err;
2889 }
2890
2891 err = mana_cfg_vport(apc, gd->pdid, gd->doorbell);
2892 if (err)
2893 return err;
2894
2895 return mana_create_txq(apc, net);
2896 }
2897
mana_rss_table_alloc(struct mana_port_context * apc)2898 static int mana_rss_table_alloc(struct mana_port_context *apc)
2899 {
2900 if (!apc->indir_table_sz) {
2901 netdev_err(apc->ndev,
2902 "Indirection table size not set for vPort %d\n",
2903 apc->port_idx);
2904 return -EINVAL;
2905 }
2906
2907 apc->indir_table = kcalloc(apc->indir_table_sz, sizeof(u32), GFP_KERNEL);
2908 if (!apc->indir_table)
2909 return -ENOMEM;
2910
2911 apc->rxobj_table = kzalloc_objs(mana_handle_t, apc->indir_table_sz);
2912 if (!apc->rxobj_table) {
2913 kfree(apc->indir_table);
2914 return -ENOMEM;
2915 }
2916
2917 return 0;
2918 }
2919
mana_rss_table_init(struct mana_port_context * apc)2920 static void mana_rss_table_init(struct mana_port_context *apc)
2921 {
2922 int i;
2923
2924 for (i = 0; i < apc->indir_table_sz; i++)
2925 apc->indir_table[i] =
2926 ethtool_rxfh_indir_default(i, apc->num_queues);
2927 }
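
/* A sketch of the default indirection table produced above.
 * ethtool_rxfh_indir_default(i, n) spreads entries round-robin across
 * the queues, e.g. with num_queues == 4:
 *
 *	indir_table[] = { 0, 1, 2, 3, 0, 1, 2, 3, ... }
 */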
2928
mana_config_rss(struct mana_port_context * apc,enum TRI_STATE rx,bool update_hash,bool update_tab)2929 int mana_config_rss(struct mana_port_context *apc, enum TRI_STATE rx,
2930 bool update_hash, bool update_tab)
2931 {
2932 u32 queue_idx;
2933 int err;
2934 int i;
2935
2936 if (update_tab) {
2937 for (i = 0; i < apc->indir_table_sz; i++) {
2938 queue_idx = apc->indir_table[i];
2939 apc->rxobj_table[i] = apc->rxqs[queue_idx]->rxobj;
2940 }
2941 }
2942
2943 err = mana_cfg_vport_steering(apc, rx, true, update_hash, update_tab);
2944 if (err)
2945 return err;
2946
2947 mana_fence_rqs(apc);
2948
2949 return 0;
2950 }
2951
mana_query_gf_stats(struct mana_context * ac)2952 int mana_query_gf_stats(struct mana_context *ac)
2953 {
2954 struct gdma_context *gc = ac->gdma_dev->gdma_context;
2955 struct mana_query_gf_stat_resp resp = {};
2956 struct mana_query_gf_stat_req req = {};
2957 struct device *dev = gc->dev;
2958 int err;
2959
2960 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_GF_STAT,
2961 sizeof(req), sizeof(resp));
2962 req.hdr.resp.msg_version = GDMA_MESSAGE_V2;
2963 req.req_stats = STATISTICS_FLAGS_RX_DISCARDS_NO_WQE |
2964 STATISTICS_FLAGS_RX_ERRORS_VPORT_DISABLED |
2965 STATISTICS_FLAGS_HC_RX_BYTES |
2966 STATISTICS_FLAGS_HC_RX_UCAST_PACKETS |
2967 STATISTICS_FLAGS_HC_RX_UCAST_BYTES |
2968 STATISTICS_FLAGS_HC_RX_MCAST_PACKETS |
2969 STATISTICS_FLAGS_HC_RX_MCAST_BYTES |
2970 STATISTICS_FLAGS_HC_RX_BCAST_PACKETS |
2971 STATISTICS_FLAGS_HC_RX_BCAST_BYTES |
2972 STATISTICS_FLAGS_TX_ERRORS_GF_DISABLED |
2973 STATISTICS_FLAGS_TX_ERRORS_VPORT_DISABLED |
2974 STATISTICS_FLAGS_TX_ERRORS_INVAL_VPORT_OFFSET_PACKETS |
2975 STATISTICS_FLAGS_TX_ERRORS_VLAN_ENFORCEMENT |
2976 STATISTICS_FLAGS_TX_ERRORS_ETH_TYPE_ENFORCEMENT |
2977 STATISTICS_FLAGS_TX_ERRORS_SA_ENFORCEMENT |
2978 STATISTICS_FLAGS_TX_ERRORS_SQPDID_ENFORCEMENT |
2979 STATISTICS_FLAGS_TX_ERRORS_CQPDID_ENFORCEMENT |
2980 STATISTICS_FLAGS_TX_ERRORS_MTU_VIOLATION |
2981 STATISTICS_FLAGS_TX_ERRORS_INVALID_OOB |
2982 STATISTICS_FLAGS_HC_TX_BYTES |
2983 STATISTICS_FLAGS_HC_TX_UCAST_PACKETS |
2984 STATISTICS_FLAGS_HC_TX_UCAST_BYTES |
2985 STATISTICS_FLAGS_HC_TX_MCAST_PACKETS |
2986 STATISTICS_FLAGS_HC_TX_MCAST_BYTES |
2987 STATISTICS_FLAGS_HC_TX_BCAST_PACKETS |
2988 STATISTICS_FLAGS_HC_TX_BCAST_BYTES |
2989 STATISTICS_FLAGS_TX_ERRORS_GDMA_ERROR;
2990
2991 err = mana_send_request(ac, &req, sizeof(req), &resp,
2992 sizeof(resp));
2993 if (err) {
2994 dev_err(dev, "Failed to query GF stats: %d\n", err);
2995 return err;
2996 }
2997 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_GF_STAT,
2998 sizeof(resp));
2999 if (err || resp.hdr.status) {
3000 dev_err(dev, "Failed to query GF stats: %d, 0x%x\n", err,
3001 resp.hdr.status);
3002 return err;
3003 }
3004
3005 ac->hc_stats.hc_rx_discards_no_wqe = resp.rx_discards_nowqe;
3006 ac->hc_stats.hc_rx_err_vport_disabled = resp.rx_err_vport_disabled;
3007 ac->hc_stats.hc_rx_bytes = resp.hc_rx_bytes;
3008 ac->hc_stats.hc_rx_ucast_pkts = resp.hc_rx_ucast_pkts;
3009 ac->hc_stats.hc_rx_ucast_bytes = resp.hc_rx_ucast_bytes;
3010 ac->hc_stats.hc_rx_bcast_pkts = resp.hc_rx_bcast_pkts;
3011 ac->hc_stats.hc_rx_bcast_bytes = resp.hc_rx_bcast_bytes;
3012 ac->hc_stats.hc_rx_mcast_pkts = resp.hc_rx_mcast_pkts;
3013 ac->hc_stats.hc_rx_mcast_bytes = resp.hc_rx_mcast_bytes;
3014 ac->hc_stats.hc_tx_err_gf_disabled = resp.tx_err_gf_disabled;
3015 ac->hc_stats.hc_tx_err_vport_disabled = resp.tx_err_vport_disabled;
3016 ac->hc_stats.hc_tx_err_inval_vportoffset_pkt =
3017 resp.tx_err_inval_vport_offset_pkt;
3018 ac->hc_stats.hc_tx_err_vlan_enforcement =
3019 resp.tx_err_vlan_enforcement;
3020 ac->hc_stats.hc_tx_err_eth_type_enforcement =
3021 resp.tx_err_ethtype_enforcement;
3022 ac->hc_stats.hc_tx_err_sa_enforcement = resp.tx_err_SA_enforcement;
3023 ac->hc_stats.hc_tx_err_sqpdid_enforcement =
3024 resp.tx_err_SQPDID_enforcement;
3025 ac->hc_stats.hc_tx_err_cqpdid_enforcement =
3026 resp.tx_err_CQPDID_enforcement;
3027 ac->hc_stats.hc_tx_err_mtu_violation = resp.tx_err_mtu_violation;
3028 ac->hc_stats.hc_tx_err_inval_oob = resp.tx_err_inval_oob;
3029 ac->hc_stats.hc_tx_bytes = resp.hc_tx_bytes;
3030 ac->hc_stats.hc_tx_ucast_pkts = resp.hc_tx_ucast_pkts;
3031 ac->hc_stats.hc_tx_ucast_bytes = resp.hc_tx_ucast_bytes;
3032 ac->hc_stats.hc_tx_bcast_pkts = resp.hc_tx_bcast_pkts;
3033 ac->hc_stats.hc_tx_bcast_bytes = resp.hc_tx_bcast_bytes;
3034 ac->hc_stats.hc_tx_mcast_pkts = resp.hc_tx_mcast_pkts;
3035 ac->hc_stats.hc_tx_mcast_bytes = resp.hc_tx_mcast_bytes;
3036 ac->hc_stats.hc_tx_err_gdma = resp.tx_err_gdma;
3037
3038 return 0;
3039 }
3040
mana_query_phy_stats(struct mana_port_context * apc)3041 void mana_query_phy_stats(struct mana_port_context *apc)
3042 {
3043 struct mana_query_phy_stat_resp resp = {};
3044 struct mana_query_phy_stat_req req = {};
3045 struct net_device *ndev = apc->ndev;
3046 int err;
3047
3048 mana_gd_init_req_hdr(&req.hdr, MANA_QUERY_PHY_STAT,
3049 sizeof(req), sizeof(resp));
3050 err = mana_send_request(apc->ac, &req, sizeof(req), &resp,
3051 sizeof(resp));
3052 if (err)
3053 return;
3054
3055 err = mana_verify_resp_hdr(&resp.hdr, MANA_QUERY_PHY_STAT,
3056 sizeof(resp));
3057 if (err || resp.hdr.status) {
3058 netdev_err(ndev,
3059 "Failed to query PHY stats: %d, resp:0x%x\n",
3060 err, resp.hdr.status);
3061 return;
3062 }
3063
3064 /* Aggregate drop counters */
3065 apc->phy_stats.rx_pkt_drop_phy = resp.rx_pkt_drop_phy;
3066 apc->phy_stats.tx_pkt_drop_phy = resp.tx_pkt_drop_phy;
3067
3068 /* Per TC traffic Counters */
3069 apc->phy_stats.rx_pkt_tc0_phy = resp.rx_pkt_tc0_phy;
3070 apc->phy_stats.tx_pkt_tc0_phy = resp.tx_pkt_tc0_phy;
3071 apc->phy_stats.rx_pkt_tc1_phy = resp.rx_pkt_tc1_phy;
3072 apc->phy_stats.tx_pkt_tc1_phy = resp.tx_pkt_tc1_phy;
3073 apc->phy_stats.rx_pkt_tc2_phy = resp.rx_pkt_tc2_phy;
3074 apc->phy_stats.tx_pkt_tc2_phy = resp.tx_pkt_tc2_phy;
3075 apc->phy_stats.rx_pkt_tc3_phy = resp.rx_pkt_tc3_phy;
3076 apc->phy_stats.tx_pkt_tc3_phy = resp.tx_pkt_tc3_phy;
3077 apc->phy_stats.rx_pkt_tc4_phy = resp.rx_pkt_tc4_phy;
3078 apc->phy_stats.tx_pkt_tc4_phy = resp.tx_pkt_tc4_phy;
3079 apc->phy_stats.rx_pkt_tc5_phy = resp.rx_pkt_tc5_phy;
3080 apc->phy_stats.tx_pkt_tc5_phy = resp.tx_pkt_tc5_phy;
3081 apc->phy_stats.rx_pkt_tc6_phy = resp.rx_pkt_tc6_phy;
3082 apc->phy_stats.tx_pkt_tc6_phy = resp.tx_pkt_tc6_phy;
3083 apc->phy_stats.rx_pkt_tc7_phy = resp.rx_pkt_tc7_phy;
3084 apc->phy_stats.tx_pkt_tc7_phy = resp.tx_pkt_tc7_phy;
3085
3086 /* Per TC byte Counters */
3087 apc->phy_stats.rx_byte_tc0_phy = resp.rx_byte_tc0_phy;
3088 apc->phy_stats.tx_byte_tc0_phy = resp.tx_byte_tc0_phy;
3089 apc->phy_stats.rx_byte_tc1_phy = resp.rx_byte_tc1_phy;
3090 apc->phy_stats.tx_byte_tc1_phy = resp.tx_byte_tc1_phy;
3091 apc->phy_stats.rx_byte_tc2_phy = resp.rx_byte_tc2_phy;
3092 apc->phy_stats.tx_byte_tc2_phy = resp.tx_byte_tc2_phy;
3093 apc->phy_stats.rx_byte_tc3_phy = resp.rx_byte_tc3_phy;
3094 apc->phy_stats.tx_byte_tc3_phy = resp.tx_byte_tc3_phy;
3095 apc->phy_stats.rx_byte_tc4_phy = resp.rx_byte_tc4_phy;
3096 apc->phy_stats.tx_byte_tc4_phy = resp.tx_byte_tc4_phy;
3097 apc->phy_stats.rx_byte_tc5_phy = resp.rx_byte_tc5_phy;
3098 apc->phy_stats.tx_byte_tc5_phy = resp.tx_byte_tc5_phy;
3099 apc->phy_stats.rx_byte_tc6_phy = resp.rx_byte_tc6_phy;
3100 apc->phy_stats.tx_byte_tc6_phy = resp.tx_byte_tc6_phy;
3101 apc->phy_stats.rx_byte_tc7_phy = resp.rx_byte_tc7_phy;
3102 apc->phy_stats.tx_byte_tc7_phy = resp.tx_byte_tc7_phy;
3103
3104 /* Per TC pause Counters */
3105 apc->phy_stats.rx_pause_tc0_phy = resp.rx_pause_tc0_phy;
3106 apc->phy_stats.tx_pause_tc0_phy = resp.tx_pause_tc0_phy;
3107 apc->phy_stats.rx_pause_tc1_phy = resp.rx_pause_tc1_phy;
3108 apc->phy_stats.tx_pause_tc1_phy = resp.tx_pause_tc1_phy;
3109 apc->phy_stats.rx_pause_tc2_phy = resp.rx_pause_tc2_phy;
3110 apc->phy_stats.tx_pause_tc2_phy = resp.tx_pause_tc2_phy;
3111 apc->phy_stats.rx_pause_tc3_phy = resp.rx_pause_tc3_phy;
3112 apc->phy_stats.tx_pause_tc3_phy = resp.tx_pause_tc3_phy;
3113 apc->phy_stats.rx_pause_tc4_phy = resp.rx_pause_tc4_phy;
3114 apc->phy_stats.tx_pause_tc4_phy = resp.tx_pause_tc4_phy;
3115 apc->phy_stats.rx_pause_tc5_phy = resp.rx_pause_tc5_phy;
3116 apc->phy_stats.tx_pause_tc5_phy = resp.tx_pause_tc5_phy;
3117 apc->phy_stats.rx_pause_tc6_phy = resp.rx_pause_tc6_phy;
3118 apc->phy_stats.tx_pause_tc6_phy = resp.tx_pause_tc6_phy;
3119 apc->phy_stats.rx_pause_tc7_phy = resp.rx_pause_tc7_phy;
3120 apc->phy_stats.tx_pause_tc7_phy = resp.tx_pause_tc7_phy;
3121 }
3122
mana_init_port(struct net_device * ndev)3123 static int mana_init_port(struct net_device *ndev)
3124 {
3125 struct mana_port_context *apc = netdev_priv(ndev);
3126 struct gdma_dev *gd = apc->ac->gdma_dev;
3127 u32 max_txq, max_rxq, max_queues;
3128 int port_idx = apc->port_idx;
3129 struct gdma_context *gc;
3130 char vport[32];
3131 int err;
3132
3133 err = mana_init_port_context(apc);
3134 if (err)
3135 return err;
3136
3137 gc = gd->gdma_context;
3138
3139 err = mana_query_vport_cfg(apc, port_idx, &max_txq, &max_rxq,
3140 &apc->indir_table_sz);
3141 if (err) {
3142 netdev_err(ndev, "Failed to query info for vPort %d\n",
3143 port_idx);
3144 goto reset_apc;
3145 }
3146
3147 max_queues = min_t(u32, max_txq, max_rxq);
3148 if (apc->max_queues > max_queues)
3149 apc->max_queues = max_queues;
3150
3151 if (apc->num_queues > apc->max_queues)
3152 apc->num_queues = apc->max_queues;
3153
3154 eth_hw_addr_set(ndev, apc->mac_addr);
3155 sprintf(vport, "vport%d", port_idx);
3156 apc->mana_port_debugfs = debugfs_create_dir(vport, gc->mana_pci_debugfs);
3157 debugfs_create_u32("current_speed", 0400, apc->mana_port_debugfs,
3158 &apc->speed);
3159 return 0;
3160
3161 reset_apc:
3162 mana_cleanup_port_context(apc);
3163 return err;
3164 }
3165
mana_alloc_queues(struct net_device * ndev)3166 int mana_alloc_queues(struct net_device *ndev)
3167 {
3168 struct mana_port_context *apc = netdev_priv(ndev);
3169 struct gdma_dev *gd = apc->ac->gdma_dev;
3170 int err;
3171
3172 err = mana_create_vport(apc, ndev);
3173 if (err) {
3174 netdev_err(ndev, "Failed to create vPort %u : %d\n", apc->port_idx, err);
3175 return err;
3176 }
3177
3178 err = netif_set_real_num_tx_queues(ndev, apc->num_queues);
3179 if (err) {
3180 netdev_err(ndev,
3181 "netif_set_real_num_tx_queues () failed for ndev with num_queues %u : %d\n",
3182 apc->num_queues, err);
3183 goto destroy_vport;
3184 }
3185
3186 err = mana_add_rx_queues(apc, ndev);
3187 if (err)
3188 goto destroy_vport;
3189
3190 apc->rss_state = apc->num_queues > 1 ? TRI_STATE_TRUE : TRI_STATE_FALSE;
3191
3192 err = netif_set_real_num_rx_queues(ndev, apc->num_queues);
3193 if (err) {
3194 netdev_err(ndev,
3195 "netif_set_real_num_rx_queues () failed for ndev with num_queues %u : %d\n",
3196 apc->num_queues, err);
3197 goto destroy_vport;
3198 }
3199
3200 mana_rss_table_init(apc);
3201
3202 err = mana_config_rss(apc, TRI_STATE_TRUE, true, true);
3203 if (err) {
3204 netdev_err(ndev, "Failed to configure RSS table: %d\n", err);
3205 goto destroy_vport;
3206 }
3207
3208 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode) {
3209 err = mana_pf_register_filter(apc);
3210 if (err)
3211 goto destroy_vport;
3212 }
3213
3214 mana_chn_setxdp(apc, mana_xdp_get(apc));
3215
3216 return 0;
3217
3218 destroy_vport:
3219 mana_destroy_vport(apc);
3220 return err;
3221 }
3222
mana_attach(struct net_device * ndev)3223 int mana_attach(struct net_device *ndev)
3224 {
3225 struct mana_port_context *apc = netdev_priv(ndev);
3226 int err;
3227
3228 ASSERT_RTNL();
3229
3230 err = mana_init_port(ndev);
3231 if (err)
3232 return err;
3233
3234 if (apc->port_st_save) {
3235 err = mana_alloc_queues(ndev);
3236 if (err) {
3237 mana_cleanup_port_context(apc);
3238 return err;
3239 }
3240 }
3241
3242 apc->port_is_up = apc->port_st_save;
3243
3244 /* Ensure port state updated before txq state */
3245 smp_wmb();
3246
3247 netif_device_attach(ndev);
3248
3249 return 0;
3250 }
3251
mana_dealloc_queues(struct net_device * ndev)3252 static int mana_dealloc_queues(struct net_device *ndev)
3253 {
3254 struct mana_port_context *apc = netdev_priv(ndev);
3255 unsigned long timeout = jiffies + 120 * HZ;
3256 struct gdma_dev *gd = apc->ac->gdma_dev;
3257 struct mana_txq *txq;
3258 struct sk_buff *skb;
3259 int i, err;
3260 u32 tsleep;
3261
3262 if (apc->port_is_up)
3263 return -EINVAL;
3264
3265 mana_chn_setxdp(apc, NULL);
3266
3267 if (gd->gdma_context->is_pf && !apc->ac->bm_hostmode)
3268 mana_pf_deregister_filter(apc);
3269
3270 	/* No packet can be transmitted now since apc->port_is_up is false.
3271 	 * There is still a tiny chance that mana_poll_tx_cq() can re-enable
3272 	 * a txq because it may not see apc->port_is_up being cleared to
3273 	 * false in time, but that doesn't matter since mana_start_xmit()
3274 	 * drops any new packets while apc->port_is_up is false.
3275 	 *
3276 	 * Drain all the in-flight TX packets.
3277 	 * A timeout of 120 seconds for all the queues is used.
3278 	 * This will break the while loop when h/w is not responding.
3279 	 * The 120-second value was chosen considering the maximum
3280 	 * number of queues.
3281 	 */
3282
3283 for (i = 0; i < apc->num_queues; i++) {
3284 txq = &apc->tx_qp[i].txq;
3285 tsleep = 1000;
3286 while (atomic_read(&txq->pending_sends) > 0 &&
3287 time_before(jiffies, timeout)) {
3288 usleep_range(tsleep, tsleep + 1000);
3289 tsleep <<= 1;
3290 }
3291 if (atomic_read(&txq->pending_sends)) {
3292 err = pcie_flr(to_pci_dev(gd->gdma_context->dev));
3293 if (err) {
3294 netdev_err(ndev, "flr failed %d with %d pkts pending in txq %u\n",
3295 err, atomic_read(&txq->pending_sends),
3296 txq->gdma_txq_id);
3297 }
3298 break;
3299 }
3300 }
3301
3302 for (i = 0; i < apc->num_queues; i++) {
3303 txq = &apc->tx_qp[i].txq;
3304 while ((skb = skb_dequeue(&txq->pending_skbs))) {
3305 mana_unmap_skb(skb, apc);
3306 dev_kfree_skb_any(skb);
3307 }
3308 atomic_set(&txq->pending_sends, 0);
3309 }
3310 	/* At this point the queues can no longer be woken up, because
3311 	 * mana_poll_tx_cq() can no longer be running.
3312 	 */
3313
3314 apc->rss_state = TRI_STATE_FALSE;
3315 err = mana_config_rss(apc, TRI_STATE_FALSE, false, false);
3316 if (err && mana_en_need_log(apc, err))
3317 netdev_err(ndev, "Failed to disable vPort: %d\n", err);
3318
3319 	/* Even in the error case, still need to clean up the vPort */
3320 mana_destroy_vport(apc);
3321
3322 return 0;
3323 }
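
/* Illustrative timing of the TX drain loop above, restated from the
 * code. The per-queue wait uses exponential backoff starting at ~1 ms
 * (usleep_range(tsleep, tsleep + 1000) with tsleep <<= 1), i.e. sleeps
 * of roughly 1, 2, 4, 8, ... ms, bounded overall by the shared
 * 120-second jiffies timeout; if packets are still pending afterwards,
 * an FLR of the PCI function is attempted.
 */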
3324
mana_detach(struct net_device * ndev,bool from_close)3325 int mana_detach(struct net_device *ndev, bool from_close)
3326 {
3327 struct mana_port_context *apc = netdev_priv(ndev);
3328 int err;
3329
3330 ASSERT_RTNL();
3331
3332 apc->port_st_save = apc->port_is_up;
3333 apc->port_is_up = false;
3334
3335 /* Ensure port state updated before txq state */
3336 smp_wmb();
3337
3338 netif_tx_disable(ndev);
3339
3340 if (apc->port_st_save) {
3341 err = mana_dealloc_queues(ndev);
3342 if (err) {
3343 netdev_err(ndev, "%s failed to deallocate queues: %d\n", __func__, err);
3344 return err;
3345 }
3346 }
3347
3348 if (!from_close) {
3349 netif_device_detach(ndev);
3350 mana_cleanup_port_context(apc);
3351 }
3352
3353 return 0;
3354 }
3355
mana_probe_port(struct mana_context * ac,int port_idx,struct net_device ** ndev_storage)3356 static int mana_probe_port(struct mana_context *ac, int port_idx,
3357 struct net_device **ndev_storage)
3358 {
3359 struct gdma_context *gc = ac->gdma_dev->gdma_context;
3360 struct mana_port_context *apc;
3361 struct net_device *ndev;
3362 int err;
3363
3364 ndev = alloc_etherdev_mq(sizeof(struct mana_port_context),
3365 gc->max_num_queues);
3366 if (!ndev)
3367 return -ENOMEM;
3368
3369 *ndev_storage = ndev;
3370
3371 apc = netdev_priv(ndev);
3372 apc->ac = ac;
3373 apc->ndev = ndev;
3374 apc->max_queues = gc->max_num_queues;
3375 /* Use MANA_DEF_NUM_QUEUES as default, still honoring the HW limit */
3376 apc->num_queues = min(gc->max_num_queues, MANA_DEF_NUM_QUEUES);
3377 apc->tx_queue_size = DEF_TX_BUFFERS_PER_QUEUE;
3378 apc->rx_queue_size = DEF_RX_BUFFERS_PER_QUEUE;
3379 apc->port_handle = INVALID_MANA_HANDLE;
3380 apc->pf_filter_handle = INVALID_MANA_HANDLE;
3381 apc->port_idx = port_idx;
3382 apc->cqe_coalescing_enable = 0;
3383
3384 mutex_init(&apc->vport_mutex);
3385 apc->vport_use_count = 0;
3386
3387 ndev->netdev_ops = &mana_devops;
3388 ndev->ethtool_ops = &mana_ethtool_ops;
3389 ndev->mtu = ETH_DATA_LEN;
3390 ndev->max_mtu = gc->adapter_mtu - ETH_HLEN;
3391 ndev->min_mtu = ETH_MIN_MTU;
3392 ndev->needed_headroom = MANA_HEADROOM;
3393 ndev->dev_port = port_idx;
3394 /* Recommended timeout based on HW FPGA re-config scenario. */
3395 ndev->watchdog_timeo = 15 * HZ;
3396 SET_NETDEV_DEV(ndev, gc->dev);
3397
3398 netif_set_tso_max_size(ndev, GSO_MAX_SIZE);
3399
3400 netif_carrier_off(ndev);
3401
3402 netdev_rss_key_fill(apc->hashkey, MANA_HASH_KEY_SIZE);
3403
3404 err = mana_init_port(ndev);
3405 if (err)
3406 goto free_net;
3407
3408 err = mana_rss_table_alloc(apc);
3409 if (err)
3410 goto reset_apc;
3411
3412 	/* Initialize the per-port queue reset work. */
3413 INIT_WORK(&apc->queue_reset_work,
3414 mana_per_port_queue_reset_work_handler);
3415
3416 netdev_lockdep_set_classes(ndev);
3417
3418 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3419 ndev->hw_features |= NETIF_F_RXCSUM;
3420 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
3421 ndev->hw_features |= NETIF_F_RXHASH;
3422 ndev->features = ndev->hw_features | NETIF_F_HW_VLAN_CTAG_TX |
3423 NETIF_F_HW_VLAN_CTAG_RX;
3424 ndev->vlan_features = ndev->features;
3425 xdp_set_features_flag(ndev, NETDEV_XDP_ACT_BASIC |
3426 NETDEV_XDP_ACT_REDIRECT |
3427 NETDEV_XDP_ACT_NDO_XMIT);
3428
3429 err = register_netdev(ndev);
3430 if (err) {
3431 netdev_err(ndev, "Unable to register netdev.\n");
3432 goto free_indir;
3433 }
3434
3435 netif_carrier_on(ndev);
3436
3437 return 0;
3438
3439 free_indir:
3440 mana_cleanup_indir_table(apc);
3441 reset_apc:
3442 mana_cleanup_port_context(apc);
3443 free_net:
3444 *ndev_storage = NULL;
3445 netdev_err(ndev, "Failed to probe vPort %d: %d\n", port_idx, err);
3446 free_netdev(ndev);
3447 return err;
3448 }
3449
adev_release(struct device * dev)3450 static void adev_release(struct device *dev)
3451 {
3452 struct mana_adev *madev = container_of(dev, struct mana_adev, adev.dev);
3453
3454 kfree(madev);
3455 }
3456
remove_adev(struct gdma_dev * gd)3457 static void remove_adev(struct gdma_dev *gd)
3458 {
3459 struct auxiliary_device *adev = gd->adev;
3460 int id = adev->id;
3461
3462 auxiliary_device_delete(adev);
3463 auxiliary_device_uninit(adev);
3464
3465 mana_adev_idx_free(id);
3466 gd->adev = NULL;
3467 }
3468
add_adev(struct gdma_dev * gd,const char * name)3469 static int add_adev(struct gdma_dev *gd, const char *name)
3470 {
3471 struct auxiliary_device *adev;
3472 struct mana_adev *madev;
3473 int ret;
3474 int id;
3475
3476 madev = kzalloc_obj(*madev);
3477 if (!madev)
3478 return -ENOMEM;
3479
3480 adev = &madev->adev;
3481 ret = mana_adev_idx_alloc();
3482 if (ret < 0)
3483 goto idx_fail;
3484 id = ret;
3485 adev->id = id;
3486
3487 adev->name = name;
3488 adev->dev.parent = gd->gdma_context->dev;
3489 adev->dev.release = adev_release;
3490 madev->mdev = gd;
3491
3492 ret = auxiliary_device_init(adev);
3493 if (ret)
3494 goto init_fail;
3495
3496 /* madev is owned by the auxiliary device */
3497 madev = NULL;
3498 ret = auxiliary_device_add(adev);
3499 if (ret)
3500 goto add_fail;
3501
3502 gd->adev = adev;
3503 dev_dbg(gd->gdma_context->dev,
3504 "Auxiliary device added successfully\n");
3505 return 0;
3506
3507 add_fail:
3508 auxiliary_device_uninit(adev);
3509
3510 init_fail:
3511 mana_adev_idx_free(id);
3512
3513 idx_fail:
3514 kfree(madev);
3515
3516 return ret;
3517 }
3518
mana_rdma_service_handle(struct work_struct * work)3519 static void mana_rdma_service_handle(struct work_struct *work)
3520 {
3521 struct mana_service_work *serv_work =
3522 container_of(work, struct mana_service_work, work);
3523 struct gdma_dev *gd = serv_work->gdma_dev;
3524 struct device *dev = gd->gdma_context->dev;
3525 int ret;
3526
3527 if (READ_ONCE(gd->rdma_teardown))
3528 goto out;
3529
3530 switch (serv_work->event) {
3531 case GDMA_SERVICE_TYPE_RDMA_SUSPEND:
3532 if (!gd->adev || gd->is_suspended)
3533 break;
3534
3535 remove_adev(gd);
3536 gd->is_suspended = true;
3537 break;
3538
3539 case GDMA_SERVICE_TYPE_RDMA_RESUME:
3540 if (!gd->is_suspended)
3541 break;
3542
3543 ret = add_adev(gd, "rdma");
3544 if (ret)
3545 dev_err(dev, "Failed to add adev on resume: %d\n", ret);
3546 else
3547 gd->is_suspended = false;
3548 break;
3549
3550 default:
3551 dev_warn(dev, "unknown adev service event %u\n",
3552 serv_work->event);
3553 break;
3554 }
3555
3556 out:
3557 kfree(serv_work);
3558 }
3559
mana_rdma_service_event(struct gdma_context * gc,enum gdma_service_type event)3560 int mana_rdma_service_event(struct gdma_context *gc, enum gdma_service_type event)
3561 {
3562 struct gdma_dev *gd = &gc->mana_ib;
3563 struct mana_service_work *serv_work;
3564
3565 if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3566 /* RDMA device is not detected on pci */
3567 return 0;
3568 }
3569
3570 serv_work = kzalloc_obj(*serv_work, GFP_ATOMIC);
3571 if (!serv_work)
3572 return -ENOMEM;
3573
3574 serv_work->event = event;
3575 serv_work->gdma_dev = gd;
3576
3577 INIT_WORK(&serv_work->work, mana_rdma_service_handle);
3578 queue_work(gc->service_wq, &serv_work->work);
3579
3580 return 0;
3581 }
3582
3583 #define MANA_GF_STATS_PERIOD (2 * HZ)
3584
mana_gf_stats_work_handler(struct work_struct * work)3585 static void mana_gf_stats_work_handler(struct work_struct *work)
3586 {
3587 struct mana_context *ac =
3588 container_of(to_delayed_work(work), struct mana_context, gf_stats_work);
3589 struct gdma_context *gc = ac->gdma_dev->gdma_context;
3590 int err;
3591
3592 err = mana_query_gf_stats(ac);
3593 if (err == -ETIMEDOUT) {
3594 /* HWC timeout detected - reset stats and stop rescheduling */
3595 ac->hwc_timeout_occurred = true;
3596 memset(&ac->hc_stats, 0, sizeof(ac->hc_stats));
3597 dev_warn(gc->dev,
3598 "Gf stats wk handler: gf stats query timed out.\n");
3599 		/* The HWC timeout indicates a faulty HW state that
3600 		 * needs a reset.
3601 		 */
3602 mana_schedule_serv_work(gc, GDMA_EQE_HWC_RESET_REQUEST);
3603 return;
3604 }
3605 schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
3606 }
3607
mana_probe(struct gdma_dev * gd,bool resuming)3608 int mana_probe(struct gdma_dev *gd, bool resuming)
3609 {
3610 struct gdma_context *gc = gd->gdma_context;
3611 struct mana_context *ac = gd->driver_data;
3612 struct mana_port_context *apc = NULL;
3613 struct device *dev = gc->dev;
3614 u8 bm_hostmode = 0;
3615 u16 num_ports = 0;
3616 int err;
3617 int i;
3618
3619 dev_info(dev,
3620 "Microsoft Azure Network Adapter protocol version: %d.%d.%d\n",
3621 MANA_MAJOR_VERSION, MANA_MINOR_VERSION, MANA_MICRO_VERSION);
3622
3623 err = mana_gd_register_device(gd);
3624 if (err)
3625 return err;
3626
3627 if (!resuming) {
3628 ac = kzalloc_obj(*ac);
3629 if (!ac)
3630 return -ENOMEM;
3631
3632 ac->gdma_dev = gd;
3633 gd->driver_data = ac;
3634 }
3635
3636 err = mana_create_eq(ac);
3637 if (err) {
3638 dev_err(dev, "Failed to create EQs: %d\n", err);
3639 goto out;
3640 }
3641
3642 err = mana_query_device_cfg(ac, MANA_MAJOR_VERSION, MANA_MINOR_VERSION,
3643 MANA_MICRO_VERSION, &num_ports, &bm_hostmode);
3644 if (err)
3645 goto out;
3646
3647 ac->bm_hostmode = bm_hostmode;
3648
3649 if (!resuming) {
3650 ac->num_ports = num_ports;
3651
3652 INIT_WORK(&ac->link_change_work, mana_link_state_handle);
3653 } else {
3654 if (ac->num_ports != num_ports) {
3655 dev_err(dev, "The number of vPorts changed: %d->%d\n",
3656 ac->num_ports, num_ports);
3657 err = -EPROTO;
3658 goto out;
3659 }
3660
3661 enable_work(&ac->link_change_work);
3662 }
3663
3664 if (ac->num_ports == 0)
3665 dev_err(dev, "Failed to detect any vPort\n");
3666
3667 if (ac->num_ports > MAX_PORTS_IN_MANA_DEV)
3668 ac->num_ports = MAX_PORTS_IN_MANA_DEV;
3669
3670 ac->per_port_queue_reset_wq =
3671 create_singlethread_workqueue("mana_per_port_queue_reset_wq");
3672 if (!ac->per_port_queue_reset_wq) {
3673 dev_err(dev, "Failed to allocate per port queue reset workqueue\n");
3674 err = -ENOMEM;
3675 goto out;
3676 }
3677
3678 if (!resuming) {
3679 for (i = 0; i < ac->num_ports; i++) {
3680 err = mana_probe_port(ac, i, &ac->ports[i]);
3681 			/* Log the port for which the probe failed and stop
3682 			 * probing subsequent ports.
3683 			 * Note that ports whose probes succeeded keep running,
3684 			 * unless add_adev fails too.
3685 			 */
3686 if (err) {
3687 dev_err(dev, "Probe Failed for port %d\n", i);
3688 break;
3689 }
3690 }
3691 } else {
3692 for (i = 0; i < ac->num_ports; i++) {
3693 rtnl_lock();
3694 apc = netdev_priv(ac->ports[i]);
3695 enable_work(&apc->queue_reset_work);
3696 err = mana_attach(ac->ports[i]);
3697 rtnl_unlock();
3698 			/* Log the port for which the attach failed and stop
3699 			 * attaching subsequent ports.
3700 			 * Note that ports that attached successfully keep
3701 			 * running, unless add_adev fails too.
3702 			 */
3703 if (err) {
3704 dev_err(dev, "Attach Failed for port %d\n", i);
3705 break;
3706 }
3707 }
3708 }
3709
3710 err = add_adev(gd, "eth");
3711
3712 INIT_DELAYED_WORK(&ac->gf_stats_work, mana_gf_stats_work_handler);
3713 schedule_delayed_work(&ac->gf_stats_work, MANA_GF_STATS_PERIOD);
3714
3715 out:
3716 if (err) {
3717 mana_remove(gd, false);
3718 } else {
3719 dev_dbg(dev, "gd=%p, id=%u, num_ports=%d, type=%u, instance=%u\n",
3720 gd, gd->dev_id.as_uint32, ac->num_ports,
3721 gd->dev_id.type, gd->dev_id.instance);
3722 dev_dbg(dev, "%s succeeded\n", __func__);
3723 }
3724
3725 return err;
3726 }
3727
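/* mana_remove() tears down the MANA Ethernet function: stop the link-change
 * and GF stats work, remove the auxiliary device, detach every port (and,
 * unless @suspending, unregister and free its net_device), destroy the EQs,
 * destroy the per-port queue-reset workqueue and deregister from the GDMA
 * layer. When @suspending, the context is kept for a later mana_probe().
 */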
3728 void mana_remove(struct gdma_dev *gd, bool suspending)
3729 {
3730 struct gdma_context *gc = gd->gdma_context;
3731 struct mana_context *ac = gd->driver_data;
3732 struct mana_port_context *apc;
3733 struct device *dev = gc->dev;
3734 struct net_device *ndev;
3735 int err;
3736 int i;
3737
3738 disable_work_sync(&ac->link_change_work);
3739 cancel_delayed_work_sync(&ac->gf_stats_work);
3740
3741 /* adev currently doesn't support suspending, always remove it */
3742 if (gd->adev)
3743 remove_adev(gd);
3744
3745 for (i = 0; i < ac->num_ports; i++) {
3746 ndev = ac->ports[i];
3747 if (!ndev) {
3748 if (i == 0)
3749 dev_err(dev, "No net device to remove\n");
3750 goto out;
3751 }
3752
3753 apc = netdev_priv(ndev);
3754 disable_work_sync(&apc->queue_reset_work);
3755
3756 /* All cleanup actions should stay after rtnl_lock(), otherwise
3757 * other functions may access partially cleaned up data.
3758 */
3759 rtnl_lock();
3760
3761 err = mana_detach(ndev, false);
3762 if (err)
3763 netdev_err(ndev, "Failed to detach vPort %d: %d\n",
3764 i, err);
3765
3766 if (suspending) {
3767 /* No need to unregister the ndev. */
3768 rtnl_unlock();
3769 continue;
3770 }
3771
3772 unregister_netdevice(ndev);
3773 mana_cleanup_indir_table(apc);
3774
3775 rtnl_unlock();
3776
3777 free_netdev(ndev);
3778 }
3779
3780 mana_destroy_eq(ac);
3781 out:
3782 if (ac->per_port_queue_reset_wq) {
3783 destroy_workqueue(ac->per_port_queue_reset_wq);
3784 ac->per_port_queue_reset_wq = NULL;
3785 }
3786
3787 mana_gd_deregister_device(gd);
3788
3789 if (suspending)
3790 return;
3791
3792 gd->driver_data = NULL;
3793 gd->gdma_context = NULL;
3794 kfree(ac);
3795 dev_dbg(dev, "%s succeeded\n", __func__);
3796 }
3797
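/* mana_rdma_probe() registers the RDMA (MANA_IB) function with the GDMA
 * layer and exposes it on the auxiliary bus as "rdma". It is a no-op when
 * the PCI function does not report a MANA_IB device.
 */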
3798 int mana_rdma_probe(struct gdma_dev *gd)
3799 {
3800 int err = 0;
3801
3802 if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3803 /* RDMA device is not detected on PCI */
3804 return err;
3805 }
3806
3807 err = mana_gd_register_device(gd);
3808 if (err)
3809 return err;
3810
3811 err = add_adev(gd, "rdma");
3812 if (err)
3813 mana_gd_deregister_device(gd);
3814
3815 return err;
3816 }
3817
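/* mana_rdma_remove() undoes mana_rdma_probe(): mark RDMA teardown, flush
 * any pending service work, remove the auxiliary device and deregister the
 * RDMA function from the GDMA layer.
 */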
3818 void mana_rdma_remove(struct gdma_dev *gd)
3819 {
3820 struct gdma_context *gc = gd->gdma_context;
3821
3822 if (gd->dev_id.type != GDMA_DEVICE_MANA_IB) {
3823 /* RDMA device is not detected on PCI */
3824 return;
3825 }
3826
3827 WRITE_ONCE(gd->rdma_teardown, true);
3828
3829 if (gc->service_wq)
3830 flush_workqueue(gc->service_wq);
3831
3832 if (gd->adev)
3833 remove_adev(gd);
3834
3835 mana_gd_deregister_device(gd);
3836 }
3837
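/* mana_get_primary_netdev() returns the net_device to use for @port_index:
 * the master upper device (e.g. when the MANA port is enslaved under netvsc)
 * if one exists, otherwise the port's own net_device. A tracked reference is
 * taken via netdev_hold(); the caller must drop it with netdev_put().
 */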
3838 struct net_device *mana_get_primary_netdev(struct mana_context *ac,
3839 u32 port_index,
3840 netdevice_tracker *tracker)
3841 {
3842 struct net_device *ndev;
3843
3844 if (port_index >= ac->num_ports)
3845 return NULL;
3846
3847 rcu_read_lock();
3848
3849 /* If mana is used in netvsc, the upper netdevice should be returned. */
3850 ndev = netdev_master_upper_dev_get_rcu(ac->ports[port_index]);
3851
3852 /* If there is no upper device, use the parent Ethernet device */
3853 if (!ndev)
3854 ndev = ac->ports[port_index];
3855
3856 netdev_hold(ndev, tracker, GFP_ATOMIC);
3857 rcu_read_unlock();
3858
3859 return ndev;
3860 }
3861 EXPORT_SYMBOL_NS(mana_get_primary_netdev, "NET_MANA");
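/* A minimal sketch of a hypothetical auxiliary consumer of the export above;
 * the caller context and error handling are illustrative, only the
 * mana_get_primary_netdev()/netdev_put() pairing is prescribed:
 *
 *	netdevice_tracker tracker;
 *	struct net_device *ndev;
 *
 *	ndev = mana_get_primary_netdev(ac, 0, &tracker);
 *	if (!ndev)
 *		return -ENODEV;
 *	... use ndev while holding the tracked reference ...
 *	netdev_put(ndev, &tracker);
 */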
3862