Lines Matching +full:hw +full:- +full:gro
1 // SPDX-License-Identifier: GPL-2.0+
2 // Copyright (c) 2016-2017 Hisilicon Limited.
4 #include <linux/dma-mapping.h>
19 #include <net/gro.h>
54 static int debug = -1;
79 /* hns3_pci_tbl - PCI Device ID Table
387 napi_schedule_irqoff(&tqp_vector->napi); in hns3_irq_handle()
388 tqp_vector->event_cnt++; in hns3_irq_handle()
398 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_irq()
399 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_uninit_irq()
401 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED) in hns3_nic_uninit_irq()
405 irq_set_affinity_hint(tqp_vectors->vector_irq, NULL); in hns3_nic_uninit_irq()
408 free_irq(tqp_vectors->vector_irq, tqp_vectors); in hns3_nic_uninit_irq()
409 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED; in hns3_nic_uninit_irq()
422 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_irq()
423 tqp_vectors = &priv->tqp_vector[i]; in hns3_nic_init_irq()
425 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED) in hns3_nic_init_irq()
428 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
429 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
430 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
431 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
434 } else if (tqp_vectors->rx_group.ring) { in hns3_nic_init_irq()
435 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
436 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
437 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
439 } else if (tqp_vectors->tx_group.ring) { in hns3_nic_init_irq()
440 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN, in hns3_nic_init_irq()
441 "%s-%s-%s-%d", hns3_driver_name, in hns3_nic_init_irq()
442 pci_name(priv->ae_handle->pdev), in hns3_nic_init_irq()
449 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0'; in hns3_nic_init_irq()
451 irq_set_status_flags(tqp_vectors->vector_irq, IRQ_NOAUTOEN); in hns3_nic_init_irq()
452 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0, in hns3_nic_init_irq()
453 tqp_vectors->name, tqp_vectors); in hns3_nic_init_irq()
455 netdev_err(priv->netdev, "request irq(%d) fail\n", in hns3_nic_init_irq()
456 tqp_vectors->vector_irq); in hns3_nic_init_irq()
461 irq_set_affinity_hint(tqp_vectors->vector_irq, in hns3_nic_init_irq()
462 &tqp_vectors->affinity_mask); in hns3_nic_init_irq()
464 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED; in hns3_nic_init_irq()
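
The hns3_nic_init_irq() fragments above build one IRQ name per vector from the driver name, the PCI device name, the ring direction and the vector index before calling request_irq(). A minimal userspace sketch of that naming follows; the buffer length and the PCI name are made-up example values, only the "%s-%s-%s-%d" layout mirrors the driver.

#include <stdio.h>

#define INT_NAME_LEN 32                 /* illustrative; stands in for HNAE3_INT_NAME_LEN */

int main(void)
{
    const char *dirs[] = { "TxRx", "Rx", "Tx" };   /* both rings, rx-only, tx-only vectors */
    char name[INT_NAME_LEN];

    for (int i = 0; i < 3; i++) {
        snprintf(name, sizeof(name), "%s-%s-%s-%d",
                 "hns3", "0000:7d:00.0", dirs[i], i);   /* PCI name is a made-up example */
        name[INT_NAME_LEN - 1] = '\0';  /* mirrors the explicit termination above */
        printf("%s\n", name);
    }
    return 0;
}
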
473 writel(mask_en, tqp_vector->mask_addr); in hns3_mask_vector_irq()
478 napi_enable(&tqp_vector->napi); in hns3_irq_enable()
479 enable_irq(tqp_vector->vector_irq); in hns3_irq_enable()
484 disable_irq(tqp_vector->vector_irq); in hns3_irq_disable()
485 napi_disable(&tqp_vector->napi); in hns3_irq_disable()
486 cancel_work_sync(&tqp_vector->rx_group.dim.work); in hns3_irq_disable()
487 cancel_work_sync(&tqp_vector->tx_group.dim.work); in hns3_irq_disable()
496 * Rl defines rate of interrupts i.e. number of interrupts-per-second in hns3_set_vector_coalesce_rl()
499 if (rl_reg > 0 && !tqp_vector->tx_group.coal.adapt_enable && in hns3_set_vector_coalesce_rl()
500 !tqp_vector->rx_group.coal.adapt_enable) in hns3_set_vector_coalesce_rl()
502 * 0-59 and the unit is 4. in hns3_set_vector_coalesce_rl()
506 writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET); in hns3_set_vector_coalesce_rl()
514 if (tqp_vector->rx_group.coal.unit_1us) in hns3_set_vector_coalesce_rx_gl()
519 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET); in hns3_set_vector_coalesce_rx_gl()
527 if (tqp_vector->tx_group.coal.unit_1us) in hns3_set_vector_coalesce_tx_gl()
532 writel(new_val, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET); in hns3_set_vector_coalesce_tx_gl()
538 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_TX_QL_OFFSET); in hns3_set_vector_coalesce_tx_ql()
544 writel(ql_value, tqp_vector->mask_addr + HNS3_VECTOR_RX_QL_OFFSET); in hns3_set_vector_coalesce_rx_ql()
550 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_vector_coalesce_init()
551 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init()
552 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init()
553 struct hns3_enet_coalesce *ptx_coal = &priv->tx_coal; in hns3_vector_coalesce_init()
554 struct hns3_enet_coalesce *prx_coal = &priv->rx_coal; in hns3_vector_coalesce_init()
556 tx_coal->adapt_enable = ptx_coal->adapt_enable; in hns3_vector_coalesce_init()
557 rx_coal->adapt_enable = prx_coal->adapt_enable; in hns3_vector_coalesce_init()
559 tx_coal->int_gl = ptx_coal->int_gl; in hns3_vector_coalesce_init()
560 rx_coal->int_gl = prx_coal->int_gl; in hns3_vector_coalesce_init()
562 rx_coal->flow_level = prx_coal->flow_level; in hns3_vector_coalesce_init()
563 tx_coal->flow_level = ptx_coal->flow_level; in hns3_vector_coalesce_init()
568 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) { in hns3_vector_coalesce_init()
569 tx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
570 rx_coal->unit_1us = 1; in hns3_vector_coalesce_init()
573 if (ae_dev->dev_specs.int_ql_max) { in hns3_vector_coalesce_init()
574 tx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
575 rx_coal->ql_enable = 1; in hns3_vector_coalesce_init()
576 tx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
577 rx_coal->int_ql_max = ae_dev->dev_specs.int_ql_max; in hns3_vector_coalesce_init()
578 tx_coal->int_ql = ptx_coal->int_ql; in hns3_vector_coalesce_init()
579 rx_coal->int_ql = prx_coal->int_ql; in hns3_vector_coalesce_init()
587 struct hns3_enet_coalesce *tx_coal = &tqp_vector->tx_group.coal; in hns3_vector_coalesce_init_hw()
588 struct hns3_enet_coalesce *rx_coal = &tqp_vector->rx_group.coal; in hns3_vector_coalesce_init_hw()
589 struct hnae3_handle *h = priv->ae_handle; in hns3_vector_coalesce_init_hw()
591 hns3_set_vector_coalesce_tx_gl(tqp_vector, tx_coal->int_gl); in hns3_vector_coalesce_init_hw()
592 hns3_set_vector_coalesce_rx_gl(tqp_vector, rx_coal->int_gl); in hns3_vector_coalesce_init_hw()
593 hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting); in hns3_vector_coalesce_init_hw()
595 if (tx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
596 hns3_set_vector_coalesce_tx_ql(tqp_vector, tx_coal->int_ql); in hns3_vector_coalesce_init_hw()
598 if (rx_coal->ql_enable) in hns3_vector_coalesce_init_hw()
599 hns3_set_vector_coalesce_rx_ql(tqp_vector, rx_coal->int_ql); in hns3_vector_coalesce_init_hw()
605 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_nic_set_real_num_queue()
606 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_nic_set_real_num_queue()
607 unsigned int queue_size = kinfo->num_tqps; in hns3_nic_set_real_num_queue()
610 if (tc_info->num_tc <= 1 && !tc_info->mqprio_active) { in hns3_nic_set_real_num_queue()
613 ret = netdev_set_num_tc(netdev, tc_info->num_tc); in hns3_nic_set_real_num_queue()
620 for (i = 0; i < tc_info->num_tc; i++) in hns3_nic_set_real_num_queue()
621 netdev_set_tc_queue(netdev, i, tc_info->tqp_count[i], in hns3_nic_set_real_num_queue()
622 tc_info->tqp_offset[i]); in hns3_nic_set_real_num_queue()
646 h->ae_algo->ops->get_tqps_and_rss_info(h, &alloc_tqps, &max_rss_size); in hns3_get_max_available_channels()
647 rss_size = alloc_tqps / h->kinfo.tc_info.num_tc; in hns3_get_max_available_channels()
673 free_irq_cpu_rmap(netdev->rx_cpu_rmap); in hns3_free_rx_cpu_rmap()
674 netdev->rx_cpu_rmap = NULL; in hns3_free_rx_cpu_rmap()
685 if (!netdev->rx_cpu_rmap) { in hns3_set_rx_cpu_rmap()
686 netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(priv->vector_num); in hns3_set_rx_cpu_rmap()
687 if (!netdev->rx_cpu_rmap) in hns3_set_rx_cpu_rmap()
688 return -ENOMEM; in hns3_set_rx_cpu_rmap()
691 for (i = 0; i < priv->vector_num; i++) { in hns3_set_rx_cpu_rmap()
692 tqp_vector = &priv->tqp_vector[i]; in hns3_set_rx_cpu_rmap()
693 ret = irq_cpu_rmap_add(netdev->rx_cpu_rmap, in hns3_set_rx_cpu_rmap()
694 tqp_vector->vector_irq); in hns3_set_rx_cpu_rmap()
707 struct hnae3_handle *h = priv->ae_handle; in hns3_enable_irqs_and_tqps()
710 for (i = 0; i < priv->vector_num; i++) in hns3_enable_irqs_and_tqps()
711 hns3_irq_enable(&priv->tqp_vector[i]); in hns3_enable_irqs_and_tqps()
713 for (i = 0; i < priv->vector_num; i++) in hns3_enable_irqs_and_tqps()
714 hns3_mask_vector_irq(&priv->tqp_vector[i], 1); in hns3_enable_irqs_and_tqps()
716 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_enable_irqs_and_tqps()
717 hns3_tqp_enable(h->kinfo.tqp[i]); in hns3_enable_irqs_and_tqps()
723 struct hnae3_handle *h = priv->ae_handle; in hns3_disable_irqs_and_tqps()
726 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_disable_irqs_and_tqps()
727 hns3_tqp_disable(h->kinfo.tqp[i]); in hns3_disable_irqs_and_tqps()
729 for (i = 0; i < priv->vector_num; i++) in hns3_disable_irqs_and_tqps()
730 hns3_mask_vector_irq(&priv->tqp_vector[i], 0); in hns3_disable_irqs_and_tqps()
732 for (i = 0; i < priv->vector_num; i++) in hns3_disable_irqs_and_tqps()
733 hns3_irq_disable(&priv->tqp_vector[i]); in hns3_disable_irqs_and_tqps()
739 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_up()
746 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
751 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0; in hns3_nic_net_up()
753 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_nic_net_up()
764 for (i = 0; i < priv->vector_num; i++) { in hns3_config_xps()
765 struct hns3_enet_tqp_vector *tqp_vector = &priv->tqp_vector[i]; in hns3_config_xps()
766 struct hns3_enet_ring *ring = tqp_vector->tx_group.ring; in hns3_config_xps()
771 ret = netif_set_xps_queue(priv->netdev, in hns3_config_xps()
772 &tqp_vector->affinity_mask, in hns3_config_xps()
773 ring->tqp->tqp_index); in hns3_config_xps()
775 netdev_warn(priv->netdev, in hns3_config_xps()
778 ring = ring->next; in hns3_config_xps()
791 return -EBUSY; in hns3_nic_net_open()
793 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_net_open()
810 kinfo = &h->kinfo; in hns3_nic_net_open()
812 netdev_set_prio_tc_map(netdev, i, kinfo->tc_info.prio_tc[i]); in hns3_nic_net_open()
814 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_open()
815 h->ae_algo->ops->set_timer_task(priv->ae_handle, true); in hns3_nic_net_open()
826 struct net_device *ndev = h->kinfo.netdev; in hns3_reset_tx_queue()
831 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_reset_tx_queue()
833 priv->ring[i].queue_index); in hns3_reset_tx_queue()
846 ops = priv->ae_handle->ae_algo->ops; in hns3_nic_net_down()
847 if (ops->stop) in hns3_nic_net_down()
848 ops->stop(priv->ae_handle); in hns3_nic_net_down()
855 hns3_clear_all_ring(priv->ae_handle, false); in hns3_nic_net_down()
857 hns3_reset_tx_queue(priv->ae_handle); in hns3_nic_net_down()
865 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_net_stop()
870 if (h->ae_algo->ops->set_timer_task) in hns3_nic_net_stop()
871 h->ae_algo->ops->set_timer_task(priv->ae_handle, false); in hns3_nic_net_stop()
886 if (h->ae_algo->ops->add_uc_addr) in hns3_nic_uc_sync()
887 return h->ae_algo->ops->add_uc_addr(h, addr); in hns3_nic_uc_sync()
901 if (ether_addr_equal(addr, netdev->dev_addr)) in hns3_nic_uc_unsync()
904 if (h->ae_algo->ops->rm_uc_addr) in hns3_nic_uc_unsync()
905 return h->ae_algo->ops->rm_uc_addr(h, addr); in hns3_nic_uc_unsync()
915 if (h->ae_algo->ops->add_mc_addr) in hns3_nic_mc_sync()
916 return h->ae_algo->ops->add_mc_addr(h, addr); in hns3_nic_mc_sync()
926 if (h->ae_algo->ops->rm_mc_addr) in hns3_nic_mc_unsync()
927 return h->ae_algo->ops->rm_mc_addr(h, addr); in hns3_nic_mc_unsync()
936 if (netdev->flags & IFF_PROMISC) in hns3_get_netdev_flags()
938 else if (netdev->flags & IFF_ALLMULTI) in hns3_get_netdev_flags()
957 h->netdev_flags = new_flags; in hns3_nic_set_rx_mode()
963 const struct hnae3_ae_ops *ops = handle->ae_algo->ops; in hns3_request_update_promisc_mode()
965 if (ops->request_update_promisc_mode) in hns3_request_update_promisc_mode()
966 ops->request_update_promisc_mode(handle); in hns3_request_update_promisc_mode()
971 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_space()
977 ntc = smp_load_acquire(&tx_spare->last_to_clean); in hns3_tx_spare_space()
978 ntu = tx_spare->next_to_use; in hns3_tx_spare_space()
981 return ntc - ntu - 1; in hns3_tx_spare_space()
986 return max(ntc, tx_spare->len - ntu) - 1; in hns3_tx_spare_space()
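
hns3_tx_spare_space() above computes how much contiguous room is left in the circular TX copy/bounce buffer from last_to_clean (ntc) and next_to_use (ntu). A userspace model of that arithmetic with hypothetical index values; the acquire/release pairing on last_to_clean is not modelled here.

#include <assert.h>
#include <stdio.h>

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }

static unsigned int spare_space(unsigned int ntc, unsigned int ntu, unsigned int len)
{
    if (ntc > ntu)                  /* free region is one contiguous chunk */
        return ntc - ntu - 1;

    /* free space is split into [ntu, len) and [0, ntc); the driver only
     * hands out contiguous chunks, so report the larger of the two, and
     * keep one unit back so the buffer never looks empty when it is full.
     */
    return max_u(ntc, len - ntu) - 1;
}

int main(void)
{
    assert(spare_space(0, 0, 4096) == 4095);      /* untouched buffer            */
    assert(spare_space(100, 100, 4096) == 3995);  /* larger chunk is the tail    */
    assert(spare_space(300, 100, 4096) == 199);   /* clean pointer ahead of use  */
    printf("ok\n");
    return 0;
}
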
991 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_update()
994 tx_spare->last_to_clean == tx_spare->next_to_clean) in hns3_tx_spare_update()
1000 smp_store_release(&tx_spare->last_to_clean, in hns3_tx_spare_update()
1001 tx_spare->next_to_clean); in hns3_tx_spare_update()
1008 u32 len = skb->len <= ring->tx_copybreak ? skb->len : in hns3_can_use_tx_bounce()
1011 if (len > ring->tx_copybreak) in hns3_can_use_tx_bounce()
1026 if (skb->len <= ring->tx_copybreak || !tx_sgl || in hns3_can_use_tx_sgl()
1028 skb_shinfo(skb)->nr_frags < tx_sgl)) in hns3_can_use_tx_sgl()
1041 u32 alloc_size = ring->tqp->handle->kinfo.tx_spare_buf_size; in hns3_init_tx_spare_buffer()
1079 tx_spare->dma = dma; in hns3_init_tx_spare_buffer()
1080 tx_spare->buf = page_address(page); in hns3_init_tx_spare_buffer()
1081 tx_spare->len = PAGE_SIZE << order; in hns3_init_tx_spare_buffer()
1082 ring->tx_spare = tx_spare; in hns3_init_tx_spare_buffer()
1090 ring->tqp->handle->kinfo.tx_spare_buf_size = 0; in hns3_init_tx_spare_buffer()
1100 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_alloc()
1101 u32 ntu = tx_spare->next_to_use; in hns3_tx_spare_alloc()
1109 if (ntu + size > tx_spare->len) { in hns3_tx_spare_alloc()
1110 *cb_len += (tx_spare->len - ntu); in hns3_tx_spare_alloc()
1114 tx_spare->next_to_use = ntu + size; in hns3_tx_spare_alloc()
1115 if (tx_spare->next_to_use == tx_spare->len) in hns3_tx_spare_alloc()
1116 tx_spare->next_to_use = 0; in hns3_tx_spare_alloc()
1118 *dma = tx_spare->dma + ntu; in hns3_tx_spare_alloc()
1120 return tx_spare->buf + ntu; in hns3_tx_spare_alloc()
1125 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_rollback()
1127 if (len > tx_spare->next_to_use) { in hns3_tx_spare_rollback()
1128 len -= tx_spare->next_to_use; in hns3_tx_spare_rollback()
1129 tx_spare->next_to_use = tx_spare->len - len; in hns3_tx_spare_rollback()
1131 tx_spare->next_to_use -= len; in hns3_tx_spare_rollback()
1138 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_tx_spare_reclaim_cb()
1139 u32 ntc = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1140 u32 len = cb->length; in hns3_tx_spare_reclaim_cb()
1142 tx_spare->next_to_clean += len; in hns3_tx_spare_reclaim_cb()
1144 if (tx_spare->next_to_clean >= tx_spare->len) { in hns3_tx_spare_reclaim_cb()
1145 tx_spare->next_to_clean -= tx_spare->len; in hns3_tx_spare_reclaim_cb()
1147 if (tx_spare->next_to_clean) { in hns3_tx_spare_reclaim_cb()
1149 len = tx_spare->next_to_clean; in hns3_tx_spare_reclaim_cb()
1156 * tx_spare->next_to_clean is moved forward. in hns3_tx_spare_reclaim_cb()

1158 if (cb->type & (DESC_TYPE_BOUNCE_HEAD | DESC_TYPE_BOUNCE_ALL)) { in hns3_tx_spare_reclaim_cb()
1159 dma_addr_t dma = tx_spare->dma + ntc; in hns3_tx_spare_reclaim_cb()
1164 struct sg_table *sgt = tx_spare->buf + ntc; in hns3_tx_spare_reclaim_cb()
1166 dma_unmap_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_tx_spare_reclaim_cb()
1193 if (l3.v4->version == 4) in hns3_set_tso()
1194 l3.v4->check = 0; in hns3_set_tso()
1197 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE | in hns3_set_tso()
1208 if (l3.v4->version == 4) in hns3_set_tso()
1209 l3.v4->check = 0; in hns3_set_tso()
1213 l4_offset = l4.hdr - skb->data; in hns3_set_tso()
1216 l4_paylen = skb->len - l4_offset; in hns3_set_tso()
1218 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in hns3_set_tso()
1220 csum_replace_by_diff(&l4.udp->check, in hns3_set_tso()
1223 hdr_len = (l4.tcp->doff << 2) + l4_offset; in hns3_set_tso()
1224 csum_replace_by_diff(&l4.tcp->check, in hns3_set_tso()
1228 *send_bytes = (skb_shinfo(skb)->gso_segs - 1) * hdr_len + skb->len; in hns3_set_tso()
1231 *paylen_fdop_ol4cs = skb->len - hdr_len; in hns3_set_tso()
1235 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM) in hns3_set_tso()
1239 *mss = skb_shinfo(skb)->gso_size; in hns3_set_tso()
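
hns3_set_tso() above derives the header length from the L4 offset and the TCP data offset, reports skb->len minus that header as the payload to segment, and accounts (gso_segs - 1) extra copies of the header toward the bytes sent. A runnable sketch of the arithmetic with made-up packet sizes (the driver reads gso_segs from the skb instead of recomputing it):

#include <stdio.h>

int main(void)
{
    unsigned int skb_len    = 64000;  /* total skb->len (example)             */
    unsigned int l4_offset  = 66;     /* l4.hdr - skb->data (example)         */
    unsigned int tcp_doff   = 5;      /* TCP data offset, in 32-bit words     */
    unsigned int gso_size   = 1448;   /* MSS                                  */

    unsigned int hdr_len    = (tcp_doff << 2) + l4_offset;
    unsigned int paylen     = skb_len - hdr_len;               /* what HW segments    */
    unsigned int gso_segs   = (paylen + gso_size - 1) / gso_size;
    unsigned int send_bytes = (gso_segs - 1) * hdr_len + skb_len;

    printf("hdr_len=%u paylen=%u segs=%u send_bytes=%u\n",
           hdr_len, paylen, gso_segs, send_bytes);
    return 0;
}
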
1259 if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_get_l4_protocol()
1261 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1263 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1265 } else if (skb->protocol == htons(ETH_P_IP)) { in hns3_get_l4_protocol()
1266 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1268 return -EINVAL; in hns3_get_l4_protocol()
1274 if (!skb->encapsulation) { in hns3_get_l4_protocol()
1283 if (l3.v6->version == 6) { in hns3_get_l4_protocol()
1285 l4_proto_tmp = l3.v6->nexthdr; in hns3_get_l4_protocol()
1287 ipv6_skip_exthdr(skb, exthdr - skb->data, in hns3_get_l4_protocol()
1289 } else if (l3.v4->version == 4) { in hns3_get_l4_protocol()
1290 l4_proto_tmp = l3.v4->protocol; in hns3_get_l4_protocol()
1298 /* when skb->encapsulation is 0, skb->ip_summed is CHECKSUM_PARTIAL
1306 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_tunnel_csum_bug()
1307 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_tunnel_csum_bug()
1313 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_tunnel_csum_bug()
1318 if (!(!skb->encapsulation && in hns3_tunnel_csum_bug()
1319 (l4.udp->dest == htons(IANA_VXLAN_UDP_PORT) || in hns3_tunnel_csum_bug()
1320 l4.udp->dest == htons(GENEVE_UDP_PORT) || in hns3_tunnel_csum_bug()
1321 l4.udp->dest == htons(IANA_VXLAN_GPE_UDP_PORT)))) in hns3_tunnel_csum_bug()
1339 l2_len = l3.hdr - skb->data; in hns3_set_outer_l2l3l4()
1343 l3_len = l4.hdr - l3.hdr; in hns3_set_outer_l2l3l4()
1348 l4_len = il2_hdr - l4.hdr; in hns3_set_outer_l2l3l4()
1352 if (skb->protocol == htons(ETH_P_IP)) { in hns3_set_outer_l2l3l4()
1361 } else if (skb->protocol == htons(ETH_P_IPV6)) { in hns3_set_outer_l2l3l4()
1377 if (l3.v4->version == 4) { in hns3_set_l3_type()
1386 } else if (l3.v6->version == 6) { in hns3_set_l3_type()
1402 l4.tcp->doff); in hns3_set_l4_csum_length()
1429 return -EDOM; in hns3_set_l4_csum_length()
1444 unsigned char *l2_hdr = skb->data; in hns3_set_l2l3l4()
1454 if (skb->encapsulation) { in hns3_set_l2l3l4()
1461 return -EDOM; in hns3_set_l2l3l4()
1481 l2_len = l3.hdr - l2_hdr; in hns3_set_l2l3l4()
1485 l3_len = l4.hdr - l3.hdr; in hns3_set_l2l3l4()
1494 struct hnae3_handle *handle = tx_ring->tqp->handle; in hns3_handle_vtags()
1499 if (!(skb->protocol == htons(ETH_P_8021Q) || in hns3_handle_vtags()
1503 /* For HW limitation on HNAE3_DEVICE_VERSION_V2, if port based insert in hns3_handle_vtags()
1507 ae_dev = pci_get_drvdata(handle->pdev); in hns3_handle_vtags()
1509 ae_dev->dev_version <= HNAE3_DEVICE_VERSION_V2 && in hns3_handle_vtags()
1510 handle->port_base_vlan_state == in hns3_handle_vtags()
1512 return -EINVAL; in hns3_handle_vtags()
1514 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1515 !(handle->kinfo.netdev->features & NETIF_F_HW_VLAN_CTAG_TX)) { in hns3_handle_vtags()
1516 /* When HW VLAN acceleration is turned off, and the stack in hns3_handle_vtags()
1520 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1525 /* Based on hw strategy, use out_vtag in two layer tag case, in hns3_handle_vtags()
1528 if (skb->protocol == htons(ETH_P_8021Q) && in hns3_handle_vtags()
1529 handle->port_base_vlan_state == in hns3_handle_vtags()
1535 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1544 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) in hns3_handle_vtags()
1547 skb->protocol = vlan_get_protocol(skb); in hns3_handle_vtags()
1554 struct hns3_nic_priv *priv = netdev_priv(skb->dev); in hns3_check_hw_tx_csum()
1557 * HW checksum of the non-IP packets and GSO packets is handled at in hns3_check_hw_tx_csum()
1561 !test_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state)) in hns3_check_hw_tx_csum()
1578 pa->paylen_ol4cs = skb->len; in hns3_init_desc_data()
1579 pa->ol_type_vlan_len_msec = 0; in hns3_init_desc_data()
1580 pa->type_cs_vlan_tso = 0; in hns3_init_desc_data()
1581 pa->mss_hw_csum = 0; in hns3_init_desc_data()
1582 pa->inner_vtag = 0; in hns3_init_desc_data()
1583 pa->out_vtag = 0; in hns3_init_desc_data()
1597 param->inner_vtag = skb_vlan_tag_get(skb); in hns3_handle_vlan_info()
1598 param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_handle_vlan_info()
1600 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1); in hns3_handle_vlan_info()
1602 param->out_vtag = skb_vlan_tag_get(skb); in hns3_handle_vlan_info()
1603 param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) & in hns3_handle_vlan_info()
1605 hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B, in hns3_handle_vlan_info()
1621 hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S, in hns3_handle_csum_partial()
1623 hns3_set_field(param->ol_type_vlan_len_msec, in hns3_handle_csum_partial()
1625 skb->csum_offset >> 1); in hns3_handle_csum_partial()
1626 param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B); in hns3_handle_csum_partial()
1639 &param->type_cs_vlan_tso, in hns3_handle_csum_partial()
1640 &param->ol_type_vlan_len_msec); in hns3_handle_csum_partial()
1646 ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum, in hns3_handle_csum_partial()
1647 &param->type_cs_vlan_tso, &desc_cb->send_bytes); in hns3_handle_csum_partial()
1667 desc_cb->send_bytes = skb->len; in hns3_fill_skb_desc()
1669 if (skb->ip_summed == CHECKSUM_PARTIAL) { in hns3_fill_skb_desc()
1676 desc->tx.ol_type_vlan_len_msec = in hns3_fill_skb_desc()
1678 desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso); in hns3_fill_skb_desc()
1679 desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs); in hns3_fill_skb_desc()
1680 desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum); in hns3_fill_skb_desc()
1681 desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag); in hns3_fill_skb_desc()
1682 desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag); in hns3_fill_skb_desc()
1692 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1697 desc->addr = cpu_to_le64(dma); in hns3_fill_desc()
1698 desc->tx.send_size = cpu_to_le16(size); in hns3_fill_desc()
1699 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1702 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1714 desc->addr = cpu_to_le64(dma + HNS3_MAX_BD_SIZE * k); in hns3_fill_desc()
1715 desc->tx.send_size = cpu_to_le16((k == frag_buf_num - 1) ? in hns3_fill_desc()
1717 desc->tx.bdtp_fe_sc_vld_ra_ri = in hns3_fill_desc()
1720 trace_hns3_tx_desc(ring, ring->next_to_use); in hns3_fill_desc()
1724 desc = &ring->desc[ring->next_to_use]; in hns3_fill_desc()
1733 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_map_and_fill_desc()
1745 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE); in hns3_map_and_fill_desc()
1763 return -ENOMEM; in hns3_map_and_fill_desc()
1766 desc_cb->priv = priv; in hns3_map_and_fill_desc()
1767 desc_cb->length = size; in hns3_map_and_fill_desc()
1768 desc_cb->dma = dma; in hns3_map_and_fill_desc()
1769 desc_cb->type = type; in hns3_map_and_fill_desc()
1783 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1795 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_skb_bd_num()
1796 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_skb_bd_num()
1803 size -= HNS3_MAX_BD_SIZE; in hns3_skb_bd_num()
1826 if (likely(skb->len <= HNS3_MAX_BD_SIZE && !recursion_level && in hns3_tx_bd_num()
1828 skb_shinfo(skb)->nr_frags < max_non_tso_bd_num)) in hns3_tx_bd_num()
1829 return skb_shinfo(skb)->nr_frags + 1U; in hns3_tx_bd_num()
1850 if (!skb->encapsulation) in hns3_gso_hdr_len()
1856 /* HW need every continuous max_non_tso_bd_num buffer data to be larger
1858 * max_non_tso_bd_num - 1 frags to be larger than gso header len + mss,
1859 * and the remaining continuous max_non_tso_bd_num - 1 frags to be larger
1860 * than MSS except the last max_non_tso_bd_num - 1 frags.
1868 for (i = 0; i < max_non_tso_bd_num - 1U; i++) in hns3_skb_need_linearized()
1874 if (tot_len + bd_size[max_non_tso_bd_num - 1U] < in hns3_skb_need_linearized()
1875 skb_shinfo(skb)->gso_size + hns3_gso_hdr_len(skb)) in hns3_skb_need_linearized()
1878 /* ensure every continuous max_non_tso_bd_num - 1 buffer is greater in hns3_skb_need_linearized()
1881 for (i = 0; i < bd_num - max_non_tso_bd_num; i++) { in hns3_skb_need_linearized()
1882 tot_len -= bd_size[i]; in hns3_skb_need_linearized()
1883 tot_len += bd_size[i + max_non_tso_bd_num - 1U]; in hns3_skb_need_linearized()
1885 if (tot_len < skb_shinfo(skb)->gso_size) in hns3_skb_need_linearized()
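
hns3_skb_need_linearized() above slides a window of max_non_tso_bd_num buffers across the fragment sizes and forces a linearization whenever one window cannot cover an MSS (plus the GSO header for the first window). A userspace model of that check, assuming bd_size[] already holds the head length followed by each fragment size:

#include <stdbool.h>
#include <stdio.h>

static bool need_linearize(const unsigned int *bd_size, unsigned int bd_num,
                           unsigned int max_bd, unsigned int gso_size,
                           unsigned int hdr_len)
{
    unsigned int tot_len = 0;
    unsigned int i;

    for (i = 0; i < max_bd - 1U; i++)
        tot_len += bd_size[i];

    /* the first max_bd buffers must exceed GSO header length + MSS */
    if (tot_len + bd_size[max_bd - 1U] < gso_size + hdr_len)
        return true;

    /* slide the window across the remaining buffers: every window must
     * still carry at least one MSS of data.
     */
    for (i = 0; i < bd_num - max_bd; i++) {
        tot_len -= bd_size[i];
        tot_len += bd_size[i + max_bd - 1U];

        if (tot_len < gso_size)
            return true;
    }

    return false;
}

int main(void)
{
    unsigned int sizes[] = { 200, 200, 200, 200, 200, 200, 200, 200, 4000, 4000 };

    printf("%d\n", need_linearize(sizes, 10, 8, 1448, 66 + 20));
    return 0;
}
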
1897 size[i] = skb_frag_size(&shinfo->frags[i]); in hns3_shinfo_pack()
1909 return -ENOMEM; in hns3_skb_linearize()
1912 /* The skb->len has exceeded the hw limitation, linearization in hns3_skb_linearize()
1915 if (skb->len > HNS3_MAX_TSO_SIZE || in hns3_skb_linearize()
1916 (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) { in hns3_skb_linearize()
1918 return -ENOMEM; in hns3_skb_linearize()
1923 return -ENOMEM; in hns3_skb_linearize()
1934 u8 max_non_tso_bd_num = priv->max_non_tso_bd_num; in hns3_nic_maybe_stop_tx()
1948 return -ENOMEM; in hns3_nic_maybe_stop_tx()
1950 bd_num = hns3_tx_bd_count(skb->len); in hns3_nic_maybe_stop_tx()
1959 netif_stop_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1967 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_nic_maybe_stop_tx()
1968 netif_start_subqueue(netdev, ring->queue_index); in hns3_nic_maybe_stop_tx()
1974 return -EBUSY; in hns3_nic_maybe_stop_tx()
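
hns3_nic_maybe_stop_tx() above uses the usual stop/re-check/restart pattern: stop the subqueue when the ring looks full, then test again in case the completion path freed descriptors in the meantime (drivers typically place a memory barrier between the stop and the re-check; that detail is not modelled). A simplified single-threaded model of both sides; the wake threshold here is arbitrary:

#include <stdbool.h>
#include <stdio.h>

struct txq_model {
    unsigned int unused;    /* free descriptors */
    bool stopped;
    bool down;
};

/* xmit side: mirrors the stop / re-check / -EBUSY structure above */
static bool maybe_stop_tx(struct txq_model *q, unsigned int need)
{
    if (q->unused >= need)
        return true;                    /* enough room, keep transmitting  */

    q->stopped = true;                  /* netif_stop_subqueue()           */

    /* re-check: a completion may have freed descriptors in between */
    if (q->unused >= need && !q->down) {
        q->stopped = false;             /* netif_start_subqueue()          */
        return true;
    }

    return false;                       /* -EBUSY: tell the stack to retry */
}

/* completion side: frees descriptors and wakes the queue if it was stopped */
static void clean_tx(struct txq_model *q, unsigned int freed, unsigned int need)
{
    q->unused += freed;

    if (q->stopped && q->unused >= need && !q->down)
        q->stopped = false;             /* netif_tx_wake_queue()           */
}

int main(void)
{
    struct txq_model q = { .unused = 2, .stopped = false, .down = false };

    printf("%s\n", maybe_stop_tx(&q, 4) ? "queued" : "busy");
    clean_tx(&q, 8, 4);
    printf("stopped after cleaning: %d\n", q.stopped);
    return 0;
}
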
1982 for (i = 0; i < ring->desc_num; i++) { in hns3_clear_desc()
1983 struct hns3_desc *desc = &ring->desc[ring->next_to_use]; in hns3_clear_desc()
1989 if (ring->next_to_use == next_to_use_orig) in hns3_clear_desc()
1995 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_clear_desc()
1997 if (!desc_cb->dma) in hns3_clear_desc()
2001 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_clear_desc()
2002 dma_unmap_single(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
2004 else if (desc_cb->type & in hns3_clear_desc()
2006 hns3_tx_spare_rollback(ring, desc_cb->length); in hns3_clear_desc()
2007 else if (desc_cb->length) in hns3_clear_desc()
2008 dma_unmap_page(dev, desc_cb->dma, desc_cb->length, in hns3_clear_desc()
2011 desc_cb->length = 0; in hns3_clear_desc()
2012 desc_cb->dma = 0; in hns3_clear_desc()
2013 desc_cb->type = DESC_TYPE_UNKNOWN; in hns3_clear_desc()
2029 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in hns3_fill_skb_to_desc()
2030 skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in hns3_fill_skb_to_desc()
2064 int idx = (ring->next_to_use - num + ring->desc_num) % in hns3_tx_push_bd()
2065 ring->desc_num; in hns3_tx_push_bd()
2067 u64_stats_update_begin(&ring->syncp); in hns3_tx_push_bd()
2068 ring->stats.tx_push++; in hns3_tx_push_bd()
2069 u64_stats_update_end(&ring->syncp); in hns3_tx_push_bd()
2070 memcpy(&desc[offset], &ring->desc[idx], in hns3_tx_push_bd()
2073 } while (--num); in hns3_tx_push_bd()
2075 __iowrite64_copy(ring->tqp->mem_base, desc, in hns3_tx_push_bd()
2084 __le64 bd_num = cpu_to_le64((u64)ring->pending_buf); in hns3_tx_mem_doorbell()
2091 __iowrite64_copy(ring->tqp->mem_base + HNS3_MEM_DOORBELL_OFFSET, in hns3_tx_mem_doorbell()
2093 u64_stats_update_begin(&ring->syncp); in hns3_tx_mem_doorbell()
2094 ring->stats.tx_mem_doorbell += ring->pending_buf; in hns3_tx_mem_doorbell()
2095 u64_stats_update_end(&ring->syncp); in hns3_tx_mem_doorbell()
2107 if (test_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state) && num && in hns3_tx_doorbell()
2108 !ring->pending_buf && num <= HNS3_MAX_PUSH_BD_NUM && doorbell) { in hns3_tx_doorbell()
2113 smp_store_release(&ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2118 ring->pending_buf += num; in hns3_tx_doorbell()
2128 smp_store_release(&ring->last_to_use, ring->next_to_use); in hns3_tx_doorbell()
2130 if (ring->tqp->mem_base) in hns3_tx_doorbell()
2133 writel(ring->pending_buf, in hns3_tx_doorbell()
2134 ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG); in hns3_tx_doorbell()
2136 ring->pending_buf = 0; in hns3_tx_doorbell()
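
hns3_tx_doorbell() above batches the tail-register write: descriptors are counted in pending_buf, and the MMIO write (or the write-combined push via mem_base) only happens when the caller actually requests a doorbell, typically at the end of an xmit burst. A userspace model of the batching:

#include <stdio.h>

struct tx_model {
    unsigned int pending_buf;   /* descriptors queued but not doorbelled */
    unsigned int tail_reg;      /* last value written to the HW tail     */
    unsigned int mmio_writes;
};

static void tx_doorbell(struct tx_model *r, unsigned int num, int doorbell)
{
    r->pending_buf += num;

    if (!doorbell || !r->pending_buf)
        return;                         /* hold the doorbell, more is coming */

    r->tail_reg = r->pending_buf;       /* writel(pending_buf, ...TAIL_REG)  */
    r->mmio_writes++;
    r->pending_buf = 0;
}

int main(void)
{
    struct tx_model r = { 0 };

    tx_doorbell(&r, 3, 0);              /* xmit_more: defer the write        */
    tx_doorbell(&r, 2, 0);
    tx_doorbell(&r, 4, 1);              /* last packet of the burst: flush   */
    printf("mmio writes: %u, last tail: %u\n", r.mmio_writes, r.tail_reg);
    return 0;
}
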
2144 if (!(h->ae_algo->ops->set_tx_hwts_info && in hns3_tsyn()
2145 h->ae_algo->ops->set_tx_hwts_info(h, skb))) in hns3_tsyn()
2148 desc->tx.bdtp_fe_sc_vld_ra_ri |= cpu_to_le16(BIT(HNS3_TXD_TSYN_B)); in hns3_tsyn()
2154 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_bounce()
2163 if (skb->len <= ring->tx_copybreak) { in hns3_handle_tx_bounce()
2164 size = skb->len; in hns3_handle_tx_bounce()
2180 desc_cb->priv = skb; in hns3_handle_tx_bounce()
2181 desc_cb->length = cb_len; in hns3_handle_tx_bounce()
2182 desc_cb->dma = dma; in hns3_handle_tx_bounce()
2183 desc_cb->type = type; in hns3_handle_tx_bounce()
2207 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_handle_tx_sgl()
2208 u32 nfrag = skb_shinfo(skb)->nr_frags + 1; in hns3_handle_tx_sgl()
2225 sgt->sgl = (struct scatterlist *)(sgt + 1); in hns3_handle_tx_sgl()
2226 sg_init_table(sgt->sgl, nfrag); in hns3_handle_tx_sgl()
2227 nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); in hns3_handle_tx_sgl()
2231 return -ENOMEM; in hns3_handle_tx_sgl()
2234 sgt->orig_nents = nents; in hns3_handle_tx_sgl()
2235 sgt->nents = dma_map_sg(ring_to_dev(ring), sgt->sgl, sgt->orig_nents, in hns3_handle_tx_sgl()
2237 if (unlikely(!sgt->nents)) { in hns3_handle_tx_sgl()
2240 return -ENOMEM; in hns3_handle_tx_sgl()
2243 desc_cb->priv = skb; in hns3_handle_tx_sgl()
2244 desc_cb->length = cb_len; in hns3_handle_tx_sgl()
2245 desc_cb->dma = dma; in hns3_handle_tx_sgl()
2246 desc_cb->type = DESC_TYPE_SGL_SKB; in hns3_handle_tx_sgl()
2248 for (i = 0; i < sgt->nents; i++) in hns3_handle_tx_sgl()
2249 bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i), in hns3_handle_tx_sgl()
2250 sg_dma_len(sgt->sgl + i)); in hns3_handle_tx_sgl()
2261 if (!ring->tx_spare) in hns3_handle_desc_filling()
2283 ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use], in hns3_handle_skb_desc()
2288 /* 'ret < 0' means filling error, 'ret == 0' means skb->len is in hns3_handle_skb_desc()
2290 * need to be notified to the hw. in hns3_handle_skb_desc()
2304 struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping]; in hns3_nic_net_xmit()
2305 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_net_xmit()
2320 prefetch(skb->data); in hns3_nic_net_xmit()
2324 if (ret == -EBUSY) { in hns3_nic_net_xmit()
2333 ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use); in hns3_nic_net_xmit()
2337 pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) : in hns3_nic_net_xmit()
2338 (ring->desc_num - 1); in hns3_nic_net_xmit()
2340 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) in hns3_nic_net_xmit()
2341 hns3_tsyn(netdev, skb, &ring->desc[pre_ntu]); in hns3_nic_net_xmit()
2343 ring->desc[pre_ntu].tx.bdtp_fe_sc_vld_ra_ri |= in hns3_nic_net_xmit()
2350 dev_queue = netdev_get_tx_queue(netdev, ring->queue_index); in hns3_nic_net_xmit()
2351 doorbell = __netdev_tx_sent_queue(dev_queue, desc_cb->send_bytes, in hns3_nic_net_xmit()
2371 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data)) in hns3_nic_net_set_mac_address()
2372 return -EADDRNOTAVAIL; in hns3_nic_net_set_mac_address()
2374 if (ether_addr_equal(netdev->dev_addr, mac_addr->sa_data)) { in hns3_nic_net_set_mac_address()
2375 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2384 if (!hns3_is_phys_func(h->pdev) && in hns3_nic_net_set_mac_address()
2385 !is_zero_ether_addr(netdev->perm_addr)) { in hns3_nic_net_set_mac_address()
2386 hnae3_format_mac_addr(format_mac_addr_perm, netdev->perm_addr); in hns3_nic_net_set_mac_address()
2387 hnae3_format_mac_addr(format_mac_addr_sa, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2390 return -EPERM; in hns3_nic_net_set_mac_address()
2393 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false); in hns3_nic_net_set_mac_address()
2399 eth_hw_addr_set(netdev, mac_addr->sa_data); in hns3_nic_net_set_mac_address()
2410 return -EINVAL; in hns3_nic_do_ioctl()
2412 if (!h->ae_algo->ops->do_ioctl) in hns3_nic_do_ioctl()
2413 return -EOPNOTSUPP; in hns3_nic_do_ioctl()
2415 return h->ae_algo->ops->do_ioctl(h, ifr, cmd); in hns3_nic_do_ioctl()
2421 netdev_features_t changed = netdev->features ^ features; in hns3_nic_set_features()
2423 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_set_features()
2427 if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) { in hns3_nic_set_features()
2429 ret = h->ae_algo->ops->set_gro_en(h, enable); in hns3_nic_set_features()
2435 h->ae_algo->ops->enable_hw_strip_rxvtag) { in hns3_nic_set_features()
2437 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, enable); in hns3_nic_set_features()
2442 if ((changed & NETIF_F_NTUPLE) && h->ae_algo->ops->enable_fd) { in hns3_nic_set_features()
2444 h->ae_algo->ops->enable_fd(h, enable); in hns3_nic_set_features()
2447 if ((netdev->features & NETIF_F_HW_TC) > (features & NETIF_F_HW_TC) && in hns3_nic_set_features()
2448 h->ae_algo->ops->cls_flower_active(h)) { in hns3_nic_set_features()
2450 "there are offloaded TC filters active, cannot disable HW TC offload"); in hns3_nic_set_features()
2451 return -EINVAL; in hns3_nic_set_features()
2455 h->ae_algo->ops->enable_vlan_filter) { in hns3_nic_set_features()
2457 ret = h->ae_algo->ops->enable_vlan_filter(h, enable); in hns3_nic_set_features()
2474 if (skb->ip_summed != CHECKSUM_PARTIAL) in hns3_features_check()
2477 if (skb->encapsulation) in hns3_features_check()
2502 start = u64_stats_fetch_begin(&ring->syncp); in hns3_fetch_stats()
2504 stats->tx_bytes += ring->stats.tx_bytes; in hns3_fetch_stats()
2505 stats->tx_packets += ring->stats.tx_pkts; in hns3_fetch_stats()
2506 stats->tx_dropped += ring->stats.sw_err_cnt; in hns3_fetch_stats()
2507 stats->tx_dropped += ring->stats.tx_vlan_err; in hns3_fetch_stats()
2508 stats->tx_dropped += ring->stats.tx_l4_proto_err; in hns3_fetch_stats()
2509 stats->tx_dropped += ring->stats.tx_l2l3l4_err; in hns3_fetch_stats()
2510 stats->tx_dropped += ring->stats.tx_tso_err; in hns3_fetch_stats()
2511 stats->tx_dropped += ring->stats.over_max_recursion; in hns3_fetch_stats()
2512 stats->tx_dropped += ring->stats.hw_limitation; in hns3_fetch_stats()
2513 stats->tx_dropped += ring->stats.copy_bits_err; in hns3_fetch_stats()
2514 stats->tx_dropped += ring->stats.skb2sgl_err; in hns3_fetch_stats()
2515 stats->tx_dropped += ring->stats.map_sg_err; in hns3_fetch_stats()
2516 stats->tx_errors += ring->stats.sw_err_cnt; in hns3_fetch_stats()
2517 stats->tx_errors += ring->stats.tx_vlan_err; in hns3_fetch_stats()
2518 stats->tx_errors += ring->stats.tx_l4_proto_err; in hns3_fetch_stats()
2519 stats->tx_errors += ring->stats.tx_l2l3l4_err; in hns3_fetch_stats()
2520 stats->tx_errors += ring->stats.tx_tso_err; in hns3_fetch_stats()
2521 stats->tx_errors += ring->stats.over_max_recursion; in hns3_fetch_stats()
2522 stats->tx_errors += ring->stats.hw_limitation; in hns3_fetch_stats()
2523 stats->tx_errors += ring->stats.copy_bits_err; in hns3_fetch_stats()
2524 stats->tx_errors += ring->stats.skb2sgl_err; in hns3_fetch_stats()
2525 stats->tx_errors += ring->stats.map_sg_err; in hns3_fetch_stats()
2527 stats->rx_bytes += ring->stats.rx_bytes; in hns3_fetch_stats()
2528 stats->rx_packets += ring->stats.rx_pkts; in hns3_fetch_stats()
2529 stats->rx_dropped += ring->stats.l2_err; in hns3_fetch_stats()
2530 stats->rx_errors += ring->stats.l2_err; in hns3_fetch_stats()
2531 stats->rx_errors += ring->stats.l3l4_csum_err; in hns3_fetch_stats()
2532 stats->rx_crc_errors += ring->stats.l2_err; in hns3_fetch_stats()
2533 stats->multicast += ring->stats.rx_multicast; in hns3_fetch_stats()
2534 stats->rx_length_errors += ring->stats.err_pkt_len; in hns3_fetch_stats()
2536 } while (u64_stats_fetch_retry(&ring->syncp, start)); in hns3_fetch_stats()
2543 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_nic_get_stats64()
2544 struct hnae3_handle *handle = priv->ae_handle; in hns3_nic_get_stats64()
2549 if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_nic_get_stats64()
2552 handle->ae_algo->ops->update_stats(handle); in hns3_nic_get_stats64()
2557 ring = &priv->ring[idx]; in hns3_nic_get_stats64()
2561 ring = &priv->ring[idx + queue_num]; in hns3_nic_get_stats64()
2565 stats->tx_bytes = ring_total_stats.tx_bytes; in hns3_nic_get_stats64()
2566 stats->tx_packets = ring_total_stats.tx_packets; in hns3_nic_get_stats64()
2567 stats->rx_bytes = ring_total_stats.rx_bytes; in hns3_nic_get_stats64()
2568 stats->rx_packets = ring_total_stats.rx_packets; in hns3_nic_get_stats64()
2570 stats->rx_errors = ring_total_stats.rx_errors; in hns3_nic_get_stats64()
2571 stats->multicast = ring_total_stats.multicast; in hns3_nic_get_stats64()
2572 stats->rx_length_errors = ring_total_stats.rx_length_errors; in hns3_nic_get_stats64()
2573 stats->rx_crc_errors = ring_total_stats.rx_crc_errors; in hns3_nic_get_stats64()
2574 stats->rx_missed_errors = netdev->stats.rx_missed_errors; in hns3_nic_get_stats64()
2576 stats->tx_errors = ring_total_stats.tx_errors; in hns3_nic_get_stats64()
2577 stats->rx_dropped = ring_total_stats.rx_dropped; in hns3_nic_get_stats64()
2578 stats->tx_dropped = ring_total_stats.tx_dropped; in hns3_nic_get_stats64()
2579 stats->collisions = netdev->stats.collisions; in hns3_nic_get_stats64()
2580 stats->rx_over_errors = netdev->stats.rx_over_errors; in hns3_nic_get_stats64()
2581 stats->rx_frame_errors = netdev->stats.rx_frame_errors; in hns3_nic_get_stats64()
2582 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors; in hns3_nic_get_stats64()
2583 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors; in hns3_nic_get_stats64()
2584 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors; in hns3_nic_get_stats64()
2585 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors; in hns3_nic_get_stats64()
2586 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors; in hns3_nic_get_stats64()
2587 stats->tx_window_errors = netdev->stats.tx_window_errors; in hns3_nic_get_stats64()
2588 stats->rx_compressed = netdev->stats.rx_compressed; in hns3_nic_get_stats64()
2589 stats->tx_compressed = netdev->stats.tx_compressed; in hns3_nic_get_stats64()
2596 u8 tc = mqprio_qopt->qopt.num_tc; in hns3_setup_tc()
2597 u16 mode = mqprio_qopt->mode; in hns3_setup_tc()
2598 u8 hw = mqprio_qopt->qopt.hw; in hns3_setup_tc()
2601 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS && in hns3_setup_tc()
2602 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0))) in hns3_setup_tc()
2603 return -EOPNOTSUPP; in hns3_setup_tc()
2606 return -EINVAL; in hns3_setup_tc()
2609 return -EINVAL; in hns3_setup_tc()
2612 kinfo = &h->kinfo; in hns3_setup_tc()
2616 return (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ? in hns3_setup_tc()
2617 kinfo->dcb_ops->setup_tc(h, mqprio_qopt) : -EOPNOTSUPP; in hns3_setup_tc()
2623 int tc = tc_classid_to_hwtc(priv->netdev, flow->classid); in hns3_setup_tc_cls_flower()
2624 struct hnae3_handle *h = hns3_get_handle(priv->netdev); in hns3_setup_tc_cls_flower()
2626 switch (flow->command) { in hns3_setup_tc_cls_flower()
2628 if (h->ae_algo->ops->add_cls_flower) in hns3_setup_tc_cls_flower()
2629 return h->ae_algo->ops->add_cls_flower(h, flow, tc); in hns3_setup_tc_cls_flower()
2632 if (h->ae_algo->ops->del_cls_flower) in hns3_setup_tc_cls_flower()
2633 return h->ae_algo->ops->del_cls_flower(h, flow); in hns3_setup_tc_cls_flower()
2639 return -EOPNOTSUPP; in hns3_setup_tc_cls_flower()
2647 if (!tc_cls_can_offload_and_chain0(priv->netdev, type_data)) in hns3_setup_tc_block_cb()
2648 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2654 return -EOPNOTSUPP; in hns3_setup_tc_block_cb()
2677 return -EOPNOTSUPP; in hns3_nic_setup_tc()
2687 int ret = -EIO; in hns3_vlan_rx_add_vid()
2689 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_add_vid()
2690 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false); in hns3_vlan_rx_add_vid()
2699 int ret = -EIO; in hns3_vlan_rx_kill_vid()
2701 if (h->ae_algo->ops->set_vlan_filter) in hns3_vlan_rx_kill_vid()
2702 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true); in hns3_vlan_rx_kill_vid()
2711 int ret = -EIO; in hns3_ndo_set_vf_vlan()
2717 if (h->ae_algo->ops->set_vf_vlan_filter) in hns3_ndo_set_vf_vlan()
2718 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan, in hns3_ndo_set_vf_vlan()
2729 return -EBUSY; in hns3_set_vf_spoofchk()
2731 if (!handle->ae_algo->ops->set_vf_spoofchk) in hns3_set_vf_spoofchk()
2732 return -EOPNOTSUPP; in hns3_set_vf_spoofchk()
2734 return handle->ae_algo->ops->set_vf_spoofchk(handle, vf, enable); in hns3_set_vf_spoofchk()
2741 if (!handle->ae_algo->ops->set_vf_trust) in hns3_set_vf_trust()
2742 return -EOPNOTSUPP; in hns3_set_vf_trust()
2744 return handle->ae_algo->ops->set_vf_trust(handle, vf, enable); in hns3_set_vf_trust()
2753 return -EBUSY; in hns3_nic_change_mtu()
2755 if (!h->ae_algo->ops->set_mtu) in hns3_nic_change_mtu()
2756 return -EOPNOTSUPP; in hns3_nic_change_mtu()
2759 "change mtu from %u to %d\n", netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2761 ret = h->ae_algo->ops->set_mtu(h, new_mtu); in hns3_nic_change_mtu()
2766 WRITE_ONCE(netdev->mtu, new_mtu); in hns3_nic_change_mtu()
2776 for (i = 0; i < ndev->num_tx_queues; i++) { in hns3_get_timeout_queue()
2781 trans_start = READ_ONCE(q->trans_start); in hns3_get_timeout_queue()
2784 (trans_start + ndev->watchdog_timeo))) { in hns3_get_timeout_queue()
2786 struct dql *dql = &q->dql; in hns3_get_timeout_queue()
2789 dql->last_obj_cnt, dql->num_queued, in hns3_get_timeout_queue()
2790 dql->adj_limit, dql->num_completed); in hns3_get_timeout_queue()
2793 q->state, in hns3_get_timeout_queue()
2794 jiffies_to_msecs(jiffies - trans_start)); in hns3_get_timeout_queue()
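
The timeout scan above compares jiffies against trans_start + watchdog_timeo, which the kernel does with its wrap-safe time_after() helper. A userspace model showing why the signed-difference form keeps working across a counter wrap:

#include <stdbool.h>
#include <stdio.h>

static bool time_after_model(unsigned long a, unsigned long b)
{
    return (long)(b - a) < 0;           /* true if a is later than b, wrap-safe */
}

int main(void)
{
    unsigned long trans_start = (unsigned long)-10;  /* just before the counter wraps */
    unsigned long timeo = 5;

    /* 20 ticks later the counter has wrapped to a small value, yet the
     * comparison still reports that the deadline has passed.
     */
    printf("%d\n", time_after_model(trans_start + 20, trans_start + timeo));
    return 0;
}
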
2806 struct napi_struct *napi = &tx_ring->tqp_vector->napi; in hns3_dump_queue_stats()
2811 priv->tx_timeout_count, timeout_queue, tx_ring->next_to_use, in hns3_dump_queue_stats()
2812 tx_ring->next_to_clean, napi->state); in hns3_dump_queue_stats()
2816 tx_ring->stats.tx_pkts, tx_ring->stats.tx_bytes, in hns3_dump_queue_stats()
2817 tx_ring->stats.sw_err_cnt, tx_ring->pending_buf); in hns3_dump_queue_stats()
2821 tx_ring->stats.seg_pkt_cnt, tx_ring->stats.tx_more, in hns3_dump_queue_stats()
2822 tx_ring->stats.restart_queue, tx_ring->stats.tx_busy); in hns3_dump_queue_stats()
2825 tx_ring->stats.tx_push, tx_ring->stats.tx_mem_doorbell); in hns3_dump_queue_stats()
2837 readl(tx_ring->tqp_vector->mask_addr)); in hns3_dump_queue_reg()
2857 if (timeout_queue >= ndev->num_tx_queues) { in hns3_get_tx_timeo_queue_info()
2860 priv->tx_timeout_count); in hns3_get_tx_timeo_queue_info()
2864 priv->tx_timeout_count++; in hns3_get_tx_timeo_queue_info()
2866 tx_ring = &priv->ring[timeout_queue]; in hns3_get_tx_timeo_queue_info()
2872 if (h->ae_algo->ops->get_mac_stats) { in hns3_get_tx_timeo_queue_info()
2875 h->ae_algo->ops->get_mac_stats(h, &mac_stats); in hns3_get_tx_timeo_queue_info()
2888 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_net_timeout()
2896 if (h->ae_algo->ops->reset_event) in hns3_nic_net_timeout()
2897 h->ae_algo->ops->reset_event(h->pdev, h); in hns3_nic_net_timeout()
2907 if (!h->ae_algo->ops->add_arfs_entry) in hns3_rx_flow_steer()
2908 return -EOPNOTSUPP; in hns3_rx_flow_steer()
2910 if (skb->encapsulation) in hns3_rx_flow_steer()
2911 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2914 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2920 return -EPROTONOSUPPORT; in hns3_rx_flow_steer()
2922 return h->ae_algo->ops->add_arfs_entry(h, rxq_index, flow_id, &fkeys); in hns3_rx_flow_steer()
2931 if (!h->ae_algo->ops->get_vf_config) in hns3_nic_get_vf_config()
2932 return -EOPNOTSUPP; in hns3_nic_get_vf_config()
2934 return h->ae_algo->ops->get_vf_config(h, vf, ivf); in hns3_nic_get_vf_config()
2942 if (!h->ae_algo->ops->set_vf_link_state) in hns3_nic_set_vf_link_state()
2943 return -EOPNOTSUPP; in hns3_nic_set_vf_link_state()
2945 return h->ae_algo->ops->set_vf_link_state(h, vf, link_state); in hns3_nic_set_vf_link_state()
2953 if (!h->ae_algo->ops->set_vf_rate) in hns3_nic_set_vf_rate()
2954 return -EOPNOTSUPP; in hns3_nic_set_vf_rate()
2956 return h->ae_algo->ops->set_vf_rate(h, vf, min_tx_rate, max_tx_rate, in hns3_nic_set_vf_rate()
2965 if (!h->ae_algo->ops->set_vf_mac) in hns3_nic_set_vf_mac()
2966 return -EOPNOTSUPP; in hns3_nic_set_vf_mac()
2973 return -EINVAL; in hns3_nic_set_vf_mac()
2976 return h->ae_algo->ops->set_vf_mac(h, vf_id, mac); in hns3_nic_set_vf_mac()
2984 __be16 protocol = skb->protocol; in hns3_get_skb_dscp()
3005 if (h->kinfo.tc_map_mode != HNAE3_TC_MAP_MODE_DSCP || in hns3_nic_select_queue()
3006 !h->ae_algo->ops->get_dscp_prio) in hns3_nic_select_queue()
3013 skb->priority = h->kinfo.dscp_prio[dscp]; in hns3_nic_select_queue()
3014 if (skb->priority == HNAE3_PRIO_ID_INVALID) in hns3_nic_select_queue()
3015 skb->priority = 0; in hns3_nic_select_queue()
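
In DSCP-based TC mapping mode, hns3_nic_select_queue() above turns the packet's DSCP value into an skb priority through a table obtained from the device (get_dscp_prio), falling back to priority 0 for unmapped values. A small sketch of the extraction and lookup; the table contents and the INVALID value below are made up for illustration, only the ">> 2" extraction mirrors the usual TOS/traffic-class layout:

#include <stdint.h>
#include <stdio.h>

#define PRIO_ID_INVALID 0xff            /* stands in for the driver's invalid marker */

int main(void)
{
    uint8_t tos = 0xb8;                     /* IPv4 TOS byte: EF (46) << 2           */
    uint8_t dscp = tos >> 2;                /* upper six bits are the DSCP           */
    uint8_t dscp_prio[64] = { [46] = 5 };   /* hypothetical mapping: EF -> prio 5    */
    uint8_t prio = dscp_prio[dscp];

    if (prio == PRIO_ID_INVALID)
        prio = 0;                           /* unmapped DSCP falls back to 0         */

    printf("dscp=%u prio=%u\n", dscp, prio);
    return 0;
}
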
3051 u32 dev_id = pdev->device; in hns3_is_phys_func()
3067 dev_warn(&pdev->dev, "un-recognized pci device-id %u", in hns3_is_phys_func()
3076 /* If our VFs are assigned we cannot shut down SR-IOV in hns3_disable_sriov()
3081 dev_warn(&pdev->dev, in hns3_disable_sriov()
3089 /* hns3_probe - Device initialization routine
3104 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev), GFP_KERNEL); in hns3_probe()
3106 return -ENOMEM; in hns3_probe()
3108 ae_dev->pdev = pdev; in hns3_probe()
3109 ae_dev->flag = ent->driver_data; in hns3_probe()
3130 if (ae_dev->ops->clean_vf_config) in hns3_clean_vf_config()
3131 ae_dev->ops->clean_vf_config(ae_dev, num_vfs); in hns3_clean_vf_config()
3134 /* hns3_remove - Device removal routine
3161 dev_warn(&pdev->dev, "Can not config SRIOV\n"); in hns3_pci_sriov_configure()
3162 return -EINVAL; in hns3_pci_sriov_configure()
3168 dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret); in hns3_pci_sriov_configure()
3177 dev_warn(&pdev->dev, in hns3_pci_sriov_configure()
3199 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_suspend()
3201 if (ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_suspend()
3202 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FUNC_RESET); in hns3_suspend()
3212 if (ae_dev && hns3_is_phys_func(ae_dev->pdev)) { in hns3_resume()
3214 if (ae_dev->ops && ae_dev->ops->reset_done) in hns3_resume()
3215 ae_dev->ops->reset_done(ae_dev); in hns3_resume()
3227 dev_info(&pdev->dev, "PCI error detected, state(=%u)!!\n", state); in hns3_error_detected()
3232 if (!ae_dev || !ae_dev->ops) { in hns3_error_detected()
3233 dev_err(&pdev->dev, in hns3_error_detected()
3234 "Can't recover - error happened before device initialized\n"); in hns3_error_detected()
3238 if (ae_dev->ops->handle_hw_ras_error) in hns3_error_detected()
3239 ret = ae_dev->ops->handle_hw_ras_error(ae_dev); in hns3_error_detected()
3251 struct device *dev = &pdev->dev; in hns3_slot_reset()
3253 if (!ae_dev || !ae_dev->ops) in hns3_slot_reset()
3256 ops = ae_dev->ops; in hns3_slot_reset()
3258 if (ops->reset_event && ops->get_reset_level && in hns3_slot_reset()
3259 ops->set_default_reset_request) { in hns3_slot_reset()
3260 if (ae_dev->hw_err_reset_req) { in hns3_slot_reset()
3261 reset_type = ops->get_reset_level(ae_dev, in hns3_slot_reset()
3262 &ae_dev->hw_err_reset_req); in hns3_slot_reset()
3263 ops->set_default_reset_request(ae_dev, reset_type); in hns3_slot_reset()
3265 ops->reset_event(pdev, NULL); in hns3_slot_reset()
3278 dev_info(&pdev->dev, "FLR prepare\n"); in hns3_reset_prepare()
3279 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_prepare) in hns3_reset_prepare()
3280 ae_dev->ops->reset_prepare(ae_dev, HNAE3_FLR_RESET); in hns3_reset_prepare()
3287 dev_info(&pdev->dev, "FLR done\n"); in hns3_reset_done()
3288 if (ae_dev && ae_dev->ops && ae_dev->ops->reset_done) in hns3_reset_done()
3289 ae_dev->ops->reset_done(ae_dev); in hns3_reset_done()
3316 struct pci_dev *pdev = h->pdev; in hns3_set_default_feature()
3319 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_set_default_feature()
3321 netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | in hns3_set_default_feature()
3329 netdev->features |= NETIF_F_GRO_HW; in hns3_set_default_feature()
3332 netdev->features |= NETIF_F_NTUPLE; in hns3_set_default_feature()
3334 if (test_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps)) in hns3_set_default_feature()
3335 netdev->features |= NETIF_F_GSO_UDP_L4; in hns3_set_default_feature()
3337 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3338 netdev->features |= NETIF_F_HW_CSUM; in hns3_set_default_feature()
3340 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; in hns3_set_default_feature()
3342 if (test_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps)) in hns3_set_default_feature()
3343 netdev->features |= NETIF_F_GSO_UDP_TUNNEL_CSUM; in hns3_set_default_feature()
3345 if (test_bit(HNAE3_DEV_SUPPORT_FD_FORWARD_TC_B, ae_dev->caps)) in hns3_set_default_feature()
3346 netdev->features |= NETIF_F_HW_TC; in hns3_set_default_feature()
3348 netdev->hw_features |= netdev->features; in hns3_set_default_feature()
3349 if (!test_bit(HNAE3_DEV_SUPPORT_VLAN_FLTR_MDF_B, ae_dev->caps)) in hns3_set_default_feature()
3350 netdev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; in hns3_set_default_feature()
3352 netdev->vlan_features |= netdev->features & in hns3_set_default_feature()
3357 netdev->hw_enc_features |= netdev->vlan_features | NETIF_F_TSO_MANGLEID; in hns3_set_default_feature()
3363 if (ae_dev->dev_version > HNAE3_DEVICE_VERSION_V2) { in hns3_set_default_feature()
3364 netdev->features &= ~NETIF_F_GSO_GRE; in hns3_set_default_feature()
3365 netdev->features &= ~NETIF_F_GSO_GRE_CSUM; in hns3_set_default_feature()
3375 if (ring->page_pool) { in hns3_alloc_buffer()
3376 p = page_pool_dev_alloc_frag(ring->page_pool, in hns3_alloc_buffer()
3377 &cb->page_offset, in hns3_alloc_buffer()
3380 return -ENOMEM; in hns3_alloc_buffer()
3382 cb->priv = p; in hns3_alloc_buffer()
3383 cb->buf = page_address(p); in hns3_alloc_buffer()
3384 cb->dma = page_pool_get_dma_addr(p); in hns3_alloc_buffer()
3385 cb->type = DESC_TYPE_PP_FRAG; in hns3_alloc_buffer()
3386 cb->reuse_flag = 0; in hns3_alloc_buffer()
3392 return -ENOMEM; in hns3_alloc_buffer()
3394 cb->priv = p; in hns3_alloc_buffer()
3395 cb->page_offset = 0; in hns3_alloc_buffer()
3396 cb->reuse_flag = 0; in hns3_alloc_buffer()
3397 cb->buf = page_address(p); in hns3_alloc_buffer()
3398 cb->length = hns3_page_size(ring); in hns3_alloc_buffer()
3399 cb->type = DESC_TYPE_PAGE; in hns3_alloc_buffer()
3400 page_ref_add(p, USHRT_MAX - 1); in hns3_alloc_buffer()
3401 cb->pagecnt_bias = USHRT_MAX; in hns3_alloc_buffer()
3409 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_HEAD | in hns3_free_buffer()
3411 napi_consume_skb(cb->priv, budget); in hns3_free_buffer()
3413 if (cb->type & DESC_TYPE_PAGE && cb->pagecnt_bias) in hns3_free_buffer()
3414 __page_frag_cache_drain(cb->priv, cb->pagecnt_bias); in hns3_free_buffer()
3415 else if (cb->type & DESC_TYPE_PP_FRAG) in hns3_free_buffer()
3416 page_pool_put_full_page(ring->page_pool, cb->priv, in hns3_free_buffer()
3424 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0, in hns3_map_buffer()
3425 cb->length, ring_to_dma_dir(ring)); in hns3_map_buffer()
3427 if (unlikely(dma_mapping_error(ring_to_dev(ring), cb->dma))) in hns3_map_buffer()
3428 return -EIO; in hns3_map_buffer()
3436 if (cb->type & (DESC_TYPE_SKB | DESC_TYPE_FRAGLIST_SKB)) in hns3_unmap_buffer()
3437 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3439 else if ((cb->type & DESC_TYPE_PAGE) && cb->length) in hns3_unmap_buffer()
3440 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, in hns3_unmap_buffer()
3442 else if (cb->type & (DESC_TYPE_BOUNCE_ALL | DESC_TYPE_BOUNCE_HEAD | in hns3_unmap_buffer()
3449 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_buffer_detach()
3450 ring->desc[i].addr = 0; in hns3_buffer_detach()
3451 ring->desc_cb[i].refill = 0; in hns3_buffer_detach()
3457 struct hns3_desc_cb *cb = &ring->desc_cb[i]; in hns3_free_buffer_detach()
3459 if (!ring->desc_cb[i].dma) in hns3_free_buffer_detach()
3470 for (i = 0; i < ring->desc_num; i++) in hns3_free_buffers()
3477 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_free_desc()
3481 if (ring->desc) { in hns3_free_desc()
3483 ring->desc, ring->desc_dma_addr); in hns3_free_desc()
3484 ring->desc = NULL; in hns3_free_desc()
3490 int size = ring->desc_num * sizeof(ring->desc[0]); in hns3_alloc_desc()
3492 ring->desc = dma_alloc_coherent(ring_to_dev(ring), size, in hns3_alloc_desc()
3493 &ring->desc_dma_addr, GFP_KERNEL); in hns3_alloc_desc()
3494 if (!ring->desc) in hns3_alloc_desc()
3495 return -ENOMEM; in hns3_alloc_desc()
3506 if (ret || ring->page_pool) in hns3_alloc_and_map_buffer()
3523 int ret = hns3_alloc_and_map_buffer(ring, &ring->desc_cb[i]); in hns3_alloc_and_attach_buffer()
3528 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_alloc_and_attach_buffer()
3529 ring->desc_cb[i].page_offset); in hns3_alloc_and_attach_buffer()
3530 ring->desc_cb[i].refill = 1; in hns3_alloc_and_attach_buffer()
3540 for (i = 0; i < ring->desc_num; i++) { in hns3_alloc_ring_buffers()
3552 for (j = i - 1; j >= 0; j--) in hns3_alloc_ring_buffers()
3557 /* detach an in-use buffer and replace with a reserved one */
3561 hns3_unmap_buffer(ring, &ring->desc_cb[i]); in hns3_replace_buffer()
3562 ring->desc_cb[i] = *res_cb; in hns3_replace_buffer()
3563 ring->desc_cb[i].refill = 1; in hns3_replace_buffer()
3564 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_replace_buffer()
3565 ring->desc_cb[i].page_offset); in hns3_replace_buffer()
3566 ring->desc[i].rx.bd_base_info = 0; in hns3_replace_buffer()
3571 ring->desc_cb[i].reuse_flag = 0; in hns3_reuse_buffer()
3572 ring->desc_cb[i].refill = 1; in hns3_reuse_buffer()
3573 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma + in hns3_reuse_buffer()
3574 ring->desc_cb[i].page_offset); in hns3_reuse_buffer()
3575 ring->desc[i].rx.bd_base_info = 0; in hns3_reuse_buffer()
3578 ring->desc_cb[i].dma + ring->desc_cb[i].page_offset, in hns3_reuse_buffer()
3589 int ltu = smp_load_acquire(&ring->last_to_use); in hns3_nic_reclaim_desc()
3590 int ntc = ring->next_to_clean; in hns3_nic_reclaim_desc()
3596 desc = &ring->desc[ntc]; in hns3_nic_reclaim_desc()
3598 if (le16_to_cpu(desc->tx.bdtp_fe_sc_vld_ra_ri) & in hns3_nic_reclaim_desc()
3602 desc_cb = &ring->desc_cb[ntc]; in hns3_nic_reclaim_desc()
3604 if (desc_cb->type & (DESC_TYPE_SKB | DESC_TYPE_BOUNCE_ALL | in hns3_nic_reclaim_desc()
3608 (*bytes) += desc_cb->send_bytes; in hns3_nic_reclaim_desc()
3614 if (++ntc == ring->desc_num) in hns3_nic_reclaim_desc()
3618 prefetch(&ring->desc_cb[ntc]); in hns3_nic_reclaim_desc()
3628 smp_store_release(&ring->next_to_clean, ntc); in hns3_nic_reclaim_desc()
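
hns3_nic_reclaim_desc() above reads last_to_use with smp_load_acquire(), pairing with the smp_store_release() in the doorbell path, and publishes next_to_clean the same way, so the cleaner only ever sees fully written descriptors. A C11-atomics userspace model of that single-producer/single-consumer handoff (indices are simplified to a running counter):

#include <stdatomic.h>
#include <stdio.h>

#define RING_SIZE 8

static int desc[RING_SIZE];                 /* stands in for the BD ring     */
static _Atomic unsigned int last_to_use;    /* producer-owned publish index  */

static void producer_fill(unsigned int idx, int val)
{
    desc[idx % RING_SIZE] = val;            /* fill the descriptor first...  */
    atomic_store_explicit(&last_to_use, idx + 1,
                          memory_order_release);  /* ...then publish it      */
}

static void consumer_reclaim(unsigned int *next_to_clean)
{
    unsigned int ltu = atomic_load_explicit(&last_to_use,
                                            memory_order_acquire);

    while (*next_to_clean != ltu) {
        printf("reclaim bd %u = %d\n",
               *next_to_clean % RING_SIZE, desc[*next_to_clean % RING_SIZE]);
        (*next_to_clean)++;
    }
}

int main(void)
{
    unsigned int ntc = 0;

    producer_fill(0, 100);
    producer_fill(1, 101);
    consumer_reclaim(&ntc);
    return 0;
}
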
3648 ring->tqp_vector->tx_group.total_bytes += bytes; in hns3_clean_tx_ring()
3649 ring->tqp_vector->tx_group.total_packets += pkts; in hns3_clean_tx_ring()
3651 u64_stats_update_begin(&ring->syncp); in hns3_clean_tx_ring()
3652 ring->stats.tx_bytes += bytes; in hns3_clean_tx_ring()
3653 ring->stats.tx_pkts += pkts; in hns3_clean_tx_ring()
3654 u64_stats_update_end(&ring->syncp); in hns3_clean_tx_ring()
3656 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index); in hns3_clean_tx_ring()
3666 !test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) { in hns3_clean_tx_ring()
3668 ring->stats.restart_queue++; in hns3_clean_tx_ring()
3675 int ntc = ring->next_to_clean; in hns3_desc_unused()
3676 int ntu = ring->next_to_use; in hns3_desc_unused()
3678 if (unlikely(ntc == ntu && !ring->desc_cb[ntc].refill)) in hns3_desc_unused()
3679 return ring->desc_num; in hns3_desc_unused()
3681 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu; in hns3_desc_unused()
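
hns3_desc_unused() above counts descriptors that still need a buffer and feeds the RX refill loop that follows; since ntc == ntu could mean either an untouched ring or a fully refilled one, the refill flag of the descriptor at ntc breaks the tie. A runnable model of the formula:

#include <stdio.h>

static unsigned int desc_unused(unsigned int ntc, unsigned int ntu,
                                unsigned int desc_num, int ntc_refilled)
{
    if (ntc == ntu && !ntc_refilled)
        return desc_num;                    /* nothing refilled yet: all free */

    return ((ntc >= ntu) ? 0 : desc_num) + ntc - ntu;
}

int main(void)
{
    printf("%u\n", desc_unused(0, 0, 512, 0));      /* 512: empty ring        */
    printf("%u\n", desc_unused(0, 0, 512, 1));      /* 0: fully refilled      */
    printf("%u\n", desc_unused(10, 500, 512, 1));   /* 22: wrapped interval   */
    return 0;
}
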
3693 desc_cb = &ring->desc_cb[ring->next_to_use]; in hns3_nic_alloc_rx_buffers()
3694 if (desc_cb->reuse_flag) { in hns3_nic_alloc_rx_buffers()
3697 hns3_reuse_buffer(ring, ring->next_to_use); in hns3_nic_alloc_rx_buffers()
3707 writel(i, ring->tqp->io_base + in hns3_nic_alloc_rx_buffers()
3711 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_nic_alloc_rx_buffers()
3719 writel(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG); in hns3_nic_alloc_rx_buffers()
3725 return page_count(cb->priv) == cb->pagecnt_bias; in hns3_can_reuse_page()
3733 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_copybreak()
3734 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_handle_rx_copybreak()
3735 int size = le16_to_cpu(desc->rx.size); in hns3_handle_rx_copybreak()
3736 u32 frag_size = size - pull_len; in hns3_handle_rx_copybreak()
3744 return -ENOMEM; in hns3_handle_rx_copybreak()
3747 desc_cb->reuse_flag = 1; in hns3_handle_rx_copybreak()
3748 memcpy(frag, desc_cb->buf + frag_offset, frag_size); in hns3_handle_rx_copybreak()
3760 struct hns3_desc *desc = &ring->desc[ring->next_to_clean]; in hns3_nic_reuse_page()
3761 u32 frag_offset = desc_cb->page_offset + pull_len; in hns3_nic_reuse_page()
3762 int size = le16_to_cpu(desc->rx.size); in hns3_nic_reuse_page()
3764 u32 frag_size = size - pull_len; in hns3_nic_reuse_page()
3768 if (ring->page_pool) { in hns3_nic_reuse_page()
3769 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3774 /* Avoid re-using a remote or pfmemalloc page */ in hns3_nic_reuse_page()
3775 if (unlikely(!dev_page_is_reusable(desc_cb->priv))) in hns3_nic_reuse_page()
3787 * is non-zero, which means page_offset @ truesize will in hns3_nic_reuse_page()
3791 if ((!desc_cb->page_offset && reused) || in hns3_nic_reuse_page()
3792 ((desc_cb->page_offset + truesize + truesize) <= in hns3_nic_reuse_page()
3793 hns3_page_size(ring) && desc_cb->page_offset)) { in hns3_nic_reuse_page()
3794 desc_cb->page_offset += truesize; in hns3_nic_reuse_page()
3795 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3796 } else if (desc_cb->page_offset && reused) { in hns3_nic_reuse_page()
3797 desc_cb->page_offset = 0; in hns3_nic_reuse_page()
3798 desc_cb->reuse_flag = 1; in hns3_nic_reuse_page()
3799 } else if (frag_size <= ring->rx_copybreak) { in hns3_nic_reuse_page()
3806 desc_cb->pagecnt_bias--; in hns3_nic_reuse_page()
3808 if (unlikely(!desc_cb->pagecnt_bias)) { in hns3_nic_reuse_page()
3809 page_ref_add(desc_cb->priv, USHRT_MAX); in hns3_nic_reuse_page()
3810 desc_cb->pagecnt_bias = USHRT_MAX; in hns3_nic_reuse_page()
3813 skb_add_rx_frag(skb, i, desc_cb->priv, frag_offset, in hns3_nic_reuse_page()
3816 if (unlikely(!desc_cb->reuse_flag)) in hns3_nic_reuse_page()
3817 __page_frag_cache_drain(desc_cb->priv, desc_cb->pagecnt_bias); in hns3_nic_reuse_page()
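
hns3_nic_reuse_page() tries to serve two RX buffers from a single page by flipping page_offset between the two halves, and hns3_can_reuse_page() allows that only when page_count() equals the driver's pagecnt_bias, i.e. every remaining reference belongs to the driver because the stack has already dropped the frags it was handed. A simplified model of that decision, with hypothetical names and a plain counter standing in for page_count()/pagecnt_bias:

/* Simplified model of the half-page reuse decision (illustrative only). */
#include <stdbool.h>

struct rx_buf {
	unsigned int page_size;		/* e.g. 4096 */
	unsigned int truesize;		/* one buffer slice, e.g. 2048 */
	unsigned int page_offset;	/* offset of the current slice */
	unsigned int page_refs;		/* stand-in for page_count() */
	unsigned int bias;		/* stand-in for pagecnt_bias */
};

/* All outstanding references belong to the driver. */
static bool only_owner(const struct rx_buf *b)
{
	return b->page_refs == b->bias;
}

/* Can the page back the next descriptor, and if so at which offset? */
static bool try_reuse(struct rx_buf *b)
{
	bool reused = only_owner(b);

	if (!b->page_offset && reused) {
		/* at offset 0 and sole owner: flip to the second half */
		b->page_offset += b->truesize;
		return true;
	}
	if (b->page_offset && b->page_offset + 2 * b->truesize <= b->page_size) {
		/* page still has room for another slice after the next one */
		b->page_offset += b->truesize;
		return true;
	}
	if (b->page_offset && reused) {
		/* last slice consumed and all frags freed: wrap back to 0 */
		b->page_offset = 0;
		return true;
	}
	return false;	/* drop the page and allocate a fresh one */
}
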
3822 __be16 type = skb->protocol; in hns3_gro_complete()
3830 return -EFAULT; in hns3_gro_complete()
3832 vh = (struct vlan_hdr *)(skb->data + depth); in hns3_gro_complete()
3833 type = vh->h_vlan_encapsulated_proto; in hns3_gro_complete()
3845 th->check = ~tcp_v4_check(skb->len - depth, iph->saddr, in hns3_gro_complete()
3846 iph->daddr, 0); in hns3_gro_complete()
3853 th->check = ~tcp_v6_check(skb->len - depth, &iph->saddr, in hns3_gro_complete()
3854 &iph->daddr, 0); in hns3_gro_complete()
3856 hns3_rl_err(skb->dev, in hns3_gro_complete()
3857 "Error: FW GRO supports only IPv4/IPv6, not 0x%04x, depth: %d\n", in hns3_gro_complete()
3859 return -EFAULT; in hns3_gro_complete()
3862 skb_shinfo(skb)->gso_segs = NAPI_GRO_CB(skb)->count; in hns3_gro_complete()
3863 if (th->cwr) in hns3_gro_complete()
3864 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in hns3_gro_complete()
3867 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_FIXEDID; in hns3_gro_complete()
3869 skb->csum_start = (unsigned char *)th - skb->head; in hns3_gro_complete()
3870 skb->csum_offset = offsetof(struct tcphdr, check); in hns3_gro_complete()
3871 skb->ip_summed = CHECKSUM_PARTIAL; in hns3_gro_complete()
3886 skb->ip_summed = CHECKSUM_COMPLETE; in hns3_checksum_complete()
3887 skb->csum = csum_unfold((__force __sum16)csum); in hns3_checksum_complete()
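
Both of the paths above rest on the Internet ones'-complement checksum: hns3_gro_complete() re-seeds the TCP header with the complemented pseudo-header checksum and marks the merged skb CHECKSUM_PARTIAL, while hns3_checksum_complete() forwards the hardware's 16-bit sum over the packet so the stack can verify it (CHECKSUM_COMPLETE). The underlying arithmetic in standalone form, fold plus IPv4 pseudo-header, with illustrative names and host-order inputs:

/* Ones'-complement checksum helpers (illustrative; byte order ignored). */
#include <stdint.h>

/* Fold a 32-bit partial sum down to 16 bits, feeding carries back in. */
static uint16_t csum_fold32(uint32_t sum)
{
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)sum;
}

/* IPv4/TCP pseudo-header sum: saddr + daddr + protocol + TCP length.
 * The value written into the TCP checksum field is the complement (~)
 * of this sum combined with the sum over the TCP segment itself.
 */
static uint16_t tcp4_pseudo_sum(uint32_t saddr, uint32_t daddr, uint16_t tcp_len)
{
	uint32_t sum = 0;

	sum += (saddr >> 16) + (saddr & 0xffff);
	sum += (daddr >> 16) + (daddr & 0xffff);
	sum += 6;		/* IPPROTO_TCP */
	sum += tcp_len;
	return csum_fold32(sum);
}
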
3897 skb->csum_level = hns3_rx_ptype_tbl[ptype].csum_level; in hns3_rx_handle_csum()
3898 skb->ip_summed = hns3_rx_ptype_tbl[ptype].ip_summed; in hns3_rx_handle_csum()
3908 skb->csum_level = 1; in hns3_rx_handle_csum()
3921 skb->ip_summed = CHECKSUM_UNNECESSARY; in hns3_rx_handle_csum()
3936 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3940 if (!(netdev->features & NETIF_F_RXCSUM)) in hns3_rx_checksum()
3943 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) in hns3_rx_checksum()
3956 skb->ip_summed = CHECKSUM_NONE; in hns3_rx_checksum()
3968 napi_gro_flush(&ring->tqp_vector->napi, false); in hns3_rx_skb()
3970 napi_gro_receive(&ring->tqp_vector->napi, skb); in hns3_rx_skb()
3977 struct hnae3_handle *handle = ring->tqp->handle; in hns3_parse_vlan_tag()
3978 struct pci_dev *pdev = ring->tqp->handle->pdev; in hns3_parse_vlan_tag()
3981 if (unlikely(ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)) { in hns3_parse_vlan_tag()
3982 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
3984 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4000 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
4004 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
4007 if (handle->port_base_vlan_state != in hns3_parse_vlan_tag()
4011 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4014 if (handle->port_base_vlan_state == in hns3_parse_vlan_tag()
4016 *vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag); in hns3_parse_vlan_tag()
4018 *vlan_tag = le16_to_cpu(desc->rx.vlan_tag); in hns3_parse_vlan_tag()
4028 ring->desc[ring->next_to_clean].rx.bd_base_info &= in hns3_rx_ring_move_fw()
4030 ring->desc_cb[ring->next_to_clean].refill = 0; in hns3_rx_ring_move_fw()
4031 ring->next_to_clean += 1; in hns3_rx_ring_move_fw()
4033 if (unlikely(ring->next_to_clean == ring->desc_num)) in hns3_rx_ring_move_fw()
4034 ring->next_to_clean = 0; in hns3_rx_ring_move_fw()
4040 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_alloc_skb()
4044 ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
4045 skb = ring->skb; in hns3_alloc_skb()
4050 return -ENOMEM; in hns3_alloc_skb()
4054 prefetchw(skb->data); in hns3_alloc_skb()
4056 ring->pending_buf = 1; in hns3_alloc_skb()
4057 ring->frag_num = 0; in hns3_alloc_skb()
4058 ring->tail_skb = NULL; in hns3_alloc_skb()
4062 /* We can reuse the buffer as-is; just make sure it is reusable */ in hns3_alloc_skb()
4063 if (dev_page_is_reusable(desc_cb->priv)) in hns3_alloc_skb()
4064 desc_cb->reuse_flag = 1; in hns3_alloc_skb()
4065 else if (desc_cb->type & DESC_TYPE_PP_FRAG) in hns3_alloc_skb()
4066 page_pool_put_full_page(ring->page_pool, desc_cb->priv, in hns3_alloc_skb()
4069 __page_frag_cache_drain(desc_cb->priv, in hns3_alloc_skb()
4070 desc_cb->pagecnt_bias); in hns3_alloc_skb()
4076 if (ring->page_pool) in hns3_alloc_skb()
4081 ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE); in hns3_alloc_skb()
4082 __skb_put(skb, ring->pull_len); in hns3_alloc_skb()
4083 hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len, in hns3_alloc_skb()
4092 struct sk_buff *skb = ring->skb; in hns3_add_frag()
4100 desc = &ring->desc[ring->next_to_clean]; in hns3_add_frag()
4101 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_add_frag()
4102 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_add_frag()
4103 /* make sure the HW descriptor write has completed */ in hns3_add_frag()
4106 return -ENXIO; in hns3_add_frag()
4108 if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) { in hns3_add_frag()
4109 new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0); in hns3_add_frag()
4113 return -ENXIO; in hns3_add_frag()
4116 if (ring->page_pool) in hns3_add_frag()
4119 ring->frag_num = 0; in hns3_add_frag()
4121 if (ring->tail_skb) { in hns3_add_frag()
4122 ring->tail_skb->next = new_skb; in hns3_add_frag()
4123 ring->tail_skb = new_skb; in hns3_add_frag()
4125 skb_shinfo(skb)->frag_list = new_skb; in hns3_add_frag()
4126 ring->tail_skb = new_skb; in hns3_add_frag()
4130 if (ring->tail_skb) { in hns3_add_frag()
4131 head_skb->truesize += hns3_buf_size(ring); in hns3_add_frag()
4132 head_skb->data_len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
4133 head_skb->len += le16_to_cpu(desc->rx.size); in hns3_add_frag()
4134 skb = ring->tail_skb; in hns3_add_frag()
4138 desc_cb->dma + desc_cb->page_offset, in hns3_add_frag()
4142 hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb); in hns3_add_frag()
4145 ring->pending_buf++; in hns3_add_frag()
4159 skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info, in hns3_set_gro_and_checksum()
4162 /* if there is no HW GRO, do not set gro params */ in hns3_set_gro_and_checksum()
4163 if (!skb_shinfo(skb)->gso_size) { in hns3_set_gro_and_checksum()
4169 NAPI_GRO_CB(skb)->count = hnae3_get_field(l234info, in hns3_set_gro_and_checksum()
4173 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_gro_and_checksum()
4184 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; in hns3_set_gro_and_checksum()
4186 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; in hns3_set_gro_and_checksum()
4188 return -EFAULT; in hns3_set_gro_and_checksum()
4201 if (test_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state)) { in hns3_set_rx_skb_rss_type()
4233 u32 nsec = le32_to_cpu(desc->ts_nsec); in hns3_handle_rx_ts_info()
4234 u32 sec = le32_to_cpu(desc->ts_sec); in hns3_handle_rx_ts_info()
4236 if (h->ae_algo->ops->get_rx_hwts) in hns3_handle_rx_ts_info()
4237 h->ae_algo->ops->get_rx_hwts(h, skb, nsec, sec); in hns3_handle_rx_ts_info()
4247 /* Based on hw strategy, the offloaded tag will be stored at in hns3_handle_rx_vlan_tag()
4251 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) { in hns3_handle_rx_vlan_tag()
4271 * current packet, and ring->next_to_clean indicates the first in hns3_handle_bdinfo()
4272 * descriptor of the next packet, so subtract 1 below. in hns3_handle_bdinfo()
4274 pre_ntc = ring->next_to_clean ? (ring->next_to_clean - 1) : in hns3_handle_bdinfo()
4275 (ring->desc_num - 1); in hns3_handle_bdinfo()
4276 desc = &ring->desc[pre_ntc]; in hns3_handle_bdinfo()
4277 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_bdinfo()
4278 l234info = le32_to_cpu(desc->rx.l234_info); in hns3_handle_bdinfo()
4279 ol_info = le32_to_cpu(desc->rx.ol_info); in hns3_handle_bdinfo()
4280 csum = le16_to_cpu(desc->csum); in hns3_handle_bdinfo()
4286 if (unlikely(!desc->rx.pkt_len || (l234info & (BIT(HNS3_RXD_TRUNCAT_B) | in hns3_handle_bdinfo()
4288 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4290 ring->stats.l2_err++; in hns3_handle_bdinfo()
4292 ring->stats.err_pkt_len++; in hns3_handle_bdinfo()
4293 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4295 return -EFAULT; in hns3_handle_bdinfo()
4298 len = skb->len; in hns3_handle_bdinfo()
4301 skb->protocol = eth_type_trans(skb, netdev); in hns3_handle_bdinfo()
4314 u64_stats_update_begin(&ring->syncp); in hns3_handle_bdinfo()
4315 ring->stats.rx_pkts++; in hns3_handle_bdinfo()
4316 ring->stats.rx_bytes += len; in hns3_handle_bdinfo()
4319 ring->stats.rx_multicast++; in hns3_handle_bdinfo()
4321 u64_stats_update_end(&ring->syncp); in hns3_handle_bdinfo()
4323 ring->tqp_vector->rx_group.total_bytes += len; in hns3_handle_bdinfo()
4325 hns3_set_rx_skb_rss_type(ring, skb, le32_to_cpu(desc->rx.rss_hash), in hns3_handle_bdinfo()
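
The pre_ntc computation near the top of hns3_handle_bdinfo() steps back one descriptor because, as the comment there notes, ring->next_to_clean already points at the first BD of the next packet; index 0 wraps back to the end of the ring. The same arithmetic in isolation (illustrative helper):

/* Previous index on a ring of desc_num entries: 0 wraps to desc_num - 1. */
static int ring_prev(int idx, int desc_num)
{
	return idx ? idx - 1 : desc_num - 1;
}
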
4332 struct sk_buff *skb = ring->skb; in hns3_handle_rx_bd()
4339 desc = &ring->desc[ring->next_to_clean]; in hns3_handle_rx_bd()
4340 desc_cb = &ring->desc_cb[ring->next_to_clean]; in hns3_handle_rx_bd()
4345 bd_base_info = le32_to_cpu(desc->rx.bd_base_info); in hns3_handle_rx_bd()
4348 return -ENXIO; in hns3_handle_rx_bd()
4351 length = le16_to_cpu(desc->rx.size); in hns3_handle_rx_bd()
4353 ring->va = desc_cb->buf + desc_cb->page_offset; in hns3_handle_rx_bd()
4356 desc_cb->dma + desc_cb->page_offset, in hns3_handle_rx_bd()
4367 net_prefetch(ring->va); in hns3_handle_rx_bd()
4369 ret = hns3_alloc_skb(ring, length, ring->va); in hns3_handle_rx_bd()
4370 skb = ring->skb; in hns3_handle_rx_bd()
4385 /* As the head data may be changed when GRO is enabled, copy in hns3_handle_rx_bd()
4388 if (skb->len > HNS3_RX_HEAD_SIZE) in hns3_handle_rx_bd()
4389 memcpy(skb->data, ring->va, in hns3_handle_rx_bd()
4390 ALIGN(ring->pull_len, sizeof(long))); in hns3_handle_rx_bd()
4398 skb_record_rx_queue(skb, ring->tqp->tqp_index); in hns3_handle_rx_bd()
4411 unused_count -= ring->pending_buf; in hns3_clean_rx_ring()
4424 if (unlikely(!ring->skb || err == -ENXIO)) { in hns3_clean_rx_ring()
4427 rx_fn(ring, ring->skb); in hns3_clean_rx_ring()
4431 unused_count += ring->pending_buf; in hns3_clean_rx_ring()
4432 ring->skb = NULL; in hns3_clean_rx_ring()
4433 ring->pending_buf = 0; in hns3_clean_rx_ring()
4449 struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group; in hns3_update_rx_int_coalesce()
4452 if (!rx_group->coal.adapt_enable) in hns3_update_rx_int_coalesce()
4455 dim_update_sample(tqp_vector->event_cnt, rx_group->total_packets, in hns3_update_rx_int_coalesce()
4456 rx_group->total_bytes, &sample); in hns3_update_rx_int_coalesce()
4457 net_dim(&rx_group->dim, &sample); in hns3_update_rx_int_coalesce()
4462 struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group; in hns3_update_tx_int_coalesce()
4465 if (!tx_group->coal.adapt_enable) in hns3_update_tx_int_coalesce()
4468 dim_update_sample(tqp_vector->event_cnt, tx_group->total_packets, in hns3_update_tx_int_coalesce()
4469 tx_group->total_bytes, &sample); in hns3_update_tx_int_coalesce()
4470 net_dim(&tx_group->dim, &sample); in hns3_update_tx_int_coalesce()
4475 struct hns3_nic_priv *priv = netdev_priv(napi->dev); in hns3_nic_common_poll()
4484 if (unlikely(test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4492 hns3_for_each_ring(ring, tqp_vector->tx_group) in hns3_nic_common_poll()
4496 if (tqp_vector->num_tqps > 1) in hns3_nic_common_poll()
4497 rx_budget = max(budget / tqp_vector->num_tqps, 1); in hns3_nic_common_poll()
4499 hns3_for_each_ring(ring, tqp_vector->rx_group) { in hns3_nic_common_poll()
4508 tqp_vector->rx_group.total_packets += rx_pkt_total; in hns3_nic_common_poll()
4514 likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) { in hns3_nic_common_poll()
4531 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_create_ring_chain()
4535 ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring; in hns3_create_ring_chain()
4538 while (cur_chain->next) in hns3_create_ring_chain()
4539 cur_chain = cur_chain->next; in hns3_create_ring_chain()
4543 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL); in hns3_create_ring_chain()
4545 return -ENOMEM; in hns3_create_ring_chain()
4547 cur_chain->next = chain; in hns3_create_ring_chain()
4550 chain->tqp_index = ring->tqp->tqp_index; in hns3_create_ring_chain()
4551 hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B, in hns3_create_ring_chain()
4553 hnae3_set_field(chain->int_gl_idx, in hns3_create_ring_chain()
4559 ring = ring->next; in hns3_create_ring_chain()
4568 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_get_vector_ring_chain()
4582 chain = cur_chain->next; in hns3_get_vector_ring_chain()
4583 devm_kfree(&pdev->dev, cur_chain); in hns3_get_vector_ring_chain()
4593 struct pci_dev *pdev = tqp_vector->handle->pdev; in hns3_free_vector_ring_chain()
4599 chain_tmp = chain->next; in hns3_free_vector_ring_chain()
4600 devm_kfree(&pdev->dev, chain); in hns3_free_vector_ring_chain()
4608 ring->next = group->ring; in hns3_add_ring_to_group()
4609 group->ring = ring; in hns3_add_ring_to_group()
4611 group->count++; in hns3_add_ring_to_group()
4616 struct pci_dev *pdev = priv->ae_handle->pdev; in hns3_nic_set_cpumask()
4618 int num_vectors = priv->vector_num; in hns3_nic_set_cpumask()
4622 numa_node = dev_to_node(&pdev->dev); in hns3_nic_set_cpumask()
4625 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_set_cpumask()
4627 &tqp_vector->affinity_mask); in hns3_nic_set_cpumask()
4636 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_rx_dim_work()
4638 net_dim_get_rx_moderation(dim->mode, dim->profile_ix); in hns3_rx_dim_work()
4640 hns3_set_vector_coalesce_rx_gl(group->ring->tqp_vector, cur_moder.usec); in hns3_rx_dim_work()
4641 tqp_vector->rx_group.coal.int_gl = cur_moder.usec; in hns3_rx_dim_work()
4643 if (cur_moder.pkts < tqp_vector->rx_group.coal.int_ql_max) { in hns3_rx_dim_work()
4645 tqp_vector->rx_group.coal.int_ql = cur_moder.pkts; in hns3_rx_dim_work()
4648 dim->state = DIM_START_MEASURE; in hns3_rx_dim_work()
4656 struct hns3_enet_tqp_vector *tqp_vector = group->ring->tqp_vector; in hns3_tx_dim_work()
4658 net_dim_get_tx_moderation(dim->mode, dim->profile_ix); in hns3_tx_dim_work()
4661 tqp_vector->tx_group.coal.int_gl = cur_moder.usec; in hns3_tx_dim_work()
4663 if (cur_moder.pkts < tqp_vector->tx_group.coal.int_ql_max) { in hns3_tx_dim_work()
4665 tqp_vector->tx_group.coal.int_ql = cur_moder.pkts; in hns3_tx_dim_work()
4668 dim->state = DIM_START_MEASURE; in hns3_tx_dim_work()
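
The two dim work handlers are the apply side of dynamic interrupt moderation: the poll path only records a sample (events, packets, bytes) and lets net_dim() choose a profile index, and the deferred work maps that index to GL/QL values, programs them, and re-arms the measurement. A deliberately simplified, userspace-only model of the sample-then-adjust idea, not the kernel's dim state machine (all names and thresholds below are made up):

/* Toy adaptive moderation: widen the interrupt gap (GL) when the packet
 * rate grows, narrow it again when traffic thins out. */
struct toy_dim {
	unsigned long last_pkts;	/* packet count at the previous sample */
	unsigned int gl_usec;		/* current interrupt gap in microseconds */
};

static unsigned int toy_dim_update(struct toy_dim *d, unsigned long pkts)
{
	unsigned long delta = pkts - d->last_pkts;

	d->last_pkts = pkts;
	if (delta > 10000 && d->gl_usec < 100)
		d->gl_usec += 10;	/* busy: batch more work per interrupt */
	else if (delta < 1000 && d->gl_usec > 10)
		d->gl_usec -= 10;	/* quiet: favour latency */
	return d->gl_usec;		/* value the work handler would program */
}
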
4673 INIT_WORK(&tqp_vector->rx_group.dim.work, hns3_rx_dim_work); in hns3_nic_init_dim()
4674 INIT_WORK(&tqp_vector->tx_group.dim.work, hns3_tx_dim_work); in hns3_nic_init_dim()
4679 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_init_vector_data()
4686 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4687 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4689 tqp_vector->num_tqps = 0; in hns3_nic_init_vector_data()
4693 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_init_vector_data()
4694 u16 vector_i = i % priv->vector_num; in hns3_nic_init_vector_data()
4695 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_init_vector_data()
4697 tqp_vector = &priv->tqp_vector[vector_i]; in hns3_nic_init_vector_data()
4699 hns3_add_ring_to_group(&tqp_vector->tx_group, in hns3_nic_init_vector_data()
4700 &priv->ring[i]); in hns3_nic_init_vector_data()
4702 hns3_add_ring_to_group(&tqp_vector->rx_group, in hns3_nic_init_vector_data()
4703 &priv->ring[i + tqp_num]); in hns3_nic_init_vector_data()
4705 priv->ring[i].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4706 priv->ring[i + tqp_num].tqp_vector = tqp_vector; in hns3_nic_init_vector_data()
4707 tqp_vector->num_tqps++; in hns3_nic_init_vector_data()
4710 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_init_vector_data()
4713 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_init_vector_data()
4715 tqp_vector->rx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4716 tqp_vector->rx_group.total_packets = 0; in hns3_nic_init_vector_data()
4717 tqp_vector->tx_group.total_bytes = 0; in hns3_nic_init_vector_data()
4718 tqp_vector->tx_group.total_packets = 0; in hns3_nic_init_vector_data()
4719 tqp_vector->handle = h; in hns3_nic_init_vector_data()
4723 ret = -ENOMEM; in hns3_nic_init_vector_data()
4727 ret = h->ae_algo->ops->map_ring_to_vector(h, in hns3_nic_init_vector_data()
4728 tqp_vector->vector_irq, vector_ring_chain); in hns3_nic_init_vector_data()
4735 netif_napi_add(priv->netdev, &tqp_vector->napi, in hns3_nic_init_vector_data()
4742 while (i--) in hns3_nic_init_vector_data()
4743 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_init_vector_data()
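
hns3_nic_init_vector_data() spreads the queue pairs over the vectors round-robin (queue i goes to vector i % vector_num) and relies on the ring array layout set up elsewhere, where TX ring i lives at priv->ring[i] and its RX partner at priv->ring[i + num_tqps]. A small standalone sketch that prints the resulting grouping for hypothetical counts:

/* Print the round-robin queue-to-vector assignment (hypothetical sizes). */
#include <stdio.h>

int main(void)
{
	const int num_tqps = 8, vector_num = 3;

	for (int v = 0; v < vector_num; v++) {
		printf("vector %d:", v);
		for (int i = 0; i < num_tqps; i++)
			if (i % vector_num == v)
				printf(" tx[%d]/rx[%d]", i, i + num_tqps);
		printf("\n");
	}
	return 0;
}
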
4750 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_nic_init_coal_cfg()
4751 struct hns3_enet_coalesce *tx_coal = &priv->tx_coal; in hns3_nic_init_coal_cfg()
4752 struct hns3_enet_coalesce *rx_coal = &priv->rx_coal; in hns3_nic_init_coal_cfg()
4759 * Default: enable self-adaptive interrupt coalescing and GL in hns3_nic_init_coal_cfg()
4761 tx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4762 rx_coal->adapt_enable = 1; in hns3_nic_init_coal_cfg()
4764 tx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4765 rx_coal->int_gl = HNS3_INT_GL_50K; in hns3_nic_init_coal_cfg()
4767 rx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4768 tx_coal->flow_level = HNS3_FLOW_LOW; in hns3_nic_init_coal_cfg()
4770 if (ae_dev->dev_specs.int_ql_max) { in hns3_nic_init_coal_cfg()
4771 tx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
4772 rx_coal->int_ql = HNS3_INT_QL_DEFAULT_CFG; in hns3_nic_init_coal_cfg()
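
The defaults combine self-adaptive (DIM) moderation with a GL value whose name encodes a target interrupt rate; the two are simply reciprocals of each other, e.g. a 50K interrupts-per-second target corresponds to a 20 microsecond gap. The conversion worked out (illustrative helper, not a driver function):

/* Target interrupts-per-second rate to GL gap in microseconds.
 * e.g. rate_to_gl_usec(50000) == 20, matching the "50K" naming. */
static unsigned int rate_to_gl_usec(unsigned int rate_per_sec)
{
	return 1000000u / rate_per_sec;
}
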
4778 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_alloc_vector_data()
4781 struct pci_dev *pdev = h->pdev; in hns3_nic_alloc_vector_data()
4782 u16 tqp_num = h->kinfo.num_tqps; in hns3_nic_alloc_vector_data()
4791 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector), in hns3_nic_alloc_vector_data()
4794 return -ENOMEM; in hns3_nic_alloc_vector_data()
4797 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector); in hns3_nic_alloc_vector_data()
4799 priv->vector_num = vector_num; in hns3_nic_alloc_vector_data()
4800 priv->tqp_vector = (struct hns3_enet_tqp_vector *) in hns3_nic_alloc_vector_data()
4801 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector), in hns3_nic_alloc_vector_data()
4803 if (!priv->tqp_vector) { in hns3_nic_alloc_vector_data()
4804 ret = -ENOMEM; in hns3_nic_alloc_vector_data()
4808 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_alloc_vector_data()
4809 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_alloc_vector_data()
4810 tqp_vector->idx = i; in hns3_nic_alloc_vector_data()
4811 tqp_vector->mask_addr = vector[i].io_addr; in hns3_nic_alloc_vector_data()
4812 tqp_vector->vector_irq = vector[i].vector; in hns3_nic_alloc_vector_data()
4817 devm_kfree(&pdev->dev, vector); in hns3_nic_alloc_vector_data()
4823 group->ring = NULL; in hns3_clear_ring_group()
4824 group->count = 0; in hns3_clear_ring_group()
4830 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_uninit_vector_data()
4834 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_uninit_vector_data()
4835 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_uninit_vector_data()
4837 if (!tqp_vector->rx_group.ring && !tqp_vector->tx_group.ring) in hns3_nic_uninit_vector_data()
4846 dev_warn(priv->dev, "failed to get ring chain\n"); in hns3_nic_uninit_vector_data()
4848 h->ae_algo->ops->unmap_ring_from_vector(h, in hns3_nic_uninit_vector_data()
4849 tqp_vector->vector_irq, vector_ring_chain); in hns3_nic_uninit_vector_data()
4853 hns3_clear_ring_group(&tqp_vector->rx_group); in hns3_nic_uninit_vector_data()
4854 hns3_clear_ring_group(&tqp_vector->tx_group); in hns3_nic_uninit_vector_data()
4855 netif_napi_del(&priv->tqp_vector[i].napi); in hns3_nic_uninit_vector_data()
4861 struct hnae3_handle *h = priv->ae_handle; in hns3_nic_dealloc_vector_data()
4862 struct pci_dev *pdev = h->pdev; in hns3_nic_dealloc_vector_data()
4865 for (i = 0; i < priv->vector_num; i++) { in hns3_nic_dealloc_vector_data()
4868 tqp_vector = &priv->tqp_vector[i]; in hns3_nic_dealloc_vector_data()
4869 ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq); in hns3_nic_dealloc_vector_data()
4874 devm_kfree(&pdev->dev, priv->tqp_vector); in hns3_nic_dealloc_vector_data()
4880 int queue_num = priv->ae_handle->kinfo.num_tqps; in hns3_ring_get_cfg()
4885 ring = &priv->ring[q->tqp_index]; in hns3_ring_get_cfg()
4886 desc_num = priv->ae_handle->kinfo.num_tx_desc; in hns3_ring_get_cfg()
4887 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4888 ring->tx_copybreak = priv->tx_copybreak; in hns3_ring_get_cfg()
4889 ring->last_to_use = 0; in hns3_ring_get_cfg()
4891 ring = &priv->ring[q->tqp_index + queue_num]; in hns3_ring_get_cfg()
4892 desc_num = priv->ae_handle->kinfo.num_rx_desc; in hns3_ring_get_cfg()
4893 ring->queue_index = q->tqp_index; in hns3_ring_get_cfg()
4894 ring->rx_copybreak = priv->rx_copybreak; in hns3_ring_get_cfg()
4897 hnae3_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type); in hns3_ring_get_cfg()
4899 ring->tqp = q; in hns3_ring_get_cfg()
4900 ring->desc = NULL; in hns3_ring_get_cfg()
4901 ring->desc_cb = NULL; in hns3_ring_get_cfg()
4902 ring->dev = priv->dev; in hns3_ring_get_cfg()
4903 ring->desc_dma_addr = 0; in hns3_ring_get_cfg()
4904 ring->buf_size = q->buf_size; in hns3_ring_get_cfg()
4905 ring->desc_num = desc_num; in hns3_ring_get_cfg()
4906 ring->next_to_use = 0; in hns3_ring_get_cfg()
4907 ring->next_to_clean = 0; in hns3_ring_get_cfg()
4919 struct hnae3_handle *h = priv->ae_handle; in hns3_get_ring_config()
4920 struct pci_dev *pdev = h->pdev; in hns3_get_ring_config()
4923 priv->ring = devm_kzalloc(&pdev->dev, in hns3_get_ring_config()
4924 array3_size(h->kinfo.num_tqps, in hns3_get_ring_config()
4925 sizeof(*priv->ring), 2), in hns3_get_ring_config()
4927 if (!priv->ring) in hns3_get_ring_config()
4928 return -ENOMEM; in hns3_get_ring_config()
4930 for (i = 0; i < h->kinfo.num_tqps; i++) in hns3_get_ring_config()
4931 hns3_queue_to_ring(h->kinfo.tqp[i], priv); in hns3_get_ring_config()
4938 if (!priv->ring) in hns3_put_ring_config()
4941 devm_kfree(priv->dev, priv->ring); in hns3_put_ring_config()
4942 priv->ring = NULL; in hns3_put_ring_config()
4950 .pool_size = ring->desc_num * hns3_buf_size(ring) / in hns3_alloc_page_pool()
4959 ring->page_pool = page_pool_create(&pp_params); in hns3_alloc_page_pool()
4960 if (IS_ERR(ring->page_pool)) { in hns3_alloc_page_pool()
4962 PTR_ERR(ring->page_pool)); in hns3_alloc_page_pool()
4963 ring->page_pool = NULL; in hns3_alloc_page_pool()
4971 if (ring->desc_num <= 0 || ring->buf_size <= 0) in hns3_alloc_ring_memory()
4972 return -EINVAL; in hns3_alloc_ring_memory()
4974 ring->desc_cb = devm_kcalloc(ring_to_dev(ring), ring->desc_num, in hns3_alloc_ring_memory()
4975 sizeof(ring->desc_cb[0]), GFP_KERNEL); in hns3_alloc_ring_memory()
4976 if (!ring->desc_cb) { in hns3_alloc_ring_memory()
4977 ret = -ENOMEM; in hns3_alloc_ring_memory()
5001 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_alloc_ring_memory()
5002 ring->desc_cb = NULL; in hns3_alloc_ring_memory()
5010 devm_kfree(ring_to_dev(ring), ring->desc_cb); in hns3_fini_ring()
5011 ring->desc_cb = NULL; in hns3_fini_ring()
5012 ring->next_to_clean = 0; in hns3_fini_ring()
5013 ring->next_to_use = 0; in hns3_fini_ring()
5014 ring->last_to_use = 0; in hns3_fini_ring()
5015 ring->pending_buf = 0; in hns3_fini_ring()
5016 if (!HNAE3_IS_TX_RING(ring) && ring->skb) { in hns3_fini_ring()
5017 dev_kfree_skb_any(ring->skb); in hns3_fini_ring()
5018 ring->skb = NULL; in hns3_fini_ring()
5019 } else if (HNAE3_IS_TX_RING(ring) && ring->tx_spare) { in hns3_fini_ring()
5020 struct hns3_tx_spare *tx_spare = ring->tx_spare; in hns3_fini_ring()
5022 dma_unmap_page(ring_to_dev(ring), tx_spare->dma, tx_spare->len, in hns3_fini_ring()
5024 free_pages((unsigned long)tx_spare->buf, in hns3_fini_ring()
5025 get_order(tx_spare->len)); in hns3_fini_ring()
5027 ring->tx_spare = NULL; in hns3_fini_ring()
5030 if (!HNAE3_IS_TX_RING(ring) && ring->page_pool) { in hns3_fini_ring()
5031 page_pool_destroy(ring->page_pool); in hns3_fini_ring()
5032 ring->page_pool = NULL; in hns3_fini_ring()
5062 dma_addr_t dma = ring->desc_dma_addr; in hns3_init_ring_hw()
5063 struct hnae3_queue *q = ring->tqp; in hns3_init_ring_hw()
5071 hns3_buf_size2type(ring->buf_size)); in hns3_init_ring_hw()
5073 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
5081 ring->desc_num / 8 - 1); in hns3_init_ring_hw()
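
hns3_init_ring_hw() programs the BD-number registers as ring->desc_num / 8 - 1, i.e. the descriptor count is written in units of eight BDs, minus one, which assumes desc_num is a non-zero multiple of 8. A tiny round-trip of that encoding (illustrative helpers):

/* Encode/decode the descriptor count as programmed above
 * (assumes desc_num is a non-zero multiple of 8).
 * e.g. 1024 descriptors -> register value 127 -> 1024 again. */
static unsigned int bd_num_to_reg(unsigned int desc_num)
{
	return desc_num / 8 - 1;
}

static unsigned int reg_to_bd_num(unsigned int reg)
{
	return (reg + 1) * 8;
}
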
5087 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_init_tx_ring_tc()
5088 struct hnae3_tc_info *tc_info = &kinfo->tc_info; in hns3_init_tx_ring_tc()
5091 for (i = 0; i < tc_info->num_tc; i++) { in hns3_init_tx_ring_tc()
5094 for (j = 0; j < tc_info->tqp_count[i]; j++) { in hns3_init_tx_ring_tc()
5097 q = priv->ring[tc_info->tqp_offset[i] + j].tqp; in hns3_init_tx_ring_tc()
5105 struct hnae3_handle *h = priv->ae_handle; in hns3_init_all_ring()
5106 int ring_num = h->kinfo.num_tqps * 2; in hns3_init_all_ring()
5111 ret = hns3_alloc_ring_memory(&priv->ring[i]); in hns3_init_all_ring()
5113 dev_err(priv->dev, in hns3_init_all_ring()
5118 u64_stats_init(&priv->ring[i].syncp); in hns3_init_all_ring()
5125 for (j = i - 1; j >= 0; j--) in hns3_init_all_ring()
5126 hns3_fini_ring(&priv->ring[j]); in hns3_init_all_ring()
5128 return -ENOMEM; in hns3_init_all_ring()
5133 struct hnae3_handle *h = priv->ae_handle; in hns3_uninit_all_ring()
5136 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_uninit_all_ring()
5137 hns3_fini_ring(&priv->ring[i]); in hns3_uninit_all_ring()
5138 hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]); in hns3_uninit_all_ring()
5147 struct hnae3_handle *h = priv->ae_handle; in hns3_init_mac_addr()
5151 if (h->ae_algo->ops->get_mac_addr) in hns3_init_mac_addr()
5152 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp); in hns3_init_mac_addr()
5157 hnae3_format_mac_addr(format_mac_addr, netdev->dev_addr); in hns3_init_mac_addr()
5158 dev_warn(priv->dev, "using random MAC address %s\n", in hns3_init_mac_addr()
5160 } else if (!ether_addr_equal(netdev->dev_addr, mac_addr_temp)) { in hns3_init_mac_addr()
5162 ether_addr_copy(netdev->perm_addr, mac_addr_temp); in hns3_init_mac_addr()
5167 if (h->ae_algo->ops->set_mac_addr) in hns3_init_mac_addr()
5168 ret = h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true); in hns3_init_mac_addr()
5178 if (h->ae_algo->ops->mac_connect_phy) in hns3_init_phy()
5179 ret = h->ae_algo->ops->mac_connect_phy(h); in hns3_init_phy()
5188 if (h->ae_algo->ops->mac_disconnect_phy) in hns3_uninit_phy()
5189 h->ae_algo->ops->mac_disconnect_phy(h); in hns3_uninit_phy()
5194 if (!handle->ae_algo->ops->client_start) in hns3_client_start()
5197 return handle->ae_algo->ops->client_start(handle); in hns3_client_start()
5202 if (!handle->ae_algo->ops->client_stop) in hns3_client_stop()
5205 handle->ae_algo->ops->client_stop(handle); in hns3_client_stop()
5210 struct hnae3_knic_private_info *kinfo = &priv->ae_handle->kinfo; in hns3_info_show()
5213 hnae3_format_mac_addr(format_mac_addr, priv->netdev->dev_addr); in hns3_info_show()
5214 dev_info(priv->dev, "MAC address: %s\n", format_mac_addr); in hns3_info_show()
5215 dev_info(priv->dev, "Task queue pairs numbers: %u\n", kinfo->num_tqps); in hns3_info_show()
5216 dev_info(priv->dev, "RSS size: %u\n", kinfo->rss_size); in hns3_info_show()
5217 dev_info(priv->dev, "Allocated RSS size: %u\n", kinfo->req_rss_size); in hns3_info_show()
5218 dev_info(priv->dev, "RX buffer length: %u\n", kinfo->rx_buf_len); in hns3_info_show()
5219 dev_info(priv->dev, "Desc num per TX queue: %u\n", kinfo->num_tx_desc); in hns3_info_show()
5220 dev_info(priv->dev, "Desc num per RX queue: %u\n", kinfo->num_rx_desc); in hns3_info_show()
5221 dev_info(priv->dev, "Total number of enabled TCs: %u\n", in hns3_info_show()
5222 kinfo->tc_info.num_tc); in hns3_info_show()
5223 dev_info(priv->dev, "Max mtu size: %u\n", priv->netdev->max_mtu); in hns3_info_show()
5229 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(priv->ae_handle->pdev); in hns3_set_cq_period_mode()
5230 struct hnae3_handle *handle = priv->ae_handle; in hns3_set_cq_period_mode()
5234 priv->tx_cqe_mode = mode; in hns3_set_cq_period_mode()
5236 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5237 priv->tqp_vector[i].tx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5239 priv->rx_cqe_mode = mode; in hns3_set_cq_period_mode()
5241 for (i = 0; i < priv->vector_num; i++) in hns3_set_cq_period_mode()
5242 priv->tqp_vector[i].rx_group.dim.mode = mode; in hns3_set_cq_period_mode()
5253 writel(new_mode, handle->kinfo.io_base + reg); in hns3_set_cq_period_mode()
5267 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev); in hns3_state_init()
5268 struct net_device *netdev = handle->kinfo.netdev; in hns3_state_init()
5271 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_init()
5273 if (test_bit(HNAE3_DEV_SUPPORT_TX_PUSH_B, ae_dev->caps)) in hns3_state_init()
5274 set_bit(HNS3_NIC_STATE_TX_PUSH_ENABLE, &priv->state); in hns3_state_init()
5276 if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V3) in hns3_state_init()
5277 set_bit(HNAE3_PFLAG_LIMIT_PROMISC, &handle->supported_pflags); in hns3_state_init()
5279 if (test_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps)) in hns3_state_init()
5280 set_bit(HNS3_NIC_STATE_HW_TX_CSUM_ENABLE, &priv->state); in hns3_state_init()
5283 set_bit(HNS3_NIC_STATE_RXD_ADV_LAYOUT_ENABLE, &priv->state); in hns3_state_init()
5288 struct hns3_nic_priv *priv = handle->priv; in hns3_state_uninit()
5290 clear_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_state_uninit()
5295 struct pci_dev *pdev = handle->pdev; in hns3_client_init()
5302 handle->ae_algo->ops->get_tqps_and_rss_info(handle, &alloc_tqps, in hns3_client_init()
5306 return -ENOMEM; in hns3_client_init()
5309 priv->dev = &pdev->dev; in hns3_client_init()
5310 priv->netdev = netdev; in hns3_client_init()
5311 priv->ae_handle = handle; in hns3_client_init()
5312 priv->tx_timeout_count = 0; in hns3_client_init()
5313 priv->max_non_tso_bd_num = ae_dev->dev_specs.max_non_tso_bd_num; in hns3_client_init()
5314 set_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_client_init()
5316 handle->msg_enable = netif_msg_init(debug, DEFAULT_MSG_LEVEL); in hns3_client_init()
5318 handle->kinfo.netdev = netdev; in hns3_client_init()
5319 handle->priv = (void *)priv; in hns3_client_init()
5325 netdev->watchdog_timeo = HNS3_TX_TIMEOUT; in hns3_client_init()
5326 netdev->priv_flags |= IFF_UNICAST_FLT; in hns3_client_init()
5327 netdev->netdev_ops = &hns3_nic_netdev_ops; in hns3_client_init()
5328 SET_NETDEV_DEV(netdev, &pdev->dev); in hns3_client_init()
5336 ret = -ENOMEM; in hns3_client_init()
5344 ret = -ENOMEM; in hns3_client_init()
5350 ret = -ENOMEM; in hns3_client_init()
5356 ret = -ENOMEM; in hns3_client_init()
5370 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_client_init()
5374 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_client_init()
5381 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_client_init()
5389 dev_err(priv->dev, "failed to init debugfs, ret = %d\n", in hns3_client_init()
5394 netdev->max_mtu = HNS3_MAX_MTU(ae_dev->dev_specs.max_frm_size); in hns3_client_init()
5400 dev_err(priv->dev, "failed to register netdev!\n"); in hns3_client_init()
5425 priv->ring = NULL; in hns3_client_init()
5427 priv->ae_handle = NULL; in hns3_client_init()
5434 struct net_device *netdev = handle->kinfo.netdev; in hns3_client_uninit()
5437 if (netdev->reg_state != NETREG_UNINITIALIZED) in hns3_client_uninit()
5444 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_client_uninit()
5470 struct net_device *netdev = handle->kinfo.netdev; in hns3_link_status_change()
5490 while (ring->next_to_clean != ring->next_to_use) { in hns3_clear_tx_ring()
5491 ring->desc[ring->next_to_clean].tx.bdtp_fe_sc_vld_ra_ri = 0; in hns3_clear_tx_ring()
5492 hns3_free_buffer_detach(ring, ring->next_to_clean, 0); in hns3_clear_tx_ring()
5496 ring->pending_buf = 0; in hns3_clear_tx_ring()
5504 while (ring->next_to_use != ring->next_to_clean) { in hns3_clear_rx_ring()
5509 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_clear_rx_ring()
5521 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs); in hns3_clear_rx_ring()
5527 if (ring->skb) { in hns3_clear_rx_ring()
5528 dev_kfree_skb_any(ring->skb); in hns3_clear_rx_ring()
5529 ring->skb = NULL; in hns3_clear_rx_ring()
5530 ring->pending_buf = 0; in hns3_clear_rx_ring()
5538 while (ring->next_to_use != ring->next_to_clean) { in hns3_force_clear_rx_ring()
5543 if (!ring->desc_cb[ring->next_to_use].reuse_flag) { in hns3_force_clear_rx_ring()
5545 &ring->desc_cb[ring->next_to_use]); in hns3_force_clear_rx_ring()
5546 ring->desc_cb[ring->next_to_use].dma = 0; in hns3_force_clear_rx_ring()
5555 struct net_device *ndev = h->kinfo.netdev; in hns3_clear_all_ring()
5559 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_clear_all_ring()
5562 ring = &priv->ring[i]; in hns3_clear_all_ring()
5565 ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_clear_all_ring()
5578 struct net_device *ndev = h->kinfo.netdev; in hns3_nic_reset_all_ring()
5584 ret = h->ae_algo->ops->reset_queue(h); in hns3_nic_reset_all_ring()
5588 for (i = 0; i < h->kinfo.num_tqps; i++) { in hns3_nic_reset_all_ring()
5589 hns3_init_ring_hw(&priv->ring[i]); in hns3_nic_reset_all_ring()
5594 hns3_clear_tx_ring(&priv->ring[i]); in hns3_nic_reset_all_ring()
5595 priv->ring[i].next_to_clean = 0; in hns3_nic_reset_all_ring()
5596 priv->ring[i].next_to_use = 0; in hns3_nic_reset_all_ring()
5597 priv->ring[i].last_to_use = 0; in hns3_nic_reset_all_ring()
5599 rx_ring = &priv->ring[i + h->kinfo.num_tqps]; in hns3_nic_reset_all_ring()
5608 for (j = 0; j < rx_ring->desc_num; j++) in hns3_nic_reset_all_ring()
5611 rx_ring->next_to_clean = 0; in hns3_nic_reset_all_ring()
5612 rx_ring->next_to_use = 0; in hns3_nic_reset_all_ring()
5622 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_down_enet()
5623 struct net_device *ndev = kinfo->netdev; in hns3_reset_notify_down_enet()
5626 if (test_and_set_bit(HNS3_NIC_STATE_RESETTING, &priv->state)) in hns3_reset_notify_down_enet()
5637 struct hnae3_knic_private_info *kinfo = &handle->kinfo; in hns3_reset_notify_up_enet()
5638 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev); in hns3_reset_notify_up_enet()
5641 if (!test_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_up_enet()
5642 netdev_err(kinfo->netdev, "device is not initialized yet\n"); in hns3_reset_notify_up_enet()
5643 return -EFAULT; in hns3_reset_notify_up_enet()
5646 clear_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5648 if (netif_running(kinfo->netdev)) { in hns3_reset_notify_up_enet()
5649 ret = hns3_nic_net_open(kinfo->netdev); in hns3_reset_notify_up_enet()
5651 set_bit(HNS3_NIC_STATE_RESETTING, &priv->state); in hns3_reset_notify_up_enet()
5652 netdev_err(kinfo->netdev, in hns3_reset_notify_up_enet()
5663 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_init_enet()
5686 hns3_cq_period_mode_init(priv, priv->tx_cqe_mode, priv->rx_cqe_mode); in hns3_reset_notify_init_enet()
5691 dev_warn(priv->dev, "set rx cpu rmap fail, ret=%d\n", ret); in hns3_reset_notify_init_enet()
5695 dev_err(priv->dev, "init irq failed! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5700 if (!hns3_is_phys_func(handle->pdev)) in hns3_reset_notify_init_enet()
5705 dev_err(priv->dev, "hns3_client_start fail! ret=%d\n", ret); in hns3_reset_notify_init_enet()
5709 set_bit(HNS3_NIC_STATE_INITED, &priv->state); in hns3_reset_notify_init_enet()
5730 struct net_device *netdev = handle->kinfo.netdev; in hns3_reset_notify_uninit_enet()
5733 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_reset_notify_uninit_enet()
5736 if (!test_and_clear_bit(HNS3_NIC_STATE_INITED, &priv->state)) { in hns3_reset_notify_uninit_enet()
5744 hns3_reset_tx_queue(priv->ae_handle); in hns3_reset_notify_uninit_enet()
5787 ret = handle->ae_algo->ops->set_channels(handle, new_tqp_num, in hns3_change_channels()
5790 dev_err(&handle->pdev->dev, in hns3_change_channels()
5810 struct hnae3_knic_private_info *kinfo = &h->kinfo; in hns3_set_channels()
5812 u32 new_tqp_num = ch->combined_count; in hns3_set_channels()
5817 return -EBUSY; in hns3_set_channels()
5819 if (ch->rx_count || ch->tx_count) in hns3_set_channels()
5820 return -EINVAL; in hns3_set_channels()
5822 if (kinfo->tc_info.mqprio_active) { in hns3_set_channels()
5823 dev_err(&netdev->dev, in hns3_set_channels()
5825 return -EINVAL; in hns3_set_channels()
5830 dev_err(&netdev->dev, in hns3_set_channels()
5833 return -EINVAL; in hns3_set_channels()
5836 if (kinfo->rss_size == new_tqp_num) in hns3_set_channels()
5851 org_tqp_num = h->kinfo.num_tqps; in hns3_set_channels()
5878 if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_external_lb_prepare()
5891 hns3_nic_reset_all_ring(priv->ae_handle); in hns3_external_lb_prepare()
5893 hns3_reset_tx_queue(priv->ae_handle); in hns3_external_lb_prepare()
5899 struct hnae3_handle *h = priv->ae_handle; in hns3_external_lb_restore()
5907 if (!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) in hns3_external_lb_restore()
5910 if (hns3_nic_reset_all_ring(priv->ae_handle)) in hns3_external_lb_restore()
5913 clear_bit(HNS3_NIC_STATE_DOWN, &priv->state); in hns3_external_lb_restore()
5919 if (h->ae_algo->ops->get_status(h)) in hns3_external_lb_restore()
5941 dev_err(&handle->pdev->dev, "Detected %s!\n", in hns3_process_hw_error()
5956 /* hns3_init_module - Driver registration routine
5964 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string); in hns3_init_module()
5995 /* hns3_exit_module - Driver exit cleanup routine
6012 MODULE_ALIAS("pci:hns-nic");