// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191

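/* Fill one hardware V2 TX buffer descriptor (BD): record the buffer in the
 * ring's desc_cb, program the DMA address and send size, and, for
 * CHECKSUM_PARTIAL skbs, set the L3/L4 checksum offload and TSO fields
 * before advancing next_to_use.
 */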
static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
			    int send_sz, dma_addr_t dma, int frag_end,
			    int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)send_sz);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - skb_tcp_all_headers(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - skb_tcp_all_headers(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
			buf_num, type, mtu);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu,
		      bool is_gso)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/*config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/*if it is a SW VLAN check the next protocol*/
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has not l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

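/* Check whether the non-TSO TX path can accept this skb: if it spans more
 * fragments than one packet may use, linearize it with skb_copy(); return
 * -EBUSY when the ring lacks space, otherwise report the BD count in *bnum.
 */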
static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

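/* TSO variant of the check above: count BDs with each buffer split into
 * BD_MAX_SEND_SIZE chunks, linearize the skb if the count still exceeds the
 * per-packet limit, and return -EBUSY when the ring cannot hold them all.
 */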
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	skb_frag_t *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manual split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

static int hns_nic_maybe_stop_tx_v2(struct sk_buff **out_skb, int *bnum,
				    struct hnae_ring *ring)
{
	if (skb_is_gso(*out_skb))
		return hns_nic_maybe_stop_tso(out_skb, bnum, ring);
	else
		return hns_nic_maybe_stop_tx(out_skb, bnum, ring);
}

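/* Split one buffer that exceeds BD_MAX_SEND_SIZE into several BDs; only the
 * first BD keeps the skb as its private data, the rest are typed as pages.
 */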
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
				(k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
				dma + BD_MAX_SEND_SIZE * k,
				frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				buf_num,
				(type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
				mtu);
}

static void fill_desc_v2(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu,
			 bool is_gso)
{
	if (is_gso)
		fill_tso_desc(ring, priv, size, dma, frag_end, buf_num, type,
			      mtu);
	else
		fill_v2_desc(ring, priv, size, dma, frag_end, buf_num, type,
			     mtu);
}

netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
				struct sk_buff *skb,
				struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	skb_frag_t *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	bool is_gso;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	is_gso = skb_is_gso(skb);
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu, is_gso);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu, is_gso);
	}

	/*complete translate all packets*/
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	netif_trans_update(ndev);
	ndev->stats.tx_bytes += skb->len;
	ndev->stats.tx_packets++;

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 *  smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

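/* Attach an RX buffer fragment to the skb and decide whether the page can be
 * handed back to the ring: only local (same NUMA node) pages are reused, by
 * flipping between the two half-page buffers or by sliding page_offset.
 */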
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	u32 truesize;
	int size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* avoid re-using remote pages,flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given*/
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given*/
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error encountered
	 * during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no-way of knowing if it is a L3/L4 error due to bad
	 * checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any(not just checksum)flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any(not just checksum)flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

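/* Assemble one received packet from one or more RX BDs: copy (or pull) the
 * head into a fresh skb, append the remaining buffers as page fragments, and
 * validate the descriptor flags before handing the skb to the caller.
 */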
static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	net_prefetch(va);

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err*/
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = eth_get_headlen(ndev, va, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err*/
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* check except process, free skb and jump the desc */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved,cannot 0*/
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}

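/* Refill cleand_count RX descriptors, reusing pages flagged for reuse and
 * mapping fresh buffers otherwise, then tell hardware how many were added.
 */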
static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make all data has been write before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* return error number for error or number of desc left to take
 */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	napi_gro_receive(&ring_data->napi, skb);
}

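/* Number of RX descriptors that have been cleaned but not yet refilled,
 * i.e. the distance from next_to_use forward to next_to_clean.
 */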
static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

#define HNS_LOWEST_LATENCY_RATE		27	/* 27 MB/s */
#define HNS_LOW_LATENCY_RATE		80	/* 80 MB/s */

#define HNS_COAL_BDNUM			3

static u32 hns_coal_rx_bdnum(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;

	if (coal_enable &&
	    ring->coal_last_rx_bytes > HNS_LOWEST_LATENCY_RATE)
		return HNS_COAL_BDNUM;
	else
		return 0;
}

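/* Refresh ring->coal_rx_rate (roughly MB/s: bytes per ms shifted right by 10)
 * from the bytes received since the last sample, at most every HZ/16 jiffies.
 */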
static void hns_update_rx_rate(struct hnae_ring *ring)
{
	bool coal_enable = ring->q->handle->coal_adapt_en;
	u32 time_passed_ms;
	u64 total_bytes;

	if (!coal_enable ||
	    time_before(jiffies, ring->coal_last_jiffies + (HZ >> 4)))
		return;

	/* ring->stats.rx_bytes overflowed */
	if (ring->coal_last_rx_bytes > ring->stats.rx_bytes) {
		ring->coal_last_rx_bytes = ring->stats.rx_bytes;
		ring->coal_last_jiffies = jiffies;
		return;
	}

	total_bytes = ring->stats.rx_bytes - ring->coal_last_rx_bytes;
	time_passed_ms = jiffies_to_msecs(jiffies - ring->coal_last_jiffies);
	do_div(total_bytes, time_passed_ms);
	ring->coal_rx_rate = total_bytes >> 10;

	ring->coal_last_rx_bytes = ring->stats.rx_bytes;
	ring->coal_last_jiffies = jiffies;
}

/**
 * smooth_alg - smoothing algorithm for adjusting coalesce parameter
 * @new_param: new value
 * @old_param: old value
 **/
static u32 smooth_alg(u32 new_param, u32 old_param)
{
	u32 gap = (new_param > old_param) ? new_param - old_param
					  : old_param - new_param;

	if (gap > 8)
		gap >>= 3;

	if (new_param > old_param)
		return old_param + gap;
	else
		return old_param - gap;
}

/**
 * hns_nic_adpt_coalesce - self-adaptive coalesce according to rx rate
 * @ring_data: pointer to hns_nic_ring_data
 **/
static void hns_nic_adpt_coalesce(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct hnae_handle *handle = ring->q->handle;
	u32 new_coal_param, old_coal_param = ring->coal_param;

	if (ring->coal_rx_rate < HNS_LOWEST_LATENCY_RATE)
		new_coal_param = HNAE_LOWEST_LATENCY_COAL_PARAM;
	else if (ring->coal_rx_rate < HNS_LOW_LATENCY_RATE)
		new_coal_param = HNAE_LOW_LATENCY_COAL_PARAM;
	else
		new_coal_param = HNAE_BULK_LATENCY_COAL_PARAM;

	if (new_coal_param == old_coal_param &&
	    new_coal_param == handle->coal_param)
		return;

	new_coal_param = smooth_alg(new_coal_param, old_coal_param);
	ring->coal_param = new_coal_param;

	/**
	 * Because all rings in one port share one coalesce param, when one
	 * ring calculates its own coalesce param, it cannot write it to
	 * hardware at once. It is only written when one of the following
	 * conditions holds:
	 *       1. the current ring's coalesce param is larger than the
	 *          hardware's.
	 *       2. the ring which adapted last time can change again.
	 *       3. timeout.
	 */
	if (new_coal_param == handle->coal_param) {
		handle->coal_last_jiffies = jiffies;
		handle->coal_ring_idx = ring_data->queue_index;
	} else if (new_coal_param > handle->coal_param ||
		   handle->coal_ring_idx == ring_data->queue_index ||
		   time_after(jiffies, handle->coal_last_jiffies + (HZ >> 4))) {
		handle->dev->ops->set_coalesce_usecs(handle,
						     new_coal_param);
		handle->dev->ops->set_coalesce_frames(handle,
						      1, new_coal_param);
		handle->coal_param = new_coal_param;
		handle->coal_ring_idx = ring_data->queue_index;
		handle->coal_last_jiffies = jiffies;
	}
}

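/* NAPI RX poll body: consume up to 'budget' packets from the ring, refilling
 * buffers in batches of RCB_NOF_ALLOC_RX_BUFF_ONCE, and pass each good packet
 * to the upper-stack handler supplied through 'v'.
 */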
static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* do jump the err */
			recv_pkts++;
			continue;
		}

		/* do update ip stack process*/
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
							ring_data, skb);
		recv_pkts++;
	}

out:
	/* make all data has been write before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;
	bool rx_stopped;

	hns_update_rx_rate(ring);

	/* for hardware bug fixed */
	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		rx_stopped = true;
	} else {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		rx_stopped = false;
	}

	return rx_stopped;
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	hns_update_rx_rate(ring);
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num <= hns_coal_rx_bdnum(ring)) {
		if (ring->q->handle->coal_adapt_en)
			hns_nic_adpt_coalesce(ring_data);

		return true;
	}

	return false;
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach*/
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

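/* Sanity-check the head pointer reported by hardware: it must lie within the
 * ring and inside the interval of descriptors currently owned by hardware
 * (between next_to_clean and next_to_use, accounting for wrap-around).
 */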
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before call this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* reclaim all desc in one budget
 * return error or number of desc left
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return 0; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}
	/* update tx ring statistics. */
	ring->stats.tx_pkts += pkts;
	ring->stats.tx_bytes += bytes;

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		return true;
	else
		return false;
}

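/* Reclaim every TX descriptor up to next_to_use without reporting the bytes
 * and packets anywhere, and reset the BQL state of the queue.
 */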
static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	head = ring->next_to_use; /* ntu :soft setted ring position*/
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

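/* Shared NAPI poll routine for TX and RX rings: run the ring's poll_one
 * handler, and complete NAPI only when its fini_process callback confirms
 * there is no more work pending.
 */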
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

	clean_complete += ring_data->poll_one(
				ring_data, budget - clean_complete,
				ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			return budget;
		}
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 * hns_nic_adjust_link - adjust the link mode by the phy state or new param
 * @ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	/* If there is no phy, do not need adjust link */
	if (ndev->phydev) {
		/* When phy link down, do nothing */
		if (ndev->phydev->link == 0)
			return;

		if (h->dev->ops->need_adjust_link(h, ndev->phydev->speed,
						  ndev->phydev->duplex)) {
			/* because the Hi161X chip doesn't support changing
			 * gmac speed and duplex with traffic. Delay 200ms to
			 * make sure there is no more data in the chip FIFO.
			 */
			netif_carrier_off(ndev);
			msleep(200);
			h->dev->ops->adjust_link(h, ndev->phydev->speed,
						 ndev->phydev->duplex);
			netif_carrier_on(ndev);
		}
	}

	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 * hns_nic_init_phy - init phy
 * @ndev: net device
 * @h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, };
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	ethtool_convert_legacy_u32_to_link_mode(supported, h->if_support);
	linkmode_and(phy_dev->supported, phy_dev->supported, supported);
	linkmode_copy(phy_dev->advertising, phy_dev->supported);

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_attached_info(phy_dev);

	return 0;
}

hns_nic_ring_open(struct net_device * netdev,int idx)1191b5996f11Shuangdaode static int hns_nic_ring_open(struct net_device *netdev, int idx)
1192b5996f11Shuangdaode {
1193b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(netdev);
1194b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1195a05e4c0aSJakub Kicinski
1196b5996f11Shuangdaode napi_enable(&priv->ring_data[idx].napi);
1197b5996f11Shuangdaode
1198b5996f11Shuangdaode enable_irq(priv->ring_data[idx].ring->irq);
1199b5996f11Shuangdaode h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);
1200336a443bSYueHaibing
1201b5996f11Shuangdaode return 0;
1202b5996f11Shuangdaode }
1203b5996f11Shuangdaode
hns_nic_net_set_mac_address(struct net_device * ndev,void * p)1204b5996f11Shuangdaode static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
1205b5996f11Shuangdaode {
1206b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1207b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1208b5996f11Shuangdaode struct sockaddr *mac_addr = p;
1209b5996f11Shuangdaode int ret;
1210b5996f11Shuangdaode
1211b5996f11Shuangdaode if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1212b5996f11Shuangdaode return -EADDRNOTAVAIL;
1213b8eeac56SJakub Kicinski
1214b5996f11Shuangdaode ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
1215b5996f11Shuangdaode if (ret) {
1216b5996f11Shuangdaode netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
1217b5996f11Shuangdaode return ret;
1218b5996f11Shuangdaode }
1219b5996f11Shuangdaode
1220b5996f11Shuangdaode eth_hw_addr_set(ndev, mac_addr->sa_data);
1221b5996f11Shuangdaode
1222b5996f11Shuangdaode return 0;
1223b5996f11Shuangdaode }
1224b5996f11Shuangdaode
hns_nic_update_stats(struct net_device * netdev)1225b5996f11Shuangdaode static void hns_nic_update_stats(struct net_device *netdev)
1226b5996f11Shuangdaode {
1227b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(netdev);
1228b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1229b5996f11Shuangdaode
1230b5996f11Shuangdaode h->dev->ops->update_stats(h, &netdev->stats);
1231ba2d0791Slipeng }
1232ba2d0791Slipeng
123313ac695eSSalil /* set mac addr if it is configured, or leave it to the AE driver */
hns_init_mac_addr(struct net_device * ndev)123413ac695eSSalil static void hns_init_mac_addr(struct net_device *ndev)
1235ff3edc9bSArnd Bergmann {
1236c8ad0cf3SLu Wei struct hns_nic_priv *priv = netdev_priv(ndev);
1237ba2d0791Slipeng
1238ba2d0791Slipeng if (device_get_ethdev_address(priv->dev, ndev)) {
1239ba2d0791Slipeng eth_hw_addr_random(ndev);
1240ba2d0791Slipeng dev_warn(priv->dev, "No valid mac, use random mac %pM",
1241ba2d0791Slipeng ndev->dev_addr);
1242ba2d0791Slipeng }
1243ba2d0791Slipeng }
1244ba2d0791Slipeng
hns_nic_ring_close(struct net_device * netdev,int idx)124513ac695eSSalil static void hns_nic_ring_close(struct net_device *netdev, int idx)
1246ba2d0791Slipeng {
1247ba2d0791Slipeng struct hns_nic_priv *priv = netdev_priv(netdev);
1248ba2d0791Slipeng struct hnae_handle *h = priv->ae_handle;
1249ba2d0791Slipeng
125013ac695eSSalil h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
125113ac695eSSalil disable_irq(priv->ring_data[idx].ring->irq);
1252ff3edc9bSArnd Bergmann
1253ff3edc9bSArnd Bergmann napi_disable(&priv->ring_data[idx].napi);
1254ff3edc9bSArnd Bergmann }
1255ba2d0791Slipeng
hns_nic_init_affinity_mask(int q_num,int ring_idx,struct hnae_ring * ring,cpumask_t * mask)125613ac695eSSalil static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
125713ac695eSSalil struct hnae_ring *ring, cpumask_t *mask)
1258c82bd077SYonglong Liu {
1259c82bd077SYonglong Liu int cpu;
1260c82bd077SYonglong Liu
1261c82bd077SYonglong Liu /* Different irq balance between 16-core and 32-core systems.
1262c82bd077SYonglong Liu * The cpu mask is set by ring index according to the ring flag,
1263c82bd077SYonglong Liu * which indicates whether the ring is tx or rx.
1264c82bd077SYonglong Liu */
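/* Illustrative example (added note, not from the original source): with
 * q_num = 8 on a 16-core system the else-branch below maps tx ring 3 to
 * cpu 6 and its rx counterpart (ring index 11) to cpu 7; on an 8-core
 * system (q_num == num_possible_cpus()) both map to cpu 3.
 */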
1265c82bd077SYonglong Liu if (q_num == num_possible_cpus()) {
1266c82bd077SYonglong Liu if (is_tx_ring(ring))
1267c82bd077SYonglong Liu cpu = ring_idx;
1268c82bd077SYonglong Liu else
1269c82bd077SYonglong Liu cpu = ring_idx - q_num;
1270c82bd077SYonglong Liu } else {
1271c82bd077SYonglong Liu if (is_tx_ring(ring))
1272c82bd077SYonglong Liu cpu = ring_idx * 2;
1273c82bd077SYonglong Liu else
1274b5996f11Shuangdaode cpu = (ring_idx - q_num) * 2 + 1;
1275b5996f11Shuangdaode }
1276b5996f11Shuangdaode
1277b5996f11Shuangdaode cpumask_clear(mask);
1278b5996f11Shuangdaode cpumask_set_cpu(cpu, mask);
1279b5996f11Shuangdaode
1280ba2d0791Slipeng return cpu;
1281b5996f11Shuangdaode }
1282b5996f11Shuangdaode
hns_nic_free_irq(int q_num,struct hns_nic_priv * priv)1283b5996f11Shuangdaode static void hns_nic_free_irq(int q_num, struct hns_nic_priv *priv)
1284b5996f11Shuangdaode {
1285b5996f11Shuangdaode int i;
1286b5996f11Shuangdaode
1287b5996f11Shuangdaode for (i = 0; i < q_num * 2; i++) {
1288b5996f11Shuangdaode if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
1289b5996f11Shuangdaode irq_set_affinity_hint(priv->ring_data[i].ring->irq,
1290ba2d0791Slipeng NULL);
1291b5996f11Shuangdaode free_irq(priv->ring_data[i].ring->irq,
1292b5996f11Shuangdaode &priv->ring_data[i]);
1293b5996f11Shuangdaode priv->ring_data[i].ring->irq_init_flag =
12945a6bd84fSBarry Song RCB_IRQ_NOT_INITED;
1295b5996f11Shuangdaode }
1296b5996f11Shuangdaode }
1297b5996f11Shuangdaode }
1298b5996f11Shuangdaode
hns_nic_init_irq(struct hns_nic_priv * priv)1299b5996f11Shuangdaode static int hns_nic_init_irq(struct hns_nic_priv *priv)
1300c82bd077SYonglong Liu {
1301b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1302ba2d0791Slipeng struct hns_nic_ring_data *rd;
1303ba2d0791Slipeng int i;
1304ba2d0791Slipeng int ret;
1305ba2d0791Slipeng int cpu;
1306ba2d0791Slipeng
1307ba2d0791Slipeng for (i = 0; i < h->q_num * 2; i++) {
1308ba2d0791Slipeng rd = &priv->ring_data[i];
1309ba2d0791Slipeng
1310b5996f11Shuangdaode if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
131113ac695eSSalil break;
1312b5996f11Shuangdaode
1313b5996f11Shuangdaode snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
1314c82bd077SYonglong Liu "%s-%s%d", priv->netdev->name,
1315c82bd077SYonglong Liu (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);
1316c82bd077SYonglong Liu
1317c82bd077SYonglong Liu rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';
1318b5996f11Shuangdaode
1319b5996f11Shuangdaode irq_set_status_flags(rd->ring->irq, IRQ_NOAUTOEN);
1320b5996f11Shuangdaode ret = request_irq(rd->ring->irq,
1321b5996f11Shuangdaode hns_irq_handle, 0, rd->ring->ring_name, rd);
1322b5996f11Shuangdaode if (ret) {
1323b5996f11Shuangdaode netdev_err(priv->netdev, "request irq(%d) fail\n",
1324454784d8SDaode Huang rd->ring->irq);
1325b5996f11Shuangdaode goto out_free_irq;
1326b5996f11Shuangdaode }
13275778b13bSYonglong Liu
13285778b13bSYonglong Liu cpu = hns_nic_init_affinity_mask(h->q_num, i,
13295778b13bSYonglong Liu rd->ring, &rd->mask);
1330b5996f11Shuangdaode
1331b5996f11Shuangdaode if (cpu_online(cpu))
1332b5996f11Shuangdaode irq_set_affinity_hint(rd->ring->irq,
1333b5996f11Shuangdaode &rd->mask);
1334b5996f11Shuangdaode
1335b5996f11Shuangdaode rd->ring->irq_init_flag = RCB_IRQ_INITED;
1336b5996f11Shuangdaode }
1337b5996f11Shuangdaode
1338b5996f11Shuangdaode return 0;
1339b5996f11Shuangdaode
1340b5996f11Shuangdaode out_free_irq:
1341b5996f11Shuangdaode hns_nic_free_irq(h->q_num, priv);
1342b5996f11Shuangdaode return ret;
1343b5996f11Shuangdaode }
1344b5996f11Shuangdaode
hns_nic_net_up(struct net_device * ndev)1345b5996f11Shuangdaode static int hns_nic_net_up(struct net_device *ndev)
1346b5996f11Shuangdaode {
1347b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1348b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1349b5996f11Shuangdaode int i, j;
1350262b38cdSPhilippe Reynes int ret;
1351262b38cdSPhilippe Reynes
1352b5996f11Shuangdaode if (!test_bit(NIC_STATE_DOWN, &priv->state))
1353b5996f11Shuangdaode return 0;
1354b5996f11Shuangdaode
1355b5996f11Shuangdaode ret = hns_nic_init_irq(priv);
1356b5996f11Shuangdaode if (ret != 0) {
1357b5996f11Shuangdaode netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
1358b5996f11Shuangdaode return ret;
1359b5996f11Shuangdaode }
1360b5996f11Shuangdaode
1361b5996f11Shuangdaode for (i = 0; i < h->q_num * 2; i++) {
1362b5996f11Shuangdaode ret = hns_nic_ring_open(ndev, i);
1363b5996f11Shuangdaode if (ret)
1364b5996f11Shuangdaode goto out_has_some_queues;
1365c82bd077SYonglong Liu }
1366b5996f11Shuangdaode
1367b5996f11Shuangdaode ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
1368b5996f11Shuangdaode if (ret)
1369b5996f11Shuangdaode goto out_set_mac_addr_err;
1370b5996f11Shuangdaode
1371b5996f11Shuangdaode ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
1372b5996f11Shuangdaode if (ret)
1373b5996f11Shuangdaode goto out_start_err;
1374b5996f11Shuangdaode
1375b5996f11Shuangdaode if (ndev->phydev)
1376b5996f11Shuangdaode phy_start(ndev->phydev);
1377b5996f11Shuangdaode
1378b5996f11Shuangdaode clear_bit(NIC_STATE_DOWN, &priv->state);
1379b5996f11Shuangdaode (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
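/* Added note: this arms the periodic service timer; hns_nic_service_timer()
 * re-arms it every SERVICE_TIMER_HZ and schedules the service task.
 */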
1380b5996f11Shuangdaode
1381b5996f11Shuangdaode return 0;
1382b5996f11Shuangdaode
1383b5996f11Shuangdaode out_start_err:
1384b5996f11Shuangdaode netif_stop_queue(ndev);
1385b5996f11Shuangdaode out_set_mac_addr_err:
1386262b38cdSPhilippe Reynes out_has_some_queues:
1387262b38cdSPhilippe Reynes for (j = i - 1; j >= 0; j--)
1388b5996f11Shuangdaode hns_nic_ring_close(ndev, j);
1389b5996f11Shuangdaode
1390b5996f11Shuangdaode hns_nic_free_irq(h->q_num, priv);
1391b5996f11Shuangdaode set_bit(NIC_STATE_DOWN, &priv->state);
1392b5996f11Shuangdaode
1393b5996f11Shuangdaode return ret;
1394b5996f11Shuangdaode }
1395b5996f11Shuangdaode
hns_nic_net_down(struct net_device * ndev)1396b5996f11Shuangdaode static void hns_nic_net_down(struct net_device *ndev)
1397b5996f11Shuangdaode {
1398b5996f11Shuangdaode int i;
1399b5996f11Shuangdaode struct hnae_ae_ops *ops;
1400b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1401b5996f11Shuangdaode
1402b5996f11Shuangdaode if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
1403b5996f11Shuangdaode return;
1404b5996f11Shuangdaode
1405b5996f11Shuangdaode (void) timer_delete_sync(&priv->service_timer);
1406b5996f11Shuangdaode netif_tx_stop_all_queues(ndev);
1407b5996f11Shuangdaode netif_carrier_off(ndev);
1408b5996f11Shuangdaode netif_tx_disable(ndev);
1409b5996f11Shuangdaode priv->link = 0;
1410b5996f11Shuangdaode
1411b5996f11Shuangdaode if (ndev->phydev)
1412b5996f11Shuangdaode phy_stop(ndev->phydev);
1413b5996f11Shuangdaode
1414b5996f11Shuangdaode ops = priv->ae_handle->dev->ops;
1415b5996f11Shuangdaode
1416b5996f11Shuangdaode if (ops->stop)
1417b5996f11Shuangdaode ops->stop(priv->ae_handle);
1418b5996f11Shuangdaode
1419b5996f11Shuangdaode netif_tx_stop_all_queues(ndev);
1420b5996f11Shuangdaode
142176b825abSLin Yun Sheng for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
1422b5996f11Shuangdaode hns_nic_ring_close(ndev, i);
1423860e9538SFlorian Westphal hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);
1424b5996f11Shuangdaode
1425b5996f11Shuangdaode /* clean tx buffers */
1426b5996f11Shuangdaode hns_nic_tx_clr_all_bufs(priv->ring_data + i);
1427b5996f11Shuangdaode }
142876b825abSLin Yun Sheng }
142976b825abSLin Yun Sheng
hns_nic_net_reset(struct net_device * ndev)143076b825abSLin Yun Sheng void hns_nic_net_reset(struct net_device *ndev)
143176b825abSLin Yun Sheng {
143276b825abSLin Yun Sheng struct hns_nic_priv *priv = netdev_priv(ndev);
1433b5996f11Shuangdaode struct hnae_handle *handle = priv->ae_handle;
143476b825abSLin Yun Sheng
1435b5996f11Shuangdaode while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
1436b5996f11Shuangdaode usleep_range(1000, 2000);
1437b5996f11Shuangdaode
1438b5996f11Shuangdaode (void)hnae_reinit_handle(handle);
1439b5996f11Shuangdaode
1440b5996f11Shuangdaode clear_bit(NIC_STATE_RESETTING, &priv->state);
1441b5996f11Shuangdaode }
1442b5996f11Shuangdaode
hns_nic_net_reinit(struct net_device * netdev)1443b5996f11Shuangdaode void hns_nic_net_reinit(struct net_device *netdev)
1444b5996f11Shuangdaode {
1445b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(netdev);
1446b5996f11Shuangdaode enum hnae_port_type type = priv->ae_handle->port_type;
1447b5996f11Shuangdaode
1448b5996f11Shuangdaode netif_trans_update(priv->netdev);
1449b5996f11Shuangdaode while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
1450b5996f11Shuangdaode usleep_range(1000, 2000);
1451b5996f11Shuangdaode
1452b5996f11Shuangdaode hns_nic_net_down(netdev);
1453b5996f11Shuangdaode
1454b5996f11Shuangdaode /* Only do hns_nic_net_reset in debug mode
1455b5996f11Shuangdaode * because of hardware limitation.
1456b5996f11Shuangdaode */
1457b5996f11Shuangdaode if (type == HNAE_PORT_DEBUG)
1458b5996f11Shuangdaode hns_nic_net_reset(netdev);
1459b5996f11Shuangdaode
1460b5996f11Shuangdaode (void)hns_nic_net_up(netdev);
1461b5996f11Shuangdaode clear_bit(NIC_STATE_REINITING, &priv->state);
1462b5996f11Shuangdaode }
1463b5996f11Shuangdaode
hns_nic_net_open(struct net_device * ndev)1464b5996f11Shuangdaode static int hns_nic_net_open(struct net_device *ndev)
1465b5996f11Shuangdaode {
1466b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1467b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1468b5996f11Shuangdaode int ret;
1469b5996f11Shuangdaode
1470b5996f11Shuangdaode if (test_bit(NIC_STATE_TESTING, &priv->state))
1471b5996f11Shuangdaode return -EBUSY;
1472b5996f11Shuangdaode
1473b5996f11Shuangdaode priv->link = 0;
1474b5996f11Shuangdaode netif_carrier_off(ndev);
1475b5996f11Shuangdaode
1476b5996f11Shuangdaode ret = netif_set_real_num_tx_queues(ndev, h->q_num);
1477b5996f11Shuangdaode if (ret < 0) {
1478b5996f11Shuangdaode netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
1479b5996f11Shuangdaode ret);
1480b5996f11Shuangdaode return ret;
1481b5996f11Shuangdaode }
1482b5996f11Shuangdaode
1483a57275d3SYonglong Liu ret = netif_set_real_num_rx_queues(ndev, h->q_num);
14840290bd29SMichael S. Tsirkin if (ret < 0) {
1485b5996f11Shuangdaode netdev_err(ndev,
1486b5996f11Shuangdaode "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
1487b5996f11Shuangdaode return ret;
1488a57275d3SYonglong Liu }
1489a57275d3SYonglong Liu
1490a57275d3SYonglong Liu ret = hns_nic_net_up(ndev);
1491a57275d3SYonglong Liu if (ret) {
1492a57275d3SYonglong Liu netdev_err(ndev,
1493a57275d3SYonglong Liu "hns net up fail, ret=%d!\n", ret);
1494b5996f11Shuangdaode return ret;
1495b5996f11Shuangdaode }
1496a57275d3SYonglong Liu
1497b5996f11Shuangdaode return 0;
1498b5996f11Shuangdaode }
1499b5996f11Shuangdaode
hns_nic_net_stop(struct net_device * ndev)1500b5996f11Shuangdaode static int hns_nic_net_stop(struct net_device *ndev)
1501b5996f11Shuangdaode {
1502b5996f11Shuangdaode hns_nic_net_down(ndev);
150399d51897STom Rix
150427463ad9SYunsheng Lin return 0;
150527463ad9SYunsheng Lin }
1506b5996f11Shuangdaode
1507b5996f11Shuangdaode static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
1508b5996f11Shuangdaode #define HNS_TX_TIMEO_LIMIT (40 * HZ)
hns_nic_net_timeout(struct net_device * ndev,unsigned int txqueue)1509b29bd412Slipeng static void hns_nic_net_timeout(struct net_device *ndev, unsigned int txqueue)
1510b29bd412Slipeng {
1511b29bd412Slipeng struct hns_nic_priv *priv = netdev_priv(ndev);
1512b29bd412Slipeng
1513b29bd412Slipeng if (ndev->watchdog_timeo < HNS_TX_TIMEO_LIMIT) {
1514b29bd412Slipeng ndev->watchdog_timeo *= 2;
1515b29bd412Slipeng netdev_info(ndev, "watchdog_timo changed to %d.\n",
1516b29bd412Slipeng ndev->watchdog_timeo);
1517b29bd412Slipeng } else {
1518b29bd412Slipeng ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
1519b29bd412Slipeng hns_tx_timeout_reset(priv);
1520b29bd412Slipeng }
1521b29bd412Slipeng }
1522b29bd412Slipeng
hns_nic_net_xmit(struct sk_buff * skb,struct net_device * ndev)1523b29bd412Slipeng static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
1524b29bd412Slipeng struct net_device *ndev)
1525b29bd412Slipeng {
1526b29bd412Slipeng struct hns_nic_priv *priv = netdev_priv(ndev);
1527b29bd412Slipeng
1528b29bd412Slipeng assert(skb->queue_mapping < priv->ae_handle->q_num);
1529b29bd412Slipeng
1530b29bd412Slipeng return hns_nic_net_xmit_hw(ndev, skb,
1531b29bd412Slipeng &tx_ring_data(priv, skb->queue_mapping));
1532b29bd412Slipeng }
1533b29bd412Slipeng
hns_nic_drop_rx_fetch(struct hns_nic_ring_data * ring_data,struct sk_buff * skb)1534b29bd412Slipeng static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
1535b29bd412Slipeng struct sk_buff *skb)
1536b29bd412Slipeng {
1537b29bd412Slipeng dev_kfree_skb_any(skb);
1538b29bd412Slipeng }
1539b29bd412Slipeng
1540b29bd412Slipeng #define HNS_LB_TX_RING 0
hns_assemble_skb(struct net_device * ndev)1541b29bd412Slipeng static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
1542b29bd412Slipeng {
1543b29bd412Slipeng struct sk_buff *skb;
1544b29bd412Slipeng struct ethhdr *ethhdr;
1545b29bd412Slipeng int frame_len;
1546b29bd412Slipeng
1547b29bd412Slipeng /* allocate test skb */
1548b29bd412Slipeng skb = alloc_skb(64, GFP_KERNEL);
1549b29bd412Slipeng if (!skb)
1550b29bd412Slipeng return NULL;
1551b29bd412Slipeng
1552b29bd412Slipeng skb_put(skb, 64);
1553b29bd412Slipeng skb->dev = ndev;
1554b29bd412Slipeng memset(skb->data, 0xFF, skb->len);
1555b29bd412Slipeng
1556b29bd412Slipeng /* must be a tcp/ip packet */
1557b29bd412Slipeng ethhdr = (struct ethhdr *)skb->data;
1558b29bd412Slipeng ethhdr->h_proto = htons(ETH_P_IP);
1559b29bd412Slipeng
1560b29bd412Slipeng frame_len = skb->len & (~1ul);
1561b29bd412Slipeng memset(&skb->data[frame_len / 2], 0xAA,
1562b29bd412Slipeng frame_len / 2 - 1);
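/* Resulting 64-byte test frame (illustrative summary of the fills above):
 * all bytes start as 0xFF, so the destination mac is the broadcast
 * address; the ethertype is ETH_P_IP; bytes 32..62 are then overwritten
 * with 0xAA, leaving the last byte 0xFF.
 */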
1563b29bd412Slipeng
1564b29bd412Slipeng skb->queue_mapping = HNS_LB_TX_RING;
1565b29bd412Slipeng
1566b29bd412Slipeng return skb;
1567b29bd412Slipeng }
1568b29bd412Slipeng
hns_enable_serdes_lb(struct net_device * ndev)1569b29bd412Slipeng static int hns_enable_serdes_lb(struct net_device *ndev)
1570b29bd412Slipeng {
1571b29bd412Slipeng struct hns_nic_priv *priv = netdev_priv(ndev);
1572b29bd412Slipeng struct hnae_handle *h = priv->ae_handle;
1573b29bd412Slipeng struct hnae_ae_ops *ops = h->dev->ops;
1574b29bd412Slipeng int speed, duplex;
1575b29bd412Slipeng int ret;
1576b29bd412Slipeng
1577b29bd412Slipeng ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
1578b29bd412Slipeng if (ret)
1579b29bd412Slipeng return ret;
1580b29bd412Slipeng
1581b29bd412Slipeng ret = ops->start ? ops->start(h) : 0;
1582b29bd412Slipeng if (ret)
1583b29bd412Slipeng return ret;
1584b29bd412Slipeng
1585b29bd412Slipeng /* adjust link speed and duplex */
1586b29bd412Slipeng if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1587b29bd412Slipeng speed = 1000;
1588b29bd412Slipeng else
1589b29bd412Slipeng speed = 10000;
1590b29bd412Slipeng duplex = 1;
1591b29bd412Slipeng
1592b29bd412Slipeng ops->adjust_link(h, speed, duplex);
1593c8ad0cf3SLu Wei
1594b29bd412Slipeng /* wait h/w ready */
1595b29bd412Slipeng mdelay(300);
1596b29bd412Slipeng
1597b29bd412Slipeng return 0;
1598b29bd412Slipeng }
1599b29bd412Slipeng
hns_disable_serdes_lb(struct net_device * ndev)1600b29bd412Slipeng static void hns_disable_serdes_lb(struct net_device *ndev)
1601b29bd412Slipeng {
1602b29bd412Slipeng struct hns_nic_priv *priv = netdev_priv(ndev);
1603b29bd412Slipeng struct hnae_handle *h = priv->ae_handle;
1604b29bd412Slipeng struct hnae_ae_ops *ops = h->dev->ops;
1605b29bd412Slipeng
1606b29bd412Slipeng ops->stop(h);
1607b29bd412Slipeng ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
1608b29bd412Slipeng }
1609b29bd412Slipeng
1610b29bd412Slipeng /**
1611b29bd412Slipeng *hns_nic_clear_all_rx_fetch - clear the descriptors the chip has already fetched. The
1612b29bd412Slipeng *function works as follows:
1613b29bd412Slipeng * 1. if one rx ring finds a desc whose page_offset is not equal to 0 between head
1614b29bd412Slipeng * and tail, it means that the chip fetched the wrong descs for the ring
1615b29bd412Slipeng * whose buffer size is 4096.
1616b29bd412Slipeng * 2. we set the chip serdes loopback and point the rss indirection at the ring.
1617b29bd412Slipeng * 3. construct 64-byte ip broadcast packets and wait for the associated rx ring
1618b29bd412Slipeng * to receive them all, so that the chip fetches new descriptors.
1619b29bd412Slipeng * 4. recover to the original state.
1620b29bd412Slipeng *
1621b29bd412Slipeng *@ndev: net device
1622c8ad0cf3SLu Wei */
hns_nic_clear_all_rx_fetch(struct net_device * ndev)1623b29bd412Slipeng static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
1624b29bd412Slipeng {
1625b29bd412Slipeng struct hns_nic_priv *priv = netdev_priv(ndev);
1626b29bd412Slipeng struct hnae_handle *h = priv->ae_handle;
1627b29bd412Slipeng struct hnae_ae_ops *ops = h->dev->ops;
1628b29bd412Slipeng struct hns_nic_ring_data *rd;
1629b29bd412Slipeng struct hnae_ring *ring;
1630b29bd412Slipeng struct sk_buff *skb;
1631b29bd412Slipeng u32 *org_indir;
1632b29bd412Slipeng u32 *cur_indir;
1633b29bd412Slipeng int indir_size;
1634b29bd412Slipeng int head, tail;
1635b29bd412Slipeng int fetch_num;
1636b29bd412Slipeng int i, j;
1637b29bd412Slipeng bool found;
1638b29bd412Slipeng int retry_times;
1639b29bd412Slipeng int ret = 0;
1640b29bd412Slipeng
1641b29bd412Slipeng /* alloc indir memory */
1642b29bd412Slipeng indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
1643b29bd412Slipeng org_indir = kzalloc(indir_size, GFP_KERNEL);
1644b29bd412Slipeng if (!org_indir)
1645b29bd412Slipeng return -ENOMEM;
1646b29bd412Slipeng
1647b29bd412Slipeng /* store the original indirection */
1648b29bd412Slipeng ops->get_rss(h, org_indir, NULL, NULL);
1649b29bd412Slipeng
1650b29bd412Slipeng cur_indir = kzalloc(indir_size, GFP_KERNEL);
1651b29bd412Slipeng if (!cur_indir) {
1652b29bd412Slipeng ret = -ENOMEM;
1653b29bd412Slipeng goto cur_indir_alloc_err;
1654b29bd412Slipeng }
1655b29bd412Slipeng
1656b29bd412Slipeng /* set loopback */
1657b29bd412Slipeng if (hns_enable_serdes_lb(ndev)) {
1658b29bd412Slipeng ret = -EINVAL;
1659b29bd412Slipeng goto enable_serdes_lb_err;
1660b29bd412Slipeng }
1661b29bd412Slipeng
1662b29bd412Slipeng /* for each rx ring, clear the fetched descs */
1663b29bd412Slipeng for (i = 0; i < h->q_num; i++) {
1664143c253fSJia-Ju Bai ring = &h->qs[i]->rx_ring;
1665143c253fSJia-Ju Bai head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
1666b29bd412Slipeng tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
1667143c253fSJia-Ju Bai found = false;
1668b29bd412Slipeng fetch_num = ring_dist(ring, head, tail);
1669b29bd412Slipeng
1670b29bd412Slipeng while (head != tail) {
1671b29bd412Slipeng if (ring->desc_cb[head].page_offset != 0) {
1672b29bd412Slipeng found = true;
1673b29bd412Slipeng break;
1674b29bd412Slipeng }
1675b29bd412Slipeng
1676b29bd412Slipeng head++;
1677b29bd412Slipeng if (head == ring->desc_num)
1678b29bd412Slipeng head = 0;
1679b29bd412Slipeng }
1680b29bd412Slipeng
1681b29bd412Slipeng if (found) {
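/* Added note: pointing every rss indirection entry at ring i steers
 * all looped-back test frames to this rx ring, so poll_one() below can
 * drain the descriptors the chip pre-fetched.
 */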
1682b29bd412Slipeng for (j = 0; j < indir_size / sizeof(*org_indir); j++)
1683b29bd412Slipeng cur_indir[j] = i;
1684b29bd412Slipeng ops->set_rss(h, cur_indir, NULL, 0);
1685b29bd412Slipeng
1686b29bd412Slipeng for (j = 0; j < fetch_num; j++) {
1687b29bd412Slipeng /* alloc one skb and init */
1688b29bd412Slipeng skb = hns_assemble_skb(ndev);
1689b29bd412Slipeng if (!skb) {
1690b29bd412Slipeng ret = -ENOMEM;
1691b29bd412Slipeng goto out;
1692b29bd412Slipeng }
1693b29bd412Slipeng rd = &tx_ring_data(priv, skb->queue_mapping);
1694b29bd412Slipeng hns_nic_net_xmit_hw(ndev, skb, rd);
1695b29bd412Slipeng
1696b29bd412Slipeng retry_times = 0;
1697b29bd412Slipeng while (retry_times++ < 10) {
1698b29bd412Slipeng mdelay(10);
1699b29bd412Slipeng /* clean rx */
1700b29bd412Slipeng rd = &rx_ring_data(priv, i);
1701b29bd412Slipeng if (rd->poll_one(rd, fetch_num,
1702b29bd412Slipeng hns_nic_drop_rx_fetch))
1703b29bd412Slipeng break;
1704b29bd412Slipeng }
1705b29bd412Slipeng
1706b5996f11Shuangdaode retry_times = 0;
1707b5996f11Shuangdaode while (retry_times++ < 10) {
1708b5996f11Shuangdaode mdelay(10);
1709b5996f11Shuangdaode /* clean tx ring 0 sent packets */
1710b29bd412Slipeng rd = &tx_ring_data(priv,
1711b5996f11Shuangdaode HNS_LB_TX_RING);
1712b5996f11Shuangdaode if (rd->poll_one(rd, fetch_num, NULL))
1713b29bd412Slipeng break;
1714b29bd412Slipeng }
1715b29bd412Slipeng }
1716b29bd412Slipeng }
1717b29bd412Slipeng }
1718b29bd412Slipeng
1719b29bd412Slipeng out:
1720b29bd412Slipeng /* restore everything */
1721b5996f11Shuangdaode ops->set_rss(h, org_indir, NULL, 0);
1722b5996f11Shuangdaode hns_disable_serdes_lb(ndev);
1723b5996f11Shuangdaode enable_serdes_lb_err:
1724b29bd412Slipeng kfree(cur_indir);
1725b5996f11Shuangdaode cur_indir_alloc_err:
1726b5996f11Shuangdaode kfree(org_indir);
1727b5996f11Shuangdaode
1728b5996f11Shuangdaode return ret;
1729b29bd412Slipeng }
1730b29bd412Slipeng
hns_nic_change_mtu(struct net_device * ndev,int new_mtu)1731b29bd412Slipeng static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
1732b29bd412Slipeng {
1733b29bd412Slipeng struct hns_nic_priv *priv = netdev_priv(ndev);
1734b29bd412Slipeng struct hnae_handle *h = priv->ae_handle;
1735b29bd412Slipeng bool if_running = netif_running(ndev);
1736b29bd412Slipeng int ret;
1737b29bd412Slipeng
1738b29bd412Slipeng /* MTU < 68 is an error and causes problems on some kernels */
1739b29bd412Slipeng if (new_mtu < 68)
1740b29bd412Slipeng return -EINVAL;
1741b29bd412Slipeng
1742b29bd412Slipeng /* MTU no change */
1743b29bd412Slipeng if (new_mtu == ndev->mtu)
1744b29bd412Slipeng return 0;
1745b29bd412Slipeng
1746b29bd412Slipeng if (!h->dev->ops->set_mtu)
1747b29bd412Slipeng return -ENOTSUPP;
1748b29bd412Slipeng
1749b29bd412Slipeng if (if_running) {
1750b29bd412Slipeng (void)hns_nic_net_stop(ndev);
1751b29bd412Slipeng msleep(100);
1752b29bd412Slipeng }
1753b29bd412Slipeng
1754b29bd412Slipeng if (priv->enet_ver != AE_VERSION_1 &&
1755b5996f11Shuangdaode ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
1756b5996f11Shuangdaode new_mtu > BD_SIZE_2048_MAX_MTU) {
1757b29bd412Slipeng /* update desc */
1758b29bd412Slipeng hnae_reinit_all_ring_desc(h);
1759b29bd412Slipeng
1760b29bd412Slipeng /* clear the descs which the chip has already fetched */
1761b29bd412Slipeng ret = hns_nic_clear_all_rx_fetch(ndev);
1762b29bd412Slipeng
1763b29bd412Slipeng /* the page offset must be consistent with the desc */
1764b29bd412Slipeng hnae_reinit_all_ring_page_off(h);
1765b5996f11Shuangdaode
1766b5996f11Shuangdaode if (ret) {
1767b5996f11Shuangdaode netdev_err(ndev, "clear the fetched desc fail\n");
176838f616daSSalil goto out;
176938f616daSSalil }
177038f616daSSalil }
177138f616daSSalil
177238f616daSSalil ret = h->dev->ops->set_mtu(h, new_mtu);
177338f616daSSalil if (ret) {
177438f616daSSalil netdev_err(ndev, "set mtu fail, return value %d\n",
177538f616daSSalil ret);
177638f616daSSalil goto out;
177738f616daSSalil }
177838f616daSSalil
177938f616daSSalil /* finally, set new mtu to netdevice */
178038f616daSSalil WRITE_ONCE(ndev->mtu, new_mtu);
178138f616daSSalil
178238f616daSSalil out:
1783ee8b7a11SJakub Kicinski if (if_running) {
178438f616daSSalil if (hns_nic_net_open(ndev)) {
178538f616daSSalil netdev_err(ndev, "hns net open fail\n");
178638f616daSSalil ret = -EINVAL;
178738f616daSSalil }
178838f616daSSalil }
178938f616daSSalil
179038f616daSSalil return ret;
179138f616daSSalil }
179238f616daSSalil
hns_nic_set_features(struct net_device * netdev,netdev_features_t features)179338f616daSSalil static int hns_nic_set_features(struct net_device *netdev,
179438f616daSSalil netdev_features_t features)
179538f616daSSalil {
179638f616daSSalil struct hns_nic_priv *priv = netdev_priv(netdev);
179738f616daSSalil
179838f616daSSalil switch (priv->enet_ver) {
179938f616daSSalil case AE_VERSION_1:
180038f616daSSalil if (features & (NETIF_F_TSO | NETIF_F_TSO6))
180138f616daSSalil netdev_info(netdev, "enet v1 do not support tso!\n");
180238f616daSSalil break;
180338f616daSSalil default:
180438f616daSSalil break;
180538f616daSSalil }
180638f616daSSalil netdev->features = features;
180738f616daSSalil return 0;
180838f616daSSalil }
180938f616daSSalil
hns_nic_fix_features(struct net_device * netdev,netdev_features_t features)181066355f52SKejian Yan static netdev_features_t hns_nic_fix_features(
181166355f52SKejian Yan struct net_device *netdev, netdev_features_t features)
181266355f52SKejian Yan {
181366355f52SKejian Yan struct hns_nic_priv *priv = netdev_priv(netdev);
181466355f52SKejian Yan
181566355f52SKejian Yan switch (priv->enet_ver) {
181666355f52SKejian Yan case AE_VERSION_1:
181766355f52SKejian Yan features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
181866355f52SKejian Yan NETIF_F_HW_VLAN_CTAG_FILTER);
181966355f52SKejian Yan break;
182066355f52SKejian Yan default:
182166355f52SKejian Yan break;
182266355f52SKejian Yan }
182366355f52SKejian Yan return features;
182466355f52SKejian Yan }
182566355f52SKejian Yan
hns_nic_uc_sync(struct net_device * netdev,const unsigned char * addr)182666355f52SKejian Yan static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
182766355f52SKejian Yan {
182866355f52SKejian Yan struct hns_nic_priv *priv = netdev_priv(netdev);
182966355f52SKejian Yan struct hnae_handle *h = priv->ae_handle;
183066355f52SKejian Yan
183166355f52SKejian Yan if (h->dev->ops->add_uc_addr)
183266355f52SKejian Yan return h->dev->ops->add_uc_addr(h, addr);
1833b5996f11Shuangdaode
1834d0ea5cbdSJesse Brandeburg return 0;
183543ec0f07SWang Hai }
1836b5996f11Shuangdaode
hns_nic_uc_unsync(struct net_device * netdev,const unsigned char * addr)1837b5996f11Shuangdaode static int hns_nic_uc_unsync(struct net_device *netdev,
1838b5996f11Shuangdaode const unsigned char *addr)
1839336a443bSYueHaibing {
1840b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(netdev);
1841b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1842b5996f11Shuangdaode
1843b5996f11Shuangdaode if (h->dev->ops->rm_uc_addr)
1844b5996f11Shuangdaode return h->dev->ops->rm_uc_addr(h, addr);
1845b5996f11Shuangdaode
1846b5996f11Shuangdaode return 0;
1847b5996f11Shuangdaode }
1848b5996f11Shuangdaode
1849b5996f11Shuangdaode /**
1850ec2cafe6SKejian Yan * hns_set_multicast_list - set multicast mac address
1851ec2cafe6SKejian Yan * @ndev: net device
1852ec2cafe6SKejian Yan *
1853ec2cafe6SKejian Yan * return void
1854b5996f11Shuangdaode */
hns_set_multicast_list(struct net_device * ndev)1855b5996f11Shuangdaode static void hns_set_multicast_list(struct net_device *ndev)
1856b5996f11Shuangdaode {
1857b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1858b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1859b5996f11Shuangdaode struct netdev_hw_addr *ha = NULL;
1860b5996f11Shuangdaode
1861336a443bSYueHaibing if (!h) {
18624568637fSyankejian netdev_err(ndev, "hnae handle is null\n");
18634568637fSyankejian return;
18644568637fSyankejian }
18654568637fSyankejian
18664568637fSyankejian if (h->dev->ops->clr_mc_addr)
18674568637fSyankejian if (h->dev->ops->clr_mc_addr(h))
18684568637fSyankejian netdev_err(ndev, "clear multicast address fail\n");
18694568637fSyankejian
18704568637fSyankejian if (h->dev->ops->set_mc_addr) {
18714568637fSyankejian netdev_for_each_mc_addr(ha, ndev)
18724568637fSyankejian if (h->dev->ops->set_mc_addr(h, ha->addr))
18734568637fSyankejian netdev_err(ndev, "set multicast fail\n");
187466355f52SKejian Yan }
187566355f52SKejian Yan }
187666355f52SKejian Yan
hns_nic_set_rx_mode(struct net_device * ndev)18774568637fSyankejian static void hns_nic_set_rx_mode(struct net_device *ndev)
18784568637fSyankejian {
1879bc1f4470Sstephen hemminger struct hns_nic_priv *priv = netdev_priv(ndev);
1880b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1881b5996f11Shuangdaode
18827f8bcd91SYonglong Liu if (h->dev->ops->set_promisc_mode) {
1883b5996f11Shuangdaode if (ndev->flags & IFF_PROMISC)
1884b5996f11Shuangdaode h->dev->ops->set_promisc_mode(h, 1);
1885b5996f11Shuangdaode else
1886b5996f11Shuangdaode h->dev->ops->set_promisc_mode(h, 0);
1887b5996f11Shuangdaode }
1888b5996f11Shuangdaode
1889b5996f11Shuangdaode hns_set_multicast_list(ndev);
1890b5996f11Shuangdaode
1891b5996f11Shuangdaode if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
1892b5996f11Shuangdaode netdev_err(ndev, "sync uc address fail\n");
1893b5996f11Shuangdaode }
1894b5996f11Shuangdaode
hns_nic_get_stats64(struct net_device * ndev,struct rtnl_link_stats64 * stats)1895b5996f11Shuangdaode static void hns_nic_get_stats64(struct net_device *ndev,
1896b5996f11Shuangdaode struct rtnl_link_stats64 *stats)
1897b5996f11Shuangdaode {
1898b5996f11Shuangdaode int idx;
1899b5996f11Shuangdaode u64 tx_bytes = 0;
1900b5996f11Shuangdaode u64 rx_bytes = 0;
1901b5996f11Shuangdaode u64 tx_pkts = 0;
1902b5996f11Shuangdaode u64 rx_pkts = 0;
1903b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1904b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1905b5996f11Shuangdaode
1906b5996f11Shuangdaode for (idx = 0; idx < h->q_num; idx++) {
1907b5996f11Shuangdaode tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
1908b5996f11Shuangdaode tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
1909b5996f11Shuangdaode rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
1910b5996f11Shuangdaode rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
1911b5996f11Shuangdaode }
1912b5996f11Shuangdaode
1913b5996f11Shuangdaode stats->tx_bytes = tx_bytes;
1914b5996f11Shuangdaode stats->tx_packets = tx_pkts;
1915b5996f11Shuangdaode stats->rx_bytes = rx_bytes;
1916b5996f11Shuangdaode stats->rx_packets = rx_pkts;
1917b5996f11Shuangdaode
1918b5996f11Shuangdaode stats->rx_errors = ndev->stats.rx_errors;
1919b5996f11Shuangdaode stats->multicast = ndev->stats.multicast;
1920b5996f11Shuangdaode stats->rx_length_errors = ndev->stats.rx_length_errors;
1921b5996f11Shuangdaode stats->rx_crc_errors = ndev->stats.rx_crc_errors;
1922b5996f11Shuangdaode stats->rx_missed_errors = ndev->stats.rx_missed_errors;
1923b5996f11Shuangdaode
19242162a4a1SDaode Huang stats->tx_errors = ndev->stats.tx_errors;
19252162a4a1SDaode Huang stats->rx_dropped = ndev->stats.rx_dropped;
1926a350ecceSPaolo Abeni stats->tx_dropped = ndev->stats.tx_dropped;
19272162a4a1SDaode Huang stats->collisions = ndev->stats.collisions;
19282162a4a1SDaode Huang stats->rx_over_errors = ndev->stats.rx_over_errors;
19292162a4a1SDaode Huang stats->rx_frame_errors = ndev->stats.rx_frame_errors;
19302162a4a1SDaode Huang stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
19312162a4a1SDaode Huang stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
19322162a4a1SDaode Huang stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
19332162a4a1SDaode Huang stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
19342162a4a1SDaode Huang stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
19352162a4a1SDaode Huang stats->tx_window_errors = ndev->stats.tx_window_errors;
1936a350ecceSPaolo Abeni stats->rx_compressed = ndev->stats.rx_compressed;
19372162a4a1SDaode Huang stats->tx_compressed = ndev->stats.tx_compressed;
19382162a4a1SDaode Huang }
1939b5996f11Shuangdaode
1940b5996f11Shuangdaode static u16
hns_nic_select_queue(struct net_device * ndev,struct sk_buff * skb,struct net_device * sb_dev)1941b5996f11Shuangdaode hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
1942b5996f11Shuangdaode struct net_device *sb_dev)
1943b5996f11Shuangdaode {
1944b5996f11Shuangdaode struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
1945b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
1946a7605370SArnd Bergmann
194738f616daSSalil /* fix hardware broadcast/multicast packets queue loopback */
194838f616daSSalil if (!AE_IS_VER1(priv->enet_ver) &&
1949b5996f11Shuangdaode is_multicast_ether_addr(eth_hdr->h_dest))
19504568637fSyankejian return 0;
19512162a4a1SDaode Huang else
1952b5996f11Shuangdaode return netdev_pick_tx(ndev, skb, NULL);
1953b5996f11Shuangdaode }
1954b5996f11Shuangdaode
1955b5996f11Shuangdaode static const struct net_device_ops hns_nic_netdev_ops = {
1956b5996f11Shuangdaode .ndo_open = hns_nic_net_open,
1957b5996f11Shuangdaode .ndo_stop = hns_nic_net_stop,
1958b5996f11Shuangdaode .ndo_start_xmit = hns_nic_net_xmit,
1959b5996f11Shuangdaode .ndo_tx_timeout = hns_nic_net_timeout,
1960bb7189dcSQianqian Xie .ndo_set_mac_address = hns_nic_net_set_mac_address,
1961bb7189dcSQianqian Xie .ndo_change_mtu = hns_nic_change_mtu,
1962bb7189dcSQianqian Xie .ndo_eth_ioctl = phy_do_ioctl_running,
1963b5996f11Shuangdaode .ndo_set_features = hns_nic_set_features,
1964bb7189dcSQianqian Xie .ndo_fix_features = hns_nic_fix_features,
1965b5996f11Shuangdaode .ndo_get_stats64 = hns_nic_get_stats64,
1966bb7189dcSQianqian Xie .ndo_set_rx_mode = hns_nic_set_rx_mode,
1967b5996f11Shuangdaode .ndo_select_queue = hns_nic_select_queue,
1968b5996f11Shuangdaode };
1969b5996f11Shuangdaode
hns_nic_update_link_status(struct net_device * netdev)1970b5996f11Shuangdaode static void hns_nic_update_link_status(struct net_device *netdev)
1971b5996f11Shuangdaode {
1972b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(netdev);
1973b5996f11Shuangdaode
1974b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1975b5996f11Shuangdaode
1976b5996f11Shuangdaode if (h->phy_dev) {
1977b5996f11Shuangdaode if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
1978b5996f11Shuangdaode return;
1979b5996f11Shuangdaode
1980b5996f11Shuangdaode (void)genphy_read_status(h->phy_dev);
1981b5996f11Shuangdaode }
1982b5996f11Shuangdaode hns_nic_adjust_link(netdev);
1983b5996f11Shuangdaode }
1984b5996f11Shuangdaode
1985b5996f11Shuangdaode /* for dumping key regs */
hns_nic_dump(struct hns_nic_priv * priv)1986b5996f11Shuangdaode static void hns_nic_dump(struct hns_nic_priv *priv)
1987b5996f11Shuangdaode {
1988b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
1989b5996f11Shuangdaode struct hnae_ae_ops *ops = h->dev->ops;
1990b5996f11Shuangdaode u32 *data, reg_num, i;
1991b5996f11Shuangdaode
1992b5996f11Shuangdaode if (ops->get_regs_len && ops->get_regs) {
1993b5996f11Shuangdaode reg_num = ops->get_regs_len(priv->ae_handle);
1994b5996f11Shuangdaode reg_num = (reg_num + 3ul) & ~3ul;
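/* Added note: reg_num is rounded up to a multiple of 4 so the
 * four-words-per-line dump below never reads past the buffer.
 */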
1995b5996f11Shuangdaode data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
1996b5996f11Shuangdaode if (data) {
1997b5996f11Shuangdaode ops->get_regs(priv->ae_handle, data);
1998b5996f11Shuangdaode for (i = 0; i < reg_num; i += 4)
1999b5996f11Shuangdaode pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
2000b5996f11Shuangdaode i, data[i], data[i + 1],
2001b5996f11Shuangdaode data[i + 2], data[i + 3]);
2002f7211729SQianqian Xie kfree(data);
2003b5996f11Shuangdaode }
2004b5996f11Shuangdaode }
2005b5996f11Shuangdaode
2006b5996f11Shuangdaode for (i = 0; i < h->q_num; i++) {
2007b5996f11Shuangdaode pr_info("tx_queue%d_next_to_clean:%d\n",
2008b5996f11Shuangdaode i, h->qs[i]->tx_ring.next_to_clean);
2009b5996f11Shuangdaode pr_info("tx_queue%d_next_to_use:%d\n",
2010b5996f11Shuangdaode i, h->qs[i]->tx_ring.next_to_use);
2011b5996f11Shuangdaode pr_info("rx_queue%d_next_to_clean:%d\n",
2012b5996f11Shuangdaode i, h->qs[i]->rx_ring.next_to_clean);
2013b5996f11Shuangdaode pr_info("rx_queue%d_next_to_use:%d\n",
2014b5996f11Shuangdaode i, h->qs[i]->rx_ring.next_to_use);
2015b5996f11Shuangdaode }
2016b5996f11Shuangdaode }
2017b5996f11Shuangdaode
201813ac695eSSalil /* for resetting subtask */
hns_nic_reset_subtask(struct hns_nic_priv * priv)201913ac695eSSalil static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
2020b5996f11Shuangdaode {
2021b5996f11Shuangdaode enum hnae_port_type type = priv->ae_handle->port_type;
202290a505b9Slisheng
2023860e9538SFlorian Westphal if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
2024b5996f11Shuangdaode return;
202576b825abSLin Yun Sheng clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2026b5996f11Shuangdaode
2027b5996f11Shuangdaode /* If we're already down, removing or resetting, just bail */
2028b5996f11Shuangdaode if (test_bit(NIC_STATE_DOWN, &priv->state) ||
2029b5996f11Shuangdaode test_bit(NIC_STATE_REMOVING, &priv->state) ||
2030b5996f11Shuangdaode test_bit(NIC_STATE_RESETTING, &priv->state))
2031b5996f11Shuangdaode return;
203213ac695eSSalil
2033b4957ab0SSalil hns_nic_dump(priv);
2034b5996f11Shuangdaode netdev_info(priv->netdev, "try to reset %s port!\n",
2035b5996f11Shuangdaode (type == HNAE_PORT_DEBUG ? "debug" : "service"));
2036b5996f11Shuangdaode
2037b5996f11Shuangdaode rtnl_lock();
2038b5996f11Shuangdaode /* put off any impending NetWatchDogTimeout */
2039b5996f11Shuangdaode netif_trans_update(priv->netdev);
2040b5996f11Shuangdaode hns_nic_net_reinit(priv->netdev);
2041b5996f11Shuangdaode
2042b5996f11Shuangdaode rtnl_unlock();
2043b5996f11Shuangdaode }
2044a57275d3SYonglong Liu
2045b5996f11Shuangdaode /* for signalling that the service event is complete */
hns_nic_service_event_complete(struct hns_nic_priv * priv)2046b5996f11Shuangdaode static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
2047b5996f11Shuangdaode {
2048b5996f11Shuangdaode WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));
2049b5996f11Shuangdaode /* make sure to commit the things */
2050b5996f11Shuangdaode smp_mb__before_atomic();
2051b5996f11Shuangdaode clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
2052b5996f11Shuangdaode }
2053b5996f11Shuangdaode
hns_nic_service_task(struct work_struct * work)2054b5996f11Shuangdaode static void hns_nic_service_task(struct work_struct *work)
2055b5996f11Shuangdaode {
2056b5996f11Shuangdaode struct hns_nic_priv *priv
2057b5996f11Shuangdaode = container_of(work, struct hns_nic_priv, service_task);
2058b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
2059b5996f11Shuangdaode
2060d039ef68SKees Cook hns_nic_reset_subtask(priv);
2061b5996f11Shuangdaode hns_nic_update_link_status(priv->netdev);
2062d039ef68SKees Cook h->dev->ops->update_led_status(h);
2063b5996f11Shuangdaode hns_nic_update_stats(priv->netdev);
2064b5996f11Shuangdaode
2065b5996f11Shuangdaode hns_nic_service_event_complete(priv);
2066b5996f11Shuangdaode }
2067b5996f11Shuangdaode
hns_nic_task_schedule(struct hns_nic_priv * priv)2068b5996f11Shuangdaode static void hns_nic_task_schedule(struct hns_nic_priv *priv)
2069b5996f11Shuangdaode {
2070b5996f11Shuangdaode if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
2071b5996f11Shuangdaode !test_bit(NIC_STATE_REMOVING, &priv->state) &&
2072b5996f11Shuangdaode !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
2073b5996f11Shuangdaode (void)schedule_work(&priv->service_task);
2074b5996f11Shuangdaode }
2075b5996f11Shuangdaode
hns_nic_service_timer(struct timer_list * t)2076b5996f11Shuangdaode static void hns_nic_service_timer(struct timer_list *t)
2077b5996f11Shuangdaode {
2078b5996f11Shuangdaode struct hns_nic_priv *priv = timer_container_of(priv, t, service_timer);
2079b5996f11Shuangdaode
2080b5996f11Shuangdaode (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);
2081b5996f11Shuangdaode
2082b5996f11Shuangdaode hns_nic_task_schedule(priv);
2083b5996f11Shuangdaode }
2084b5996f11Shuangdaode
2085b5996f11Shuangdaode /**
2086b5996f11Shuangdaode * hns_tx_timeout_reset - initiate reset due to Tx timeout
2087b5996f11Shuangdaode * @priv: driver private struct
2088b5996f11Shuangdaode **/
hns_tx_timeout_reset(struct hns_nic_priv * priv)2089b5996f11Shuangdaode static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
20904b34aa41SSheng Li {
2091b5996f11Shuangdaode /* Do the reset outside of interrupt context */
2092b5996f11Shuangdaode if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
2093b5996f11Shuangdaode set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
2094b5996f11Shuangdaode netdev_warn(priv->netdev,
2095b5996f11Shuangdaode "initiating reset due to tx timeout(%llu,0x%lx)\n",
2096b5996f11Shuangdaode priv->tx_timeout_count, priv->state);
2097b5996f11Shuangdaode priv->tx_timeout_count++;
20986396bb22SKees Cook hns_nic_task_schedule(priv);
20996396bb22SKees Cook }
2100b5996f11Shuangdaode }
2101b5996f11Shuangdaode
hns_nic_init_ring_data(struct hns_nic_priv * priv)2102b5996f11Shuangdaode static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
2103b5996f11Shuangdaode {
2104b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
2105b5996f11Shuangdaode struct hns_nic_ring_data *rd;
2106b5996f11Shuangdaode bool is_ver1 = AE_IS_VER1(priv->enet_ver);
2107b5996f11Shuangdaode int i;
2108b5996f11Shuangdaode
2109cee5add4SDaode Huang if (h->q_num > NIC_MAX_Q_PER_VF) {
2110cee5add4SDaode Huang netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
2111b5996f11Shuangdaode return -EINVAL;
2112*b48b89f9SJakub Kicinski }
2113b5996f11Shuangdaode
2114b5996f11Shuangdaode priv->ring_data = kzalloc(array3_size(h->q_num,
2115b5996f11Shuangdaode sizeof(*priv->ring_data), 2),
2116b5996f11Shuangdaode GFP_KERNEL);
2117b5996f11Shuangdaode if (!priv->ring_data)
2118b5996f11Shuangdaode return -ENOMEM;
2119b5996f11Shuangdaode
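/* Layout set up below: ring_data[0..q_num-1] hold the tx rings and
 * ring_data[q_num..2*q_num-1] the rx rings; each entry gets its own
 * NAPI context.
 */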
2120b5996f11Shuangdaode for (i = 0; i < h->q_num; i++) {
2121cee5add4SDaode Huang rd = &priv->ring_data[i];
2122cee5add4SDaode Huang rd->queue_index = i;
2123b5996f11Shuangdaode rd->ring = &h->qs[i]->tx_ring;
2124*b48b89f9SJakub Kicinski rd->poll_one = hns_nic_tx_poll_one;
2125b5996f11Shuangdaode rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
2126b5996f11Shuangdaode hns_nic_tx_fini_pro_v2;
2127b5996f11Shuangdaode
2128b5996f11Shuangdaode netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
2129b5996f11Shuangdaode rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2130b5996f11Shuangdaode }
2131b5996f11Shuangdaode for (i = h->q_num; i < h->q_num * 2; i++) {
2132b5996f11Shuangdaode rd = &priv->ring_data[i];
2133b5996f11Shuangdaode rd->queue_index = i - h->q_num;
2134b5996f11Shuangdaode rd->ring = &h->qs[i - h->q_num]->rx_ring;
2135b5996f11Shuangdaode rd->poll_one = hns_nic_rx_poll_one;
2136b5996f11Shuangdaode rd->ex_process = hns_nic_rx_up_pro;
2137b5996f11Shuangdaode rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
2138b5996f11Shuangdaode hns_nic_rx_fini_pro_v2;
213913ac695eSSalil
214013ac695eSSalil netif_napi_add(priv->netdev, &rd->napi, hns_nic_common_poll);
2141b5996f11Shuangdaode rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
2142b5996f11Shuangdaode }
2143b5996f11Shuangdaode
2144b5996f11Shuangdaode return 0;
2145b5996f11Shuangdaode }
2146b5996f11Shuangdaode
hns_nic_uninit_ring_data(struct hns_nic_priv * priv)2147b5996f11Shuangdaode static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
2148b5996f11Shuangdaode {
2149b5996f11Shuangdaode struct hnae_handle *h = priv->ae_handle;
2150b5996f11Shuangdaode int i;
215113ac695eSSalil
215213ac695eSSalil for (i = 0; i < h->q_num * 2; i++) {
215313ac695eSSalil netif_napi_del(&priv->ring_data[i].napi);
215464353af6SSalil if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
215513ac695eSSalil (void)irq_set_affinity_hint(
215613ac695eSSalil priv->ring_data[i].ring->irq,
215713ac695eSSalil NULL);
215813ac695eSSalil free_irq(priv->ring_data[i].ring->irq,
215913ac695eSSalil &priv->ring_data[i]);
216013ac695eSSalil }
216113ac695eSSalil
216264353af6SSalil priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
216364353af6SSalil }
216464353af6SSalil kfree(priv->ring_data);
216564353af6SSalil }
216664353af6SSalil
hns_nic_set_priv_ops(struct net_device * netdev)2167ee8b7a11SJakub Kicinski static void hns_nic_set_priv_ops(struct net_device *netdev)
216864353af6SSalil {
216913ac695eSSalil struct hns_nic_priv *priv = netdev_priv(netdev);
217013ac695eSSalil struct hnae_handle *h = priv->ae_handle;
217113ac695eSSalil
21726fe27464SDaode Huang if (AE_IS_VER1(priv->enet_ver)) {
21736fe27464SDaode Huang priv->ops.fill_desc = fill_desc;
21746fe27464SDaode Huang priv->ops.get_rxd_bnum = get_rx_desc_bnum;
21756fe27464SDaode Huang priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
217613ac695eSSalil } else {
217764353af6SSalil priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
217813ac695eSSalil priv->ops.fill_desc = fill_desc_v2;
2179b5996f11Shuangdaode priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx_v2;
2180b5996f11Shuangdaode netif_set_tso_max_size(netdev, 7 * 4096);
2181b5996f11Shuangdaode /* enable tso at init time;
2182b5996f11Shuangdaode * control tso on/off through the TSE bit in the bd
2183b5996f11Shuangdaode */
2184b5996f11Shuangdaode h->dev->ops->set_tso_stats(h, 1);
2185b5996f11Shuangdaode }
21867b2acae6SKejian Yan }
2187b5996f11Shuangdaode
hns_nic_try_get_ae(struct net_device * ndev)2188daa8cfd9SKejian Yan static int hns_nic_try_get_ae(struct net_device *ndev)
2189b5996f11Shuangdaode {
2190b5996f11Shuangdaode struct hns_nic_priv *priv = netdev_priv(ndev);
2191b5996f11Shuangdaode struct hnae_handle *h;
2192b5996f11Shuangdaode int ret;
2193b5996f11Shuangdaode
2194b5996f11Shuangdaode h = hnae_get_handle(&priv->netdev->dev,
2195b5996f11Shuangdaode priv->fwnode, priv->port_id, NULL);
2196b5996f11Shuangdaode if (IS_ERR_OR_NULL(h)) {
2197b5996f11Shuangdaode ret = -ENODEV;
2198b5996f11Shuangdaode dev_dbg(priv->dev, "has not handle, register notifier!\n");
2199b5996f11Shuangdaode goto out;
2200b5996f11Shuangdaode }
2201b5996f11Shuangdaode priv->ae_handle = h;
2202b5996f11Shuangdaode
2203b5996f11Shuangdaode ret = hns_nic_init_phy(ndev, h);
2204b5996f11Shuangdaode if (ret) {
2205b5996f11Shuangdaode dev_err(priv->dev, "probe phy device fail!\n");
220613ac695eSSalil goto out_init_phy;
220713ac695eSSalil }
2208b5996f11Shuangdaode
2209b5996f11Shuangdaode ret = hns_nic_init_ring_data(priv);
2210b5996f11Shuangdaode if (ret) {
2211b5996f11Shuangdaode ret = -ENOMEM;
2212b5996f11Shuangdaode goto out_init_ring_data;
2213b5996f11Shuangdaode }
2214b5996f11Shuangdaode
2215b5996f11Shuangdaode hns_nic_set_priv_ops(ndev);
2216b5996f11Shuangdaode
2217b5996f11Shuangdaode ret = register_netdev(ndev);
2218b5996f11Shuangdaode if (ret) {
2219b5996f11Shuangdaode dev_err(priv->dev, "probe register netdev fail!\n");
2220b5996f11Shuangdaode goto out_reg_ndev_fail;
2221b5996f11Shuangdaode }
2222b5996f11Shuangdaode return 0;
2223b5996f11Shuangdaode
2224b5996f11Shuangdaode out_reg_ndev_fail:
2225b5996f11Shuangdaode hns_nic_uninit_ring_data(priv);
2226b5996f11Shuangdaode priv->ring_data = NULL;
2227b5996f11Shuangdaode out_init_phy:
2228b5996f11Shuangdaode out_init_ring_data:
2229b5996f11Shuangdaode hnae_put_handle(priv->ae_handle);
2230b5996f11Shuangdaode priv->ae_handle = NULL;
2231b5996f11Shuangdaode out:
2232b5996f11Shuangdaode return ret;
2233b5996f11Shuangdaode }
2234b5996f11Shuangdaode
hns_nic_notifier_action(struct notifier_block * nb,unsigned long action,void * data)2235b5996f11Shuangdaode static int hns_nic_notifier_action(struct notifier_block *nb,
2236b5996f11Shuangdaode unsigned long action, void *data)
2237b5996f11Shuangdaode {
2238b5996f11Shuangdaode struct hns_nic_priv *priv =
2239b5996f11Shuangdaode container_of(nb, struct hns_nic_priv, notifier_block);
2240b5996f11Shuangdaode
2241b5996f11Shuangdaode assert(action == HNAE_AE_REGISTER);
2242b5996f11Shuangdaode
2243b5996f11Shuangdaode if (!hns_nic_try_get_ae(priv->netdev)) {
2244b5996f11Shuangdaode hnae_unregister_notifier(&priv->notifier_block);
2245b5996f11Shuangdaode priv->notifier_block.notifier_call = NULL;
2246406adee9SYisen.Zhuang\(Zhuangyuzeng\) }
2247b5996f11Shuangdaode return 0;
2248b5996f11Shuangdaode }
2249b5996f11Shuangdaode
hns_nic_dev_probe(struct platform_device * pdev)2250b5996f11Shuangdaode static int hns_nic_dev_probe(struct platform_device *pdev)
2251b5996f11Shuangdaode {
2252b5996f11Shuangdaode struct device *dev = &pdev->dev;
2253b5996f11Shuangdaode struct net_device *ndev;
2254b5996f11Shuangdaode struct hns_nic_priv *priv;
2255b5996f11Shuangdaode u32 port_id;
2256b5996f11Shuangdaode int ret;
2257b5996f11Shuangdaode
2258b5996f11Shuangdaode ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
225963434888SKejian Yan if (!ndev)
226063434888SKejian Yan return -ENOMEM;
226163434888SKejian Yan
226263434888SKejian Yan platform_set_drvdata(pdev, ndev);
226363434888SKejian Yan
2264b5996f11Shuangdaode priv = netdev_priv(ndev);
226513ac695eSSalil priv->dev = dev;
226613ac695eSSalil priv->netdev = ndev;
2267b5996f11Shuangdaode
22687b2acae6SKejian Yan if (dev_of_node(dev)) {
2269d2083d0eSPan Bian struct device_node *ae_node;
2270d2083d0eSPan Bian
227148189d6aSyankejian if (of_device_is_compatible(dev->of_node,
227248189d6aSyankejian "hisilicon,hns-nic-v1"))
227348189d6aSyankejian priv->enet_ver = AE_VERSION_1;
22747b2acae6SKejian Yan else
227563434888SKejian Yan priv->enet_ver = AE_VERSION_2;
2276977d5ad3SSakari Ailus
227763434888SKejian Yan ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
227863434888SKejian Yan if (!ae_node) {
227963434888SKejian Yan ret = -ENODEV;
228063434888SKejian Yan dev_err(dev, "not find ae-handle\n");
228163434888SKejian Yan goto out_read_prop_fail;
2282100e3345SDinghao Liu }
2283100e3345SDinghao Liu priv->fwnode = &ae_node->fwnode;
2284100e3345SDinghao Liu } else if (is_acpi_node(dev->fwnode)) {
2285100e3345SDinghao Liu struct fwnode_reference_args args;
22867b2acae6SKejian Yan
2287406adee9SYisen.Zhuang\(Zhuangyuzeng\) if (acpi_dev_found(hns_enet_acpi_match[0].id))
228863434888SKejian Yan priv->enet_ver = AE_VERSION_1;
228963434888SKejian Yan else if (acpi_dev_found(hns_enet_acpi_match[1].id))
229063434888SKejian Yan priv->enet_ver = AE_VERSION_2;
229163434888SKejian Yan else {
229263434888SKejian Yan ret = -ENXIO;
229363434888SKejian Yan goto out_read_prop_fail;
2294977d5ad3SSakari Ailus }
2295977d5ad3SSakari Ailus
2296977d5ad3SSakari Ailus /* try to find port-idx-in-ae first */
2297977d5ad3SSakari Ailus ret = acpi_node_get_property_reference(dev->fwnode,
2298977d5ad3SSakari Ailus "ae-handle", 0, &args);
229963434888SKejian Yan if (ret) {
230063434888SKejian Yan dev_err(dev, "not find ae-handle\n");
2301100e3345SDinghao Liu goto out_read_prop_fail;
2302100e3345SDinghao Liu }
230363434888SKejian Yan if (!is_acpi_device_node(args.fwnode)) {
230463434888SKejian Yan ret = -EINVAL;
23056162928cSKejian Yan goto out_read_prop_fail;
2306406adee9SYisen.Zhuang\(Zhuangyuzeng\) }
2307406adee9SYisen.Zhuang\(Zhuangyuzeng\) priv->fwnode = args.fwnode;
23086162928cSKejian Yan } else {
2309b5996f11Shuangdaode dev_err(dev, "cannot read cfg data from OF or acpi\n");
231048189d6aSyankejian ret = -ENXIO;
2311406adee9SYisen.Zhuang\(Zhuangyuzeng\) goto out_read_prop_fail;
2312406adee9SYisen.Zhuang\(Zhuangyuzeng\) }
2313406adee9SYisen.Zhuang\(Zhuangyuzeng\)
2314406adee9SYisen.Zhuang\(Zhuangyuzeng\) ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
2315406adee9SYisen.Zhuang\(Zhuangyuzeng\) if (ret) {
2316b5996f11Shuangdaode /* only for compatibility with old code */
2317b5996f11Shuangdaode ret = device_property_read_u32(dev, "port-id", &port_id);
2318b5996f11Shuangdaode if (ret)
2319b5996f11Shuangdaode goto out_read_prop_fail;
2320b5996f11Shuangdaode /* for old dts, we need to calculate the port offset */
2321b5996f11Shuangdaode port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
2322b5996f11Shuangdaode : port_id - HNS_SRV_OFFSET;
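/* Added note: with the old binding, a debug port id (below
 * HNS_SRV_OFFSET) is shifted up by HNS_DEBUG_OFFSET and a service port
 * id is shifted down by HNS_SRV_OFFSET, so both land in the new
 * port-idx-in-ae numbering.
 */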
232313ac695eSSalil }
2324b5996f11Shuangdaode priv->port_id = port_id;
2325b5996f11Shuangdaode
	hns_init_mac_addr(ndev);

	ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
	ndev->priv_flags |= IFF_UNICAST_FLT;
	ndev->netdev_ops = &hns_nic_netdev_ops;
	hns_ethtool_set_ops(ndev);

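	/* checksum offload, scatter-gather, GSO and GRO are supported on all hardware versions */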
	ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO;
	ndev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
	ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

	/* MTU range: 68 - 9578 (v1) or 9706 (v2) */
	ndev->min_mtu = MAC_MIN_MTU;
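	/* V2 hardware additionally supports TSO/TSO6, ntuple filtering and a larger MTU */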
	switch (priv->enet_ver) {
	case AE_VERSION_2:
		ndev->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_NTUPLE;
		ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
			NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
		ndev->vlan_features |= NETIF_F_TSO | NETIF_F_TSO6;
		ndev->max_mtu = MAC_MAX_MTU_V2 -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	default:
		ndev->max_mtu = MAC_MAX_MTU -
				(ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
		break;
	}

	SET_NETDEV_DEV(ndev, dev);

	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		dev_dbg(dev, "set mask to 64bit\n");
	else
		dev_err(dev, "failed to set 64-bit DMA mask\n");

	/* carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(ndev);

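	/* periodic service task: keeps the link state up to date and handles reset work */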
	timer_setup(&priv->service_timer, hns_nic_service_timer, 0);
	INIT_WORK(&priv->service_task, hns_nic_service_task);

	set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
	clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
	set_bit(NIC_STATE_DOWN, &priv->state);

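	/* if the AE handle cannot be acquired yet, register a notifier and finish binding once the AE device appears */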
	if (hns_nic_try_get_ae(priv->netdev)) {
		priv->notifier_block.notifier_call = hns_nic_notifier_action;
		ret = hnae_register_notifier(&priv->notifier_block);
		if (ret) {
			dev_err(dev, "failed to register notifier!\n");
			goto out_notify_fail;
		}
		dev_dbg(dev, "no AE handle yet, notifier registered!\n");
	}

	return 0;

out_notify_fail:
	(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
	/* to_of_node() returns NULL for ACPI nodes, so this is safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));
	free_netdev(ndev);
	return ret;
}

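/* undo the setup done in hns_nic_dev_probe() */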
static void hns_nic_dev_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (ndev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(ndev);

	if (priv->ring_data)
		hns_nic_uninit_ring_data(priv);
	priv->ring_data = NULL;

	if (ndev->phydev)
		phy_disconnect(ndev->phydev);

	if (!IS_ERR_OR_NULL(priv->ae_handle))
		hnae_put_handle(priv->ae_handle);
	priv->ae_handle = NULL;
	if (priv->notifier_block.notifier_call)
		hnae_unregister_notifier(&priv->notifier_block);
	priv->notifier_block.notifier_call = NULL;

	set_bit(NIC_STATE_REMOVING, &priv->state);
	(void)cancel_work_sync(&priv->service_task);

	/* to_of_node() returns NULL for ACPI nodes, so this is safe for ACPI FW */
	of_node_put(to_of_node(priv->fwnode));

	free_netdev(ndev);
}

static const struct of_device_id hns_enet_of_match[] = {
	{.compatible = "hisilicon,hns-nic-v1",},
	{.compatible = "hisilicon,hns-nic-v2",},
	{},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

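/* the platform driver binds either via the DT compatibles above or via the ACPI IDs in hns_enet_acpi_match */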
static struct platform_driver hns_nic_dev_driver = {
	.driver = {
		.name = "hns-nic",
		.of_match_table = hns_enet_of_match,
		.acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
	},
	.probe = hns_nic_dev_probe,
	.remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");