1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Faraday FTGMAC100 Gigabit Ethernet
4 *
5 * (C) Copyright 2009-2011 Faraday Technology
6 * Po-Yu Chuang <ratbert@faraday-tech.com>
7 */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/clk.h>
12 #include <linux/reset.h>
13 #include <linux/dma-mapping.h>
14 #include <linux/etherdevice.h>
15 #include <linux/ethtool.h>
16 #include <linux/interrupt.h>
17 #include <linux/io.h>
18 #include <linux/module.h>
19 #include <linux/netdevice.h>
20 #include <linux/of.h>
21 #include <linux/of_mdio.h>
22 #include <linux/phy.h>
23 #include <linux/platform_device.h>
24 #include <linux/property.h>
25 #include <linux/crc32.h>
26 #include <linux/if_vlan.h>
27 #include <linux/of_net.h>
28 #include <linux/phy_fixed.h>
29 #include <net/ip.h>
30 #include <net/ncsi.h>
31
32 #include "ftgmac100.h"
33
34 #define DRV_NAME "ftgmac100"
35
36 enum ftgmac100_mac_id {
37 FTGMAC100_FARADAY = 1,
38 FTGMAC100_AST2400,
39 FTGMAC100_AST2500,
40 FTGMAC100_AST2600
41 };
42
43 struct ftgmac100_match_data {
44 enum ftgmac100_mac_id mac_id;
45 };
46
47 /* Arbitrary values, I am not sure the HW has limits */
48 #define MAX_RX_QUEUE_ENTRIES 1024
49 #define MAX_TX_QUEUE_ENTRIES 1024
50 #define MIN_RX_QUEUE_ENTRIES 32
51 #define MIN_TX_QUEUE_ENTRIES 32
52
53 /* Defaults */
54 #define DEF_RX_QUEUE_ENTRIES 128
55 #define DEF_TX_QUEUE_ENTRIES 128
56
57 #define MAX_PKT_SIZE 1536
58 #define RX_BUF_SIZE MAX_PKT_SIZE /* must be smaller than 0x3fff */
59
60 /* Min number of tx ring entries before stopping queue */
61 #define TX_THRESHOLD (MAX_SKB_FRAGS + 1)
62
63 #define FTGMAC_100MHZ 100000000
64 #define FTGMAC_25MHZ 25000000
65
66 /* For NC-SI to register a fixed-link phy device */
67 static struct fixed_phy_status ncsi_phy_status = {
68 .link = 1,
69 .speed = SPEED_100,
70 .duplex = DUPLEX_FULL,
71 .pause = 0,
72 .asym_pause = 0
73 };
74
75 struct ftgmac100 {
76 /* Registers */
77 struct resource *res;
78 void __iomem *base;
79
80 enum ftgmac100_mac_id mac_id;
81
82 /* Rx ring */
83 unsigned int rx_q_entries;
84 struct ftgmac100_rxdes *rxdes;
85 dma_addr_t rxdes_dma;
86 struct sk_buff **rx_skbs;
87 unsigned int rx_pointer;
88 u32 rxdes0_edorr_mask;
89
90 /* Tx ring */
91 unsigned int tx_q_entries;
92 struct ftgmac100_txdes *txdes;
93 dma_addr_t txdes_dma;
94 struct sk_buff **tx_skbs;
95 unsigned int tx_clean_pointer;
96 unsigned int tx_pointer;
97 u32 txdes0_edotr_mask;
98
99 /* Used to signal the reset task of ring change request */
100 unsigned int new_rx_q_entries;
101 unsigned int new_tx_q_entries;
102
103 /* Scratch page to use when rx skb alloc fails */
104 void *rx_scratch;
105 dma_addr_t rx_scratch_dma;
106
107 /* Component structures */
108 struct net_device *netdev;
109 struct device *dev;
110 struct ncsi_dev *ndev;
111 struct napi_struct napi;
112 struct work_struct reset_task;
113 struct mii_bus *mii_bus;
114 struct clk *clk;
115
116 /* AST2500/AST2600 RMII ref clock gate */
117 struct clk *rclk;
118 /* Aspeed reset control */
119 struct reset_control *rst;
120
121 /* Link management */
122 int cur_speed;
123 int cur_duplex;
124 bool use_ncsi;
125
126 /* Multicast filter settings */
127 u32 maht0;
128 u32 maht1;
129
130 /* Flow control settings */
131 bool tx_pause;
132 bool rx_pause;
133 bool aneg_pause;
134
135 /* Misc */
136 bool need_mac_restart;
137 bool is_aspeed;
138 };
139
ftgmac100_reset_mac(struct ftgmac100 * priv,u32 maccr)140 static int ftgmac100_reset_mac(struct ftgmac100 *priv, u32 maccr)
141 {
142 struct net_device *netdev = priv->netdev;
143 int i;
144
145 /* NOTE: reset clears all registers */
146 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
147 iowrite32(maccr | FTGMAC100_MACCR_SW_RST,
148 priv->base + FTGMAC100_OFFSET_MACCR);
149 for (i = 0; i < 200; i++) {
150 unsigned int maccr;
151
152 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
153 if (!(maccr & FTGMAC100_MACCR_SW_RST))
154 return 0;
155
156 udelay(1);
157 }
158
159 netdev_err(netdev, "Hardware reset failed\n");
160 return -EIO;
161 }
162
ftgmac100_reset_and_config_mac(struct ftgmac100 * priv)163 static int ftgmac100_reset_and_config_mac(struct ftgmac100 *priv)
164 {
165 u32 maccr = 0;
166
167 /* Aspeed RMII needs SCU reset to clear status */
168 if (priv->is_aspeed && priv->netdev->phydev->interface == PHY_INTERFACE_MODE_RMII) {
169 int err;
170
171 err = reset_control_assert(priv->rst);
172 if (err) {
173 dev_err(priv->dev, "Failed to reset mac (%d)\n", err);
174 return err;
175 }
176 usleep_range(10000, 20000);
177 err = reset_control_deassert(priv->rst);
178 if (err) {
179 dev_err(priv->dev, "Failed to deassert mac reset (%d)\n", err);
180 return err;
181 }
182 }
183
184 switch (priv->cur_speed) {
185 case SPEED_10:
186 case 0: /* no link */
187 break;
188
189 case SPEED_100:
190 maccr |= FTGMAC100_MACCR_FAST_MODE;
191 break;
192
193 case SPEED_1000:
194 maccr |= FTGMAC100_MACCR_GIGA_MODE;
195 break;
196 default:
197 netdev_err(priv->netdev, "Unknown speed %d !\n",
198 priv->cur_speed);
199 break;
200 }
201
202 /* (Re)initialize the queue pointers */
203 priv->rx_pointer = 0;
204 priv->tx_clean_pointer = 0;
205 priv->tx_pointer = 0;
206
207 /* The doc says reset twice with 10us interval */
208 if (ftgmac100_reset_mac(priv, maccr))
209 return -EIO;
210 usleep_range(10, 1000);
211 return ftgmac100_reset_mac(priv, maccr);
212 }
213
ftgmac100_write_mac_addr(struct ftgmac100 * priv,const u8 * mac)214 static void ftgmac100_write_mac_addr(struct ftgmac100 *priv, const u8 *mac)
215 {
216 unsigned int maddr = mac[0] << 8 | mac[1];
217 unsigned int laddr = mac[2] << 24 | mac[3] << 16 | mac[4] << 8 | mac[5];
218
219 iowrite32(maddr, priv->base + FTGMAC100_OFFSET_MAC_MADR);
220 iowrite32(laddr, priv->base + FTGMAC100_OFFSET_MAC_LADR);
221 }
222
ftgmac100_initial_mac(struct ftgmac100 * priv)223 static int ftgmac100_initial_mac(struct ftgmac100 *priv)
224 {
225 u8 mac[ETH_ALEN];
226 unsigned int m;
227 unsigned int l;
228 int err;
229
230 err = of_get_ethdev_address(priv->dev->of_node, priv->netdev);
231 if (err == -EPROBE_DEFER)
232 return err;
233 if (!err) {
234 dev_info(priv->dev, "Read MAC address %pM from device tree\n",
235 priv->netdev->dev_addr);
236 return 0;
237 }
238
239 m = ioread32(priv->base + FTGMAC100_OFFSET_MAC_MADR);
240 l = ioread32(priv->base + FTGMAC100_OFFSET_MAC_LADR);
241
242 mac[0] = (m >> 8) & 0xff;
243 mac[1] = m & 0xff;
244 mac[2] = (l >> 24) & 0xff;
245 mac[3] = (l >> 16) & 0xff;
246 mac[4] = (l >> 8) & 0xff;
247 mac[5] = l & 0xff;
248
249 if (is_valid_ether_addr(mac)) {
250 eth_hw_addr_set(priv->netdev, mac);
251 dev_info(priv->dev, "Read MAC address %pM from chip\n", mac);
252 } else {
253 eth_hw_addr_random(priv->netdev);
254 dev_info(priv->dev, "Generated random MAC address %pM\n",
255 priv->netdev->dev_addr);
256 }
257
258 return 0;
259 }
260
ftgmac100_set_mac_addr(struct net_device * dev,void * p)261 static int ftgmac100_set_mac_addr(struct net_device *dev, void *p)
262 {
263 int ret;
264
265 ret = eth_prepare_mac_addr_change(dev, p);
266 if (ret < 0)
267 return ret;
268
269 eth_commit_mac_addr_change(dev, p);
270 ftgmac100_write_mac_addr(netdev_priv(dev), dev->dev_addr);
271
272 return 0;
273 }
274
ftgmac100_config_pause(struct ftgmac100 * priv)275 static void ftgmac100_config_pause(struct ftgmac100 *priv)
276 {
277 u32 fcr = FTGMAC100_FCR_PAUSE_TIME(16);
278
279 /* Throttle tx queue when receiving pause frames */
280 if (priv->rx_pause)
281 fcr |= FTGMAC100_FCR_FC_EN;
282
283 /* Enables sending pause frames when the RX queue is past a
284 * certain threshold.
285 */
286 if (priv->tx_pause)
287 fcr |= FTGMAC100_FCR_FCTHR_EN;
288
289 iowrite32(fcr, priv->base + FTGMAC100_OFFSET_FCR);
290 }
291
ftgmac100_init_hw(struct ftgmac100 * priv)292 static void ftgmac100_init_hw(struct ftgmac100 *priv)
293 {
294 u32 reg, rfifo_sz, tfifo_sz;
295
296 /* Clear stale interrupts */
297 reg = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
298 iowrite32(reg, priv->base + FTGMAC100_OFFSET_ISR);
299
300 /* Setup RX ring buffer base */
301 iowrite32(priv->rxdes_dma, priv->base + FTGMAC100_OFFSET_RXR_BADR);
302
303 /* Setup TX ring buffer base */
304 iowrite32(priv->txdes_dma, priv->base + FTGMAC100_OFFSET_NPTXR_BADR);
305
306 /* Configure RX buffer size */
307 iowrite32(FTGMAC100_RBSR_SIZE(RX_BUF_SIZE),
308 priv->base + FTGMAC100_OFFSET_RBSR);
309
310 /* Set RX descriptor autopoll */
311 iowrite32(FTGMAC100_APTC_RXPOLL_CNT(1),
312 priv->base + FTGMAC100_OFFSET_APTC);
313
314 /* Write MAC address */
315 ftgmac100_write_mac_addr(priv, priv->netdev->dev_addr);
316
317 /* Write multicast filter */
318 iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
319 iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
320
321 /* Configure descriptor sizes and increase burst sizes according
322 * to values in Aspeed SDK. The FIFO arbitration is enabled and
323 * the thresholds set based on the recommended values in the
324 * AST2400 specification.
325 */
326 iowrite32(FTGMAC100_DBLAC_RXDES_SIZE(2) | /* 2*8 bytes RX descs */
327 FTGMAC100_DBLAC_TXDES_SIZE(2) | /* 2*8 bytes TX descs */
328 FTGMAC100_DBLAC_RXBURST_SIZE(3) | /* 512 bytes max RX bursts */
329 FTGMAC100_DBLAC_TXBURST_SIZE(3) | /* 512 bytes max TX bursts */
330 FTGMAC100_DBLAC_RX_THR_EN | /* Enable fifo threshold arb */
331 FTGMAC100_DBLAC_RXFIFO_HTHR(6) | /* 6/8 of FIFO high threshold */
332 FTGMAC100_DBLAC_RXFIFO_LTHR(2), /* 2/8 of FIFO low threshold */
333 priv->base + FTGMAC100_OFFSET_DBLAC);
334
335 /* Interrupt mitigation configured for 1 interrupt/packet. HW interrupt
336 * mitigation doesn't seem to provide any benefit with NAPI so leave
337 * it at that.
338 */
339 iowrite32(FTGMAC100_ITC_RXINT_THR(1) |
340 FTGMAC100_ITC_TXINT_THR(1),
341 priv->base + FTGMAC100_OFFSET_ITC);
342
343 /* Configure FIFO sizes in the TPAFCR register */
344 reg = ioread32(priv->base + FTGMAC100_OFFSET_FEAR);
345 rfifo_sz = reg & 0x00000007;
346 tfifo_sz = (reg >> 3) & 0x00000007;
347 reg = ioread32(priv->base + FTGMAC100_OFFSET_TPAFCR);
348 reg &= ~0x3f000000;
349 reg |= (tfifo_sz << 27);
350 reg |= (rfifo_sz << 24);
351 iowrite32(reg, priv->base + FTGMAC100_OFFSET_TPAFCR);
352 }
353
ftgmac100_start_hw(struct ftgmac100 * priv)354 static void ftgmac100_start_hw(struct ftgmac100 *priv)
355 {
356 u32 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
357
358 /* Keep the original GMAC and FAST bits */
359 maccr &= (FTGMAC100_MACCR_FAST_MODE | FTGMAC100_MACCR_GIGA_MODE);
360
361 /* Add all the main enable bits */
362 maccr |= FTGMAC100_MACCR_TXDMA_EN |
363 FTGMAC100_MACCR_RXDMA_EN |
364 FTGMAC100_MACCR_TXMAC_EN |
365 FTGMAC100_MACCR_RXMAC_EN |
366 FTGMAC100_MACCR_CRC_APD |
367 FTGMAC100_MACCR_PHY_LINK_LEVEL |
368 FTGMAC100_MACCR_RX_RUNT |
369 FTGMAC100_MACCR_RX_BROADPKT;
370
371 /* Add other bits as needed */
372 if (priv->cur_duplex == DUPLEX_FULL)
373 maccr |= FTGMAC100_MACCR_FULLDUP;
374 if (priv->netdev->flags & IFF_PROMISC)
375 maccr |= FTGMAC100_MACCR_RX_ALL;
376 if (priv->netdev->flags & IFF_ALLMULTI)
377 maccr |= FTGMAC100_MACCR_RX_MULTIPKT;
378 else if (netdev_mc_count(priv->netdev))
379 maccr |= FTGMAC100_MACCR_HT_MULTI_EN;
380
381 /* Vlan filtering enabled */
382 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
383 maccr |= FTGMAC100_MACCR_RM_VLAN;
384
385 /* Hit the HW */
386 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
387 }
388
ftgmac100_stop_hw(struct ftgmac100 * priv)389 static void ftgmac100_stop_hw(struct ftgmac100 *priv)
390 {
391 iowrite32(0, priv->base + FTGMAC100_OFFSET_MACCR);
392 }
393
ftgmac100_calc_mc_hash(struct ftgmac100 * priv)394 static void ftgmac100_calc_mc_hash(struct ftgmac100 *priv)
395 {
396 struct netdev_hw_addr *ha;
397
398 priv->maht1 = 0;
399 priv->maht0 = 0;
400 netdev_for_each_mc_addr(ha, priv->netdev) {
401 u32 crc_val = ether_crc_le(ETH_ALEN, ha->addr);
402
403 crc_val = (~(crc_val >> 2)) & 0x3f;
404 if (crc_val >= 32)
405 priv->maht1 |= 1ul << (crc_val - 32);
406 else
407 priv->maht0 |= 1ul << (crc_val);
408 }
409 }
410
ftgmac100_set_rx_mode(struct net_device * netdev)411 static void ftgmac100_set_rx_mode(struct net_device *netdev)
412 {
413 struct ftgmac100 *priv = netdev_priv(netdev);
414
415 /* Setup the hash filter */
416 ftgmac100_calc_mc_hash(priv);
417
418 /* Interface down ? that's all there is to do */
419 if (!netif_running(netdev))
420 return;
421
422 /* Update the HW */
423 iowrite32(priv->maht0, priv->base + FTGMAC100_OFFSET_MAHT0);
424 iowrite32(priv->maht1, priv->base + FTGMAC100_OFFSET_MAHT1);
425
426 /* Reconfigure MACCR */
427 ftgmac100_start_hw(priv);
428 }
429
ftgmac100_alloc_rx_buf(struct ftgmac100 * priv,unsigned int entry,struct ftgmac100_rxdes * rxdes,gfp_t gfp)430 static int ftgmac100_alloc_rx_buf(struct ftgmac100 *priv, unsigned int entry,
431 struct ftgmac100_rxdes *rxdes, gfp_t gfp)
432 {
433 struct net_device *netdev = priv->netdev;
434 struct sk_buff *skb;
435 dma_addr_t map;
436 int err = 0;
437
438 skb = netdev_alloc_skb_ip_align(netdev, RX_BUF_SIZE);
439 if (unlikely(!skb)) {
440 if (net_ratelimit())
441 netdev_warn(netdev, "failed to allocate rx skb\n");
442 err = -ENOMEM;
443 map = priv->rx_scratch_dma;
444 } else {
445 map = dma_map_single(priv->dev, skb->data, RX_BUF_SIZE,
446 DMA_FROM_DEVICE);
447 if (unlikely(dma_mapping_error(priv->dev, map))) {
448 if (net_ratelimit())
449 netdev_err(netdev, "failed to map rx page\n");
450 dev_kfree_skb_any(skb);
451 map = priv->rx_scratch_dma;
452 skb = NULL;
453 err = -ENOMEM;
454 }
455 }
456
457 /* Store skb */
458 priv->rx_skbs[entry] = skb;
459
460 /* Store DMA address into RX desc */
461 rxdes->rxdes3 = cpu_to_le32(map);
462
463 /* Ensure the above is ordered vs clearing the OWN bit */
464 dma_wmb();
465
466 /* Clean status (which resets own bit) */
467 if (entry == (priv->rx_q_entries - 1))
468 rxdes->rxdes0 = cpu_to_le32(priv->rxdes0_edorr_mask);
469 else
470 rxdes->rxdes0 = 0;
471
472 return err;
473 }
474
ftgmac100_next_rx_pointer(struct ftgmac100 * priv,unsigned int pointer)475 static unsigned int ftgmac100_next_rx_pointer(struct ftgmac100 *priv,
476 unsigned int pointer)
477 {
478 return (pointer + 1) & (priv->rx_q_entries - 1);
479 }
480
ftgmac100_rx_packet_error(struct ftgmac100 * priv,u32 status)481 static void ftgmac100_rx_packet_error(struct ftgmac100 *priv, u32 status)
482 {
483 struct net_device *netdev = priv->netdev;
484
485 if (status & FTGMAC100_RXDES0_RX_ERR)
486 netdev->stats.rx_errors++;
487
488 if (status & FTGMAC100_RXDES0_CRC_ERR)
489 netdev->stats.rx_crc_errors++;
490
491 if (status & (FTGMAC100_RXDES0_FTL |
492 FTGMAC100_RXDES0_RUNT |
493 FTGMAC100_RXDES0_RX_ODD_NB))
494 netdev->stats.rx_length_errors++;
495 }
496
ftgmac100_rx_packet(struct ftgmac100 * priv,int * processed)497 static bool ftgmac100_rx_packet(struct ftgmac100 *priv, int *processed)
498 {
499 struct net_device *netdev = priv->netdev;
500 struct ftgmac100_rxdes *rxdes;
501 struct sk_buff *skb;
502 unsigned int pointer, size;
503 u32 status, csum_vlan;
504 dma_addr_t map;
505
506 /* Grab next RX descriptor */
507 pointer = priv->rx_pointer;
508 rxdes = &priv->rxdes[pointer];
509
510 /* Grab descriptor status */
511 status = le32_to_cpu(rxdes->rxdes0);
512
513 /* Do we have a packet ? */
514 if (!(status & FTGMAC100_RXDES0_RXPKT_RDY))
515 return false;
516
517 /* Order subsequent reads with the test for the ready bit */
518 dma_rmb();
519
520 /* We don't cope with fragmented RX packets */
521 if (unlikely(!(status & FTGMAC100_RXDES0_FRS) ||
522 !(status & FTGMAC100_RXDES0_LRS)))
523 goto drop;
524
525 /* Grab received size and csum vlan field in the descriptor */
526 size = status & FTGMAC100_RXDES0_VDBC;
527 csum_vlan = le32_to_cpu(rxdes->rxdes1);
528
529 /* Any error (other than csum offload) flagged ? */
530 if (unlikely(status & RXDES0_ANY_ERROR)) {
531 /* Correct for incorrect flagging of runt packets
532 * with vlan tags... Just accept a runt packet that
533 * has been flagged as vlan and whose size is at
534 * least 60 bytes.
535 */
536 if ((status & FTGMAC100_RXDES0_RUNT) &&
537 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL) &&
538 (size >= 60))
539 status &= ~FTGMAC100_RXDES0_RUNT;
540
541 /* Any error still in there ? */
542 if (status & RXDES0_ANY_ERROR) {
543 ftgmac100_rx_packet_error(priv, status);
544 goto drop;
545 }
546 }
547
548 /* If the packet had no skb (failed to allocate earlier)
549 * then try to allocate one and skip
550 */
551 skb = priv->rx_skbs[pointer];
552 if (!unlikely(skb)) {
553 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
554 goto drop;
555 }
556
557 if (unlikely(status & FTGMAC100_RXDES0_MULTICAST))
558 netdev->stats.multicast++;
559
560 /* If the HW found checksum errors, bounce it to software.
561 *
562 * If we didn't, we need to see if the packet was recognized
563 * by HW as one of the supported checksummed protocols before
564 * we accept the HW test results.
565 */
566 if (netdev->features & NETIF_F_RXCSUM) {
567 u32 err_bits = FTGMAC100_RXDES1_TCP_CHKSUM_ERR |
568 FTGMAC100_RXDES1_UDP_CHKSUM_ERR |
569 FTGMAC100_RXDES1_IP_CHKSUM_ERR;
570 if ((csum_vlan & err_bits) ||
571 !(csum_vlan & FTGMAC100_RXDES1_PROT_MASK))
572 skb->ip_summed = CHECKSUM_NONE;
573 else
574 skb->ip_summed = CHECKSUM_UNNECESSARY;
575 }
576
577 /* Transfer received size to skb */
578 skb_put(skb, size);
579
580 /* Extract vlan tag */
581 if ((netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
582 (csum_vlan & FTGMAC100_RXDES1_VLANTAG_AVAIL))
583 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
584 csum_vlan & 0xffff);
585
586 /* Tear down DMA mapping, do necessary cache management */
587 map = le32_to_cpu(rxdes->rxdes3);
588
589 #if defined(CONFIG_ARM) && !defined(CONFIG_ARM_DMA_USE_IOMMU)
590 /* When we don't have an iommu, we can save cycles by not
591 * invalidating the cache for the part of the packet that
592 * wasn't received.
593 */
594 dma_unmap_single(priv->dev, map, size, DMA_FROM_DEVICE);
595 #else
596 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
597 #endif
598
599
600 /* Resplenish rx ring */
601 ftgmac100_alloc_rx_buf(priv, pointer, rxdes, GFP_ATOMIC);
602 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
603
604 skb->protocol = eth_type_trans(skb, netdev);
605
606 netdev->stats.rx_packets++;
607 netdev->stats.rx_bytes += size;
608
609 /* push packet to protocol stack */
610 if (skb->ip_summed == CHECKSUM_NONE)
611 netif_receive_skb(skb);
612 else
613 napi_gro_receive(&priv->napi, skb);
614
615 (*processed)++;
616 return true;
617
618 drop:
619 /* Clean rxdes0 (which resets own bit) */
620 rxdes->rxdes0 = cpu_to_le32(status & priv->rxdes0_edorr_mask);
621 priv->rx_pointer = ftgmac100_next_rx_pointer(priv, pointer);
622 netdev->stats.rx_dropped++;
623 return true;
624 }
625
ftgmac100_base_tx_ctlstat(struct ftgmac100 * priv,unsigned int index)626 static u32 ftgmac100_base_tx_ctlstat(struct ftgmac100 *priv,
627 unsigned int index)
628 {
629 if (index == (priv->tx_q_entries - 1))
630 return priv->txdes0_edotr_mask;
631 else
632 return 0;
633 }
634
ftgmac100_next_tx_pointer(struct ftgmac100 * priv,unsigned int pointer)635 static unsigned int ftgmac100_next_tx_pointer(struct ftgmac100 *priv,
636 unsigned int pointer)
637 {
638 return (pointer + 1) & (priv->tx_q_entries - 1);
639 }
640
ftgmac100_tx_buf_avail(struct ftgmac100 * priv)641 static u32 ftgmac100_tx_buf_avail(struct ftgmac100 *priv)
642 {
643 /* Returns the number of available slots in the TX queue
644 *
645 * This always leaves one free slot so we don't have to
646 * worry about empty vs. full, and this simplifies the
647 * test for ftgmac100_tx_buf_cleanable() below
648 */
649 return (priv->tx_clean_pointer - priv->tx_pointer - 1) &
650 (priv->tx_q_entries - 1);
651 }
652
ftgmac100_tx_buf_cleanable(struct ftgmac100 * priv)653 static bool ftgmac100_tx_buf_cleanable(struct ftgmac100 *priv)
654 {
655 return priv->tx_pointer != priv->tx_clean_pointer;
656 }
657
ftgmac100_free_tx_packet(struct ftgmac100 * priv,unsigned int pointer,struct sk_buff * skb,struct ftgmac100_txdes * txdes,u32 ctl_stat)658 static void ftgmac100_free_tx_packet(struct ftgmac100 *priv,
659 unsigned int pointer,
660 struct sk_buff *skb,
661 struct ftgmac100_txdes *txdes,
662 u32 ctl_stat)
663 {
664 dma_addr_t map = le32_to_cpu(txdes->txdes3);
665 size_t len;
666
667 if (ctl_stat & FTGMAC100_TXDES0_FTS) {
668 len = skb_headlen(skb);
669 dma_unmap_single(priv->dev, map, len, DMA_TO_DEVICE);
670 } else {
671 len = FTGMAC100_TXDES0_TXBUF_SIZE(ctl_stat);
672 dma_unmap_page(priv->dev, map, len, DMA_TO_DEVICE);
673 }
674
675 /* Free SKB on last segment */
676 if (ctl_stat & FTGMAC100_TXDES0_LTS)
677 dev_kfree_skb(skb);
678 priv->tx_skbs[pointer] = NULL;
679 }
680
ftgmac100_tx_complete_packet(struct ftgmac100 * priv)681 static bool ftgmac100_tx_complete_packet(struct ftgmac100 *priv)
682 {
683 struct net_device *netdev = priv->netdev;
684 struct ftgmac100_txdes *txdes;
685 struct sk_buff *skb;
686 unsigned int pointer;
687 u32 ctl_stat;
688
689 pointer = priv->tx_clean_pointer;
690 txdes = &priv->txdes[pointer];
691
692 ctl_stat = le32_to_cpu(txdes->txdes0);
693 if (ctl_stat & FTGMAC100_TXDES0_TXDMA_OWN)
694 return false;
695
696 skb = priv->tx_skbs[pointer];
697 netdev->stats.tx_packets++;
698 netdev->stats.tx_bytes += skb->len;
699 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
700 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
701
702 /* Ensure the descriptor config is visible before setting the tx
703 * pointer.
704 */
705 smp_wmb();
706
707 priv->tx_clean_pointer = ftgmac100_next_tx_pointer(priv, pointer);
708
709 return true;
710 }
711
ftgmac100_tx_complete(struct ftgmac100 * priv)712 static void ftgmac100_tx_complete(struct ftgmac100 *priv)
713 {
714 struct net_device *netdev = priv->netdev;
715
716 /* Process all completed packets */
717 while (ftgmac100_tx_buf_cleanable(priv) &&
718 ftgmac100_tx_complete_packet(priv))
719 ;
720
721 /* Restart queue if needed */
722 smp_mb();
723 if (unlikely(netif_queue_stopped(netdev) &&
724 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)) {
725 struct netdev_queue *txq;
726
727 txq = netdev_get_tx_queue(netdev, 0);
728 __netif_tx_lock(txq, smp_processor_id());
729 if (netif_queue_stopped(netdev) &&
730 ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
731 netif_wake_queue(netdev);
732 __netif_tx_unlock(txq);
733 }
734 }
735
ftgmac100_prep_tx_csum(struct sk_buff * skb,u32 * csum_vlan)736 static bool ftgmac100_prep_tx_csum(struct sk_buff *skb, u32 *csum_vlan)
737 {
738 if (skb->protocol == cpu_to_be16(ETH_P_IP)) {
739 u8 ip_proto = ip_hdr(skb)->protocol;
740
741 *csum_vlan |= FTGMAC100_TXDES1_IP_CHKSUM;
742 switch(ip_proto) {
743 case IPPROTO_TCP:
744 *csum_vlan |= FTGMAC100_TXDES1_TCP_CHKSUM;
745 return true;
746 case IPPROTO_UDP:
747 *csum_vlan |= FTGMAC100_TXDES1_UDP_CHKSUM;
748 return true;
749 case IPPROTO_IP:
750 return true;
751 }
752 }
753 return skb_checksum_help(skb) == 0;
754 }
755
ftgmac100_hard_start_xmit(struct sk_buff * skb,struct net_device * netdev)756 static netdev_tx_t ftgmac100_hard_start_xmit(struct sk_buff *skb,
757 struct net_device *netdev)
758 {
759 struct ftgmac100 *priv = netdev_priv(netdev);
760 struct ftgmac100_txdes *txdes, *first;
761 unsigned int pointer, nfrags, len, i, j;
762 u32 f_ctl_stat, ctl_stat, csum_vlan;
763 dma_addr_t map;
764
765 /* The HW doesn't pad small frames */
766 if (eth_skb_pad(skb)) {
767 netdev->stats.tx_dropped++;
768 return NETDEV_TX_OK;
769 }
770
771 /* Reject oversize packets */
772 if (unlikely(skb->len > MAX_PKT_SIZE)) {
773 if (net_ratelimit())
774 netdev_dbg(netdev, "tx packet too big\n");
775 goto drop;
776 }
777
778 /* Do we have a limit on #fragments ? I yet have to get a reply
779 * from Aspeed. If there's one I haven't hit it.
780 */
781 nfrags = skb_shinfo(skb)->nr_frags;
782
783 /* Setup HW checksumming */
784 csum_vlan = 0;
785 if (skb->ip_summed == CHECKSUM_PARTIAL &&
786 !ftgmac100_prep_tx_csum(skb, &csum_vlan))
787 goto drop;
788
789 /* Add VLAN tag */
790 if (skb_vlan_tag_present(skb)) {
791 csum_vlan |= FTGMAC100_TXDES1_INS_VLANTAG;
792 csum_vlan |= skb_vlan_tag_get(skb) & 0xffff;
793 }
794
795 /* Get header len */
796 len = skb_headlen(skb);
797
798 /* Map the packet head */
799 map = dma_map_single(priv->dev, skb->data, len, DMA_TO_DEVICE);
800 if (dma_mapping_error(priv->dev, map)) {
801 if (net_ratelimit())
802 netdev_err(netdev, "map tx packet head failed\n");
803 goto drop;
804 }
805
806 /* Grab the next free tx descriptor */
807 pointer = priv->tx_pointer;
808 txdes = first = &priv->txdes[pointer];
809
810 /* Setup it up with the packet head. Don't write the head to the
811 * ring just yet
812 */
813 priv->tx_skbs[pointer] = skb;
814 f_ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
815 f_ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
816 f_ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
817 f_ctl_stat |= FTGMAC100_TXDES0_FTS;
818 if (nfrags == 0)
819 f_ctl_stat |= FTGMAC100_TXDES0_LTS;
820 txdes->txdes3 = cpu_to_le32(map);
821 txdes->txdes1 = cpu_to_le32(csum_vlan);
822
823 /* Next descriptor */
824 pointer = ftgmac100_next_tx_pointer(priv, pointer);
825
826 /* Add the fragments */
827 for (i = 0; i < nfrags; i++) {
828 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
829
830 len = skb_frag_size(frag);
831
832 /* Map it */
833 map = skb_frag_dma_map(priv->dev, frag, 0, len,
834 DMA_TO_DEVICE);
835 if (dma_mapping_error(priv->dev, map))
836 goto dma_err;
837
838 /* Setup descriptor */
839 priv->tx_skbs[pointer] = skb;
840 txdes = &priv->txdes[pointer];
841 ctl_stat = ftgmac100_base_tx_ctlstat(priv, pointer);
842 ctl_stat |= FTGMAC100_TXDES0_TXDMA_OWN;
843 ctl_stat |= FTGMAC100_TXDES0_TXBUF_SIZE(len);
844 if (i == (nfrags - 1))
845 ctl_stat |= FTGMAC100_TXDES0_LTS;
846 txdes->txdes0 = cpu_to_le32(ctl_stat);
847 txdes->txdes1 = 0;
848 txdes->txdes3 = cpu_to_le32(map);
849
850 /* Next one */
851 pointer = ftgmac100_next_tx_pointer(priv, pointer);
852 }
853
854 /* Order the previous packet and descriptor udpates
855 * before setting the OWN bit on the first descriptor.
856 */
857 dma_wmb();
858 first->txdes0 = cpu_to_le32(f_ctl_stat);
859
860 /* Ensure the descriptor config is visible before setting the tx
861 * pointer.
862 */
863 smp_wmb();
864
865 /* Update next TX pointer */
866 priv->tx_pointer = pointer;
867
868 /* If there isn't enough room for all the fragments of a new packet
869 * in the TX ring, stop the queue. The sequence below is race free
870 * vs. a concurrent restart in ftgmac100_poll()
871 */
872 if (unlikely(ftgmac100_tx_buf_avail(priv) < TX_THRESHOLD)) {
873 netif_stop_queue(netdev);
874 /* Order the queue stop with the test below */
875 smp_mb();
876 if (ftgmac100_tx_buf_avail(priv) >= TX_THRESHOLD)
877 netif_wake_queue(netdev);
878 }
879
880 /* Poke transmitter to read the updated TX descriptors */
881 iowrite32(1, priv->base + FTGMAC100_OFFSET_NPTXPD);
882
883 return NETDEV_TX_OK;
884
885 dma_err:
886 if (net_ratelimit())
887 netdev_err(netdev, "map tx fragment failed\n");
888
889 /* Free head */
890 pointer = priv->tx_pointer;
891 ftgmac100_free_tx_packet(priv, pointer, skb, first, f_ctl_stat);
892 first->txdes0 = cpu_to_le32(f_ctl_stat & priv->txdes0_edotr_mask);
893
894 /* Then all fragments */
895 for (j = 0; j < i; j++) {
896 pointer = ftgmac100_next_tx_pointer(priv, pointer);
897 txdes = &priv->txdes[pointer];
898 ctl_stat = le32_to_cpu(txdes->txdes0);
899 ftgmac100_free_tx_packet(priv, pointer, skb, txdes, ctl_stat);
900 txdes->txdes0 = cpu_to_le32(ctl_stat & priv->txdes0_edotr_mask);
901 }
902
903 /* This cannot be reached if we successfully mapped the
904 * last fragment, so we know ftgmac100_free_tx_packet()
905 * hasn't freed the skb yet.
906 */
907 drop:
908 /* Drop the packet */
909 dev_kfree_skb_any(skb);
910 netdev->stats.tx_dropped++;
911
912 return NETDEV_TX_OK;
913 }
914
ftgmac100_free_buffers(struct ftgmac100 * priv)915 static void ftgmac100_free_buffers(struct ftgmac100 *priv)
916 {
917 int i;
918
919 /* Free all RX buffers */
920 for (i = 0; i < priv->rx_q_entries; i++) {
921 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
922 struct sk_buff *skb = priv->rx_skbs[i];
923 dma_addr_t map = le32_to_cpu(rxdes->rxdes3);
924
925 if (!skb)
926 continue;
927
928 priv->rx_skbs[i] = NULL;
929 dma_unmap_single(priv->dev, map, RX_BUF_SIZE, DMA_FROM_DEVICE);
930 dev_kfree_skb_any(skb);
931 }
932
933 /* Free all TX buffers */
934 for (i = 0; i < priv->tx_q_entries; i++) {
935 struct ftgmac100_txdes *txdes = &priv->txdes[i];
936 struct sk_buff *skb = priv->tx_skbs[i];
937
938 if (!skb)
939 continue;
940 ftgmac100_free_tx_packet(priv, i, skb, txdes,
941 le32_to_cpu(txdes->txdes0));
942 }
943 }
944
ftgmac100_free_rings(struct ftgmac100 * priv)945 static void ftgmac100_free_rings(struct ftgmac100 *priv)
946 {
947 /* Free skb arrays */
948 kfree(priv->rx_skbs);
949 kfree(priv->tx_skbs);
950
951 /* Free descriptors */
952 if (priv->rxdes)
953 dma_free_coherent(priv->dev, MAX_RX_QUEUE_ENTRIES *
954 sizeof(struct ftgmac100_rxdes),
955 priv->rxdes, priv->rxdes_dma);
956 priv->rxdes = NULL;
957
958 if (priv->txdes)
959 dma_free_coherent(priv->dev, MAX_TX_QUEUE_ENTRIES *
960 sizeof(struct ftgmac100_txdes),
961 priv->txdes, priv->txdes_dma);
962 priv->txdes = NULL;
963
964 /* Free scratch packet buffer */
965 if (priv->rx_scratch)
966 dma_free_coherent(priv->dev, RX_BUF_SIZE,
967 priv->rx_scratch, priv->rx_scratch_dma);
968 }
969
ftgmac100_alloc_rings(struct ftgmac100 * priv)970 static int ftgmac100_alloc_rings(struct ftgmac100 *priv)
971 {
972 /* Allocate skb arrays */
973 priv->rx_skbs = kcalloc(MAX_RX_QUEUE_ENTRIES, sizeof(void *),
974 GFP_KERNEL);
975 if (!priv->rx_skbs)
976 return -ENOMEM;
977 priv->tx_skbs = kcalloc(MAX_TX_QUEUE_ENTRIES, sizeof(void *),
978 GFP_KERNEL);
979 if (!priv->tx_skbs)
980 goto err_free_rx_skbs;
981
982 /* Allocate descriptors */
983 priv->rxdes = dma_alloc_coherent(priv->dev,
984 MAX_RX_QUEUE_ENTRIES * sizeof(struct ftgmac100_rxdes),
985 &priv->rxdes_dma, GFP_KERNEL);
986 if (!priv->rxdes)
987 goto err_free_tx_skbs;
988 priv->txdes = dma_alloc_coherent(priv->dev,
989 MAX_TX_QUEUE_ENTRIES * sizeof(struct ftgmac100_txdes),
990 &priv->txdes_dma, GFP_KERNEL);
991 if (!priv->txdes)
992 goto err_free_rxdes;
993
994 /* Allocate scratch packet buffer */
995 priv->rx_scratch = dma_alloc_coherent(priv->dev,
996 RX_BUF_SIZE,
997 &priv->rx_scratch_dma,
998 GFP_KERNEL);
999 if (!priv->rx_scratch)
1000 goto err_free_txdes;
1001
1002 return 0;
1003
1004 err_free_txdes:
1005 dma_free_coherent(priv->dev,
1006 MAX_TX_QUEUE_ENTRIES *
1007 sizeof(struct ftgmac100_txdes),
1008 priv->txdes, priv->txdes_dma);
1009 priv->txdes = NULL;
1010 err_free_rxdes:
1011 dma_free_coherent(priv->dev,
1012 MAX_RX_QUEUE_ENTRIES *
1013 sizeof(struct ftgmac100_rxdes),
1014 priv->rxdes, priv->rxdes_dma);
1015 priv->rxdes = NULL;
1016 err_free_tx_skbs:
1017 kfree(priv->tx_skbs);
1018 priv->tx_skbs = NULL;
1019 err_free_rx_skbs:
1020 kfree(priv->rx_skbs);
1021 priv->rx_skbs = NULL;
1022 return -ENOMEM;
1023 }
1024
ftgmac100_init_rings(struct ftgmac100 * priv)1025 static void ftgmac100_init_rings(struct ftgmac100 *priv)
1026 {
1027 struct ftgmac100_rxdes *rxdes = NULL;
1028 struct ftgmac100_txdes *txdes = NULL;
1029 int i;
1030
1031 /* Update entries counts */
1032 priv->rx_q_entries = priv->new_rx_q_entries;
1033 priv->tx_q_entries = priv->new_tx_q_entries;
1034
1035 if (WARN_ON(priv->rx_q_entries < MIN_RX_QUEUE_ENTRIES))
1036 return;
1037
1038 /* Initialize RX ring */
1039 for (i = 0; i < priv->rx_q_entries; i++) {
1040 rxdes = &priv->rxdes[i];
1041 rxdes->rxdes0 = 0;
1042 rxdes->rxdes3 = cpu_to_le32(priv->rx_scratch_dma);
1043 }
1044 /* Mark the end of the ring */
1045 rxdes->rxdes0 |= cpu_to_le32(priv->rxdes0_edorr_mask);
1046
1047 if (WARN_ON(priv->tx_q_entries < MIN_RX_QUEUE_ENTRIES))
1048 return;
1049
1050 /* Initialize TX ring */
1051 for (i = 0; i < priv->tx_q_entries; i++) {
1052 txdes = &priv->txdes[i];
1053 txdes->txdes0 = 0;
1054 }
1055 txdes->txdes0 |= cpu_to_le32(priv->txdes0_edotr_mask);
1056 }
1057
ftgmac100_alloc_rx_buffers(struct ftgmac100 * priv)1058 static int ftgmac100_alloc_rx_buffers(struct ftgmac100 *priv)
1059 {
1060 int i;
1061
1062 for (i = 0; i < priv->rx_q_entries; i++) {
1063 struct ftgmac100_rxdes *rxdes = &priv->rxdes[i];
1064
1065 if (ftgmac100_alloc_rx_buf(priv, i, rxdes, GFP_KERNEL))
1066 return -ENOMEM;
1067 }
1068 return 0;
1069 }
1070
ftgmac100_mdiobus_read(struct mii_bus * bus,int phy_addr,int regnum)1071 static int ftgmac100_mdiobus_read(struct mii_bus *bus, int phy_addr, int regnum)
1072 {
1073 struct net_device *netdev = bus->priv;
1074 struct ftgmac100 *priv = netdev_priv(netdev);
1075 unsigned int phycr;
1076 int i;
1077
1078 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1079
1080 /* preserve MDC cycle threshold */
1081 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
1082
1083 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
1084 FTGMAC100_PHYCR_REGAD(regnum) |
1085 FTGMAC100_PHYCR_MIIRD;
1086
1087 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
1088
1089 for (i = 0; i < 10; i++) {
1090 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1091
1092 if ((phycr & FTGMAC100_PHYCR_MIIRD) == 0) {
1093 int data;
1094
1095 data = ioread32(priv->base + FTGMAC100_OFFSET_PHYDATA);
1096 return FTGMAC100_PHYDATA_MIIRDATA(data);
1097 }
1098
1099 udelay(100);
1100 }
1101
1102 netdev_err(netdev, "mdio read timed out\n");
1103 return -EIO;
1104 }
1105
ftgmac100_mdiobus_write(struct mii_bus * bus,int phy_addr,int regnum,u16 value)1106 static int ftgmac100_mdiobus_write(struct mii_bus *bus, int phy_addr,
1107 int regnum, u16 value)
1108 {
1109 struct net_device *netdev = bus->priv;
1110 struct ftgmac100 *priv = netdev_priv(netdev);
1111 unsigned int phycr;
1112 int data;
1113 int i;
1114
1115 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1116
1117 /* preserve MDC cycle threshold */
1118 phycr &= FTGMAC100_PHYCR_MDC_CYCTHR_MASK;
1119
1120 phycr |= FTGMAC100_PHYCR_PHYAD(phy_addr) |
1121 FTGMAC100_PHYCR_REGAD(regnum) |
1122 FTGMAC100_PHYCR_MIIWR;
1123
1124 data = FTGMAC100_PHYDATA_MIIWDATA(value);
1125
1126 iowrite32(data, priv->base + FTGMAC100_OFFSET_PHYDATA);
1127 iowrite32(phycr, priv->base + FTGMAC100_OFFSET_PHYCR);
1128
1129 for (i = 0; i < 10; i++) {
1130 phycr = ioread32(priv->base + FTGMAC100_OFFSET_PHYCR);
1131
1132 if ((phycr & FTGMAC100_PHYCR_MIIWR) == 0)
1133 return 0;
1134
1135 udelay(100);
1136 }
1137
1138 netdev_err(netdev, "mdio write timed out\n");
1139 return -EIO;
1140 }
1141
ftgmac100_get_drvinfo(struct net_device * netdev,struct ethtool_drvinfo * info)1142 static void ftgmac100_get_drvinfo(struct net_device *netdev,
1143 struct ethtool_drvinfo *info)
1144 {
1145 strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1146 strscpy(info->bus_info, dev_name(&netdev->dev), sizeof(info->bus_info));
1147 }
1148
1149 static void
ftgmac100_get_ringparam(struct net_device * netdev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)1150 ftgmac100_get_ringparam(struct net_device *netdev,
1151 struct ethtool_ringparam *ering,
1152 struct kernel_ethtool_ringparam *kernel_ering,
1153 struct netlink_ext_ack *extack)
1154 {
1155 struct ftgmac100 *priv = netdev_priv(netdev);
1156
1157 memset(ering, 0, sizeof(*ering));
1158 ering->rx_max_pending = MAX_RX_QUEUE_ENTRIES;
1159 ering->tx_max_pending = MAX_TX_QUEUE_ENTRIES;
1160 ering->rx_pending = priv->rx_q_entries;
1161 ering->tx_pending = priv->tx_q_entries;
1162 }
1163
1164 static int
ftgmac100_set_ringparam(struct net_device * netdev,struct ethtool_ringparam * ering,struct kernel_ethtool_ringparam * kernel_ering,struct netlink_ext_ack * extack)1165 ftgmac100_set_ringparam(struct net_device *netdev,
1166 struct ethtool_ringparam *ering,
1167 struct kernel_ethtool_ringparam *kernel_ering,
1168 struct netlink_ext_ack *extack)
1169 {
1170 struct ftgmac100 *priv = netdev_priv(netdev);
1171
1172 if (ering->rx_pending > MAX_RX_QUEUE_ENTRIES ||
1173 ering->tx_pending > MAX_TX_QUEUE_ENTRIES ||
1174 ering->rx_pending < MIN_RX_QUEUE_ENTRIES ||
1175 ering->tx_pending < MIN_TX_QUEUE_ENTRIES ||
1176 !is_power_of_2(ering->rx_pending) ||
1177 !is_power_of_2(ering->tx_pending))
1178 return -EINVAL;
1179
1180 priv->new_rx_q_entries = ering->rx_pending;
1181 priv->new_tx_q_entries = ering->tx_pending;
1182 if (netif_running(netdev))
1183 schedule_work(&priv->reset_task);
1184
1185 return 0;
1186 }
1187
ftgmac100_get_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)1188 static void ftgmac100_get_pauseparam(struct net_device *netdev,
1189 struct ethtool_pauseparam *pause)
1190 {
1191 struct ftgmac100 *priv = netdev_priv(netdev);
1192
1193 pause->autoneg = priv->aneg_pause;
1194 pause->tx_pause = priv->tx_pause;
1195 pause->rx_pause = priv->rx_pause;
1196 }
1197
ftgmac100_set_pauseparam(struct net_device * netdev,struct ethtool_pauseparam * pause)1198 static int ftgmac100_set_pauseparam(struct net_device *netdev,
1199 struct ethtool_pauseparam *pause)
1200 {
1201 struct ftgmac100 *priv = netdev_priv(netdev);
1202 struct phy_device *phydev = netdev->phydev;
1203
1204 priv->aneg_pause = pause->autoneg;
1205 priv->tx_pause = pause->tx_pause;
1206 priv->rx_pause = pause->rx_pause;
1207
1208 if (phydev)
1209 phy_set_asym_pause(phydev, pause->rx_pause, pause->tx_pause);
1210
1211 if (netif_running(netdev)) {
1212 if (!(phydev && priv->aneg_pause))
1213 ftgmac100_config_pause(priv);
1214 }
1215
1216 return 0;
1217 }
1218
1219 static const struct ethtool_ops ftgmac100_ethtool_ops = {
1220 .get_drvinfo = ftgmac100_get_drvinfo,
1221 .get_link = ethtool_op_get_link,
1222 .get_link_ksettings = phy_ethtool_get_link_ksettings,
1223 .set_link_ksettings = phy_ethtool_set_link_ksettings,
1224 .nway_reset = phy_ethtool_nway_reset,
1225 .get_ringparam = ftgmac100_get_ringparam,
1226 .set_ringparam = ftgmac100_set_ringparam,
1227 .get_pauseparam = ftgmac100_get_pauseparam,
1228 .set_pauseparam = ftgmac100_set_pauseparam,
1229 };
1230
ftgmac100_interrupt(int irq,void * dev_id)1231 static irqreturn_t ftgmac100_interrupt(int irq, void *dev_id)
1232 {
1233 struct net_device *netdev = dev_id;
1234 struct ftgmac100 *priv = netdev_priv(netdev);
1235 unsigned int status, new_mask = FTGMAC100_INT_BAD;
1236
1237 /* Fetch and clear interrupt bits, process abnormal ones */
1238 status = ioread32(priv->base + FTGMAC100_OFFSET_ISR);
1239 iowrite32(status, priv->base + FTGMAC100_OFFSET_ISR);
1240 if (unlikely(status & FTGMAC100_INT_BAD)) {
1241
1242 /* RX buffer unavailable */
1243 if (status & FTGMAC100_INT_NO_RXBUF)
1244 netdev->stats.rx_over_errors++;
1245
1246 /* received packet lost due to RX FIFO full */
1247 if (status & FTGMAC100_INT_RPKT_LOST)
1248 netdev->stats.rx_fifo_errors++;
1249
1250 /* sent packet lost due to excessive TX collision */
1251 if (status & FTGMAC100_INT_XPKT_LOST)
1252 netdev->stats.tx_fifo_errors++;
1253
1254 /* AHB error -> Reset the chip */
1255 if (status & FTGMAC100_INT_AHB_ERR) {
1256 if (net_ratelimit())
1257 netdev_warn(netdev,
1258 "AHB bus error ! Resetting chip.\n");
1259 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1260 schedule_work(&priv->reset_task);
1261 return IRQ_HANDLED;
1262 }
1263
1264 /* We may need to restart the MAC after such errors, delay
1265 * this until after we have freed some Rx buffers though
1266 */
1267 priv->need_mac_restart = true;
1268
1269 /* Disable those errors until we restart */
1270 new_mask &= ~status;
1271 }
1272
1273 /* Only enable "bad" interrupts while NAPI is on */
1274 iowrite32(new_mask, priv->base + FTGMAC100_OFFSET_IER);
1275
1276 /* Schedule NAPI bh */
1277 napi_schedule_irqoff(&priv->napi);
1278
1279 return IRQ_HANDLED;
1280 }
1281
ftgmac100_check_rx(struct ftgmac100 * priv)1282 static bool ftgmac100_check_rx(struct ftgmac100 *priv)
1283 {
1284 struct ftgmac100_rxdes *rxdes = &priv->rxdes[priv->rx_pointer];
1285
1286 /* Do we have a packet ? */
1287 return !!(rxdes->rxdes0 & cpu_to_le32(FTGMAC100_RXDES0_RXPKT_RDY));
1288 }
1289
ftgmac100_poll(struct napi_struct * napi,int budget)1290 static int ftgmac100_poll(struct napi_struct *napi, int budget)
1291 {
1292 struct ftgmac100 *priv = container_of(napi, struct ftgmac100, napi);
1293 int work_done = 0;
1294 bool more;
1295
1296 /* Handle TX completions */
1297 if (ftgmac100_tx_buf_cleanable(priv))
1298 ftgmac100_tx_complete(priv);
1299
1300 /* Handle RX packets */
1301 do {
1302 more = ftgmac100_rx_packet(priv, &work_done);
1303 } while (more && work_done < budget);
1304
1305
1306 /* The interrupt is telling us to kick the MAC back to life
1307 * after an RX overflow
1308 */
1309 if (unlikely(priv->need_mac_restart)) {
1310 ftgmac100_start_hw(priv);
1311 priv->need_mac_restart = false;
1312
1313 /* Re-enable "bad" interrupts */
1314 iowrite32(FTGMAC100_INT_BAD,
1315 priv->base + FTGMAC100_OFFSET_IER);
1316 }
1317
1318 /* As long as we are waiting for transmit packets to be
1319 * completed we keep NAPI going
1320 */
1321 if (ftgmac100_tx_buf_cleanable(priv))
1322 work_done = budget;
1323
1324 if (work_done < budget) {
1325 /* We are about to re-enable all interrupts. However
1326 * the HW has been latching RX/TX packet interrupts while
1327 * they were masked. So we clear them first, then we need
1328 * to re-check if there's something to process
1329 */
1330 iowrite32(FTGMAC100_INT_RXTX,
1331 priv->base + FTGMAC100_OFFSET_ISR);
1332
1333 /* Push the above (and provides a barrier vs. subsequent
1334 * reads of the descriptor).
1335 */
1336 ioread32(priv->base + FTGMAC100_OFFSET_ISR);
1337
1338 /* Check RX and TX descriptors for more work to do */
1339 if (ftgmac100_check_rx(priv) ||
1340 ftgmac100_tx_buf_cleanable(priv))
1341 return budget;
1342
1343 /* deschedule NAPI */
1344 napi_complete(napi);
1345
1346 /* enable all interrupts */
1347 iowrite32(FTGMAC100_INT_ALL,
1348 priv->base + FTGMAC100_OFFSET_IER);
1349 }
1350
1351 return work_done;
1352 }
1353
ftgmac100_init_all(struct ftgmac100 * priv,bool ignore_alloc_err)1354 static int ftgmac100_init_all(struct ftgmac100 *priv, bool ignore_alloc_err)
1355 {
1356 int err = 0;
1357
1358 /* Re-init descriptors (adjust queue sizes) */
1359 ftgmac100_init_rings(priv);
1360
1361 /* Realloc rx descriptors */
1362 err = ftgmac100_alloc_rx_buffers(priv);
1363 if (err && !ignore_alloc_err)
1364 return err;
1365
1366 /* Reinit and restart HW */
1367 ftgmac100_init_hw(priv);
1368 ftgmac100_config_pause(priv);
1369 ftgmac100_start_hw(priv);
1370
1371 /* Re-enable the device */
1372 napi_enable(&priv->napi);
1373 netif_start_queue(priv->netdev);
1374
1375 /* Enable all interrupts */
1376 iowrite32(FTGMAC100_INT_ALL, priv->base + FTGMAC100_OFFSET_IER);
1377
1378 return err;
1379 }
1380
ftgmac100_reset(struct ftgmac100 * priv)1381 static void ftgmac100_reset(struct ftgmac100 *priv)
1382 {
1383 struct net_device *netdev = priv->netdev;
1384 int err;
1385
1386 netdev_dbg(netdev, "Resetting NIC...\n");
1387
1388 /* Lock the world */
1389 rtnl_lock();
1390 if (netdev->phydev)
1391 mutex_lock(&netdev->phydev->lock);
1392 if (priv->mii_bus)
1393 mutex_lock(&priv->mii_bus->mdio_lock);
1394
1395
1396 /* Check if the interface is still up */
1397 if (!netif_running(netdev))
1398 goto bail;
1399
1400 /* Stop the network stack */
1401 netif_trans_update(netdev);
1402 napi_disable(&priv->napi);
1403 netif_tx_disable(netdev);
1404
1405 /* Stop and reset the MAC */
1406 ftgmac100_stop_hw(priv);
1407 err = ftgmac100_reset_and_config_mac(priv);
1408 if (err) {
1409 /* Not much we can do ... it might come back... */
1410 netdev_err(netdev, "attempting to continue...\n");
1411 }
1412
1413 /* Free all rx and tx buffers */
1414 ftgmac100_free_buffers(priv);
1415
1416 /* Setup everything again and restart chip */
1417 ftgmac100_init_all(priv, true);
1418
1419 netdev_dbg(netdev, "Reset done !\n");
1420 bail:
1421 if (priv->mii_bus)
1422 mutex_unlock(&priv->mii_bus->mdio_lock);
1423 if (netdev->phydev)
1424 mutex_unlock(&netdev->phydev->lock);
1425 rtnl_unlock();
1426 }
1427
ftgmac100_reset_task(struct work_struct * work)1428 static void ftgmac100_reset_task(struct work_struct *work)
1429 {
1430 struct ftgmac100 *priv = container_of(work, struct ftgmac100,
1431 reset_task);
1432
1433 ftgmac100_reset(priv);
1434 }
1435
ftgmac100_adjust_link(struct net_device * netdev)1436 static void ftgmac100_adjust_link(struct net_device *netdev)
1437 {
1438 struct ftgmac100 *priv = netdev_priv(netdev);
1439 struct phy_device *phydev = netdev->phydev;
1440 bool tx_pause, rx_pause;
1441 int new_speed;
1442
1443 /* We store "no link" as speed 0 */
1444 if (!phydev->link)
1445 new_speed = 0;
1446 else
1447 new_speed = phydev->speed;
1448
1449 /* Grab pause settings from PHY if configured to do so */
1450 if (priv->aneg_pause) {
1451 rx_pause = tx_pause = phydev->pause;
1452 if (phydev->asym_pause)
1453 tx_pause = !rx_pause;
1454 } else {
1455 rx_pause = priv->rx_pause;
1456 tx_pause = priv->tx_pause;
1457 }
1458
1459 /* Link hasn't changed, do nothing */
1460 if (phydev->speed == priv->cur_speed &&
1461 phydev->duplex == priv->cur_duplex &&
1462 rx_pause == priv->rx_pause &&
1463 tx_pause == priv->tx_pause)
1464 return;
1465
1466 /* Print status if we have a link or we had one and just lost it,
1467 * don't print otherwise.
1468 */
1469 if (new_speed || priv->cur_speed)
1470 phy_print_status(phydev);
1471
1472 priv->cur_speed = new_speed;
1473 priv->cur_duplex = phydev->duplex;
1474 priv->rx_pause = rx_pause;
1475 priv->tx_pause = tx_pause;
1476
1477 /* Link is down, do nothing else */
1478 if (!new_speed)
1479 return;
1480
1481 /* Disable all interrupts */
1482 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1483
1484 /* Release phy lock to allow ftgmac100_reset to acquire it, keeping lock
1485 * order consistent to prevent dead lock.
1486 */
1487 if (netdev->phydev)
1488 mutex_unlock(&netdev->phydev->lock);
1489
1490 ftgmac100_reset(priv);
1491
1492 if (netdev->phydev)
1493 mutex_lock(&netdev->phydev->lock);
1494
1495 }
1496
ftgmac100_mii_probe(struct net_device * netdev)1497 static int ftgmac100_mii_probe(struct net_device *netdev)
1498 {
1499 struct ftgmac100 *priv = netdev_priv(netdev);
1500 struct platform_device *pdev = to_platform_device(priv->dev);
1501 struct device_node *np = pdev->dev.of_node;
1502 struct phy_device *phydev;
1503 phy_interface_t phy_intf;
1504 int err;
1505
1506 if (!priv->mii_bus) {
1507 dev_err(priv->dev, "No MDIO bus available\n");
1508 return -ENODEV;
1509 }
1510
1511 /* Default to RGMII. It's a gigabit part after all */
1512 err = of_get_phy_mode(np, &phy_intf);
1513 if (err)
1514 phy_intf = PHY_INTERFACE_MODE_RGMII;
1515
1516 /* Aspeed only supports these. I don't know about other IP
1517 * block vendors so I'm going to just let them through for
1518 * now. Note that this is only a warning if for some obscure
1519 * reason the DT really means to lie about it or it's a newer
1520 * part we don't know about.
1521 *
1522 * On the Aspeed SoC there are additionally straps and SCU
1523 * control bits that could tell us what the interface is
1524 * (or allow us to configure it while the IP block is held
1525 * in reset). For now I chose to keep this driver away from
1526 * those SoC specific bits and assume the device-tree is
1527 * right and the SCU has been configured properly by pinmux
1528 * or the firmware.
1529 */
1530 if (priv->is_aspeed && !(phy_interface_mode_is_rgmii(phy_intf))) {
1531 netdev_warn(netdev,
1532 "Unsupported PHY mode %s !\n",
1533 phy_modes(phy_intf));
1534 }
1535
1536 phydev = phy_find_first(priv->mii_bus);
1537 if (!phydev) {
1538 netdev_info(netdev, "%s: no PHY found\n", netdev->name);
1539 return -ENODEV;
1540 }
1541
1542 phydev = phy_connect(netdev, phydev_name(phydev),
1543 &ftgmac100_adjust_link, phy_intf);
1544
1545 if (IS_ERR(phydev)) {
1546 netdev_err(netdev, "%s: Could not attach to PHY\n", netdev->name);
1547 return PTR_ERR(phydev);
1548 }
1549
1550 /* Indicate that we support PAUSE frames (see comment in
1551 * Documentation/networking/phy.rst)
1552 */
1553 phy_support_asym_pause(phydev);
1554
1555 /* Display what we found */
1556 phy_attached_info(phydev);
1557
1558 return 0;
1559 }
1560
ftgmac100_open(struct net_device * netdev)1561 static int ftgmac100_open(struct net_device *netdev)
1562 {
1563 struct ftgmac100 *priv = netdev_priv(netdev);
1564 int err;
1565
1566 /* Allocate ring buffers */
1567 err = ftgmac100_alloc_rings(priv);
1568 if (err) {
1569 netdev_err(netdev, "Failed to allocate descriptors\n");
1570 return err;
1571 }
1572
1573 /* When using NC-SI we force the speed to 100Mbit/s full duplex,
1574 *
1575 * Otherwise we leave it set to 0 (no link), the link
1576 * message from the PHY layer will handle setting it up to
1577 * something else if needed.
1578 */
1579 if (priv->use_ncsi) {
1580 priv->cur_duplex = DUPLEX_FULL;
1581 priv->cur_speed = SPEED_100;
1582 } else {
1583 priv->cur_duplex = 0;
1584 priv->cur_speed = 0;
1585 }
1586
1587 /* Reset the hardware */
1588 err = ftgmac100_reset_and_config_mac(priv);
1589 if (err)
1590 goto err_hw;
1591
1592 /* Initialize NAPI */
1593 netif_napi_add(netdev, &priv->napi, ftgmac100_poll);
1594
1595 /* Grab our interrupt */
1596 err = request_irq(netdev->irq, ftgmac100_interrupt, 0, netdev->name, netdev);
1597 if (err) {
1598 netdev_err(netdev, "failed to request irq %d\n", netdev->irq);
1599 goto err_irq;
1600 }
1601
1602 /* Start things up */
1603 err = ftgmac100_init_all(priv, false);
1604 if (err) {
1605 netdev_err(netdev, "Failed to allocate packet buffers\n");
1606 goto err_alloc;
1607 }
1608
1609 if (netdev->phydev) {
1610 /* If we have a PHY, start polling */
1611 phy_start(netdev->phydev);
1612 }
1613 if (priv->use_ncsi) {
1614 /* If using NC-SI, set our carrier on and start the stack */
1615 netif_carrier_on(netdev);
1616
1617 /* Start the NCSI device */
1618 err = ncsi_start_dev(priv->ndev);
1619 if (err)
1620 goto err_ncsi;
1621 }
1622
1623 return 0;
1624
1625 err_ncsi:
1626 phy_stop(netdev->phydev);
1627 napi_disable(&priv->napi);
1628 netif_stop_queue(netdev);
1629 err_alloc:
1630 ftgmac100_free_buffers(priv);
1631 free_irq(netdev->irq, netdev);
1632 err_irq:
1633 netif_napi_del(&priv->napi);
1634 err_hw:
1635 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1636 ftgmac100_free_rings(priv);
1637 return err;
1638 }
1639
ftgmac100_stop(struct net_device * netdev)1640 static int ftgmac100_stop(struct net_device *netdev)
1641 {
1642 struct ftgmac100 *priv = netdev_priv(netdev);
1643
1644 /* Note about the reset task: We are called with the rtnl lock
1645 * held, so we are synchronized against the core of the reset
1646 * task. We must not try to synchronously cancel it otherwise
1647 * we can deadlock. But since it will test for netif_running()
1648 * which has already been cleared by the net core, we don't
1649 * anything special to do.
1650 */
1651
1652 /* disable all interrupts */
1653 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1654
1655 netif_stop_queue(netdev);
1656 napi_disable(&priv->napi);
1657 netif_napi_del(&priv->napi);
1658 if (netdev->phydev)
1659 phy_stop(netdev->phydev);
1660 if (priv->use_ncsi)
1661 ncsi_stop_dev(priv->ndev);
1662
1663 ftgmac100_stop_hw(priv);
1664 free_irq(netdev->irq, netdev);
1665 ftgmac100_free_buffers(priv);
1666 ftgmac100_free_rings(priv);
1667
1668 return 0;
1669 }
1670
ftgmac100_tx_timeout(struct net_device * netdev,unsigned int txqueue)1671 static void ftgmac100_tx_timeout(struct net_device *netdev, unsigned int txqueue)
1672 {
1673 struct ftgmac100 *priv = netdev_priv(netdev);
1674
1675 /* Disable all interrupts */
1676 iowrite32(0, priv->base + FTGMAC100_OFFSET_IER);
1677
1678 /* Do the reset outside of interrupt context */
1679 schedule_work(&priv->reset_task);
1680 }
1681
ftgmac100_set_features(struct net_device * netdev,netdev_features_t features)1682 static int ftgmac100_set_features(struct net_device *netdev,
1683 netdev_features_t features)
1684 {
1685 struct ftgmac100 *priv = netdev_priv(netdev);
1686 netdev_features_t changed = netdev->features ^ features;
1687
1688 if (!netif_running(netdev))
1689 return 0;
1690
1691 /* Update the vlan filtering bit */
1692 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1693 u32 maccr;
1694
1695 maccr = ioread32(priv->base + FTGMAC100_OFFSET_MACCR);
1696 if (priv->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
1697 maccr |= FTGMAC100_MACCR_RM_VLAN;
1698 else
1699 maccr &= ~FTGMAC100_MACCR_RM_VLAN;
1700 iowrite32(maccr, priv->base + FTGMAC100_OFFSET_MACCR);
1701 }
1702
1703 return 0;
1704 }
1705
1706 #ifdef CONFIG_NET_POLL_CONTROLLER
ftgmac100_poll_controller(struct net_device * netdev)1707 static void ftgmac100_poll_controller(struct net_device *netdev)
1708 {
1709 unsigned long flags;
1710
1711 local_irq_save(flags);
1712 ftgmac100_interrupt(netdev->irq, netdev);
1713 local_irq_restore(flags);
1714 }
1715 #endif
1716
1717 static const struct net_device_ops ftgmac100_netdev_ops = {
1718 .ndo_open = ftgmac100_open,
1719 .ndo_stop = ftgmac100_stop,
1720 .ndo_start_xmit = ftgmac100_hard_start_xmit,
1721 .ndo_set_mac_address = ftgmac100_set_mac_addr,
1722 .ndo_validate_addr = eth_validate_addr,
1723 .ndo_eth_ioctl = phy_do_ioctl,
1724 .ndo_tx_timeout = ftgmac100_tx_timeout,
1725 .ndo_set_rx_mode = ftgmac100_set_rx_mode,
1726 .ndo_set_features = ftgmac100_set_features,
1727 #ifdef CONFIG_NET_POLL_CONTROLLER
1728 .ndo_poll_controller = ftgmac100_poll_controller,
1729 #endif
1730 .ndo_vlan_rx_add_vid = ncsi_vlan_rx_add_vid,
1731 .ndo_vlan_rx_kill_vid = ncsi_vlan_rx_kill_vid,
1732 };
1733
ftgmac100_setup_mdio(struct net_device * netdev)1734 static int ftgmac100_setup_mdio(struct net_device *netdev)
1735 {
1736 struct ftgmac100 *priv = netdev_priv(netdev);
1737 struct platform_device *pdev = to_platform_device(priv->dev);
1738 struct device_node *np = pdev->dev.of_node;
1739 struct device_node *mdio_np;
1740 int err = 0;
1741 u32 reg;
1742
1743 /* initialize mdio bus */
1744 priv->mii_bus = devm_mdiobus_alloc(priv->dev);
1745 if (!priv->mii_bus)
1746 return -EIO;
1747
1748 if (priv->mac_id == FTGMAC100_AST2400 ||
1749 priv->mac_id == FTGMAC100_AST2500) {
1750 /* The AST2600 has a separate MDIO controller */
1751
1752 /* For the AST2400 and AST2500 this driver only supports the
1753 * old MDIO interface
1754 */
1755 reg = ioread32(priv->base + FTGMAC100_OFFSET_REVR);
1756 reg &= ~FTGMAC100_REVR_NEW_MDIO_INTERFACE;
1757 iowrite32(reg, priv->base + FTGMAC100_OFFSET_REVR);
1758 }
1759
1760 priv->mii_bus->name = "ftgmac100_mdio";
1761 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%d",
1762 pdev->name, pdev->id);
1763 priv->mii_bus->parent = priv->dev;
1764 priv->mii_bus->priv = priv->netdev;
1765 priv->mii_bus->read = ftgmac100_mdiobus_read;
1766 priv->mii_bus->write = ftgmac100_mdiobus_write;
1767
1768 mdio_np = of_get_child_by_name(np, "mdio");
1769
1770 err = devm_of_mdiobus_register(priv->dev, priv->mii_bus, mdio_np);
1771 of_node_put(mdio_np);
1772 if (err) {
1773 dev_err(priv->dev, "Cannot register MDIO bus!\n");
1774 return err;
1775 }
1776
1777 return 0;
1778 }
1779
ftgmac100_phy_disconnect(struct net_device * netdev)1780 static void ftgmac100_phy_disconnect(struct net_device *netdev)
1781 {
1782 struct ftgmac100 *priv = netdev_priv(netdev);
1783 struct phy_device *phydev = netdev->phydev;
1784
1785 if (!phydev)
1786 return;
1787
1788 phy_disconnect(phydev);
1789 if (of_phy_is_fixed_link(priv->dev->of_node))
1790 of_phy_deregister_fixed_link(priv->dev->of_node);
1791
1792 if (priv->use_ncsi)
1793 fixed_phy_unregister(phydev);
1794 }
1795
ftgmac100_ncsi_handler(struct ncsi_dev * nd)1796 static void ftgmac100_ncsi_handler(struct ncsi_dev *nd)
1797 {
1798 if (unlikely(nd->state != ncsi_dev_state_functional))
1799 return;
1800
1801 netdev_dbg(nd->dev, "NCSI interface %s\n",
1802 nd->link_up ? "up" : "down");
1803 }
1804
ftgmac100_setup_clk(struct ftgmac100 * priv)1805 static int ftgmac100_setup_clk(struct ftgmac100 *priv)
1806 {
1807 struct clk *clk;
1808 int rc;
1809
1810 clk = devm_clk_get_enabled(priv->dev, NULL /* MACCLK */);
1811 if (IS_ERR(clk))
1812 return PTR_ERR(clk);
1813 priv->clk = clk;
1814
1815 /* Aspeed specifies a 100MHz clock is required for up to
1816 * 1000Mbit link speeds. As NCSI is limited to 100Mbit, 25MHz
1817 * is sufficient
1818 */
1819 rc = clk_set_rate(priv->clk, priv->use_ncsi ? FTGMAC_25MHZ :
1820 FTGMAC_100MHZ);
1821 if (rc)
1822 return rc;
1823
1824 /* RCLK is for RMII, typically used for NCSI. Optional because it's not
1825 * necessary if it's the AST2400 MAC, or the MAC is configured for
1826 * RGMII, or the controller is not an ASPEED-based controller.
1827 */
1828 priv->rclk = devm_clk_get_optional_enabled(priv->dev, "RCLK");
1829 if (IS_ERR(priv->rclk))
1830 return PTR_ERR(priv->rclk);
1831
1832 return 0;
1833 }
1834
ftgmac100_has_child_node(struct device_node * np,const char * name)1835 static bool ftgmac100_has_child_node(struct device_node *np, const char *name)
1836 {
1837 struct device_node *child_np = of_get_child_by_name(np, name);
1838 bool ret = false;
1839
1840 if (child_np) {
1841 ret = true;
1842 of_node_put(child_np);
1843 }
1844
1845 return ret;
1846 }
1847
ftgmac100_probe_ncsi(struct net_device * netdev,struct ftgmac100 * priv,struct platform_device * pdev)1848 static int ftgmac100_probe_ncsi(struct net_device *netdev,
1849 struct ftgmac100 *priv,
1850 struct platform_device *pdev)
1851 {
1852 struct device_node *np = pdev->dev.of_node;
1853 struct phy_device *phydev;
1854 int err;
1855
1856 if (!IS_ENABLED(CONFIG_NET_NCSI)) {
1857 dev_err(&pdev->dev, "NCSI stack not enabled\n");
1858 return -EINVAL;
1859 }
1860
1861 dev_info(&pdev->dev, "Using NCSI interface\n");
1862 priv->use_ncsi = true;
1863 priv->ndev = ncsi_register_dev(netdev, ftgmac100_ncsi_handler);
1864 if (!priv->ndev)
1865 return -EINVAL;
1866
1867 phydev = fixed_phy_register(&ncsi_phy_status, np);
1868 if (IS_ERR(phydev)) {
1869 dev_err(&pdev->dev, "failed to register fixed PHY device\n");
1870 err = PTR_ERR(phydev);
1871 goto err_register_ndev;
1872 }
1873 err = phy_connect_direct(netdev, phydev, ftgmac100_adjust_link,
1874 PHY_INTERFACE_MODE_RMII);
1875 if (err) {
1876 dev_err(&pdev->dev, "Connecting PHY failed\n");
1877 goto err_register_phy;
1878 }
1879
1880 return 0;
1881 err_register_phy:
1882 fixed_phy_unregister(phydev);
1883 err_register_ndev:
1884 if (priv->ndev)
1885 ncsi_unregister_dev(priv->ndev);
1886 priv->ndev = NULL;
1887 return err;
1888 }
1889
ftgmac100_probe_dt(struct net_device * netdev,struct platform_device * pdev,struct ftgmac100 * priv,struct device_node * np)1890 static int ftgmac100_probe_dt(struct net_device *netdev,
1891 struct platform_device *pdev,
1892 struct ftgmac100 *priv,
1893 struct device_node *np)
1894 {
1895 struct phy_device *phy;
1896 int err;
1897
1898 if (of_get_property(np, "use-ncsi", NULL))
1899 return ftgmac100_probe_ncsi(netdev, priv, pdev);
1900
1901 if (of_phy_is_fixed_link(np) ||
1902 of_get_property(np, "phy-handle", NULL)) {
1903 /* Support "mdio"/"phy" child nodes for ast2400/2500
1904 * with an embedded MDIO controller. Automatically
1905 * scan the DTS for available PHYs and register
1906 * them. 2600 has an independent MDIO controller, not
1907 * part of the MAC.
1908 */
1909 phy = of_phy_get_and_connect(priv->netdev, np,
1910 &ftgmac100_adjust_link);
1911 if (!phy) {
1912 dev_err(&pdev->dev, "Failed to connect to phy\n");
1913 return -EINVAL;
1914 }
1915
1916 /* Indicate that we support PAUSE frames (see comment in
1917 * Documentation/networking/phy.rst)
1918 */
1919 phy_support_asym_pause(phy);
1920
1921 /* Display what we found */
1922 phy_attached_info(phy);
1923 return 0;
1924 }
1925
1926 if (!ftgmac100_has_child_node(np, "mdio")) {
1927 /* Support legacy ASPEED devicetree descriptions that
1928 * decribe a MAC with an embedded MDIO controller but
1929 * have no "mdio" child node. Automatically scan the
1930 * MDIO bus for available PHYs.
1931 */
1932 err = ftgmac100_mii_probe(netdev);
1933 if (err) {
1934 dev_err(priv->dev, "MII probe failed!\n");
1935 return err;
1936 }
1937 }
1938
1939 return 0;
1940 }
1941
ftgmac100_probe(struct platform_device * pdev)1942 static int ftgmac100_probe(struct platform_device *pdev)
1943 {
1944 const struct ftgmac100_match_data *match_data;
1945 enum ftgmac100_mac_id mac_id;
1946 struct resource *res;
1947 int irq;
1948 struct net_device *netdev;
1949 struct ftgmac100 *priv;
1950 struct device_node *np;
1951 int err = 0;
1952
1953 np = pdev->dev.of_node;
1954 if (np) {
1955 match_data = of_device_get_match_data(&pdev->dev);
1956 if (!match_data)
1957 return -EINVAL;
1958 mac_id = match_data->mac_id;
1959 } else {
1960 mac_id = FTGMAC100_FARADAY;
1961 }
1962
1963 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1964 if (!res)
1965 return -ENXIO;
1966
1967 irq = platform_get_irq(pdev, 0);
1968 if (irq < 0)
1969 return irq;
1970
1971 /* setup net_device */
1972 netdev = devm_alloc_etherdev(&pdev->dev, sizeof(*priv));
1973 if (!netdev)
1974 return -ENOMEM;
1975
1976 SET_NETDEV_DEV(netdev, &pdev->dev);
1977
1978 netdev->ethtool_ops = &ftgmac100_ethtool_ops;
1979 netdev->netdev_ops = &ftgmac100_netdev_ops;
1980 netdev->watchdog_timeo = 5 * HZ;
1981
1982 platform_set_drvdata(pdev, netdev);
1983
1984 /* setup private data */
1985 priv = netdev_priv(netdev);
1986 priv->netdev = netdev;
1987 priv->dev = &pdev->dev;
1988 priv->mac_id = mac_id;
1989 INIT_WORK(&priv->reset_task, ftgmac100_reset_task);
1990
1991 /* map io memory */
1992 priv->res = devm_request_mem_region(&pdev->dev,
1993 res->start, resource_size(res),
1994 dev_name(&pdev->dev));
1995 if (!priv->res) {
1996 dev_err(&pdev->dev, "Could not reserve memory region\n");
1997 return -ENOMEM;
1998 }
1999
2000 priv->base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
2001 if (!priv->base) {
2002 dev_err(&pdev->dev, "Failed to ioremap ethernet registers\n");
2003 return -EIO;
2004 }
2005
2006 netdev->irq = irq;
2007
2008 /* Enable pause */
2009 priv->tx_pause = true;
2010 priv->rx_pause = true;
2011 priv->aneg_pause = true;
2012
2013 /* MAC address from chip or random one */
2014 err = ftgmac100_initial_mac(priv);
2015 if (err)
2016 return err;
2017
2018 if (priv->mac_id == FTGMAC100_AST2400 ||
2019 priv->mac_id == FTGMAC100_AST2500 ||
2020 priv->mac_id == FTGMAC100_AST2600) {
2021 priv->rxdes0_edorr_mask = BIT(30);
2022 priv->txdes0_edotr_mask = BIT(30);
2023 priv->is_aspeed = true;
2024 } else {
2025 priv->rxdes0_edorr_mask = BIT(15);
2026 priv->txdes0_edotr_mask = BIT(15);
2027 }
2028
2029 if (priv->mac_id == FTGMAC100_FARADAY ||
2030 priv->mac_id == FTGMAC100_AST2400 ||
2031 priv->mac_id == FTGMAC100_AST2500) {
2032 err = ftgmac100_setup_mdio(netdev);
2033 if (err)
2034 return err;
2035 }
2036
2037 if (np) {
2038 err = ftgmac100_probe_dt(netdev, pdev, priv, np);
2039 if (err)
2040 goto err;
2041 }
2042
2043 priv->rst = devm_reset_control_get_optional_exclusive(priv->dev, NULL);
2044 if (IS_ERR(priv->rst)) {
2045 err = PTR_ERR(priv->rst);
2046 goto err;
2047 }
2048
2049 if (priv->is_aspeed) {
2050 err = ftgmac100_setup_clk(priv);
2051 if (err)
2052 goto err;
2053 }
2054
2055 /* Disable ast2600 problematic HW arbitration */
2056 if (priv->mac_id == FTGMAC100_AST2600)
2057 iowrite32(FTGMAC100_TM_DEFAULT,
2058 priv->base + FTGMAC100_OFFSET_TM);
2059
2060 /* Default ring sizes */
2061 priv->rx_q_entries = priv->new_rx_q_entries = DEF_RX_QUEUE_ENTRIES;
2062 priv->tx_q_entries = priv->new_tx_q_entries = DEF_TX_QUEUE_ENTRIES;
2063
2064 /* Base feature set */
2065 netdev->hw_features = NETIF_F_RXCSUM | NETIF_F_HW_CSUM |
2066 NETIF_F_GRO | NETIF_F_SG | NETIF_F_HW_VLAN_CTAG_RX |
2067 NETIF_F_HW_VLAN_CTAG_TX;
2068
2069 if (priv->use_ncsi)
2070 netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
2071
2072 /* AST2400 doesn't have working HW checksum generation */
2073 if (priv->mac_id == FTGMAC100_AST2400)
2074 netdev->hw_features &= ~NETIF_F_HW_CSUM;
2075
2076 /* AST2600 tx checksum with NCSI is broken */
2077 if (priv->use_ncsi && priv->mac_id == FTGMAC100_AST2600)
2078 netdev->hw_features &= ~NETIF_F_HW_CSUM;
2079
2080 if (np && of_get_property(np, "no-hw-checksum", NULL))
2081 netdev->hw_features &= ~(NETIF_F_HW_CSUM | NETIF_F_RXCSUM);
2082 netdev->features |= netdev->hw_features;
2083
2084 /* register network device */
2085 err = register_netdev(netdev);
2086 if (err) {
2087 dev_err(&pdev->dev, "Failed to register netdev\n");
2088 goto err;
2089 }
2090
2091 netdev_info(netdev, "irq %d, mapped at %p\n", netdev->irq, priv->base);
2092
2093 return 0;
2094
2095 err:
2096 ftgmac100_phy_disconnect(netdev);
2097 if (priv->ndev)
2098 ncsi_unregister_dev(priv->ndev);
2099 return err;
2100 }
2101
ftgmac100_remove(struct platform_device * pdev)2102 static void ftgmac100_remove(struct platform_device *pdev)
2103 {
2104 struct net_device *netdev;
2105 struct ftgmac100 *priv;
2106
2107 netdev = platform_get_drvdata(pdev);
2108 priv = netdev_priv(netdev);
2109
2110 if (priv->ndev)
2111 ncsi_unregister_dev(priv->ndev);
2112 unregister_netdev(netdev);
2113
2114 /* There's a small chance the reset task will have been re-queued,
2115 * during stop, make sure it's gone before we free the structure.
2116 */
2117 cancel_work_sync(&priv->reset_task);
2118
2119 ftgmac100_phy_disconnect(netdev);
2120 }
2121
2122 static const struct ftgmac100_match_data ftgmac100_match_data_ast2400 = {
2123 .mac_id = FTGMAC100_AST2400
2124 };
2125
2126 static const struct ftgmac100_match_data ftgmac100_match_data_ast2500 = {
2127 .mac_id = FTGMAC100_AST2500
2128 };
2129
2130 static const struct ftgmac100_match_data ftgmac100_match_data_ast2600 = {
2131 .mac_id = FTGMAC100_AST2600
2132 };
2133
2134 static const struct ftgmac100_match_data ftgmac100_match_data_faraday = {
2135 .mac_id = FTGMAC100_FARADAY
2136 };
2137
2138 static const struct of_device_id ftgmac100_of_match[] = {
2139 { .compatible = "aspeed,ast2400-mac",
2140 .data = &ftgmac100_match_data_ast2400},
2141 { .compatible = "aspeed,ast2500-mac",
2142 .data = &ftgmac100_match_data_ast2500 },
2143 { .compatible = "aspeed,ast2600-mac",
2144 .data = &ftgmac100_match_data_ast2600 },
2145 { .compatible = "faraday,ftgmac100",
2146 .data = &ftgmac100_match_data_faraday },
2147 { }
2148 };
2149 MODULE_DEVICE_TABLE(of, ftgmac100_of_match);
2150
2151 static struct platform_driver ftgmac100_driver = {
2152 .probe = ftgmac100_probe,
2153 .remove = ftgmac100_remove,
2154 .driver = {
2155 .name = DRV_NAME,
2156 .of_match_table = ftgmac100_of_match,
2157 },
2158 };
2159 module_platform_driver(ftgmac100_driver);
2160
2161 MODULE_AUTHOR("Po-Yu Chuang <ratbert@faraday-tech.com>");
2162 MODULE_DESCRIPTION("FTGMAC100 driver");
2163 MODULE_LICENSE("GPL");
2164