// SPDX-License-Identifier: GPL-2.0-only
/*
 * Cadence MACB/GEM Ethernet Controller driver
 *
 * Copyright (C) 2004-2006 Atmel Corporation
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/circ_buf.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/firmware/xlnx-zynqmp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ip.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include <linux/phy/phy.h>
#include <linux/phylink.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/ptp_classify.h>
#include <linux/reset.h>
#include <linux/slab.h>
#include <linux/tcp.h>
#include <linux/types.h>
#include <linux/udp.h>
#include <linux/gcd.h>
#include <net/pkt_sched.h>
#include "macb.h"

/* This structure is only used for MACB on SiFive FU540 devices */
struct sifive_fu540_macb_mgmt {
	void __iomem *reg;
	unsigned long rate;
	struct clk_hw hw;
};

#define MACB_RX_BUFFER_SIZE	128
#define RX_BUFFER_MULTIPLE	64 /* bytes */
#define RX_BUFFER_MAX		(0xFF * RX_BUFFER_MULTIPLE) /* 16320 bytes */

#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
#define MIN_RX_RING_SIZE	64
#define MAX_RX_RING_SIZE	8192

#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
#define MIN_TX_RING_SIZE	64
#define MAX_TX_RING_SIZE	4096

/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
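/* Example: with the default tx_ring_size of 512, transmission is woken up
 * once no more than 3 * 512 / 4 = 384 descriptors are still in use.
 */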

#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)	\
				 | MACB_BIT(ISR_RLE)	\
				 | MACB_BIT(TXERR))
#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP)	\
				 | MACB_BIT(TXUBR))

#define MACB_INT_MISC_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(RXUBR) | \
				 MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP) | \
				 GEM_BIT(WOL) | MACB_BIT(WOL))

/* Max length of transmit frame must be a multiple of 8 bytes */
#define MACB_TX_LEN_ALIGN	8
#define MACB_MAX_TX_LEN		((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1) & ~((unsigned int)(MACB_TX_LEN_ALIGN - 1)))
/* Limit the maximum TX length as per the Cadence TSO errata. This is to avoid
 * a false amba_error in the TX path from the DMA assuming there is not enough
 * space in the SRAM (16KB) even when there is.
 */
#define GEM_MAX_TX_LEN		(unsigned int)(0x3FC0)

#define GEM_MTU_MIN_SIZE	ETH_MIN_MTU
#define MACB_NETIF_LSO		NETIF_F_TSO

#define MACB_WOL_ENABLED	BIT(0)

#define HS_SPEED_10000M		4
#define MACB_SERDES_RATE_10G	1

/* Graceful stop timeouts in us. We should allow up to
 * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
 */
#define MACB_HALT_TIMEOUT	14000
#define MACB_PM_TIMEOUT		100 /* ms */

#define MACB_MDIO_TIMEOUT	1000000 /* in usecs */

/* The DMA buffer descriptor can have a different size
 * depending on the hardware configuration:
 *
 * 1. dma address width 32 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *
 * 2. dma address width 64 bits:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *
 * 3. dma address width 32 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: timestamp word 1
 *    word 4: timestamp word 2
 *
 * 4. dma address width 64 bits with hardware timestamping:
 *    word 1: 32 bit address of Data Buffer
 *    word 2: control
 *    word 3: upper 32 bit address of Data Buffer
 *    word 4: unused
 *    word 5: timestamp word 1
 *    word 6: timestamp word 2
 */
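/* With 4-byte words, the four layouts above occupy 8, 16, 16 and 24 bytes
 * respectively: macb_dma_desc_get_size() below computes exactly this by
 * adding the optional 8-byte extensions to the 8-byte base descriptor.
 */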
static unsigned int macb_dma_desc_get_size(struct macb *bp)
{
	unsigned int desc_size = sizeof(struct macb_dma_desc);

	if (macb_dma64(bp))
		desc_size += sizeof(struct macb_dma_desc_64);
	if (macb_dma_ptp(bp))
		desc_size += sizeof(struct macb_dma_desc_ptp);

	return desc_size;
}

static unsigned int macb_adj_dma_desc_idx(struct macb *bp, unsigned int desc_idx)
{
	return desc_idx * (1 + macb_dma64(bp) + macb_dma_ptp(bp));
}
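/* Each enabled extension widens a descriptor by one base-descriptor unit, so
 * e.g. with 64-bit addressing and timestamping both enabled, ring index 3
 * maps to index 3 * (1 + 1 + 1) = 9 in the array of base descriptors.
 */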

static struct macb_dma_desc_64 *macb_64b_desc(struct macb *bp, struct macb_dma_desc *desc)
{
	return (struct macb_dma_desc_64 *)((void *)desc
		+ sizeof(struct macb_dma_desc));
}

/* Ring buffer accessors */
static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->tx_ring_size - 1);
}

static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
					  unsigned int index)
{
	index = macb_tx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->tx_ring[index];
}

static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
				       unsigned int index)
{
	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
}

static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
{
	dma_addr_t offset;

	offset = macb_tx_ring_wrap(queue->bp, index) *
		 macb_dma_desc_get_size(queue->bp);

	return queue->tx_ring_dma + offset;
}

static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
{
	return index & (bp->rx_ring_size - 1);
}

static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
{
	index = macb_rx_ring_wrap(queue->bp, index);
	index = macb_adj_dma_desc_idx(queue->bp, index);
	return &queue->rx_ring[index];
}

static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
{
	return queue->rx_buffers + queue->bp->rx_buffer_size *
	       macb_rx_ring_wrap(queue->bp, index);
}

/* I/O accessors */
static u32 hw_readl_native(struct macb *bp, int offset)
{
	return __raw_readl(bp->regs + offset);
}

static void hw_writel_native(struct macb *bp, int offset, u32 value)
{
	__raw_writel(value, bp->regs + offset);
}

static u32 hw_readl(struct macb *bp, int offset)
{
	return readl_relaxed(bp->regs + offset);
}

static void hw_writel(struct macb *bp, int offset, u32 value)
{
	writel_relaxed(value, bp->regs + offset);
}

/* Find the CPU endianness by using the loopback bit of the NCR register. When
 * the CPU is in big endian we need to program swapped mode for management
 * descriptor access.
 */
static bool hw_is_native_io(void __iomem *addr)
{
	u32 value = MACB_BIT(LLB);

	__raw_writel(value, addr + MACB_NCR);
	value = __raw_readl(addr + MACB_NCR);

	/* Write 0 back to disable everything */
	__raw_writel(0, addr + MACB_NCR);

	return value == MACB_BIT(LLB);
}

static bool hw_is_gem(void __iomem *addr, bool native_io)
{
	u32 id;

	if (native_io)
		id = __raw_readl(addr + MACB_MID);
	else
		id = readl_relaxed(addr + MACB_MID);

	return MACB_BFEXT(IDNUM, id) >= 0x2;
}

static void macb_set_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;

	bottom = get_unaligned_le32(bp->dev->dev_addr);
	macb_or_gem_writel(bp, SA1B, bottom);
	top = get_unaligned_le16(bp->dev->dev_addr + 4);
	macb_or_gem_writel(bp, SA1T, top);

	if (gem_has_ptp(bp)) {
		gem_writel(bp, RXPTPUNI, bottom);
		gem_writel(bp, TXPTPUNI, bottom);
	}

	/* Clear unused address register sets */
	macb_or_gem_writel(bp, SA2B, 0);
	macb_or_gem_writel(bp, SA2T, 0);
	macb_or_gem_writel(bp, SA3B, 0);
	macb_or_gem_writel(bp, SA3T, 0);
	macb_or_gem_writel(bp, SA4B, 0);
	macb_or_gem_writel(bp, SA4T, 0);
}

static void macb_get_hwaddr(struct macb *bp)
{
	u32 bottom;
	u16 top;
	u8 addr[6];
	int i;

	/* Check all 4 address registers for a valid address */
	for (i = 0; i < 4; i++) {
		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
		top = macb_or_gem_readl(bp, SA1T + i * 8);

		addr[0] = bottom & 0xff;
		addr[1] = (bottom >> 8) & 0xff;
		addr[2] = (bottom >> 16) & 0xff;
		addr[3] = (bottom >> 24) & 0xff;
		addr[4] = top & 0xff;
		addr[5] = (top >> 8) & 0xff;

		if (is_valid_ether_addr(addr)) {
			eth_hw_addr_set(bp->dev, addr);
			return;
		}
	}

	dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
	eth_hw_addr_random(bp->dev);
}

static int macb_mdio_wait_for_idle(struct macb *bp)
{
	u32 val;

	return readx_poll_timeout(MACB_READ_NSR, bp, val, val & MACB_BIT(IDLE),
				  1, MACB_MDIO_TIMEOUT);
}

static int macb_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
			      | MACB_BF(RW, MACB_MAN_C22_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_C22_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_read_c45(struct mii_bus *bus, int mii_id, int devad,
			      int regnum)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(DATA, regnum & 0xFFFF)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_READ)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_read_exit;

	status = MACB_BFEXT(DATA, macb_readl(bp, MAN));

mdio_read_exit:
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum,
			       u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_resume_and_get(&bp->pdev->dev);
	if (status < 0)
		goto mdio_pm_exit;

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C22_SOF)
			      | MACB_BF(RW, MACB_MAN_C22_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, regnum)
			      | MACB_BF(CODE, MACB_MAN_C22_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static int macb_mdio_write_c45(struct mii_bus *bus, int mii_id,
			       int devad, int regnum,
			       u16 value)
{
	struct macb *bp = bus->priv;
	int status;

	status = pm_runtime_get_sync(&bp->pdev->dev);
	if (status < 0) {
		pm_runtime_put_noidle(&bp->pdev->dev);
		goto mdio_pm_exit;
	}

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_ADDR)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(DATA, regnum & 0xFFFF)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_C45_SOF)
			      | MACB_BF(RW, MACB_MAN_C45_WRITE)
			      | MACB_BF(PHYA, mii_id)
			      | MACB_BF(REGA, devad & 0x1F)
			      | MACB_BF(CODE, MACB_MAN_C45_CODE)
			      | MACB_BF(DATA, value)));

	status = macb_mdio_wait_for_idle(bp);
	if (status < 0)
		goto mdio_write_exit;

mdio_write_exit:
	pm_runtime_put_autosuspend(&bp->pdev->dev);
mdio_pm_exit:
	return status;
}

static void macb_init_buffers(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	/* Single register for all queues' high 32 bits. */
	if (macb_dma64(bp)) {
		macb_writel(bp, RBQPH,
			    upper_32_bits(bp->queues[0].rx_ring_dma));
		macb_writel(bp, TBQPH,
			    upper_32_bits(bp->queues[0].tx_ring_dma));
	}

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
		queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
	}
}

/**
 * macb_set_tx_clk() - Set tx_clk to the rate required by the link speed
 * @bp: pointer to struct macb
 * @speed: link speed in Mbps (SPEED_10, SPEED_100 or SPEED_1000)
 */
static void macb_set_tx_clk(struct macb *bp, int speed)
{
	long ferr, rate, rate_rounded;

	if (!bp->tx_clk || (bp->caps & MACB_CAPS_CLK_HW_CHG))
		return;

	/* In case of MII the PHY is the clock master */
	if (bp->phy_interface == PHY_INTERFACE_MODE_MII)
		return;

	rate = rgmii_clock(speed);
	if (rate < 0)
		return;

	rate_rounded = clk_round_rate(bp->tx_clk, rate);
	if (rate_rounded < 0)
		return;

	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
	 * is not satisfied.
	 */
	ferr = abs(rate_rounded - rate);
	ferr = DIV_ROUND_UP(ferr, rate / 100000);
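	/* ferr is now the error in units of 10 ppm, so comparing against 5
	 * enforces the 50 ppm limit; e.g. at a 125 MHz target rate one unit
	 * corresponds to 1250 Hz.
	 */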
	if (ferr > 5)
		netdev_warn(bp->dev,
			    "unable to generate target frequency: %ld Hz\n",
			    rate);

	if (clk_set_rate(bp->tx_clk, rate_rounded))
		netdev_err(bp->dev, "adjusting tx_clk failed.\n");
}

static void macb_usx_pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
				 phy_interface_t interface, int speed,
				 int duplex)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
	u32 config;

	config = gem_readl(bp, USX_CONTROL);
	config = GEM_BFINS(SERDES_RATE, MACB_SERDES_RATE_10G, config);
	config = GEM_BFINS(USX_CTRL_SPEED, HS_SPEED_10000M, config);
	config &= ~(GEM_BIT(TX_SCR_BYPASS) | GEM_BIT(RX_SCR_BYPASS));
	config |= GEM_BIT(TX_EN);
	gem_writel(bp, USX_CONTROL, config);
}

static void macb_usx_pcs_get_state(struct phylink_pcs *pcs,
				   unsigned int neg_mode,
				   struct phylink_link_state *state)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);
	u32 val;

	state->speed = SPEED_10000;
	state->duplex = 1;
	state->an_complete = 1;

	val = gem_readl(bp, USX_STATUS);
	state->link = !!(val & GEM_BIT(USX_BLOCK_LOCK));
	val = gem_readl(bp, NCFGR);
	if (val & GEM_BIT(PAE))
		state->pause = MLO_PAUSE_RX;
}

static int macb_usx_pcs_config(struct phylink_pcs *pcs,
			       unsigned int neg_mode,
			       phy_interface_t interface,
			       const unsigned long *advertising,
			       bool permit_pause_to_mac)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_usx_pcs);

	gem_writel(bp, USX_CONTROL, gem_readl(bp, USX_CONTROL) |
		   GEM_BIT(SIGNAL_OK));

	return 0;
}

static unsigned int macb_pcs_inband_caps(struct phylink_pcs *pcs,
					 phy_interface_t interface)
{
	return LINK_INBAND_DISABLE | LINK_INBAND_ENABLE;
}

static void macb_pcs_get_state(struct phylink_pcs *pcs, unsigned int neg_mode,
			       struct phylink_link_state *state)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_sgmii_pcs);
	u16 bmsr, lpa;

	bmsr = gem_readl(bp, PCSSTS);
	lpa = gem_readl(bp, PCSANLPBASE);
	phylink_mii_c22_pcs_decode_state(state, neg_mode, bmsr, lpa);
}

static void macb_pcs_an_restart(struct phylink_pcs *pcs)
{
	/* Not supported */
}

static int macb_pcs_config(struct phylink_pcs *pcs,
			   unsigned int neg_mode,
			   phy_interface_t interface,
			   const unsigned long *advertising,
			   bool permit_pause_to_mac)
{
	struct macb *bp = container_of(pcs, struct macb, phylink_sgmii_pcs);
	u32 old, new;

	old = gem_readl(bp, PCSANADV);
	new = phylink_mii_c22_pcs_encode_advertisement(interface, advertising);
	if (new != -EINVAL && old != new)
		gem_writel(bp, PCSANADV, new);

	/* Disable AN if it's not to be used, enable otherwise.
	 * Must be written after PCSSEL is set in NCFGR, which is done in
	 * macb_mac_config(), otherwise writes will not take effect.
	 */
	old = gem_readl(bp, PCSCNTRL);
	if (neg_mode == PHYLINK_PCS_NEG_INBAND_ENABLED)
		new = old | BMCR_ANENABLE;
	else
		new = old & ~BMCR_ANENABLE;
	if (old != new)
		gem_writel(bp, PCSCNTRL, new);

	return 0;
}

static const struct phylink_pcs_ops macb_phylink_usx_pcs_ops = {
	.pcs_get_state = macb_usx_pcs_get_state,
	.pcs_config = macb_usx_pcs_config,
	.pcs_link_up = macb_usx_pcs_link_up,
};

static const struct phylink_pcs_ops macb_phylink_pcs_ops = {
	.pcs_inband_caps = macb_pcs_inband_caps,
	.pcs_get_state = macb_pcs_get_state,
	.pcs_an_restart = macb_pcs_an_restart,
	.pcs_config = macb_pcs_config,
};

static bool macb_tx_lpi_set(struct macb *bp, bool enable)
{
	u32 old, ncr;

	lockdep_assert_held(&bp->lock);

	ncr = macb_readl(bp, NCR);
	old = ncr;
	if (enable)
		ncr |= GEM_BIT(TXLPIEN);
	else
		ncr &= ~GEM_BIT(TXLPIEN);
	if (old != ncr)
		macb_writel(bp, NCR, ncr);

	return old != ncr;
}

static bool macb_tx_all_queues_idle(struct macb *bp)
{
	struct macb_queue *queue;
	unsigned int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
		if (READ_ONCE(queue->tx_head) != READ_ONCE(queue->tx_tail))
			return false;
	}
	return true;
}

static void macb_tx_lpi_work_fn(struct work_struct *work)
{
	struct macb *bp = container_of(work, struct macb, tx_lpi_work.work);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	if (bp->eee_active && macb_tx_all_queues_idle(bp))
		macb_tx_lpi_set(bp, true);
	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_tx_lpi_schedule(struct macb *bp)
{
	if (bp->eee_active)
		mod_delayed_work(system_wq, &bp->tx_lpi_work,
				 usecs_to_jiffies(bp->tx_lpi_timer));
}

/* Wake from LPI before transmitting. The MAC must deassert TXLPIEN
 * and wait for the PHY to exit LPI before any frame can be sent.
 * IEEE 802.3az Tw_sys is ~17us for 1000BASE-T, ~30us for 100BASE-TX;
 * we use a conservative 50us.
 */
static void macb_tx_lpi_wake(struct macb *bp)
{
	lockdep_assert_held(&bp->lock);

	if (!bp->eee_active)
		return;

	if (!macb_tx_lpi_set(bp, false))
		return;

	cancel_delayed_work(&bp->tx_lpi_work);
	udelay(50);
}

static void macb_mac_disable_tx_lpi(struct phylink_config *config)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;

	cancel_delayed_work_sync(&bp->tx_lpi_work);

	spin_lock_irqsave(&bp->lock, flags);
	bp->eee_active = false;
	macb_tx_lpi_set(bp, false);
	spin_unlock_irqrestore(&bp->lock, flags);
}

static int macb_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				  bool tx_clk_stop)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;

	spin_lock_irqsave(&bp->lock, flags);
	bp->tx_lpi_timer = timer;
	bp->eee_active = true;
	spin_unlock_irqrestore(&bp->lock, flags);

	/* Defer initial LPI entry by 1 second after link-up per
	 * IEEE 802.3az section 22.7a.
	 */
	mod_delayed_work(system_wq, &bp->tx_lpi_work, msecs_to_jiffies(1000));

	return 0;
}

static void macb_mac_config(struct phylink_config *config, unsigned int mode,
			    const struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	unsigned long flags;
	u32 old_ctrl, ctrl;
	u32 old_ncr, ncr;

	spin_lock_irqsave(&bp->lock, flags);

	old_ctrl = ctrl = macb_or_gem_readl(bp, NCFGR);
	old_ncr = ncr = macb_or_gem_readl(bp, NCR);

	if (bp->caps & MACB_CAPS_MACB_IS_EMAC) {
		if (state->interface == PHY_INTERFACE_MODE_RMII)
			ctrl |= MACB_BIT(RM9200_RMII);
	} else if (macb_is_gem(bp)) {
		ctrl &= ~(GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL));
		ncr &= ~GEM_BIT(ENABLE_HS_MAC);

		if (state->interface == PHY_INTERFACE_MODE_SGMII) {
			ctrl |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
		} else if (state->interface == PHY_INTERFACE_MODE_10GBASER) {
			ctrl |= GEM_BIT(PCSSEL);
			ncr |= GEM_BIT(ENABLE_HS_MAC);
		} else if (bp->caps & MACB_CAPS_MIIONRGMII &&
			   bp->phy_interface == PHY_INTERFACE_MODE_MII) {
			ncr |= MACB_BIT(MIIONRGMII);
		}
	}

	/* Apply the new configuration, if any */
	if (old_ctrl ^ ctrl)
		macb_or_gem_writel(bp, NCFGR, ctrl);

	if (old_ncr ^ ncr)
		macb_or_gem_writel(bp, NCR, ncr);

	spin_unlock_irqrestore(&bp->lock, flags);
}

static void macb_mac_link_down(struct phylink_config *config, unsigned int mode,
			       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned int q;
	u32 ctrl;

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
			queue_writel(queue, IDR,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));

	/* Disable Rx and Tx */
	ctrl = macb_readl(bp, NCR) & ~(MACB_BIT(RE) | MACB_BIT(TE));
	macb_writel(bp, NCR, ctrl);

	netif_tx_stop_all_queues(ndev);
}

/* Use the juggling algorithm to left-rotate the tx ring and tx skb array */
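/* The juggling rotation runs gcd(ring_size, shift) independent cycles, each
 * saving one element and then pulling entries shift positions forward. For
 * example, left-rotating a 6-entry ring by shift = 2 uses gcd(6, 2) = 2
 * cycles, 0 <- 2 <- 4 <- 0 and 1 <- 3 <- 5 <- 1, turning [A B C D E F]
 * into [C D E F A B] with a single temporary per cycle.
 */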
static void gem_shuffle_tx_one_ring(struct macb_queue *queue)
{
	unsigned int head, tail, count, ring_size, desc_size;
	struct macb_tx_skb tx_skb, *skb_curr, *skb_next;
	struct macb_dma_desc *desc_curr, *desc_next;
	unsigned int i, cycles, shift, curr, next;
	struct macb *bp = queue->bp;
	unsigned char desc[24];
	unsigned long flags;

	desc_size = macb_dma_desc_get_size(bp);

	if (WARN_ON_ONCE(desc_size > ARRAY_SIZE(desc)))
		return;

	spin_lock_irqsave(&queue->tx_ptr_lock, flags);
	head = queue->tx_head;
	tail = queue->tx_tail;
	ring_size = bp->tx_ring_size;
	count = CIRC_CNT(head, tail, ring_size);

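	/* A tail at ring index 0 means the ring is already in rotated form. */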
	if (!(tail % ring_size))
		goto unlock;

	if (!count) {
		queue->tx_head = 0;
		queue->tx_tail = 0;
		goto unlock;
	}

	shift = tail % ring_size;
	cycles = gcd(ring_size, shift);

	for (i = 0; i < cycles; i++) {
		memcpy(&desc, macb_tx_desc(queue, i), desc_size);
		memcpy(&tx_skb, macb_tx_skb(queue, i),
		       sizeof(struct macb_tx_skb));

		curr = i;
		next = (curr + shift) % ring_size;

		while (next != i) {
			desc_curr = macb_tx_desc(queue, curr);
			desc_next = macb_tx_desc(queue, next);

			memcpy(desc_curr, desc_next, desc_size);

			if (next == ring_size - 1)
				desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
			if (curr == ring_size - 1)
				desc_curr->ctrl |= MACB_BIT(TX_WRAP);

			skb_curr = macb_tx_skb(queue, curr);
			skb_next = macb_tx_skb(queue, next);
			memcpy(skb_curr, skb_next, sizeof(struct macb_tx_skb));

			curr = next;
			next = (curr + shift) % ring_size;
		}

		desc_curr = macb_tx_desc(queue, curr);
		memcpy(desc_curr, &desc, desc_size);
		if (i == ring_size - 1)
			desc_curr->ctrl &= ~MACB_BIT(TX_WRAP);
		if (curr == ring_size - 1)
			desc_curr->ctrl |= MACB_BIT(TX_WRAP);
		memcpy(macb_tx_skb(queue, curr), &tx_skb,
		       sizeof(struct macb_tx_skb));
	}

	queue->tx_head = count;
	queue->tx_tail = 0;

	/* Make descriptor updates visible to hardware */
	wmb();

unlock:
	spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
}

/* Rotate the queue so that the tail is at index 0 */
static void gem_shuffle_tx_rings(struct macb *bp)
{
	struct macb_queue *queue;
	int q;

	for (q = 0, queue = bp->queues; q < bp->num_queues; q++, queue++)
		gem_shuffle_tx_one_ring(queue);
}

static void macb_mac_link_up(struct phylink_config *config,
			     struct phy_device *phy,
			     unsigned int mode, phy_interface_t interface,
			     int speed, int duplex,
			     bool tx_pause, bool rx_pause)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;
	u32 ctrl;

	spin_lock_irqsave(&bp->lock, flags);

	ctrl = macb_or_gem_readl(bp, NCFGR);

	ctrl &= ~(MACB_BIT(SPD) | MACB_BIT(FD));

	if (speed == SPEED_100)
		ctrl |= MACB_BIT(SPD);

	if (duplex)
		ctrl |= MACB_BIT(FD);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		ctrl &= ~MACB_BIT(PAE);
		if (macb_is_gem(bp)) {
			ctrl &= ~GEM_BIT(GBE);

			if (speed == SPEED_1000)
				ctrl |= GEM_BIT(GBE);
		}

		if (rx_pause)
			ctrl |= MACB_BIT(PAE);

		for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
			queue_writel(queue, IER,
				     bp->rx_intr_mask | MACB_TX_INT_FLAGS | MACB_BIT(HRESP));
		}
	}

	macb_or_gem_writel(bp, NCFGR, ctrl);

	if (bp->phy_interface == PHY_INTERFACE_MODE_10GBASER)
		gem_writel(bp, HS_MAC_CONFIG, GEM_BFINS(HS_MAC_SPEED, HS_SPEED_10000M,
							gem_readl(bp, HS_MAC_CONFIG)));

	spin_unlock_irqrestore(&bp->lock, flags);

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
		macb_set_tx_clk(bp, speed);
		gem_shuffle_tx_rings(bp);
	}

	/* Enable Rx and Tx; Enable PTP unicast */
	ctrl = macb_readl(bp, NCR);
	if (gem_has_ptp(bp))
		ctrl |= MACB_BIT(PTPUNI);

	macb_writel(bp, NCR, ctrl | MACB_BIT(RE) | MACB_BIT(TE));

	netif_tx_wake_all_queues(ndev);
}

static struct phylink_pcs *macb_mac_select_pcs(struct phylink_config *config,
					       phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	if (interface == PHY_INTERFACE_MODE_10GBASER)
		return &bp->phylink_usx_pcs;
	else if (interface == PHY_INTERFACE_MODE_SGMII)
		return &bp->phylink_sgmii_pcs;
	else
		return NULL;
}

static const struct phylink_mac_ops macb_phylink_ops = {
	.mac_select_pcs = macb_mac_select_pcs,
	.mac_config = macb_mac_config,
	.mac_link_down = macb_mac_link_down,
	.mac_link_up = macb_mac_link_up,
	.mac_disable_tx_lpi = macb_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = macb_mac_enable_tx_lpi,
};

static bool macb_phy_handle_exists(struct device_node *dn)
{
	dn = of_parse_phandle(dn, "phy-handle", 0);
	of_node_put(dn);
	return dn != NULL;
}

static int macb_phylink_connect(struct macb *bp)
{
	struct device_node *dn = bp->pdev->dev.of_node;
	struct net_device *dev = bp->dev;
	struct phy_device *phydev;
	int ret;

	if (dn)
		ret = phylink_of_phy_connect(bp->phylink, dn, 0);

	if (!dn || (ret && !macb_phy_handle_exists(dn))) {
		phydev = phy_find_first(bp->mii_bus);
		if (!phydev) {
			netdev_err(dev, "no PHY found\n");
			return -ENXIO;
		}

		/* attach the mac to the phy */
		ret = phylink_connect_phy(bp->phylink, phydev);
	}

	if (ret) {
		netdev_err(dev, "Could not attach PHY (%d)\n", ret);
		return ret;
	}

	phylink_start(bp->phylink);

	return 0;
}

static void macb_get_pcs_fixed_state(struct phylink_config *config,
				     struct phylink_link_state *state)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct macb *bp = netdev_priv(ndev);

	state->link = (macb_readl(bp, NSR) & MACB_BIT(NSR_LINK)) != 0;
}

/* based on au1000_eth.c */
static int macb_mii_probe(struct net_device *dev)
{
	struct macb *bp = netdev_priv(dev);

	bp->phylink_sgmii_pcs.ops = &macb_phylink_pcs_ops;
	bp->phylink_usx_pcs.ops = &macb_phylink_usx_pcs_ops;

	bp->phylink_config.dev = &dev->dev;
	bp->phylink_config.type = PHYLINK_NETDEV;
	bp->phylink_config.mac_managed_pm = true;

	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		bp->phylink_config.poll_fixed_state = true;
		bp->phylink_config.get_fixed_state = macb_get_pcs_fixed_state;
		/* The PCSAUTONEG bit in PCSCNTRL is on out of reset. Setting
		 * default_an_inband to true tells phylink to turn it off only
		 * if necessary (e.g. a fixed link or a PHY that doesn't support
		 * inband).
		 */
		bp->phylink_config.default_an_inband = true;
	}

	bp->phylink_config.mac_capabilities = MAC_ASYM_PAUSE |
		MAC_10 | MAC_100;

	__set_bit(PHY_INTERFACE_MODE_MII,
		  bp->phylink_config.supported_interfaces);
	__set_bit(PHY_INTERFACE_MODE_RMII,
		  bp->phylink_config.supported_interfaces);

	/* Determine what modes are supported */
	if (macb_is_gem(bp) && (bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)) {
		bp->phylink_config.mac_capabilities |= MAC_1000FD;
		if (!(bp->caps & MACB_CAPS_NO_GIGABIT_HALF))
			bp->phylink_config.mac_capabilities |= MAC_1000HD;

		__set_bit(PHY_INTERFACE_MODE_GMII,
			  bp->phylink_config.supported_interfaces);
		phy_interface_set_rgmii(bp->phylink_config.supported_interfaces);

		if (bp->caps & MACB_CAPS_PCS)
			__set_bit(PHY_INTERFACE_MODE_SGMII,
				  bp->phylink_config.supported_interfaces);

		if (bp->caps & MACB_CAPS_HIGH_SPEED) {
			__set_bit(PHY_INTERFACE_MODE_10GBASER,
				  bp->phylink_config.supported_interfaces);
			bp->phylink_config.mac_capabilities |= MAC_10000FD;
		}
	}

	/* Configure EEE LPI if supported */
	if (bp->caps & MACB_CAPS_EEE) {
		__set_bit(PHY_INTERFACE_MODE_MII,
			  bp->phylink_config.lpi_interfaces);
		__set_bit(PHY_INTERFACE_MODE_GMII,
			  bp->phylink_config.lpi_interfaces);
		phy_interface_set_rgmii(bp->phylink_config.lpi_interfaces);
		bp->phylink_config.lpi_capabilities = MAC_100FD | MAC_1000FD;
		bp->phylink_config.lpi_timer_default = 250000;
		bp->phylink_config.eee_enabled_default = true;
	}

	bp->phylink = phylink_create(&bp->phylink_config, bp->pdev->dev.fwnode,
				     bp->phy_interface, &macb_phylink_ops);
	if (IS_ERR(bp->phylink)) {
		netdev_err(dev, "Could not create a phylink instance (%ld)\n",
			   PTR_ERR(bp->phylink));
		return PTR_ERR(bp->phylink);
	}

	return 0;
}

static int macb_mdiobus_register(struct macb *bp, struct device_node *mdio_np)
{
	struct device_node *child, *np = bp->pdev->dev.of_node;

	/* If we have a child named mdio, probe it instead of looking for PHYs
	 * directly under the MAC node
	 */
	if (mdio_np)
		return of_mdiobus_register(bp->mii_bus, mdio_np);

	/* Only create the PHY from the device tree if at least one PHY is
	 * described. Otherwise scan the entire MDIO bus. We do this to support
	 * old device trees that did not follow best practices and did not
	 * describe their network PHYs.
	 */
	for_each_available_child_of_node(np, child)
		if (of_mdiobus_child_is_phy(child)) {
			/* The loop increments the child refcount,
			 * decrement it before returning.
			 */
			of_node_put(child);

			return of_mdiobus_register(bp->mii_bus, np);
		}

	return mdiobus_register(bp->mii_bus);
}

static int macb_mii_init(struct macb *bp)
{
	struct device_node *mdio_np, *np = bp->pdev->dev.of_node;
	int err = -ENXIO;

	/* With fixed-link, we don't need to register the MDIO bus,
	 * except if we have a child named "mdio" in the device tree.
	 * In that case, some devices may be attached to the MACB's MDIO bus.
	 */
	mdio_np = of_get_child_by_name(np, "mdio");
	if (!mdio_np && of_phy_is_fixed_link(np))
		return macb_mii_probe(bp->dev);

	/* Enable management port */
	macb_writel(bp, NCR, MACB_BIT(MPE));

	bp->mii_bus = mdiobus_alloc();
	if (!bp->mii_bus) {
		err = -ENOMEM;
		goto err_out;
	}

	bp->mii_bus->name = "MACB_mii_bus";
	bp->mii_bus->read = &macb_mdio_read_c22;
	bp->mii_bus->write = &macb_mdio_write_c22;
	bp->mii_bus->read_c45 = &macb_mdio_read_c45;
	bp->mii_bus->write_c45 = &macb_mdio_write_c45;
	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
		 bp->pdev->name, bp->pdev->id);
	bp->mii_bus->priv = bp;
	bp->mii_bus->parent = &bp->pdev->dev;

	dev_set_drvdata(&bp->dev->dev, bp->mii_bus);

	err = macb_mdiobus_register(bp, mdio_np);
	if (err)
		goto err_out_free_mdiobus;

	err = macb_mii_probe(bp->dev);
	if (err)
		goto err_out_unregister_bus;

	return 0;

err_out_unregister_bus:
	mdiobus_unregister(bp->mii_bus);
err_out_free_mdiobus:
	mdiobus_free(bp->mii_bus);
err_out:
	of_node_put(mdio_np);

	return err;
}

static void macb_update_stats(struct macb *bp)
{
	u64 *p = &bp->hw_stats.macb.rx_pause_frames;
	u64 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
	int offset = MACB_PFR;

	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);

	for (; p < end; p++, offset += 4)
		*p += bp->macb_reg_readl(bp, offset);
}

static int macb_halt_tx(struct macb *bp)
{
	u32 status;

	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));

	/* Poll TSR until TGO is cleared or timeout. */
	return read_poll_timeout_atomic(macb_readl, status,
					!(status & MACB_BIT(TGO)),
					250, MACB_HALT_TIMEOUT, false,
					bp, TSR);
}

static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb, int budget)
{
	if (tx_skb->mapping) {
		if (tx_skb->mapped_as_page)
			dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
				       tx_skb->size, DMA_TO_DEVICE);
		else
			dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
					 tx_skb->size, DMA_TO_DEVICE);
		tx_skb->mapping = 0;
	}

	if (tx_skb->skb) {
		dev_consume_skb_any(tx_skb->skb);
		tx_skb->skb = NULL;
	}
}

static void macb_set_addr(struct macb *bp, struct macb_dma_desc *desc, dma_addr_t addr)
{
	if (macb_dma64(bp)) {
		struct macb_dma_desc_64 *desc_64;

		desc_64 = macb_64b_desc(bp, desc);
		desc_64->addrh = upper_32_bits(addr);
		/* The low bits of RX address contain the RX_USED bit, clearing
		 * of which allows packet RX. Make sure the high bits are also
		 * visible to HW at that point.
		 */
		dma_wmb();
	}

	desc->addr = lower_32_bits(addr);
}

static dma_addr_t macb_get_addr(struct macb *bp, struct macb_dma_desc *desc)
{
	dma_addr_t addr = 0;

	if (macb_dma64(bp)) {
		struct macb_dma_desc_64 *desc_64;

		desc_64 = macb_64b_desc(bp, desc);
		addr = ((u64)(desc_64->addrh) << 32);
	}
	addr |= MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
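	/* With PTP descriptors a low address bit doubles as the RX valid
	 * flag rather than being part of the buffer address; mask it off.
	 */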
1250 if (macb_dma_ptp(bp))
1251 addr &= ~GEM_BIT(DMA_RXVALID);
1252 return addr;
1253 }
1254
macb_tx_error_task(struct work_struct * work)1255 static void macb_tx_error_task(struct work_struct *work)
1256 {
1257 struct macb_queue *queue = container_of(work, struct macb_queue,
1258 tx_error_task);
1259 bool halt_timeout = false;
1260 struct macb *bp = queue->bp;
1261 u32 queue_index;
1262 u32 packets = 0;
1263 u32 bytes = 0;
1264 struct macb_tx_skb *tx_skb;
1265 struct macb_dma_desc *desc;
1266 struct sk_buff *skb;
1267 unsigned int tail;
1268 unsigned long flags;
1269
1270 queue_index = queue - bp->queues;
1271 netdev_vdbg(bp->dev, "macb_tx_error_task: q = %u, t = %u, h = %u\n",
1272 queue_index, queue->tx_tail, queue->tx_head);
1273
1274 /* Prevent the queue NAPI TX poll from running, as it calls
1275 * macb_tx_complete(), which in turn may call netif_wake_subqueue().
1276 * As explained below, we have to halt the transmission before updating
1277 * TBQP registers so we call netif_tx_stop_all_queues() to notify the
1278 * network engine about the macb/gem being halted.
1279 */
1280 napi_disable(&queue->napi_tx);
1281 spin_lock_irqsave(&bp->lock, flags);
1282
1283 /* Make sure nobody is trying to queue up new packets */
1284 netif_tx_stop_all_queues(bp->dev);
1285
1286 /* Stop transmission now
1287 * (in case we have just queued new packets)
1288 * macb/gem must be halted to write TBQP register
1289 */
1290 if (macb_halt_tx(bp)) {
1291 netdev_err(bp->dev, "BUG: halt tx timed out\n");
1292 macb_writel(bp, NCR, macb_readl(bp, NCR) & (~MACB_BIT(TE)));
1293 halt_timeout = true;
1294 }
1295
1296 /* Treat frames in TX queue including the ones that caused the error.
1297 * Free transmit buffers in upper layer.
1298 */
1299 for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
1300 u32 ctrl;
1301
1302 desc = macb_tx_desc(queue, tail);
1303 ctrl = desc->ctrl;
1304 tx_skb = macb_tx_skb(queue, tail);
1305 skb = tx_skb->skb;
1306
1307 if (ctrl & MACB_BIT(TX_USED)) {
1308 /* skb is set for the last buffer of the frame */
1309 while (!skb) {
1310 macb_tx_unmap(bp, tx_skb, 0);
1311 tail++;
1312 tx_skb = macb_tx_skb(queue, tail);
1313 skb = tx_skb->skb;
1314 }
1315
1316 /* ctrl still refers to the first buffer descriptor
1317 * since it's the only one written back by the hardware
1318 */
1319 if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
1320 netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
1321 macb_tx_ring_wrap(bp, tail),
1322 skb->data);
1323 bp->dev->stats.tx_packets++;
1324 queue->stats.tx_packets++;
1325 packets++;
1326 bp->dev->stats.tx_bytes += skb->len;
1327 queue->stats.tx_bytes += skb->len;
1328 bytes += skb->len;
1329 }
1330 } else {
1331 /* "Buffers exhausted mid-frame" errors may only happen
1332 * if the driver is buggy, so complain loudly about
1333 * those. Statistics are updated by hardware.
1334 */
1335 if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
1336 netdev_err(bp->dev,
1337 "BUG: TX buffers exhausted mid-frame\n");
1338
1339 desc->ctrl = ctrl | MACB_BIT(TX_USED);
1340 }
1341
1342 macb_tx_unmap(bp, tx_skb, 0);
1343 }
1344
1345 netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
1346 packets, bytes);
1347
1348 /* Set end of TX queue */
1349 desc = macb_tx_desc(queue, 0);
1350 macb_set_addr(bp, desc, 0);
1351 desc->ctrl = MACB_BIT(TX_USED);
1352
1353 /* Make descriptor updates visible to hardware */
1354 wmb();
1355
1356 /* Reinitialize the TX desc queue */
1357 queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
1358 /* Make TX ring reflect state of hardware */
1359 queue->tx_head = 0;
1360 queue->tx_tail = 0;
1361
1362 /* Housework before enabling TX IRQ */
1363 macb_writel(bp, TSR, macb_readl(bp, TSR));
1364 queue_writel(queue, IER, MACB_TX_INT_FLAGS);
1365
1366 if (halt_timeout)
1367 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE));
1368
1369 /* Now we are ready to start transmission again */
1370 netif_tx_start_all_queues(bp->dev);
1371 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1372
1373 spin_unlock_irqrestore(&bp->lock, flags);
1374 napi_enable(&queue->napi_tx);
1375 }
1376
ptp_one_step_sync(struct sk_buff * skb)1377 static bool ptp_one_step_sync(struct sk_buff *skb)
1378 {
1379 struct ptp_header *hdr;
1380 unsigned int ptp_class;
1381 u8 msgtype;
1382
1383 /* No need to parse packet if PTP TS is not involved */
1384 if (likely(!(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)))
1385 goto not_oss;
1386
1387 /* Identify and return whether PTP one step sync is being processed */
1388 ptp_class = ptp_classify_raw(skb);
1389 if (ptp_class == PTP_CLASS_NONE)
1390 goto not_oss;
1391
1392 hdr = ptp_parse_header(skb, ptp_class);
1393 if (!hdr)
1394 goto not_oss;
1395
1396 if (hdr->flag_field[0] & PTP_FLAG_TWOSTEP)
1397 goto not_oss;
1398
1399 msgtype = ptp_get_msgtype(hdr, ptp_class);
1400 if (msgtype == PTP_MSGTYPE_SYNC)
1401 return true;
1402
1403 not_oss:
1404 return false;
1405 }
1406
macb_tx_complete(struct macb_queue * queue,int budget)1407 static int macb_tx_complete(struct macb_queue *queue, int budget)
1408 {
1409 struct macb *bp = queue->bp;
1410 u16 queue_index = queue - bp->queues;
1411 unsigned long flags;
1412 unsigned int tail;
1413 unsigned int head;
1414 int packets = 0;
1415 u32 bytes = 0;
1416
1417 spin_lock_irqsave(&queue->tx_ptr_lock, flags);
1418 head = queue->tx_head;
1419 for (tail = queue->tx_tail; tail != head && packets < budget; tail++) {
1420 struct macb_tx_skb *tx_skb;
1421 struct sk_buff *skb;
1422 struct macb_dma_desc *desc;
1423 u32 ctrl;
1424
1425 desc = macb_tx_desc(queue, tail);
1426
1427 /* Make hw descriptor updates visible to CPU */
1428 rmb();
1429
1430 ctrl = desc->ctrl;
1431
1432 /* TX_USED bit is only set by hardware on the very first buffer
1433 * descriptor of the transmitted frame.
1434 */
1435 if (!(ctrl & MACB_BIT(TX_USED)))
1436 break;
1437
1438 /* Process all buffers of the current transmitted frame */
1439 for (;; tail++) {
1440 tx_skb = macb_tx_skb(queue, tail);
1441 skb = tx_skb->skb;
1442
1443 /* First, update TX stats if needed */
1444 if (skb) {
1445 if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
1446 !ptp_one_step_sync(skb))
1447 gem_ptp_do_txstamp(bp, skb, desc);
1448
1449 netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
1450 macb_tx_ring_wrap(bp, tail),
1451 skb->data);
1452 bp->dev->stats.tx_packets++;
1453 queue->stats.tx_packets++;
1454 bp->dev->stats.tx_bytes += skb->len;
1455 queue->stats.tx_bytes += skb->len;
1456 packets++;
1457 bytes += skb->len;
1458 }
1459
1460 /* Now we can safely release resources */
1461 macb_tx_unmap(bp, tx_skb, budget);
1462
1463 /* skb is set only for the last buffer of the frame.
1464 * WARNING: at this point skb has been freed by
1465 * macb_tx_unmap().
1466 */
1467 if (skb)
1468 break;
1469 }
1470 }
1471
1472 netdev_tx_completed_queue(netdev_get_tx_queue(bp->dev, queue_index),
1473 packets, bytes);
1474
1475 queue->tx_tail = tail;
1476 if (__netif_subqueue_stopped(bp->dev, queue_index) &&
1477 CIRC_CNT(queue->tx_head, queue->tx_tail,
1478 bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
1479 netif_wake_subqueue(bp->dev, queue_index);
1480 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
1481
1482 if (packets)
1483 macb_tx_lpi_schedule(bp);
1484
1485 return packets;
1486 }
1487
gem_rx_refill(struct macb_queue * queue)1488 static void gem_rx_refill(struct macb_queue *queue)
1489 {
1490 unsigned int entry;
1491 struct sk_buff *skb;
1492 dma_addr_t paddr;
1493 struct macb *bp = queue->bp;
1494 struct macb_dma_desc *desc;
1495
1496 while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
1497 bp->rx_ring_size) > 0) {
1498 entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);
1499
1500 /* Make hw descriptor updates visible to CPU */
1501 rmb();
1502
1503 desc = macb_rx_desc(queue, entry);
1504
1505 if (!queue->rx_skbuff[entry]) {
1506 /* allocate sk_buff for this free entry in ring */
1507 skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
1508 if (unlikely(!skb)) {
1509 netdev_err(bp->dev,
1510 "Unable to allocate sk_buff\n");
1511 break;
1512 }
1513
1514 /* now fill corresponding descriptor entry */
1515 paddr = dma_map_single(&bp->pdev->dev, skb->data,
1516 bp->rx_buffer_size,
1517 DMA_FROM_DEVICE);
1518 if (dma_mapping_error(&bp->pdev->dev, paddr)) {
1519 dev_kfree_skb(skb);
1520 break;
1521 }
1522
1523 queue->rx_skbuff[entry] = skb;
1524
1525 if (entry == bp->rx_ring_size - 1)
1526 paddr |= MACB_BIT(RX_WRAP);
1527 desc->ctrl = 0;
1528 /* Setting addr clears RX_USED and allows reception,
1529 * make sure ctrl is cleared first to avoid a race.
1530 */
1531 dma_wmb();
1532 macb_set_addr(bp, desc, paddr);
1533
1534 /* Properly align Ethernet header.
1535 *
1536 * Hardware can add dummy bytes if asked using the RBOF
1537 * field inside the NCFGR register. That feature isn't
1538 * available if hardware is RSC capable.
1539 *
1540 * We cannot fallback to doing the 2-byte shift before
1541 * DMA mapping because the address field does not allow
1542 * setting the low 2/3 bits.
1543 * It is 3 bits if HW_DMA_CAP_PTP, else 2 bits.
1544 */
1545 if (!(bp->caps & MACB_CAPS_RSC))
1546 skb_reserve(skb, NET_IP_ALIGN);
1547 } else {
1548 desc->ctrl = 0;
1549 dma_wmb();
1550 desc->addr &= ~MACB_BIT(RX_USED);
1551 }
1552 queue->rx_prepared_head++;
1553 }
1554
1555 /* Make descriptor updates visible to hardware */
1556 wmb();
1557
1558 netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
1559 queue, queue->rx_prepared_head, queue->rx_tail);
1560 }
1561
1562 /* Mark DMA descriptors from begin up to and not including end as unused */
discard_partial_frame(struct macb_queue * queue,unsigned int begin,unsigned int end)1563 static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
1564 unsigned int end)
1565 {
1566 unsigned int frag;
1567
1568 for (frag = begin; frag != end; frag++) {
1569 struct macb_dma_desc *desc = macb_rx_desc(queue, frag);
1570
1571 desc->addr &= ~MACB_BIT(RX_USED);
1572 }
1573
1574 /* Make descriptor updates visible to hardware */
1575 wmb();
1576
1577 /* When this happens, the hardware stats registers for
1578 * whatever caused this is updated, so we don't have to record
1579 * anything.
1580 */
1581 }
1582
gem_rx(struct macb_queue * queue,struct napi_struct * napi,int budget)1583 static int gem_rx(struct macb_queue *queue, struct napi_struct *napi,
1584 int budget)
1585 {
1586 struct macb *bp = queue->bp;
1587 unsigned int len;
1588 unsigned int entry;
1589 struct sk_buff *skb;
1590 struct macb_dma_desc *desc;
1591 int count = 0;
1592
1593 while (count < budget) {
1594 u32 ctrl;
1595 dma_addr_t addr;
1596 bool rxused;
1597
1598 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1599 desc = macb_rx_desc(queue, entry);
1600
1601 /* Make hw descriptor updates visible to CPU */
1602 rmb();
1603
1604 rxused = (desc->addr & MACB_BIT(RX_USED)) ? true : false;
1605 addr = macb_get_addr(bp, desc);
1606
1607 if (!rxused)
1608 break;
1609
1610 /* Ensure ctrl is at least as up-to-date as rxused */
1611 dma_rmb();
1612
1613 ctrl = desc->ctrl;
1614
1615 queue->rx_tail++;
1616 count++;
1617
1618 if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
1619 netdev_err(bp->dev,
1620 "not whole frame pointed by descriptor\n");
1621 bp->dev->stats.rx_dropped++;
1622 queue->stats.rx_dropped++;
1623 break;
1624 }
1625 skb = queue->rx_skbuff[entry];
1626 if (unlikely(!skb)) {
1627 netdev_err(bp->dev,
1628 "inconsistent Rx descriptor chain\n");
1629 bp->dev->stats.rx_dropped++;
1630 queue->stats.rx_dropped++;
1631 break;
1632 }
1633 /* now everything is ready for receiving packet */
1634 queue->rx_skbuff[entry] = NULL;
1635 len = ctrl & bp->rx_frm_len_mask;
1636
1637 netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
1638
1639 skb_put(skb, len);
1640 dma_unmap_single(&bp->pdev->dev, addr,
1641 bp->rx_buffer_size, DMA_FROM_DEVICE);
1642
1643 skb->protocol = eth_type_trans(skb, bp->dev);
1644 skb_checksum_none_assert(skb);
1645 if (bp->dev->features & NETIF_F_RXCSUM &&
1646 !(bp->dev->flags & IFF_PROMISC) &&
1647 GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
1648 skb->ip_summed = CHECKSUM_UNNECESSARY;
1649
1650 bp->dev->stats.rx_packets++;
1651 queue->stats.rx_packets++;
1652 bp->dev->stats.rx_bytes += skb->len;
1653 queue->stats.rx_bytes += skb->len;
1654
1655 gem_ptp_do_rxstamp(bp, skb, desc);
1656
1657 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
1658 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1659 skb->len, skb->csum);
1660 print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
1661 skb_mac_header(skb), 16, true);
1662 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
1663 skb->data, 32, true);
1664 #endif
1665
1666 napi_gro_receive(napi, skb);
1667 }
1668
1669 gem_rx_refill(queue);
1670
1671 return count;
1672 }
1673
macb_rx_frame(struct macb_queue * queue,struct napi_struct * napi,unsigned int first_frag,unsigned int last_frag)1674 static int macb_rx_frame(struct macb_queue *queue, struct napi_struct *napi,
1675 unsigned int first_frag, unsigned int last_frag)
1676 {
1677 unsigned int len;
1678 unsigned int frag;
1679 unsigned int offset;
1680 struct sk_buff *skb;
1681 struct macb_dma_desc *desc;
1682 struct macb *bp = queue->bp;
1683
1684 desc = macb_rx_desc(queue, last_frag);
1685 len = desc->ctrl & bp->rx_frm_len_mask;
1686
1687 netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
1688 macb_rx_ring_wrap(bp, first_frag),
1689 macb_rx_ring_wrap(bp, last_frag), len);
1690
1691 /* The ethernet header starts NET_IP_ALIGN bytes into the
1692 * first buffer. Since the header is 14 bytes, this makes the
1693 * payload word-aligned.
1694 *
1695 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
1696 * the two padding bytes into the skb so that we avoid hitting
1697 * the slowpath in memcpy(), and pull them off afterwards.
1698 */
1699 skb = netdev_alloc_skb(bp->dev, len + NET_IP_ALIGN);
1700 if (!skb) {
1701 bp->dev->stats.rx_dropped++;
1702 for (frag = first_frag; ; frag++) {
1703 desc = macb_rx_desc(queue, frag);
1704 desc->addr &= ~MACB_BIT(RX_USED);
1705 if (frag == last_frag)
1706 break;
1707 }
1708
1709 /* Make descriptor updates visible to hardware */
1710 wmb();
1711
1712 return 1;
1713 }
1714
1715 offset = 0;
1716 len += NET_IP_ALIGN;
1717 skb_checksum_none_assert(skb);
1718 skb_put(skb, len);
1719
1720 for (frag = first_frag; ; frag++) {
1721 unsigned int frag_len = bp->rx_buffer_size;
1722
1723 if (offset + frag_len > len) {
1724 if (unlikely(frag != last_frag)) {
1725 dev_kfree_skb_any(skb);
1726 return -1;
1727 }
1728 frag_len = len - offset;
1729 }
1730 skb_copy_to_linear_data_offset(skb, offset,
1731 macb_rx_buffer(queue, frag),
1732 frag_len);
1733 offset += bp->rx_buffer_size;
1734 desc = macb_rx_desc(queue, frag);
1735 desc->addr &= ~MACB_BIT(RX_USED);
1736
1737 if (frag == last_frag)
1738 break;
1739 }
1740
1741 /* Make descriptor updates visible to hardware */
1742 wmb();
1743
1744 __skb_pull(skb, NET_IP_ALIGN);
1745 skb->protocol = eth_type_trans(skb, bp->dev);
1746
1747 bp->dev->stats.rx_packets++;
1748 bp->dev->stats.rx_bytes += skb->len;
1749 netdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
1750 skb->len, skb->csum);
1751 napi_gro_receive(napi, skb);
1752
1753 return 0;
1754 }
1755
macb_init_rx_ring(struct macb_queue * queue)1756 static inline void macb_init_rx_ring(struct macb_queue *queue)
1757 {
1758 struct macb *bp = queue->bp;
1759 dma_addr_t addr;
1760 struct macb_dma_desc *desc = NULL;
1761 int i;
1762
1763 addr = queue->rx_buffers_dma;
1764 for (i = 0; i < bp->rx_ring_size; i++) {
1765 desc = macb_rx_desc(queue, i);
1766 macb_set_addr(bp, desc, addr);
1767 desc->ctrl = 0;
1768 addr += bp->rx_buffer_size;
1769 }
1770 desc->addr |= MACB_BIT(RX_WRAP);
1771 queue->rx_tail = 0;
1772 }
1773
macb_rx(struct macb_queue * queue,struct napi_struct * napi,int budget)1774 static int macb_rx(struct macb_queue *queue, struct napi_struct *napi,
1775 int budget)
1776 {
1777 struct macb *bp = queue->bp;
1778 bool reset_rx_queue = false;
1779 int received = 0;
1780 unsigned int tail;
1781 int first_frag = -1;
1782
1783 for (tail = queue->rx_tail; budget > 0; tail++) {
1784 struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
1785 u32 ctrl;
1786
1787 /* Make hw descriptor updates visible to CPU */
1788 rmb();
1789
1790 if (!(desc->addr & MACB_BIT(RX_USED)))
1791 break;
1792
1793 /* Ensure ctrl is at least as up-to-date as addr */
1794 dma_rmb();
1795
1796 ctrl = desc->ctrl;
1797
1798 if (ctrl & MACB_BIT(RX_SOF)) {
1799 if (first_frag != -1)
1800 discard_partial_frame(queue, first_frag, tail);
1801 first_frag = tail;
1802 }
1803
1804 if (ctrl & MACB_BIT(RX_EOF)) {
1805 int dropped;
1806
1807 if (unlikely(first_frag == -1)) {
1808 reset_rx_queue = true;
1809 continue;
1810 }
1811
1812 dropped = macb_rx_frame(queue, napi, first_frag, tail);
1813 first_frag = -1;
1814 if (unlikely(dropped < 0)) {
1815 reset_rx_queue = true;
1816 continue;
1817 }
1818 if (!dropped) {
1819 received++;
1820 budget--;
1821 }
1822 }
1823 }
1824
1825 if (unlikely(reset_rx_queue)) {
1826 unsigned long flags;
1827 u32 ctrl;
1828
1829 netdev_err(bp->dev, "RX queue corruption: reset it\n");
1830
1831 spin_lock_irqsave(&bp->lock, flags);
1832
1833 ctrl = macb_readl(bp, NCR);
1834 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
1835
1836 macb_init_rx_ring(queue);
1837 queue_writel(queue, RBQP, queue->rx_ring_dma);
1838
1839 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
1840
1841 spin_unlock_irqrestore(&bp->lock, flags);
1842 return received;
1843 }
1844
1845 if (first_frag != -1)
1846 queue->rx_tail = first_frag;
1847 else
1848 queue->rx_tail = tail;
1849
1850 return received;
1851 }
1852
1853 static bool macb_rx_pending(struct macb_queue *queue)
1854 {
1855 struct macb *bp = queue->bp;
1856 unsigned int entry;
1857 struct macb_dma_desc *desc;
1858
1859 entry = macb_rx_ring_wrap(bp, queue->rx_tail);
1860 desc = macb_rx_desc(queue, entry);
1861
1862 /* Make hw descriptor updates visible to CPU */
1863 rmb();
1864
1865 return (desc->addr & MACB_BIT(RX_USED)) != 0;
1866 }
1867
1868 static int macb_rx_poll(struct napi_struct *napi, int budget)
1869 {
1870 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_rx);
1871 struct macb *bp = queue->bp;
1872 int work_done;
1873
1874 work_done = bp->macbgem_ops.mog_rx(queue, napi, budget);
1875
1876 netdev_vdbg(bp->dev, "RX poll: queue = %u, work_done = %d, budget = %d\n",
1877 (unsigned int)(queue - bp->queues), work_done, budget);
1878
1879 if (work_done < budget && napi_complete_done(napi, work_done)) {
1880 queue_writel(queue, IER, bp->rx_intr_mask);
1881
1882 /* Packet completions only seem to propagate to raise
1883 * interrupts when interrupts are enabled at the time, so if
1884 * packets were received while interrupts were disabled,
1885 * they will not cause another interrupt to be generated when
1886 * interrupts are re-enabled.
1887 * Check for this case here to avoid losing a wakeup. This can
1888 * potentially race with the interrupt handler doing the same
1889 * actions if an interrupt is raised just after enabling them,
1890 * but this should be harmless.
1891 */
1892 if (macb_rx_pending(queue)) {
1893 queue_writel(queue, IDR, bp->rx_intr_mask);
1894 macb_queue_isr_clear(bp, queue, MACB_BIT(RCOMP));
1895 netdev_vdbg(bp->dev, "poll: packets pending, reschedule\n");
1896 napi_schedule(napi);
1897 }
1898 }
1899
1900 /* TODO: Handle errors */
1901
1902 return work_done;
1903 }
1904
1905 static void macb_tx_restart(struct macb_queue *queue)
1906 {
1907 struct macb *bp = queue->bp;
1908 unsigned int head_idx, tbqp;
1909 unsigned long flags;
1910
1911 spin_lock_irqsave(&queue->tx_ptr_lock, flags);
1912
1913 if (queue->tx_head == queue->tx_tail)
1914 goto out_tx_ptr_unlock;
1915
1916 tbqp = queue_readl(queue, TBQP) / macb_dma_desc_get_size(bp);
1917 tbqp = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, tbqp));
1918 head_idx = macb_adj_dma_desc_idx(bp, macb_tx_ring_wrap(bp, queue->tx_head));
1919
1920 if (tbqp == head_idx)
1921 goto out_tx_ptr_unlock;
1922
1923 spin_lock(&bp->lock);
1924 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
1925 spin_unlock(&bp->lock);
1926
1927 out_tx_ptr_unlock:
1928 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
1929 }
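/* Note that TSTART is pulsed only when the hardware's TBQP pointer has
 * stopped short of the software tx_head; when the two already match
 * there is no queued work, so kicking the DMA would be a pointless
 * register write.
 */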
1930
1931 static bool macb_tx_complete_pending(struct macb_queue *queue)
1932 {
1933 bool retval = false;
1934 unsigned long flags;
1935
1936 spin_lock_irqsave(&queue->tx_ptr_lock, flags);
1937 if (queue->tx_head != queue->tx_tail) {
1938 /* Make hw descriptor updates visible to CPU */
1939 rmb();
1940
1941 if (macb_tx_desc(queue, queue->tx_tail)->ctrl & MACB_BIT(TX_USED))
1942 retval = true;
1943 }
1944 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
1945 return retval;
1946 }
1947
1948 static int macb_tx_poll(struct napi_struct *napi, int budget)
1949 {
1950 struct macb_queue *queue = container_of(napi, struct macb_queue, napi_tx);
1951 struct macb *bp = queue->bp;
1952 int work_done;
1953
1954 work_done = macb_tx_complete(queue, budget);
1955
1956 rmb(); // ensure txubr_pending is up to date
1957 if (queue->txubr_pending) {
1958 queue->txubr_pending = false;
1959 netdev_vdbg(bp->dev, "poll: tx restart\n");
1960 macb_tx_restart(queue);
1961 }
1962
1963 netdev_vdbg(bp->dev, "TX poll: queue = %u, work_done = %d, budget = %d\n",
1964 (unsigned int)(queue - bp->queues), work_done, budget);
1965
1966 if (work_done < budget && napi_complete_done(napi, work_done)) {
1967 queue_writel(queue, IER, MACB_BIT(TCOMP));
1968
1969 /* Packet completions only seem to propagate to raise
1970 * interrupts when interrupts are enabled at the time, so if
1971 * packets were sent while interrupts were disabled,
1972 * they will not cause another interrupt to be generated when
1973 * interrupts are re-enabled.
1974 * Check for this case here to avoid losing a wakeup. This can
1975 * potentially race with the interrupt handler doing the same
1976 * actions if an interrupt is raised just after enabling them,
1977 * but this should be harmless.
1978 */
1979 if (macb_tx_complete_pending(queue)) {
1980 queue_writel(queue, IDR, MACB_BIT(TCOMP));
1981 macb_queue_isr_clear(bp, queue, MACB_BIT(TCOMP));
1982 netdev_vdbg(bp->dev, "TX poll: packets pending, reschedule\n");
1983 napi_schedule(napi);
1984 }
1985 }
1986
1987 return work_done;
1988 }
1989
1990 static void macb_hresp_error_task(struct work_struct *work)
1991 {
1992 struct macb *bp = from_work(bp, work, hresp_err_bh_work);
1993 struct net_device *dev = bp->dev;
1994 struct macb_queue *queue;
1995 unsigned int q;
1996 u32 ctrl;
1997
1998 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
1999 queue_writel(queue, IDR, bp->rx_intr_mask |
2000 MACB_TX_INT_FLAGS |
2001 MACB_BIT(HRESP));
2002 }
2003 ctrl = macb_readl(bp, NCR);
2004 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2005 macb_writel(bp, NCR, ctrl);
2006
2007 netif_tx_stop_all_queues(dev);
2008 netif_carrier_off(dev);
2009
2010 bp->macbgem_ops.mog_init_rings(bp);
2011
2012 /* Initialize TX and RX buffers */
2013 macb_init_buffers(bp);
2014
2015 /* Enable interrupts */
2016 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2017 queue_writel(queue, IER,
2018 bp->rx_intr_mask |
2019 MACB_TX_INT_FLAGS |
2020 MACB_BIT(HRESP));
2021
2022 ctrl |= MACB_BIT(RE) | MACB_BIT(TE);
2023 macb_writel(bp, NCR, ctrl);
2024
2025 netif_carrier_on(dev);
2026 netif_tx_start_all_queues(dev);
2027 }
2028
2029 static void macb_wol_interrupt(struct macb_queue *queue, u32 status)
2030 {
2031 struct macb *bp = queue->bp;
2032
2033 queue_writel(queue, IDR, MACB_BIT(WOL));
2034 macb_writel(bp, WOL, 0);
2035 netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
2036 (unsigned int)(queue - bp->queues),
2037 (unsigned long)status);
2038 macb_queue_isr_clear(bp, queue, MACB_BIT(WOL));
2039 pm_wakeup_event(&bp->pdev->dev, 0);
2040 }
2041
2042 static void gem_wol_interrupt(struct macb_queue *queue, u32 status)
2043 {
2044 struct macb *bp = queue->bp;
2045
2046 queue_writel(queue, IDR, GEM_BIT(WOL));
2047 gem_writel(bp, WOL, 0);
2048 netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
2049 (unsigned int)(queue - bp->queues),
2050 (unsigned long)status);
2051 macb_queue_isr_clear(bp, queue, GEM_BIT(WOL));
2052 pm_wakeup_event(&bp->pdev->dev, 0);
2053 }
2054
2055 static int macb_interrupt_misc(struct macb_queue *queue, u32 status)
2056 {
2057 struct macb *bp = queue->bp;
2058 struct net_device *dev;
2059 u32 ctrl;
2060
2061 dev = bp->dev;
2062
2063 if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
2064 queue_writel(queue, IDR, MACB_TX_INT_FLAGS);
2065 schedule_work(&queue->tx_error_task);
2066 macb_queue_isr_clear(bp, queue, MACB_TX_ERR_FLAGS);
2067 return -1;
2068 }
2069
2070 /* Link change detection isn't possible with RMII, so we'll
2071 * add that if/when we get our hands on a full-blown MII PHY.
2072 */
2073
2074 /* There is a hardware issue under heavy load where DMA can
2075 * stop; this causes endless "used buffer descriptor read"
2076 * interrupts, but it can be cleared by re-enabling RX. See
2077 * the at91rm9200 manual, section 41.3.1 or the Zynq manual
2078 * section 16.7.4 for details. RXUBR is only enabled for
2079 * these two versions.
2080 */
2081 if (status & MACB_BIT(RXUBR)) {
2082 ctrl = macb_readl(bp, NCR);
2083 macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
2084 wmb();
2085 macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
2086 macb_queue_isr_clear(bp, queue, MACB_BIT(RXUBR));
2087 }
2088
2089 if (status & MACB_BIT(ISR_ROVR)) {
2090 /* We missed at least one packet */
2091 spin_lock(&bp->stats_lock);
2092 if (macb_is_gem(bp))
2093 bp->hw_stats.gem.rx_overruns++;
2094 else
2095 bp->hw_stats.macb.rx_overruns++;
2096 spin_unlock(&bp->stats_lock);
2097 macb_queue_isr_clear(bp, queue, MACB_BIT(ISR_ROVR));
2098 }
2099
2100 if (status & MACB_BIT(HRESP)) {
2101 queue_work(system_bh_wq, &bp->hresp_err_bh_work);
2102 netdev_err(dev, "DMA bus error: HRESP not OK\n");
2103 macb_queue_isr_clear(bp, queue, MACB_BIT(HRESP));
2104 }
2105
2106 if (macb_is_gem(bp)) {
2107 if (status & GEM_BIT(WOL))
2108 gem_wol_interrupt(queue, status);
2109 } else {
2110 if (status & MACB_BIT(WOL))
2111 macb_wol_interrupt(queue, status);
2112 }
2113
2114 return 0;
2115 }
2116
2117 static irqreturn_t macb_interrupt(int irq, void *dev_id)
2118 {
2119 struct macb_queue *queue = dev_id;
2120 struct macb *bp = queue->bp;
2121 struct net_device *dev = bp->dev;
2122 u32 status;
2123
2124 status = queue_readl(queue, ISR);
2125
2126 if (unlikely(!status))
2127 return IRQ_NONE;
2128
2129 spin_lock(&bp->lock);
2130
2131 while (status) {
2132 /* close possible race with dev_close */
2133 if (unlikely(!netif_running(dev))) {
2134 queue_writel(queue, IDR, -1);
2135 macb_queue_isr_clear(bp, queue, -1);
2136 break;
2137 }
2138
2139 netdev_vdbg(bp->dev, "queue = %u, isr = 0x%08lx\n",
2140 (unsigned int)(queue - bp->queues),
2141 (unsigned long)status);
2142
2143 if (status & bp->rx_intr_mask) {
2144 /* There's no point taking any more interrupts
2145 * until we have processed the buffers. The
2146 * scheduling call may fail if the poll routine
2147 * is already scheduled, so disable interrupts
2148 * now.
2149 */
2150 queue_writel(queue, IDR, bp->rx_intr_mask);
2151 macb_queue_isr_clear(bp, queue, MACB_BIT(RCOMP));
2152 napi_schedule_irqoff(&queue->napi_rx);
2153 }
2154
2155 if (status & (MACB_BIT(TCOMP) |
2156 MACB_BIT(TXUBR))) {
2157 queue_writel(queue, IDR, MACB_BIT(TCOMP));
2158 macb_queue_isr_clear(bp, queue, MACB_BIT(TCOMP) |
2159 MACB_BIT(TXUBR));
2160 if (status & MACB_BIT(TXUBR)) {
2161 queue->txubr_pending = true;
2162 wmb(); // ensure softirq can see update
2163 }
2164
2165 napi_schedule_irqoff(&queue->napi_tx);
2166 }
2167
2168 if (unlikely(status & MACB_INT_MISC_FLAGS))
2169 if (macb_interrupt_misc(queue, status))
2170 break;
2171
2172 status = queue_readl(queue, ISR);
2173 }
2174
2175 spin_unlock(&bp->lock);
2176
2177 return IRQ_HANDLED;
2178 }
2179
2180 #ifdef CONFIG_NET_POLL_CONTROLLER
2181 /* Polling receive - used by netconsole and other diagnostic tools
2182 * to allow network i/o with interrupts disabled.
2183 */
2184 static void macb_poll_controller(struct net_device *dev)
2185 {
2186 struct macb *bp = netdev_priv(dev);
2187 struct macb_queue *queue;
2188 unsigned long flags;
2189 unsigned int q;
2190
2191 local_irq_save(flags);
2192 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
2193 macb_interrupt(dev->irq, queue);
2194 local_irq_restore(flags);
2195 }
2196 #endif
2197
2198 static unsigned int macb_tx_map(struct macb *bp,
2199 struct macb_queue *queue,
2200 struct sk_buff *skb,
2201 unsigned int hdrlen)
2202 {
2203 unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
2204 unsigned int len, i, tx_head = queue->tx_head;
2205 u32 ctrl, lso_ctrl = 0, seq_ctrl = 0;
2206 unsigned int eof = 1, mss_mfs = 0;
2207 struct macb_tx_skb *tx_skb = NULL;
2208 struct macb_dma_desc *desc;
2209 unsigned int offset, size;
2210 dma_addr_t mapping;
2211
2212 /* LSO */
2213 if (skb_shinfo(skb)->gso_size != 0) {
2214 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2215 /* UDP - UFO */
2216 lso_ctrl = MACB_LSO_UFO_ENABLE;
2217 else
2218 /* TCP - TSO */
2219 lso_ctrl = MACB_LSO_TSO_ENABLE;
2220 }
2221
2222 /* First, map non-paged data */
2223 len = skb_headlen(skb);
2224
2225 /* first buffer length */
2226 size = hdrlen;
2227
2228 offset = 0;
2229 while (len) {
2230 tx_skb = macb_tx_skb(queue, tx_head);
2231
2232 mapping = dma_map_single(&bp->pdev->dev,
2233 skb->data + offset,
2234 size, DMA_TO_DEVICE);
2235 if (dma_mapping_error(&bp->pdev->dev, mapping))
2236 goto dma_error;
2237
2238 /* Save info to properly release resources */
2239 tx_skb->skb = NULL;
2240 tx_skb->mapping = mapping;
2241 tx_skb->size = size;
2242 tx_skb->mapped_as_page = false;
2243
2244 len -= size;
2245 offset += size;
2246 tx_head++;
2247
2248 size = umin(len, bp->max_tx_length);
2249 }
2250
2251 /* Then, map paged data from fragments */
2252 for (f = 0; f < nr_frags; f++) {
2253 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2254
2255 len = skb_frag_size(frag);
2256 offset = 0;
2257 while (len) {
2258 size = umin(len, bp->max_tx_length);
2259 tx_skb = macb_tx_skb(queue, tx_head);
2260
2261 mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
2262 offset, size, DMA_TO_DEVICE);
2263 if (dma_mapping_error(&bp->pdev->dev, mapping))
2264 goto dma_error;
2265
2266 /* Save info to properly release resources */
2267 tx_skb->skb = NULL;
2268 tx_skb->mapping = mapping;
2269 tx_skb->size = size;
2270 tx_skb->mapped_as_page = true;
2271
2272 len -= size;
2273 offset += size;
2274 tx_head++;
2275 }
2276 }
2277
2278 /* Should never happen */
2279 if (unlikely(!tx_skb)) {
2280 netdev_err(bp->dev, "BUG! empty skb!\n");
2281 return 0;
2282 }
2283
2284 /* This is the last buffer of the frame: save socket buffer */
2285 tx_skb->skb = skb;
2286
2287 /* Update TX ring: update buffer descriptors in reverse order
2288 * to avoid race condition
2289 */
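/* Going backwards matters: the controller begins fetching a frame
 * only once TX_USED is cleared in its *first* descriptor, so every
 * later descriptor in the chain must already be valid when that
 * final clear (ordered by the wmb() below) becomes visible.
 */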
2290
2291 /* Set 'TX_USED' bit in buffer descriptor at tx_head position
2292 * to set the end of TX queue
2293 */
2294 i = tx_head;
2295 ctrl = MACB_BIT(TX_USED);
2296 desc = macb_tx_desc(queue, i);
2297 desc->ctrl = ctrl;
2298
2299 if (lso_ctrl) {
2300 if (lso_ctrl == MACB_LSO_UFO_ENABLE)
2301 /* include header and FCS in value given to h/w */
2302 mss_mfs = skb_shinfo(skb)->gso_size +
2303 skb_transport_offset(skb) +
2304 ETH_FCS_LEN;
2305 else /* TSO */ {
2306 mss_mfs = skb_shinfo(skb)->gso_size;
2307 /* TCP Sequence Number Source Select
2308 * can be set only for TSO
2309 */
2310 seq_ctrl = 0;
2311 }
2312 }
2313
2314 do {
2315 i--;
2316 tx_skb = macb_tx_skb(queue, i);
2317 desc = macb_tx_desc(queue, i);
2318
2319 ctrl = (u32)tx_skb->size;
2320 if (eof) {
2321 ctrl |= MACB_BIT(TX_LAST);
2322 eof = 0;
2323 }
2324 if (unlikely(macb_tx_ring_wrap(bp, i) == bp->tx_ring_size - 1))
2325 ctrl |= MACB_BIT(TX_WRAP);
2326
2327 /* First descriptor is header descriptor */
2328 if (i == queue->tx_head) {
2329 ctrl |= MACB_BF(TX_LSO, lso_ctrl);
2330 ctrl |= MACB_BF(TX_TCP_SEQ_SRC, seq_ctrl);
2331 if ((bp->dev->features & NETIF_F_HW_CSUM) &&
2332 skb->ip_summed != CHECKSUM_PARTIAL && !lso_ctrl &&
2333 !ptp_one_step_sync(skb))
2334 ctrl |= MACB_BIT(TX_NOCRC);
2335 } else
2336 /* Only set MSS/MFS on payload descriptors
2337 * (second or later descriptor)
2338 */
2339 ctrl |= MACB_BF(MSS_MFS, mss_mfs);
2340
2341 /* Set TX buffer descriptor */
2342 macb_set_addr(bp, desc, tx_skb->mapping);
2343 /* desc->addr must be visible to hardware before clearing
2344 * 'TX_USED' bit in desc->ctrl.
2345 */
2346 wmb();
2347 desc->ctrl = ctrl;
2348 } while (i != queue->tx_head);
2349
2350 queue->tx_head = tx_head;
2351
2352 return 0;
2353
2354 dma_error:
2355 netdev_err(bp->dev, "TX DMA map failed\n");
2356
2357 for (i = queue->tx_head; i != tx_head; i++) {
2358 tx_skb = macb_tx_skb(queue, i);
2359
2360 macb_tx_unmap(bp, tx_skb, 0);
2361 }
2362
2363 return -ENOMEM;
2364 }
2365
2366 static netdev_features_t macb_features_check(struct sk_buff *skb,
2367 struct net_device *dev,
2368 netdev_features_t features)
2369 {
2370 unsigned int nr_frags, f;
2371 unsigned int hdrlen;
2372
2373 /* Validate LSO compatibility */
2374
2375 /* there is only one buffer or protocol is not UDP */
2376 if (!skb_is_nonlinear(skb) || (ip_hdr(skb)->protocol != IPPROTO_UDP))
2377 return features;
2378
2379 /* length of header */
2380 hdrlen = skb_transport_offset(skb);
2381
2382 /* For UFO only:
2383 * When software supplies two or more payload buffers all payload buffers
2384 * apart from the last must be a multiple of 8 bytes in size.
2385 */
2386 if (!IS_ALIGNED(skb_headlen(skb) - hdrlen, MACB_TX_LEN_ALIGN))
2387 return features & ~MACB_NETIF_LSO;
2388
2389 nr_frags = skb_shinfo(skb)->nr_frags;
2390 /* No need to check last fragment */
2391 nr_frags--;
2392 for (f = 0; f < nr_frags; f++) {
2393 const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
2394
2395 if (!IS_ALIGNED(skb_frag_size(frag), MACB_TX_LEN_ALIGN))
2396 return features & ~MACB_NETIF_LSO;
2397 }
2398 return features;
2399 }
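/* For example, a UFO skb carrying 42 bytes of headers plus a
 * 1024-byte linear payload keeps LSO (1024 is a multiple of
 * MACB_TX_LEN_ALIGN); a 1021-byte linear payload followed by more
 * fragments would clear NETIF_F_TSO and fall back to software
 * segmentation.
 */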
2400
2401 static inline int macb_clear_csum(struct sk_buff *skb)
2402 {
2403 /* no change for packets without checksum offloading */
2404 if (skb->ip_summed != CHECKSUM_PARTIAL)
2405 return 0;
2406
2407 /* make sure we can modify the header */
2408 if (unlikely(skb_cow_head(skb, 0)))
2409 return -1;
2410
2411 /* initialize checksum field
2412 * This is required - at least for Zynq, which otherwise calculates
2413 * wrong UDP header checksums for UDP packets with UDP data len <=2
2414 */
2415 *(__sum16 *)(skb_checksum_start(skb) + skb->csum_offset) = 0;
2416 return 0;
2417 }
2418
2419 static int macb_pad_and_fcs(struct sk_buff **skb, struct net_device *ndev)
2420 {
2421 bool cloned = skb_cloned(*skb) || skb_header_cloned(*skb) ||
2422 skb_is_nonlinear(*skb);
2423 int padlen = ETH_ZLEN - (*skb)->len;
2424 int tailroom = skb_tailroom(*skb);
2425 struct sk_buff *nskb;
2426 u32 fcs;
2427
2428 if (!(ndev->features & NETIF_F_HW_CSUM) ||
2429 (*skb)->ip_summed == CHECKSUM_PARTIAL ||
2430 skb_shinfo(*skb)->gso_size || ptp_one_step_sync(*skb))
2431 return 0;
2432
2433 if (padlen <= 0) {
2434 /* FCS could be appended to the tailroom. */
2435 if (tailroom >= ETH_FCS_LEN)
2436 goto add_fcs;
2437 /* No room for FCS, need to reallocate skb. */
2438 else
2439 padlen = ETH_FCS_LEN;
2440 } else {
2441 /* Add room for FCS. */
2442 padlen += ETH_FCS_LEN;
2443 }
2444
2445 if (cloned || tailroom < padlen) {
2446 nskb = skb_copy_expand(*skb, 0, padlen, GFP_ATOMIC);
2447 if (!nskb)
2448 return -ENOMEM;
2449
2450 dev_consume_skb_any(*skb);
2451 *skb = nskb;
2452 }
2453
2454 if (padlen > ETH_FCS_LEN)
2455 skb_put_zero(*skb, padlen - ETH_FCS_LEN);
2456
2457 add_fcs:
2458 /* set FCS to packet */
2459 fcs = crc32_le(~0, (*skb)->data, (*skb)->len);
2460 fcs = ~fcs;
2461
2462 skb_put_u8(*skb, fcs & 0xff);
2463 skb_put_u8(*skb, (fcs >> 8) & 0xff);
2464 skb_put_u8(*skb, (fcs >> 16) & 0xff);
2465 skb_put_u8(*skb, (fcs >> 24) & 0xff);
2466
2467 return 0;
2468 }
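/* Example: a 56-byte frame on a NETIF_F_HW_CSUM device is first padded
 * with four zero bytes up to ETH_ZLEN and then grows by ETH_FCS_LEN;
 * the FCS appended above is the bit-inverted little-endian CRC-32 of
 * the padded frame, stored least-significant byte first, i.e. what the
 * MAC would have generated had we not set TX_NOCRC for this skb.
 */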
2469
2470 static netdev_tx_t macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
2471 {
2472 u16 queue_index = skb_get_queue_mapping(skb);
2473 struct macb *bp = netdev_priv(dev);
2474 struct macb_queue *queue = &bp->queues[queue_index];
2475 unsigned int desc_cnt, nr_frags, frag_size, f;
2476 unsigned int hdrlen;
2477 unsigned long flags;
2478 bool is_lso;
2479 netdev_tx_t ret = NETDEV_TX_OK;
2480
2481 if (macb_clear_csum(skb)) {
2482 dev_kfree_skb_any(skb);
2483 return ret;
2484 }
2485
2486 if (macb_pad_and_fcs(&skb, dev)) {
2487 dev_kfree_skb_any(skb);
2488 return ret;
2489 }
2490
2491 if (macb_dma_ptp(bp) &&
2492 (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP))
2493 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
2494
2495 is_lso = (skb_shinfo(skb)->gso_size != 0);
2496
2497 if (is_lso) {
2498 /* length of headers */
2499 if (ip_hdr(skb)->protocol == IPPROTO_UDP)
2500 /* only queue eth + ip headers separately for UDP */
2501 hdrlen = skb_transport_offset(skb);
2502 else
2503 hdrlen = skb_tcp_all_headers(skb);
2504 if (skb_headlen(skb) < hdrlen) {
2505 netdev_err(bp->dev, "Error - LSO headers fragmented!!!\n");
2506 /* if this is required, would need to copy to single buffer */
2507 return NETDEV_TX_BUSY;
2508 }
2509 } else
2510 hdrlen = umin(skb_headlen(skb), bp->max_tx_length);
2511
2512 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
2513 netdev_vdbg(bp->dev,
2514 "start_xmit: queue %hu len %u head %p data %p tail %p end %p\n",
2515 queue_index, skb->len, skb->head, skb->data,
2516 skb_tail_pointer(skb), skb_end_pointer(skb));
2517 print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
2518 skb->data, 16, true);
2519 #endif
2520
2521 /* Count how many TX buffer descriptors are needed to send this
2522 * socket buffer: skb fragments of jumbo frames may need to be
2523 * split into many buffer descriptors.
2524 */
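/* For instance, an LSO skb whose linear buffer holds 66 bytes of
 * headers plus 1434 bytes of payload needs one header descriptor and
 * DIV_ROUND_UP(1434, max_tx_length) payload descriptors for the
 * linear part, plus more for every page fragment counted below.
 */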
2525 if (is_lso && (skb_headlen(skb) > hdrlen))
2526 /* extra header descriptor if also payload in first buffer */
2527 desc_cnt = DIV_ROUND_UP((skb_headlen(skb) - hdrlen), bp->max_tx_length) + 1;
2528 else
2529 desc_cnt = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
2530 nr_frags = skb_shinfo(skb)->nr_frags;
2531 for (f = 0; f < nr_frags; f++) {
2532 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
2533 desc_cnt += DIV_ROUND_UP(frag_size, bp->max_tx_length);
2534 }
2535
2536 spin_lock_irqsave(&queue->tx_ptr_lock, flags);
2537
2538 /* This is a hard error, log it. */
2539 if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
2540 bp->tx_ring_size) < desc_cnt) {
2541 netif_stop_subqueue(dev, queue_index);
2542 netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
2543 queue->tx_head, queue->tx_tail);
2544 ret = NETDEV_TX_BUSY;
2545 goto unlock;
2546 }
2547
2548 /* Map socket buffer for DMA transfer */
2549 if (macb_tx_map(bp, queue, skb, hdrlen)) {
2550 dev_kfree_skb_any(skb);
2551 goto unlock;
2552 }
2553
2554 /* Make newly initialized descriptor visible to hardware */
2555 wmb();
2556 skb_tx_timestamp(skb);
2557 netdev_tx_sent_queue(netdev_get_tx_queue(bp->dev, queue_index),
2558 skb->len);
2559
2560 spin_lock(&bp->lock);
2561 macb_tx_lpi_wake(bp);
2562 macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
2563 spin_unlock(&bp->lock);
2564
2565 if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
2566 netif_stop_subqueue(dev, queue_index);
2567
2568 unlock:
2569 spin_unlock_irqrestore(&queue->tx_ptr_lock, flags);
2570
2571 return ret;
2572 }
2573
2574 static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
2575 {
2576 if (!macb_is_gem(bp)) {
2577 bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
2578 } else {
2579 bp->rx_buffer_size = MIN(size, RX_BUFFER_MAX);
2580
2581 if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
2582 netdev_dbg(bp->dev,
2583 "RX buffer must be multiple of %d bytes, expanding\n",
2584 RX_BUFFER_MULTIPLE);
2585 bp->rx_buffer_size =
2586 roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
2587 }
2588 }
2589
2590 netdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
2591 bp->dev->mtu, bp->rx_buffer_size);
2592 }
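/* Example: on a GEM with the standard 1500-byte MTU, macb_open()
 * passes 1500 + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN = 1520 bytes,
 * which is not a multiple of RX_BUFFER_MULTIPLE and is therefore
 * rounded up to a 1536-byte RX buffer.
 */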
2593
2594 static void gem_free_rx_buffers(struct macb *bp)
2595 {
2596 struct sk_buff *skb;
2597 struct macb_dma_desc *desc;
2598 struct macb_queue *queue;
2599 dma_addr_t addr;
2600 unsigned int q;
2601 int i;
2602
2603 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2604 if (!queue->rx_skbuff)
2605 continue;
2606
2607 for (i = 0; i < bp->rx_ring_size; i++) {
2608 skb = queue->rx_skbuff[i];
2609
2610 if (!skb)
2611 continue;
2612
2613 desc = macb_rx_desc(queue, i);
2614 addr = macb_get_addr(bp, desc);
2615
2616 dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
2617 DMA_FROM_DEVICE);
2618 dev_kfree_skb_any(skb);
2619 skb = NULL;
2620 }
2621
2622 kfree(queue->rx_skbuff);
2623 queue->rx_skbuff = NULL;
2624 }
2625 }
2626
2627 static void macb_free_rx_buffers(struct macb *bp)
2628 {
2629 struct macb_queue *queue = &bp->queues[0];
2630
2631 if (queue->rx_buffers) {
2632 dma_free_coherent(&bp->pdev->dev,
2633 bp->rx_ring_size * bp->rx_buffer_size,
2634 queue->rx_buffers, queue->rx_buffers_dma);
2635 queue->rx_buffers = NULL;
2636 }
2637 }
2638
2639 static unsigned int macb_tx_ring_size_per_queue(struct macb *bp)
2640 {
2641 return macb_dma_desc_get_size(bp) * bp->tx_ring_size + bp->tx_bd_rd_prefetch;
2642 }
2643
2644 static unsigned int macb_rx_ring_size_per_queue(struct macb *bp)
2645 {
2646 return macb_dma_desc_get_size(bp) * bp->rx_ring_size + bp->rx_bd_rd_prefetch;
2647 }
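/* These sizes scale with the descriptor layout: assuming 64-bit DMA
 * descriptors of 16 bytes each, the default 512-entry TX ring costs
 * 8 KiB of descriptor memory per queue, plus whatever bd_rd_prefetch
 * padding the hardware revision demands.
 */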
2648
2649 static void macb_free_consistent(struct macb *bp)
2650 {
2651 struct device *dev = &bp->pdev->dev;
2652 struct macb_queue *queue;
2653 unsigned int q;
2654 size_t size;
2655
2656 if (bp->rx_ring_tieoff) {
2657 dma_free_coherent(dev, macb_dma_desc_get_size(bp),
2658 bp->rx_ring_tieoff, bp->rx_ring_tieoff_dma);
2659 bp->rx_ring_tieoff = NULL;
2660 }
2661
2662 bp->macbgem_ops.mog_free_rx_buffers(bp);
2663
2664 size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
2665 dma_free_coherent(dev, size, bp->queues[0].tx_ring, bp->queues[0].tx_ring_dma);
2666
2667 size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
2668 dma_free_coherent(dev, size, bp->queues[0].rx_ring, bp->queues[0].rx_ring_dma);
2669
2670 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2671 kfree(queue->tx_skb);
2672 queue->tx_skb = NULL;
2673 queue->tx_ring = NULL;
2674 queue->rx_ring = NULL;
2675 }
2676 }
2677
2678 static int gem_alloc_rx_buffers(struct macb *bp)
2679 {
2680 struct macb_queue *queue;
2681 unsigned int q;
2682 int size;
2683
2684 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2685 size = bp->rx_ring_size * sizeof(struct sk_buff *);
2686 queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
2687 if (!queue->rx_skbuff)
2688 return -ENOMEM;
2689 else
2690 netdev_dbg(bp->dev,
2691 "Allocated %d RX struct sk_buff entries at %p\n",
2692 bp->rx_ring_size, queue->rx_skbuff);
2693 }
2694 return 0;
2695 }
2696
2697 static int macb_alloc_rx_buffers(struct macb *bp)
2698 {
2699 struct macb_queue *queue = &bp->queues[0];
2700 int size;
2701
2702 size = bp->rx_ring_size * bp->rx_buffer_size;
2703 queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
2704 &queue->rx_buffers_dma, GFP_KERNEL);
2705 if (!queue->rx_buffers)
2706 return -ENOMEM;
2707
2708 netdev_dbg(bp->dev,
2709 "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
2710 size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
2711 return 0;
2712 }
2713
2714 static int macb_alloc_consistent(struct macb *bp)
2715 {
2716 struct device *dev = &bp->pdev->dev;
2717 dma_addr_t tx_dma, rx_dma;
2718 struct macb_queue *queue;
2719 unsigned int q;
2720 void *tx, *rx;
2721 size_t size;
2722
2723 /*
2724 * Upper 32 bits of the Tx/Rx DMA descriptors for each queue must match!
2725 * We cannot enforce this guarantee, the best we can do is do a single
2726 * allocation and hope it will land into alloc_pages() that guarantees
2727 * natural alignment of physical addresses.
2728 */
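/* The upper_32_bits() comparisons below enforce exactly that: should
 * an allocation straddle a 4 GiB boundary, the per-queue rings would
 * need different TBQPH/RBQPH values, so we bail out and treat it as an
 * allocation failure instead of programming bogus base pointers.
 */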
2729
2730 size = bp->num_queues * macb_tx_ring_size_per_queue(bp);
2731 tx = dma_alloc_coherent(dev, size, &tx_dma, GFP_KERNEL);
2732 if (!tx || upper_32_bits(tx_dma) != upper_32_bits(tx_dma + size - 1))
2733 goto out_err;
2734 netdev_dbg(bp->dev, "Allocated %zu bytes for %u TX rings at %08lx (mapped %p)\n",
2735 size, bp->num_queues, (unsigned long)tx_dma, tx);
2736
2737 size = bp->num_queues * macb_rx_ring_size_per_queue(bp);
2738 rx = dma_alloc_coherent(dev, size, &rx_dma, GFP_KERNEL);
2739 if (!rx || upper_32_bits(rx_dma) != upper_32_bits(rx_dma + size - 1))
2740 goto out_err;
2741 netdev_dbg(bp->dev, "Allocated %zu bytes for %u RX rings at %08lx (mapped %p)\n",
2742 size, bp->num_queues, (unsigned long)rx_dma, rx);
2743
2744 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2745 queue->tx_ring = tx + macb_tx_ring_size_per_queue(bp) * q;
2746 queue->tx_ring_dma = tx_dma + macb_tx_ring_size_per_queue(bp) * q;
2747
2748 queue->rx_ring = rx + macb_rx_ring_size_per_queue(bp) * q;
2749 queue->rx_ring_dma = rx_dma + macb_rx_ring_size_per_queue(bp) * q;
2750
2751 size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
2752 queue->tx_skb = kmalloc(size, GFP_KERNEL);
2753 if (!queue->tx_skb)
2754 goto out_err;
2755 }
2756 if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
2757 goto out_err;
2758
2759 /* Required for tie off descriptor for PM cases */
2760 if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE)) {
2761 bp->rx_ring_tieoff = dma_alloc_coherent(&bp->pdev->dev,
2762 macb_dma_desc_get_size(bp),
2763 &bp->rx_ring_tieoff_dma,
2764 GFP_KERNEL);
2765 if (!bp->rx_ring_tieoff)
2766 goto out_err;
2767 }
2768
2769 return 0;
2770
2771 out_err:
2772 macb_free_consistent(bp);
2773 return -ENOMEM;
2774 }
2775
2776 static void macb_init_tieoff(struct macb *bp)
2777 {
2778 struct macb_dma_desc *desc = bp->rx_ring_tieoff;
2779
2780 if (bp->caps & MACB_CAPS_QUEUE_DISABLE)
2781 return;
2782 /* Set up a wrapping descriptor with no free slots
2783 * (WRAP and USED) to tie off/disable unused RX queues.
2784 */
2785 macb_set_addr(bp, desc, MACB_BIT(RX_WRAP) | MACB_BIT(RX_USED));
2786 desc->ctrl = 0;
2787 }
2788
2789 static void gem_init_rx_ring(struct macb_queue *queue)
2790 {
2791 queue->rx_tail = 0;
2792 queue->rx_prepared_head = 0;
2793
2794 gem_rx_refill(queue);
2795 }
2796
2797 static void gem_init_rings(struct macb *bp)
2798 {
2799 struct macb_queue *queue;
2800 struct macb_dma_desc *desc = NULL;
2801 unsigned int q;
2802 int i;
2803
2804 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2805 for (i = 0; i < bp->tx_ring_size; i++) {
2806 desc = macb_tx_desc(queue, i);
2807 macb_set_addr(bp, desc, 0);
2808 desc->ctrl = MACB_BIT(TX_USED);
2809 }
2810 desc->ctrl |= MACB_BIT(TX_WRAP);
2811 queue->tx_head = 0;
2812 queue->tx_tail = 0;
2813
2814 gem_init_rx_ring(queue);
2815 }
2816
2817 macb_init_tieoff(bp);
2818 }
2819
2820 static void macb_init_rings(struct macb *bp)
2821 {
2822 int i;
2823 struct macb_dma_desc *desc = NULL;
2824
2825 macb_init_rx_ring(&bp->queues[0]);
2826
2827 for (i = 0; i < bp->tx_ring_size; i++) {
2828 desc = macb_tx_desc(&bp->queues[0], i);
2829 macb_set_addr(bp, desc, 0);
2830 desc->ctrl = MACB_BIT(TX_USED);
2831 }
2832 bp->queues[0].tx_head = 0;
2833 bp->queues[0].tx_tail = 0;
2834 desc->ctrl |= MACB_BIT(TX_WRAP);
2835
2836 macb_init_tieoff(bp);
2837 }
2838
2839 static void macb_reset_hw(struct macb *bp)
2840 {
2841 struct macb_queue *queue;
2842 unsigned int q;
2843 u32 ctrl = macb_readl(bp, NCR);
2844
2845 /* Disable RX and TX (XXX: Should we halt the transmission
2846 * more gracefully?)
2847 */
2848 ctrl &= ~(MACB_BIT(RE) | MACB_BIT(TE));
2849
2850 /* Clear the stats registers (XXX: Update stats first?) */
2851 ctrl |= MACB_BIT(CLRSTAT);
2852
2853 macb_writel(bp, NCR, ctrl);
2854
2855 /* Clear all status flags */
2856 macb_writel(bp, TSR, -1);
2857 macb_writel(bp, RSR, -1);
2858
2859 /* Disable RX partial store and forward and reset watermark value */
2860 gem_writel(bp, PBUFRXCUT, 0);
2861
2862 /* Disable all interrupts */
2863 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2864 queue_writel(queue, IDR, -1);
2865 queue_readl(queue, ISR);
2866 macb_queue_isr_clear(bp, queue, -1);
2867 }
2868 }
2869
2870 static u32 gem_mdc_clk_div(struct macb *bp)
2871 {
2872 u32 config;
2873 unsigned long pclk_hz = clk_get_rate(bp->pclk);
2874
2875 if (pclk_hz <= 20000000)
2876 config = GEM_BF(CLK, GEM_CLK_DIV8);
2877 else if (pclk_hz <= 40000000)
2878 config = GEM_BF(CLK, GEM_CLK_DIV16);
2879 else if (pclk_hz <= 80000000)
2880 config = GEM_BF(CLK, GEM_CLK_DIV32);
2881 else if (pclk_hz <= 120000000)
2882 config = GEM_BF(CLK, GEM_CLK_DIV48);
2883 else if (pclk_hz <= 160000000)
2884 config = GEM_BF(CLK, GEM_CLK_DIV64);
2885 else if (pclk_hz <= 240000000)
2886 config = GEM_BF(CLK, GEM_CLK_DIV96);
2887 else if (pclk_hz <= 320000000)
2888 config = GEM_BF(CLK, GEM_CLK_DIV128);
2889 else
2890 config = GEM_BF(CLK, GEM_CLK_DIV224);
2891
2892 return config;
2893 }
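/* Example: a 125 MHz pclk falls in the <= 160 MHz bracket and selects
 * GEM_CLK_DIV64, i.e. an MDC of roughly 1.95 MHz, comfortably below
 * the 2.5 MHz ceiling IEEE 802.3 sets for the MDIO bus.
 */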
2894
2895 static u32 macb_mdc_clk_div(struct macb *bp)
2896 {
2897 u32 config;
2898 unsigned long pclk_hz;
2899
2900 if (macb_is_gem(bp))
2901 return gem_mdc_clk_div(bp);
2902
2903 pclk_hz = clk_get_rate(bp->pclk);
2904 if (pclk_hz <= 20000000)
2905 config = MACB_BF(CLK, MACB_CLK_DIV8);
2906 else if (pclk_hz <= 40000000)
2907 config = MACB_BF(CLK, MACB_CLK_DIV16);
2908 else if (pclk_hz <= 80000000)
2909 config = MACB_BF(CLK, MACB_CLK_DIV32);
2910 else
2911 config = MACB_BF(CLK, MACB_CLK_DIV64);
2912
2913 return config;
2914 }
2915
2916 /* Get the DMA bus width field of the network configuration register that we
2917 * should program. We find the width from decoding the design configuration
2918 * register to find the maximum supported data bus width.
2919 */
2920 static u32 macb_dbw(struct macb *bp)
2921 {
2922 if (!macb_is_gem(bp))
2923 return 0;
2924
2925 switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
2926 case 4:
2927 return GEM_BF(DBW, GEM_DBW128);
2928 case 2:
2929 return GEM_BF(DBW, GEM_DBW64);
2930 case 1:
2931 default:
2932 return GEM_BF(DBW, GEM_DBW32);
2933 }
2934 }
2935
2936 /* Configure the receive DMA engine
2937 * - use the correct receive buffer size
2938 * - set best burst length for DMA operations
2939 * (if not supported by the FIFO, it will fall back to the default)
2940 * - set both rx/tx packet buffers to full memory size
2941 * These are configurable parameters for GEM.
2942 */
2943 static void macb_configure_dma(struct macb *bp)
2944 {
2945 struct macb_queue *queue;
2946 u32 buffer_size;
2947 unsigned int q;
2948 u32 dmacfg;
2949
2950 buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
2951 if (macb_is_gem(bp)) {
2952 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
2953 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
2954 if (q)
2955 queue_writel(queue, RBQS, buffer_size);
2956 else
2957 dmacfg |= GEM_BF(RXBS, buffer_size);
2958 }
2959 if (bp->dma_burst_length)
2960 dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
2961 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
2962 dmacfg &= ~GEM_BIT(ENDIA_PKT);
2963
2964 if (bp->native_io)
2965 dmacfg &= ~GEM_BIT(ENDIA_DESC);
2966 else
2967 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
2968
2969 if (bp->dev->features & NETIF_F_HW_CSUM)
2970 dmacfg |= GEM_BIT(TXCOEN);
2971 else
2972 dmacfg &= ~GEM_BIT(TXCOEN);
2973
2974 dmacfg &= ~GEM_BIT(ADDR64);
2975 if (macb_dma64(bp))
2976 dmacfg |= GEM_BIT(ADDR64);
2977 if (macb_dma_ptp(bp))
2978 dmacfg |= GEM_BIT(RXEXT) | GEM_BIT(TXEXT);
2979 netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
2980 dmacfg);
2981 gem_writel(bp, DMACFG, dmacfg);
2982 }
2983 }
2984
2985 static void macb_init_hw(struct macb *bp)
2986 {
2987 u32 config;
2988
2989 macb_reset_hw(bp);
2990 macb_set_hwaddr(bp);
2991
2992 config = macb_mdc_clk_div(bp);
2993 /* Make eth data aligned.
2994 * If RSC capable, that offset is ignored by HW.
2995 */
2996 if (!(bp->caps & MACB_CAPS_RSC))
2997 config |= MACB_BF(RBOF, NET_IP_ALIGN);
2998 config |= MACB_BIT(DRFCS); /* Discard Rx FCS */
2999 if (bp->caps & MACB_CAPS_JUMBO)
3000 config |= MACB_BIT(JFRAME); /* Enable jumbo frames */
3001 else
3002 config |= MACB_BIT(BIG); /* Receive oversized frames */
3003 if (bp->dev->flags & IFF_PROMISC)
3004 config |= MACB_BIT(CAF); /* Copy All Frames */
3005 else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
3006 config |= GEM_BIT(RXCOEN);
3007 if (!(bp->dev->flags & IFF_BROADCAST))
3008 config |= MACB_BIT(NBC); /* No BroadCast */
3009 config |= macb_dbw(bp);
3010 macb_writel(bp, NCFGR, config);
3011 if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
3012 gem_writel(bp, JML, bp->jumbo_max_len);
3013 bp->rx_frm_len_mask = MACB_RX_FRMLEN_MASK;
3014 if (bp->caps & MACB_CAPS_JUMBO)
3015 bp->rx_frm_len_mask = MACB_RX_JFRMLEN_MASK;
3016
3017 macb_configure_dma(bp);
3018
3019 /* Enable RX partial store and forward and set watermark */
3020 if (bp->rx_watermark)
3021 gem_writel(bp, PBUFRXCUT, (bp->rx_watermark | GEM_BIT(ENCUTTHRU)));
3022 }
3023
3024 /* The hash address register is 64 bits long and takes up two
3025 * locations in the memory map. The least significant bits are stored
3026 * in EMAC_HSL and the most significant bits in EMAC_HSH.
3027 *
3028 * The unicast hash enable and the multicast hash enable bits in the
3029 * network configuration register enable the reception of hash matched
3030 * frames. The destination address is reduced to a 6 bit index into
3031 * the 64 bit hash register using the following hash function. The
3032 * hash function is an exclusive or of every sixth bit of the
3033 * destination address.
3034 *
3035 * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47]
3036 * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46]
3037 * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45]
3038 * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44]
3039 * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43]
3040 * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42]
3041 *
3042 * da[0] represents the least significant bit of the first byte
3043 * received, that is, the multicast/unicast indicator, and da[47]
3044 * represents the most significant bit of the last byte received. If
3045 * the hash index, hi[n], points to a bit that is set in the hash
3046 * register then the frame will be matched according to whether the
3047 * frame is multicast or unicast. A multicast match will be signalled
3048 * if the multicast hash enable bit is set, da[0] is 1 and the hash
3049 * index points to a bit set in the hash register. A unicast match
3050 * will be signalled if the unicast hash enable bit is set, da[0] is 0
3051 * and the hash index points to a bit set in the hash register. To
3052 * receive all multicast frames, the hash register should be set with
3053 * all ones and the multicast hash enable bit should be set in the
3054 * network configuration register.
3055 */
3056
3057 static inline int hash_bit_value(int bitnr, __u8 *addr)
3058 {
3059 if (addr[bitnr / 8] & (1 << (bitnr % 8)))
3060 return 1;
3061 return 0;
3062 }
3063
3064 /* Return the hash index value for the specified address. */
3065 static int hash_get_index(__u8 *addr)
3066 {
3067 int i, j, bitval;
3068 int hash_index = 0;
3069
3070 for (j = 0; j < 6; j++) {
3071 for (i = 0, bitval = 0; i < 8; i++)
3072 bitval ^= hash_bit_value(i * 6 + j, addr);
3073
3074 hash_index |= (bitval << j);
3075 }
3076
3077 return hash_index;
3078 }
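/* Sanity example: for the broadcast address ff:ff:ff:ff:ff:ff every
 * hi[j] XORs eight set bits down to 0, so broadcast hashes to index 0
 * (broadcast acceptance is normally governed by the NBC bit rather
 * than this filter).
 */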
3079
3080 /* Add multicast addresses to the internal multicast-hash table. */
3081 static void macb_sethashtable(struct net_device *dev)
3082 {
3083 struct netdev_hw_addr *ha;
3084 unsigned long mc_filter[2];
3085 unsigned int bitnr;
3086 struct macb *bp = netdev_priv(dev);
3087
3088 mc_filter[0] = 0;
3089 mc_filter[1] = 0;
3090
3091 netdev_for_each_mc_addr(ha, dev) {
3092 bitnr = hash_get_index(ha->addr);
3093 mc_filter[bitnr >> 5] |= 1 << (bitnr & 31);
3094 }
3095
3096 macb_or_gem_writel(bp, HRB, mc_filter[0]);
3097 macb_or_gem_writel(bp, HRT, mc_filter[1]);
3098 }
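/* Example: a hash index of 37 sets bit 5 of mc_filter[1] (37 >> 5 is
 * 1, 37 & 31 is 5), i.e. a bit in the upper half of the 64-bit filter,
 * which is what the HRT write programs.
 */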
3099
3100 /* Enable/Disable promiscuous and multicast modes. */
3101 static void macb_set_rx_mode(struct net_device *dev)
3102 {
3103 unsigned long cfg;
3104 struct macb *bp = netdev_priv(dev);
3105
3106 cfg = macb_readl(bp, NCFGR);
3107
3108 if (dev->flags & IFF_PROMISC) {
3109 /* Enable promiscuous mode */
3110 cfg |= MACB_BIT(CAF);
3111
3112 /* Disable RX checksum offload */
3113 if (macb_is_gem(bp))
3114 cfg &= ~GEM_BIT(RXCOEN);
3115 } else {
3116 /* Disable promiscuous mode */
3117 cfg &= ~MACB_BIT(CAF);
3118
3119 /* Enable RX checksum offload only if requested */
3120 if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
3121 cfg |= GEM_BIT(RXCOEN);
3122 }
3123
3124 if (dev->flags & IFF_ALLMULTI) {
3125 /* Enable all multicast mode */
3126 macb_or_gem_writel(bp, HRB, -1);
3127 macb_or_gem_writel(bp, HRT, -1);
3128 cfg |= MACB_BIT(NCFGR_MTI);
3129 } else if (!netdev_mc_empty(dev)) {
3130 /* Enable specific multicasts */
3131 macb_sethashtable(dev);
3132 cfg |= MACB_BIT(NCFGR_MTI);
3133 } else if (dev->flags & (~IFF_ALLMULTI)) {
3134 /* Disable all multicast mode */
3135 macb_or_gem_writel(bp, HRB, 0);
3136 macb_or_gem_writel(bp, HRT, 0);
3137 cfg &= ~MACB_BIT(NCFGR_MTI);
3138 }
3139
3140 macb_writel(bp, NCFGR, cfg);
3141 }
3142
3143 static int macb_open(struct net_device *dev)
3144 {
3145 size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
3146 struct macb *bp = netdev_priv(dev);
3147 struct macb_queue *queue;
3148 unsigned int q;
3149 int err;
3150
3151 netdev_dbg(bp->dev, "open\n");
3152
3153 err = pm_runtime_resume_and_get(&bp->pdev->dev);
3154 if (err < 0)
3155 return err;
3156
3157 /* RX buffers initialization */
3158 macb_init_rx_buffer_size(bp, bufsz);
3159
3160 err = macb_alloc_consistent(bp);
3161 if (err) {
3162 netdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
3163 err);
3164 goto pm_exit;
3165 }
3166
3167 bp->macbgem_ops.mog_init_rings(bp);
3168 macb_init_buffers(bp);
3169
3170 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3171 napi_enable(&queue->napi_rx);
3172 napi_enable(&queue->napi_tx);
3173 }
3174
3175 macb_init_hw(bp);
3176
3177 err = phy_set_mode_ext(bp->phy, PHY_MODE_ETHERNET, bp->phy_interface);
3178 if (err)
3179 goto reset_hw;
3180
3181 err = phy_power_on(bp->phy);
3182 if (err)
3183 goto reset_hw;
3184
3185 err = macb_phylink_connect(bp);
3186 if (err)
3187 goto phy_off;
3188
3189 netif_tx_start_all_queues(dev);
3190
3191 if (bp->ptp_info)
3192 bp->ptp_info->ptp_init(dev);
3193
3194 return 0;
3195
3196 phy_off:
3197 phy_power_off(bp->phy);
3198
3199 reset_hw:
3200 macb_reset_hw(bp);
3201 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3202 napi_disable(&queue->napi_rx);
3203 napi_disable(&queue->napi_tx);
3204 }
3205 macb_free_consistent(bp);
3206 pm_exit:
3207 pm_runtime_put_sync(&bp->pdev->dev);
3208 return err;
3209 }
3210
3211 static int macb_close(struct net_device *dev)
3212 {
3213 struct macb *bp = netdev_priv(dev);
3214 struct macb_queue *queue;
3215 unsigned long flags;
3216 unsigned int q;
3217
3218 netif_tx_stop_all_queues(dev);
3219
3220 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3221 napi_disable(&queue->napi_rx);
3222 napi_disable(&queue->napi_tx);
3223 netdev_tx_reset_queue(netdev_get_tx_queue(dev, q));
3224 }
3225
3226 cancel_delayed_work_sync(&bp->tx_lpi_work);
3227
3228 phylink_stop(bp->phylink);
3229 phylink_disconnect_phy(bp->phylink);
3230
3231 phy_power_off(bp->phy);
3232
3233 spin_lock_irqsave(&bp->lock, flags);
3234 macb_reset_hw(bp);
3235 netif_carrier_off(dev);
3236 spin_unlock_irqrestore(&bp->lock, flags);
3237
3238 macb_free_consistent(bp);
3239
3240 if (bp->ptp_info)
3241 bp->ptp_info->ptp_remove(dev);
3242
3243 pm_runtime_put(&bp->pdev->dev);
3244
3245 return 0;
3246 }
3247
3248 static int macb_change_mtu(struct net_device *dev, int new_mtu)
3249 {
3250 if (netif_running(dev))
3251 return -EBUSY;
3252
3253 WRITE_ONCE(dev->mtu, new_mtu);
3254
3255 return 0;
3256 }
3257
3258 static int macb_set_mac_addr(struct net_device *dev, void *addr)
3259 {
3260 int err;
3261
3262 err = eth_mac_addr(dev, addr);
3263 if (err < 0)
3264 return err;
3265
3266 macb_set_hwaddr(netdev_priv(dev));
3267 return 0;
3268 }
3269
3270 static void gem_update_stats(struct macb *bp)
3271 {
3272 struct macb_queue *queue;
3273 unsigned int i, q, idx;
3274 unsigned long *stat;
3275
3276 u64 *p = &bp->hw_stats.gem.tx_octets;
3277
3278 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
3279 u32 offset = gem_statistics[i].offset;
3280 u64 val = bp->macb_reg_readl(bp, offset);
3281
3282 bp->ethtool_stats[i] += val;
3283 *p += val;
3284
3285 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
3286 /* Add GEM_OCTTXH, GEM_OCTRXH */
3287 val = bp->macb_reg_readl(bp, offset + 4);
3288 bp->ethtool_stats[i] += ((u64)val) << 32;
3289 *p += ((u64)val) << 32;
3290 }
3291 }
3292
3293 idx = GEM_STATS_LEN;
3294 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
3295 for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
3296 bp->ethtool_stats[idx++] = *stat;
3297 }
3298
3299 static void gem_get_stats(struct macb *bp, struct rtnl_link_stats64 *nstat)
3300 {
3301 struct gem_stats *hwstat = &bp->hw_stats.gem;
3302
3303 spin_lock_irq(&bp->stats_lock);
3304 if (netif_running(bp->dev))
3305 gem_update_stats(bp);
3306
3307 nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
3308 hwstat->rx_alignment_errors +
3309 hwstat->rx_resource_errors +
3310 hwstat->rx_overruns +
3311 hwstat->rx_oversize_frames +
3312 hwstat->rx_jabbers +
3313 hwstat->rx_undersized_frames +
3314 hwstat->rx_length_field_frame_errors);
3315 nstat->tx_errors = (hwstat->tx_late_collisions +
3316 hwstat->tx_excessive_collisions +
3317 hwstat->tx_underrun +
3318 hwstat->tx_carrier_sense_errors);
3319 nstat->multicast = hwstat->rx_multicast_frames;
3320 nstat->collisions = (hwstat->tx_single_collision_frames +
3321 hwstat->tx_multiple_collision_frames +
3322 hwstat->tx_excessive_collisions);
3323 nstat->rx_length_errors = (hwstat->rx_oversize_frames +
3324 hwstat->rx_jabbers +
3325 hwstat->rx_undersized_frames +
3326 hwstat->rx_length_field_frame_errors);
3327 nstat->rx_over_errors = hwstat->rx_resource_errors;
3328 nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
3329 nstat->rx_frame_errors = hwstat->rx_alignment_errors;
3330 nstat->rx_fifo_errors = hwstat->rx_overruns;
3331 nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
3332 nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
3333 nstat->tx_fifo_errors = hwstat->tx_underrun;
3334 spin_unlock_irq(&bp->stats_lock);
3335 }
3336
3337 static void gem_get_ethtool_stats(struct net_device *dev,
3338 struct ethtool_stats *stats, u64 *data)
3339 {
3340 struct macb *bp = netdev_priv(dev);
3341
3342 spin_lock_irq(&bp->stats_lock);
3343 gem_update_stats(bp);
3344 memcpy(data, &bp->ethtool_stats, sizeof(u64)
3345 * (GEM_STATS_LEN + QUEUE_STATS_LEN * bp->num_queues));
3346 spin_unlock_irq(&bp->stats_lock);
3347 }
3348
3349 static int gem_get_sset_count(struct net_device *dev, int sset)
3350 {
3351 struct macb *bp = netdev_priv(dev);
3352
3353 switch (sset) {
3354 case ETH_SS_STATS:
3355 return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
3356 default:
3357 return -EOPNOTSUPP;
3358 }
3359 }
3360
3361 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
3362 {
3363 struct macb *bp = netdev_priv(dev);
3364 struct macb_queue *queue;
3365 unsigned int i;
3366 unsigned int q;
3367
3368 switch (sset) {
3369 case ETH_SS_STATS:
3370 for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
3371 memcpy(p, gem_statistics[i].stat_string,
3372 ETH_GSTRING_LEN);
3373
3374 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
3375 for (i = 0; i < QUEUE_STATS_LEN; i++)
3376 ethtool_sprintf(&p, "q%u_%s", q, queue_statistics[i].stat_string);
3377 }
3378 break;
3379 }
3380 }
3381
3382 static void macb_get_stats(struct net_device *dev,
3383 struct rtnl_link_stats64 *nstat)
3384 {
3385 struct macb *bp = netdev_priv(dev);
3386 struct macb_stats *hwstat = &bp->hw_stats.macb;
3387
3388 netdev_stats_to_stats64(nstat, &bp->dev->stats);
3389 if (macb_is_gem(bp)) {
3390 gem_get_stats(bp, nstat);
3391 return;
3392 }
3393
3394 /* read stats from hardware */
3395 spin_lock_irq(&bp->stats_lock);
3396 macb_update_stats(bp);
3397
3398 /* Convert HW stats into netdevice stats */
3399 nstat->rx_errors = (hwstat->rx_fcs_errors +
3400 hwstat->rx_align_errors +
3401 hwstat->rx_resource_errors +
3402 hwstat->rx_overruns +
3403 hwstat->rx_oversize_pkts +
3404 hwstat->rx_jabbers +
3405 hwstat->rx_undersize_pkts +
3406 hwstat->rx_length_mismatch);
3407 nstat->tx_errors = (hwstat->tx_late_cols +
3408 hwstat->tx_excessive_cols +
3409 hwstat->tx_underruns +
3410 hwstat->tx_carrier_errors +
3411 hwstat->sqe_test_errors);
3412 nstat->collisions = (hwstat->tx_single_cols +
3413 hwstat->tx_multiple_cols +
3414 hwstat->tx_excessive_cols);
3415 nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
3416 hwstat->rx_jabbers +
3417 hwstat->rx_undersize_pkts +
3418 hwstat->rx_length_mismatch);
3419 nstat->rx_over_errors = hwstat->rx_resource_errors +
3420 hwstat->rx_overruns;
3421 nstat->rx_crc_errors = hwstat->rx_fcs_errors;
3422 nstat->rx_frame_errors = hwstat->rx_align_errors;
3423 nstat->rx_fifo_errors = hwstat->rx_overruns;
3424 /* XXX: What does "missed" mean? */
3425 nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
3426 nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
3427 nstat->tx_fifo_errors = hwstat->tx_underruns;
3428 /* Don't know about heartbeat or window errors... */
3429 spin_unlock_irq(&bp->stats_lock);
3430 }
3431
3432 static void macb_get_pause_stats(struct net_device *dev,
3433 struct ethtool_pause_stats *pause_stats)
3434 {
3435 struct macb *bp = netdev_priv(dev);
3436 struct macb_stats *hwstat = &bp->hw_stats.macb;
3437
3438 spin_lock_irq(&bp->stats_lock);
3439 macb_update_stats(bp);
3440 pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
3441 pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
3442 spin_unlock_irq(&bp->stats_lock);
3443 }
3444
3445 static void gem_get_pause_stats(struct net_device *dev,
3446 struct ethtool_pause_stats *pause_stats)
3447 {
3448 struct macb *bp = netdev_priv(dev);
3449 struct gem_stats *hwstat = &bp->hw_stats.gem;
3450
3451 spin_lock_irq(&bp->stats_lock);
3452 gem_update_stats(bp);
3453 pause_stats->tx_pause_frames = hwstat->tx_pause_frames;
3454 pause_stats->rx_pause_frames = hwstat->rx_pause_frames;
3455 spin_unlock_irq(&bp->stats_lock);
3456 }
3457
3458 static void macb_get_eth_mac_stats(struct net_device *dev,
3459 struct ethtool_eth_mac_stats *mac_stats)
3460 {
3461 struct macb *bp = netdev_priv(dev);
3462 struct macb_stats *hwstat = &bp->hw_stats.macb;
3463
3464 spin_lock_irq(&bp->stats_lock);
3465 macb_update_stats(bp);
3466 mac_stats->FramesTransmittedOK = hwstat->tx_ok;
3467 mac_stats->SingleCollisionFrames = hwstat->tx_single_cols;
3468 mac_stats->MultipleCollisionFrames = hwstat->tx_multiple_cols;
3469 mac_stats->FramesReceivedOK = hwstat->rx_ok;
3470 mac_stats->FrameCheckSequenceErrors = hwstat->rx_fcs_errors;
3471 mac_stats->AlignmentErrors = hwstat->rx_align_errors;
3472 mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred;
3473 mac_stats->LateCollisions = hwstat->tx_late_cols;
3474 mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_cols;
3475 mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underruns;
3476 mac_stats->CarrierSenseErrors = hwstat->tx_carrier_errors;
3477 mac_stats->FramesLostDueToIntMACRcvError = hwstat->rx_overruns;
3478 mac_stats->InRangeLengthErrors = hwstat->rx_length_mismatch;
3479 mac_stats->FrameTooLongErrors = hwstat->rx_oversize_pkts;
3480 spin_unlock_irq(&bp->stats_lock);
3481 }
3482
3483 static void gem_get_eth_mac_stats(struct net_device *dev,
3484 struct ethtool_eth_mac_stats *mac_stats)
3485 {
3486 struct macb *bp = netdev_priv(dev);
3487 struct gem_stats *hwstat = &bp->hw_stats.gem;
3488
3489 spin_lock_irq(&bp->stats_lock);
3490 gem_update_stats(bp);
3491 mac_stats->FramesTransmittedOK = hwstat->tx_frames;
3492 mac_stats->SingleCollisionFrames = hwstat->tx_single_collision_frames;
3493 mac_stats->MultipleCollisionFrames =
3494 hwstat->tx_multiple_collision_frames;
3495 mac_stats->FramesReceivedOK = hwstat->rx_frames;
3496 mac_stats->FrameCheckSequenceErrors =
3497 hwstat->rx_frame_check_sequence_errors;
3498 mac_stats->AlignmentErrors = hwstat->rx_alignment_errors;
3499 mac_stats->OctetsTransmittedOK = hwstat->tx_octets;
3500 mac_stats->FramesWithDeferredXmissions = hwstat->tx_deferred_frames;
3501 mac_stats->LateCollisions = hwstat->tx_late_collisions;
3502 mac_stats->FramesAbortedDueToXSColls = hwstat->tx_excessive_collisions;
3503 mac_stats->FramesLostDueToIntMACXmitError = hwstat->tx_underrun;
3504 mac_stats->CarrierSenseErrors = hwstat->tx_carrier_sense_errors;
3505 mac_stats->OctetsReceivedOK = hwstat->rx_octets;
3506 mac_stats->MulticastFramesXmittedOK = hwstat->tx_multicast_frames;
3507 mac_stats->BroadcastFramesXmittedOK = hwstat->tx_broadcast_frames;
3508 mac_stats->MulticastFramesReceivedOK = hwstat->rx_multicast_frames;
3509 mac_stats->BroadcastFramesReceivedOK = hwstat->rx_broadcast_frames;
3510 mac_stats->InRangeLengthErrors = hwstat->rx_length_field_frame_errors;
3511 mac_stats->FrameTooLongErrors = hwstat->rx_oversize_frames;
3512 spin_unlock_irq(&bp->stats_lock);
3513 }
3514
3515 /* TODO: Report SQE test errors when added to phy_stats */
3516 static void macb_get_eth_phy_stats(struct net_device *dev,
3517 struct ethtool_eth_phy_stats *phy_stats)
3518 {
3519 struct macb *bp = netdev_priv(dev);
3520 struct macb_stats *hwstat = &bp->hw_stats.macb;
3521
3522 spin_lock_irq(&bp->stats_lock);
3523 macb_update_stats(bp);
3524 phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
3525 spin_unlock_irq(&bp->stats_lock);
3526 }
3527
3528 static void gem_get_eth_phy_stats(struct net_device *dev,
3529 struct ethtool_eth_phy_stats *phy_stats)
3530 {
3531 struct macb *bp = netdev_priv(dev);
3532 struct gem_stats *hwstat = &bp->hw_stats.gem;
3533
3534 spin_lock_irq(&bp->stats_lock);
3535 gem_update_stats(bp);
3536 phy_stats->SymbolErrorDuringCarrier = hwstat->rx_symbol_errors;
3537 spin_unlock_irq(&bp->stats_lock);
3538 }
3539
3540 static void macb_get_rmon_stats(struct net_device *dev,
3541 struct ethtool_rmon_stats *rmon_stats,
3542 const struct ethtool_rmon_hist_range **ranges)
3543 {
3544 struct macb *bp = netdev_priv(dev);
3545 struct macb_stats *hwstat = &bp->hw_stats.macb;
3546
3547 spin_lock_irq(&bp->stats_lock);
3548 macb_update_stats(bp);
3549 rmon_stats->undersize_pkts = hwstat->rx_undersize_pkts;
3550 rmon_stats->oversize_pkts = hwstat->rx_oversize_pkts;
3551 rmon_stats->jabbers = hwstat->rx_jabbers;
3552 spin_unlock_irq(&bp->stats_lock);
3553 }
3554
3555 static const struct ethtool_rmon_hist_range gem_rmon_ranges[] = {
3556 { 64, 64 },
3557 { 65, 127 },
3558 { 128, 255 },
3559 { 256, 511 },
3560 { 512, 1023 },
3561 { 1024, 1518 },
3562 { 1519, 16384 },
3563 { },
3564 };
3565
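/* The hist[]/hist_tx[] buckets filled below are indexed 1:1 against
 * gem_rmon_ranges above: hist[0] counts 64-byte frames and hist[6]
 * counts frames of 1519 bytes and more.
 */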
3566 static void gem_get_rmon_stats(struct net_device *dev,
3567 struct ethtool_rmon_stats *rmon_stats,
3568 const struct ethtool_rmon_hist_range **ranges)
3569 {
3570 struct macb *bp = netdev_priv(dev);
3571 struct gem_stats *hwstat = &bp->hw_stats.gem;
3572
3573 spin_lock_irq(&bp->stats_lock);
3574 gem_update_stats(bp);
3575 rmon_stats->undersize_pkts = hwstat->rx_undersized_frames;
3576 rmon_stats->oversize_pkts = hwstat->rx_oversize_frames;
3577 rmon_stats->jabbers = hwstat->rx_jabbers;
3578 rmon_stats->hist[0] = hwstat->rx_64_byte_frames;
3579 rmon_stats->hist[1] = hwstat->rx_65_127_byte_frames;
3580 rmon_stats->hist[2] = hwstat->rx_128_255_byte_frames;
3581 rmon_stats->hist[3] = hwstat->rx_256_511_byte_frames;
3582 rmon_stats->hist[4] = hwstat->rx_512_1023_byte_frames;
3583 rmon_stats->hist[5] = hwstat->rx_1024_1518_byte_frames;
3584 rmon_stats->hist[6] = hwstat->rx_greater_than_1518_byte_frames;
3585 rmon_stats->hist_tx[0] = hwstat->tx_64_byte_frames;
3586 rmon_stats->hist_tx[1] = hwstat->tx_65_127_byte_frames;
3587 rmon_stats->hist_tx[2] = hwstat->tx_128_255_byte_frames;
3588 rmon_stats->hist_tx[3] = hwstat->tx_256_511_byte_frames;
3589 rmon_stats->hist_tx[4] = hwstat->tx_512_1023_byte_frames;
3590 rmon_stats->hist_tx[5] = hwstat->tx_1024_1518_byte_frames;
3591 rmon_stats->hist_tx[6] = hwstat->tx_greater_than_1518_byte_frames;
3592 spin_unlock_irq(&bp->stats_lock);
3593 *ranges = gem_rmon_ranges;
3594 }
3595
3596 static int macb_get_regs_len(struct net_device *netdev)
3597 {
3598 return MACB_GREGS_NBR * sizeof(u32);
3599 }
3600
3601 static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
3602 void *p)
3603 {
3604 struct macb *bp = netdev_priv(dev);
3605 unsigned int tail, head;
3606 u32 *regs_buff = p;
3607
3608 regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
3609 | MACB_GREGS_VERSION;
3610
3611 tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
3612 head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
3613
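/* Snapshot layout: [0..7] core registers (NCR..IMR), [8..11] queue 0
 * TX ring state (tail, head and their descriptor DMA addresses),
 * [12] USRIO when implemented, [13] DMACFG on GEM.
 */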
3614 regs_buff[0] = macb_readl(bp, NCR);
3615 regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
3616 regs_buff[2] = macb_readl(bp, NSR);
3617 regs_buff[3] = macb_readl(bp, TSR);
3618 regs_buff[4] = macb_readl(bp, RBQP);
3619 regs_buff[5] = macb_readl(bp, TBQP);
3620 regs_buff[6] = macb_readl(bp, RSR);
3621 regs_buff[7] = macb_readl(bp, IMR);
3622
3623 regs_buff[8] = tail;
3624 regs_buff[9] = head;
3625 regs_buff[10] = macb_tx_dma(&bp->queues[0], tail);
3626 regs_buff[11] = macb_tx_dma(&bp->queues[0], head);
3627
3628 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
3629 regs_buff[12] = macb_or_gem_readl(bp, USRIO);
3630 if (macb_is_gem(bp))
3631 regs_buff[13] = gem_readl(bp, DMACFG);
3632 }
3633
3634 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3635 {
3636 struct macb *bp = netdev_priv(netdev);
3637
3638 phylink_ethtool_get_wol(bp->phylink, wol);
3639 wol->supported |= (WAKE_MAGIC | WAKE_ARP);
3640
3641 /* Add macb wolopts to phy wolopts */
3642 wol->wolopts |= bp->wolopts;
3643 }
3644
3645 static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
3646 {
3647 struct macb *bp = netdev_priv(netdev);
3648 int ret;
3649
3650 /* Pass the request down to the phylink layer */
3651 ret = phylink_ethtool_set_wol(bp->phylink, wol);
3652 /* Don't manage WoL on the MAC if the PHY's set_wol() fails */
3653 if (ret && ret != -EOPNOTSUPP)
3654 return ret;
3655
3656 bp->wolopts = (wol->wolopts & WAKE_MAGIC) ? WAKE_MAGIC : 0;
3657 bp->wolopts |= (wol->wolopts & WAKE_ARP) ? WAKE_ARP : 0;
3658 bp->wol = (wol->wolopts) ? MACB_WOL_ENABLED : 0;
3659
3660 device_set_wakeup_enable(&bp->pdev->dev, bp->wol);
3661
3662 return 0;
3663 }
3664
3665 static int macb_get_link_ksettings(struct net_device *netdev,
3666 struct ethtool_link_ksettings *kset)
3667 {
3668 struct macb *bp = netdev_priv(netdev);
3669
3670 return phylink_ethtool_ksettings_get(bp->phylink, kset);
3671 }
3672
3673 static int macb_set_link_ksettings(struct net_device *netdev,
3674 const struct ethtool_link_ksettings *kset)
3675 {
3676 struct macb *bp = netdev_priv(netdev);
3677
3678 return phylink_ethtool_ksettings_set(bp->phylink, kset);
3679 }
3680
3681 static void macb_get_ringparam(struct net_device *netdev,
3682 struct ethtool_ringparam *ring,
3683 struct kernel_ethtool_ringparam *kernel_ring,
3684 struct netlink_ext_ack *extack)
3685 {
3686 struct macb *bp = netdev_priv(netdev);
3687
3688 ring->rx_max_pending = MAX_RX_RING_SIZE;
3689 ring->tx_max_pending = MAX_TX_RING_SIZE;
3690
3691 ring->rx_pending = bp->rx_ring_size;
3692 ring->tx_pending = bp->tx_ring_size;
3693 }
3694
3695 static int macb_set_ringparam(struct net_device *netdev,
3696 struct ethtool_ringparam *ring,
3697 struct kernel_ethtool_ringparam *kernel_ring,
3698 struct netlink_ext_ack *extack)
3699 {
3700 struct macb *bp = netdev_priv(netdev);
3701 u32 new_rx_size, new_tx_size;
3702 unsigned int reset = 0;
3703
3704 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
3705 return -EINVAL;
3706
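/* Requested sizes are clamped to the hardware limits and rounded up
 * to a power of two, so the applied size may exceed the requested one.
 */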
3707 new_rx_size = clamp_t(u32, ring->rx_pending,
3708 MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
3709 new_rx_size = roundup_pow_of_two(new_rx_size);
3710
3711 new_tx_size = clamp_t(u32, ring->tx_pending,
3712 MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
3713 new_tx_size = roundup_pow_of_two(new_tx_size);
3714
3715 if ((new_tx_size == bp->tx_ring_size) &&
3716 (new_rx_size == bp->rx_ring_size)) {
3717 /* nothing to do */
3718 return 0;
3719 }
3720
3721 if (netif_running(bp->dev)) {
3722 reset = 1;
3723 macb_close(bp->dev);
3724 }
3725
3726 bp->rx_ring_size = new_rx_size;
3727 bp->tx_ring_size = new_tx_size;
3728
3729 if (reset)
3730 macb_open(bp->dev);
3731
3732 return 0;
3733 }
3734
3735 #ifdef CONFIG_MACB_USE_HWSTAMP
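/* The TSU (timestamp unit) normally runs from a dedicated tsu_clk; if
 * the devicetree does not provide one, fall back to pclk so the PTP
 * clock can still be rated.
 */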
3736 static unsigned int gem_get_tsu_rate(struct macb *bp)
3737 {
3738 struct clk *tsu_clk;
3739 unsigned int tsu_rate;
3740
3741 if (!IS_ERR_OR_NULL(bp->tsu_clk)) {
3742 tsu_rate = clk_get_rate(bp->tsu_clk);
3743 } else {
3744 tsu_clk = bp->pclk;
3745 tsu_rate = clk_get_rate(tsu_clk);
3746 dev_warn(&bp->pdev->dev, "devicetree missing tsu_clk, using pclk as fallback\n");
3747 }
3748
3749 return tsu_rate;
3750 }
3751
3752 static s32 gem_get_ptp_max_adj(void)
3753 {
3754 return 64000000;
3755 }
3756
3757 static int gem_get_ts_info(struct net_device *dev,
3758 struct kernel_ethtool_ts_info *info)
3759 {
3760 struct macb *bp = netdev_priv(dev);
3761
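/* Without DMA-assisted PTP timestamping only software timestamping
 * can be offered, via the generic ethtool helper.
 */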
3762 if (!macb_dma_ptp(bp)) {
3763 ethtool_op_get_ts_info(dev, info);
3764 return 0;
3765 }
3766
3767 info->so_timestamping =
3768 SOF_TIMESTAMPING_TX_SOFTWARE |
3769 SOF_TIMESTAMPING_TX_HARDWARE |
3770 SOF_TIMESTAMPING_RX_HARDWARE |
3771 SOF_TIMESTAMPING_RAW_HARDWARE;
3772 info->tx_types =
3773 (1 << HWTSTAMP_TX_ONESTEP_SYNC) |
3774 (1 << HWTSTAMP_TX_OFF) |
3775 (1 << HWTSTAMP_TX_ON);
3776 info->rx_filters =
3777 (1 << HWTSTAMP_FILTER_NONE) |
3778 (1 << HWTSTAMP_FILTER_ALL);
3779
3780 if (bp->ptp_clock)
3781 info->phc_index = ptp_clock_index(bp->ptp_clock);
3782
3783 return 0;
3784 }
3785
3786 static struct macb_ptp_info gem_ptp_info = {
3787 .ptp_init = gem_ptp_init,
3788 .ptp_remove = gem_ptp_remove,
3789 .get_ptp_max_adj = gem_get_ptp_max_adj,
3790 .get_tsu_rate = gem_get_tsu_rate,
3791 .get_ts_info = gem_get_ts_info,
3792 .get_hwtst = gem_get_hwtst,
3793 .set_hwtst = gem_set_hwtst,
3794 };
3795 #endif
3796
3797 static int macb_get_ts_info(struct net_device *netdev,
3798 struct kernel_ethtool_ts_info *info)
3799 {
3800 struct macb *bp = netdev_priv(netdev);
3801
3802 if (bp->ptp_info)
3803 return bp->ptp_info->get_ts_info(netdev, info);
3804
3805 return ethtool_op_get_ts_info(netdev, info);
3806 }
3807
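/* Walk the stored flow entries and toggle the ethtype enable bit of
 * each type-2 screener register. The compare units A/B/C (source IP,
 * destination IP, L4 port) are only re-enabled for fields the rule
 * matches exactly, i.e. with an all-ones mask.
 */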
3808 static void gem_enable_flow_filters(struct macb *bp, bool enable)
3809 {
3810 struct net_device *netdev = bp->dev;
3811 struct ethtool_rx_fs_item *item;
3812 u32 t2_scr;
3813 int num_t2_scr;
3814
3815 if (!(netdev->features & NETIF_F_NTUPLE))
3816 return;
3817
3818 num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
3819
3820 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3821 struct ethtool_rx_flow_spec *fs = &item->fs;
3822 struct ethtool_tcpip4_spec *tp4sp_m;
3823
3824 if (fs->location >= num_t2_scr)
3825 continue;
3826
3827 t2_scr = gem_readl_n(bp, SCRT2, fs->location);
3828
3829 /* enable/disable screener regs for the flow entry */
3830 t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
3831
3832 /* only enable fields with no masking */
3833 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3834
3835 if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
3836 t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
3837 else
3838 t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
3839
3840 if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
3841 t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
3842 else
3843 t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
3844
3845 if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
3846 t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
3847 else
3848 t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
3849
3850 gem_writel_n(bp, SCRT2, fs->location, t2_scr);
3851 }
3852 }
3853
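/* Program the type-2 compare registers for one flow rule: compare A
 * holds the IPv4 source address, compare B the destination address and
 * compare C a 16- or 32-bit port match. The screener register is then
 * pointed at the populated compare units and at the target queue.
 */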
3854 static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
3855 {
3856 struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
3857 uint16_t index = fs->location;
3858 u32 w0, w1, t2_scr;
3859 bool cmp_a = false;
3860 bool cmp_b = false;
3861 bool cmp_c = false;
3862
3863 if (!macb_is_gem(bp))
3864 return;
3865
3866 tp4sp_v = &(fs->h_u.tcp_ip4_spec);
3867 tp4sp_m = &(fs->m_u.tcp_ip4_spec);
3868
3869 /* program this field only when an exact match (all-ones mask) is requested */
3870 if (tp4sp_m->ip4src == 0xFFFFFFFF) {
3871 /* 1st compare reg - IP source address */
3872 w0 = 0;
3873 w1 = 0;
3874 w0 = tp4sp_v->ip4src;
3875 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3876 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3877 w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
3878 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
3879 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
3880 cmp_a = true;
3881 }
3882
3883 /* program this field only when an exact match (all-ones mask) is requested */
3884 if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
3885 /* 2nd compare reg - IP destination address */
3886 w0 = 0;
3887 w1 = 0;
3888 w0 = tp4sp_v->ip4dst;
3889 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3890 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
3891 w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
3892 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
3893 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
3894 cmp_b = true;
3895 }
3896
3897 /* program the port compare only when at least one port requires an exact match */
3898 if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
3899 /* 3rd compare reg - source port, destination port */
3900 w0 = 0;
3901 w1 = 0;
3902 w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
3903 if (tp4sp_m->psrc == tp4sp_m->pdst) {
3904 w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
3905 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3906 w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
3907 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3908 } else {
3909 /* only one port definition */
3910 w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
3911 w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
3912 if (tp4sp_m->psrc == 0xFFFF) { /* src port */
3913 w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
3914 w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
3915 } else { /* dst port */
3916 w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
3917 w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
3918 }
3919 }
3920 gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
3921 gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
3922 cmp_c = true;
3923 }
3924
3925 t2_scr = 0;
3926 t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
3927 t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
3928 if (cmp_a)
3929 t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
3930 if (cmp_b)
3931 t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
3932 if (cmp_c)
3933 t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
3934 gem_writel_n(bp, SCRT2, index, t2_scr);
3935 }
3936
3937 static int gem_add_flow_filter(struct net_device *netdev,
3938 struct ethtool_rxnfc *cmd)
3939 {
3940 struct macb *bp = netdev_priv(netdev);
3941 struct ethtool_rx_flow_spec *fs = &cmd->fs;
3942 struct ethtool_rx_fs_item *item, *newfs;
3943 unsigned long flags;
3944 int ret = -EINVAL;
3945 bool added = false;
3946
3947 newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
3948 if (!newfs)
3949 return -ENOMEM;
3950 memcpy(&newfs->fs, fs, sizeof(newfs->fs));
3951
3952 netdev_dbg(netdev,
3953 "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
3954 fs->flow_type, (int)fs->ring_cookie, fs->location,
3955 htonl(fs->h_u.tcp_ip4_spec.ip4src),
3956 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
3957 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
3958 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
3959
3960 spin_lock_irqsave(&bp->rx_fs_lock, flags);
3961
3962 /* find correct place to add in list */
3963 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
3964 if (item->fs.location > newfs->fs.location) {
3965 list_add_tail(&newfs->list, &item->list);
3966 added = true;
3967 break;
3968 } else if (item->fs.location == fs->location) {
3969 netdev_err(netdev, "Rule not added: location %d not free!\n",
3970 fs->location);
3971 ret = -EBUSY;
3972 goto err;
3973 }
3974 }
3975 if (!added)
3976 list_add_tail(&newfs->list, &bp->rx_fs_list.list);
3977
3978 gem_prog_cmp_regs(bp, fs);
3979 bp->rx_fs_list.count++;
3980 /* enable filtering if NTUPLE on */
3981 gem_enable_flow_filters(bp, 1);
3982
3983 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3984 return 0;
3985
3986 err:
3987 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
3988 kfree(newfs);
3989 return ret;
3990 }
3991
3992 static int gem_del_flow_filter(struct net_device *netdev,
3993 struct ethtool_rxnfc *cmd)
3994 {
3995 struct macb *bp = netdev_priv(netdev);
3996 struct ethtool_rx_fs_item *item;
3997 struct ethtool_rx_flow_spec *fs;
3998 unsigned long flags;
3999
4000 spin_lock_irqsave(&bp->rx_fs_lock, flags);
4001
4002 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
4003 if (item->fs.location == cmd->fs.location) {
4004 /* disable screener regs for the flow entry */
4005 fs = &(item->fs);
4006 netdev_dbg(netdev,
4007 "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
4008 fs->flow_type, (int)fs->ring_cookie, fs->location,
4009 htonl(fs->h_u.tcp_ip4_spec.ip4src),
4010 htonl(fs->h_u.tcp_ip4_spec.ip4dst),
4011 be16_to_cpu(fs->h_u.tcp_ip4_spec.psrc),
4012 be16_to_cpu(fs->h_u.tcp_ip4_spec.pdst));
4013
4014 gem_writel_n(bp, SCRT2, fs->location, 0);
4015
4016 list_del(&item->list);
4017 bp->rx_fs_list.count--;
4018 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
4019 kfree(item);
4020 return 0;
4021 }
4022 }
4023
4024 spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
4025 return -EINVAL;
4026 }
4027
4028 static int gem_get_flow_entry(struct net_device *netdev,
4029 struct ethtool_rxnfc *cmd)
4030 {
4031 struct macb *bp = netdev_priv(netdev);
4032 struct ethtool_rx_fs_item *item;
4033
4034 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
4035 if (item->fs.location == cmd->fs.location) {
4036 memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
4037 return 0;
4038 }
4039 }
4040 return -EINVAL;
4041 }
4042
4043 static int gem_get_all_flow_entries(struct net_device *netdev,
4044 struct ethtool_rxnfc *cmd, u32 *rule_locs)
4045 {
4046 struct macb *bp = netdev_priv(netdev);
4047 struct ethtool_rx_fs_item *item;
4048 uint32_t cnt = 0;
4049
4050 list_for_each_entry(item, &bp->rx_fs_list.list, list) {
4051 if (cnt == cmd->rule_cnt)
4052 return -EMSGSIZE;
4053 rule_locs[cnt] = item->fs.location;
4054 cnt++;
4055 }
4056 cmd->data = bp->max_tuples;
4057 cmd->rule_cnt = cnt;
4058
4059 return 0;
4060 }
4061
4062 static u32 gem_get_rx_ring_count(struct net_device *netdev)
4063 {
4064 struct macb *bp = netdev_priv(netdev);
4065
4066 return bp->num_queues;
4067 }
4068
4069 static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
4070 u32 *rule_locs)
4071 {
4072 struct macb *bp = netdev_priv(netdev);
4073 int ret = 0;
4074
4075 switch (cmd->cmd) {
4076 case ETHTOOL_GRXCLSRLCNT:
4077 cmd->rule_cnt = bp->rx_fs_list.count;
4078 break;
4079 case ETHTOOL_GRXCLSRULE:
4080 ret = gem_get_flow_entry(netdev, cmd);
4081 break;
4082 case ETHTOOL_GRXCLSRLALL:
4083 ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
4084 break;
4085 default:
4086 netdev_err(netdev,
4087 "Command parameter %d is not supported\n", cmd->cmd);
4088 ret = -EOPNOTSUPP;
4089 }
4090
4091 return ret;
4092 }
4093
4094 static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
4095 {
4096 struct macb *bp = netdev_priv(netdev);
4097 int ret;
4098
4099 if (!(netdev->hw_features & NETIF_F_NTUPLE))
4100 return -EOPNOTSUPP;
4101
4102 switch (cmd->cmd) {
4103 case ETHTOOL_SRXCLSRLINS:
4104 if ((cmd->fs.location >= bp->max_tuples)
4105 || (cmd->fs.ring_cookie >= bp->num_queues)) {
4106 ret = -EINVAL;
4107 break;
4108 }
4109 ret = gem_add_flow_filter(netdev, cmd);
4110 break;
4111 case ETHTOOL_SRXCLSRLDEL:
4112 ret = gem_del_flow_filter(netdev, cmd);
4113 break;
4114 default:
4115 netdev_err(netdev,
4116 "Command parameter %d is not supported\n", cmd->cmd);
4117 ret = -EOPNOTSUPP;
4118 }
4119
4120 return ret;
4121 }
4122
4123 static const struct ethtool_ops macb_ethtool_ops = {
4124 .get_regs_len = macb_get_regs_len,
4125 .get_regs = macb_get_regs,
4126 .get_link = ethtool_op_get_link,
4127 .get_ts_info = ethtool_op_get_ts_info,
4128 .get_pause_stats = macb_get_pause_stats,
4129 .get_eth_mac_stats = macb_get_eth_mac_stats,
4130 .get_eth_phy_stats = macb_get_eth_phy_stats,
4131 .get_rmon_stats = macb_get_rmon_stats,
4132 .get_wol = macb_get_wol,
4133 .set_wol = macb_set_wol,
4134 .get_link_ksettings = macb_get_link_ksettings,
4135 .set_link_ksettings = macb_set_link_ksettings,
4136 .get_ringparam = macb_get_ringparam,
4137 .set_ringparam = macb_set_ringparam,
4138 };
4139
4140 static int macb_get_eee(struct net_device *dev, struct ethtool_keee *eee)
4141 {
4142 struct macb *bp = netdev_priv(dev);
4143
4144 return phylink_ethtool_get_eee(bp->phylink, eee);
4145 }
4146
4147 static int macb_set_eee(struct net_device *dev, struct ethtool_keee *eee)
4148 {
4149 struct macb *bp = netdev_priv(dev);
4150
4151 return phylink_ethtool_set_eee(bp->phylink, eee);
4152 }
4153
4154 static const struct ethtool_ops gem_ethtool_ops = {
4155 .get_regs_len = macb_get_regs_len,
4156 .get_regs = macb_get_regs,
4157 .get_wol = macb_get_wol,
4158 .set_wol = macb_set_wol,
4159 .get_link = ethtool_op_get_link,
4160 .get_ts_info = macb_get_ts_info,
4161 .get_ethtool_stats = gem_get_ethtool_stats,
4162 .get_strings = gem_get_ethtool_strings,
4163 .get_sset_count = gem_get_sset_count,
4164 .get_pause_stats = gem_get_pause_stats,
4165 .get_eth_mac_stats = gem_get_eth_mac_stats,
4166 .get_eth_phy_stats = gem_get_eth_phy_stats,
4167 .get_rmon_stats = gem_get_rmon_stats,
4168 .get_link_ksettings = macb_get_link_ksettings,
4169 .set_link_ksettings = macb_set_link_ksettings,
4170 .get_ringparam = macb_get_ringparam,
4171 .set_ringparam = macb_set_ringparam,
4172 .get_rxnfc = gem_get_rxnfc,
4173 .set_rxnfc = gem_set_rxnfc,
4174 .get_rx_ring_count = gem_get_rx_ring_count,
4175 .nway_reset = phy_ethtool_nway_reset,
4176 .get_eee = macb_get_eee,
4177 .set_eee = macb_set_eee,
4178 };
4179
4180 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4181 {
4182 struct macb *bp = netdev_priv(dev);
4183
4184 if (!netif_running(dev))
4185 return -EINVAL;
4186
4187 return phylink_mii_ioctl(bp->phylink, rq, cmd);
4188 }
4189
4190 static int macb_hwtstamp_get(struct net_device *dev,
4191 struct kernel_hwtstamp_config *cfg)
4192 {
4193 struct macb *bp = netdev_priv(dev);
4194
4195 if (!netif_running(dev))
4196 return -EINVAL;
4197
4198 if (!bp->ptp_info)
4199 return -EOPNOTSUPP;
4200
4201 return bp->ptp_info->get_hwtst(dev, cfg);
4202 }
4203
4204 static int macb_hwtstamp_set(struct net_device *dev,
4205 struct kernel_hwtstamp_config *cfg,
4206 struct netlink_ext_ack *extack)
4207 {
4208 struct macb *bp = netdev_priv(dev);
4209
4210 if (!netif_running(dev))
4211 return -EINVAL;
4212
4213 if (!bp->ptp_info)
4214 return -EOPNOTSUPP;
4215
4216 return bp->ptp_info->set_hwtst(dev, cfg, extack);
4217 }
4218
4219 static inline void macb_set_txcsum_feature(struct macb *bp,
4220 netdev_features_t features)
4221 {
4222 u32 val;
4223
4224 if (!macb_is_gem(bp))
4225 return;
4226
4227 val = gem_readl(bp, DMACFG);
4228 if (features & NETIF_F_HW_CSUM)
4229 val |= GEM_BIT(TXCOEN);
4230 else
4231 val &= ~GEM_BIT(TXCOEN);
4232
4233 gem_writel(bp, DMACFG, val);
4234 }
4235
4236 static inline void macb_set_rxcsum_feature(struct macb *bp,
4237 netdev_features_t features)
4238 {
4239 struct net_device *netdev = bp->dev;
4240 u32 val;
4241
4242 if (!macb_is_gem(bp))
4243 return;
4244
4245 val = gem_readl(bp, NCFGR);
4246 if ((features & NETIF_F_RXCSUM) && !(netdev->flags & IFF_PROMISC))
4247 val |= GEM_BIT(RXCOEN);
4248 else
4249 val &= ~GEM_BIT(RXCOEN);
4250
4251 gem_writel(bp, NCFGR, val);
4252 }
4253
4254 static inline void macb_set_rxflow_feature(struct macb *bp,
4255 netdev_features_t features)
4256 {
4257 if (!macb_is_gem(bp))
4258 return;
4259
4260 gem_enable_flow_filters(bp, !!(features & NETIF_F_NTUPLE));
4261 }
4262
4263 static int macb_set_features(struct net_device *netdev,
4264 netdev_features_t features)
4265 {
4266 struct macb *bp = netdev_priv(netdev);
4267 netdev_features_t changed = features ^ netdev->features;
4268
4269 /* TX checksum offload */
4270 if (changed & NETIF_F_HW_CSUM)
4271 macb_set_txcsum_feature(bp, features);
4272
4273 /* RX checksum offload */
4274 if (changed & NETIF_F_RXCSUM)
4275 macb_set_rxcsum_feature(bp, features);
4276
4277 /* RX Flow Filters */
4278 if (changed & NETIF_F_NTUPLE)
4279 macb_set_rxflow_feature(bp, features);
4280
4281 return 0;
4282 }
4283
4284 static void macb_restore_features(struct macb *bp)
4285 {
4286 struct net_device *netdev = bp->dev;
4287 netdev_features_t features = netdev->features;
4288 struct ethtool_rx_fs_item *item;
4289
4290 /* TX checksum offload */
4291 macb_set_txcsum_feature(bp, features);
4292
4293 /* RX checksum offload */
4294 macb_set_rxcsum_feature(bp, features);
4295
4296 /* RX Flow Filters */
4297 list_for_each_entry(item, &bp->rx_fs_list.list, list)
4298 gem_prog_cmp_regs(bp, &item->fs);
4299
4300 macb_set_rxflow_feature(bp, features);
4301 }
4302
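/* taprio offload: validate every schedule entry first, then commit the
 * whole configuration to hardware under the lock. Each entry maps one
 * gate (exactly one queue) to ENST start/on/off times; the on/off
 * durations are converted from nanoseconds to byte times at the
 * current link speed.
 *
 * An illustrative (not normative) two-queue schedule from user space:
 *   tc qdisc replace dev eth0 parent root taprio num_tc 2 \
 *      map 0 1 queues 1@0 1@1 base-time 0 \
 *      sched-entry S 01 300000 sched-entry S 02 300000 flags 0x2
 */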
4303 static int macb_taprio_setup_replace(struct net_device *ndev,
4304 struct tc_taprio_qopt_offload *conf)
4305 {
4306 u64 total_on_time = 0, start_time_sec = 0, start_time = conf->base_time;
4307 u32 configured_queues = 0, speed = 0, start_time_nsec;
4308 struct macb_queue_enst_config *enst_queue;
4309 struct tc_taprio_sched_entry *entry;
4310 struct macb *bp = netdev_priv(ndev);
4311 struct ethtool_link_ksettings kset;
4312 struct macb_queue *queue;
4313 u32 queue_mask;
4314 u8 queue_id;
4315 size_t i;
4316 int err;
4317
4318 if (conf->num_entries > bp->num_queues) {
4319 netdev_err(ndev, "Too many TAPRIO entries: %zu > %d queues\n",
4320 conf->num_entries, bp->num_queues);
4321 return -EINVAL;
4322 }
4323
4324 if (conf->base_time < 0) {
4325 netdev_err(ndev, "Invalid base_time: must be 0 or positive, got %lld\n",
4326 conf->base_time);
4327 return -ERANGE;
4328 }
4329
4330 /* Get the current link speed */
4331 err = phylink_ethtool_ksettings_get(bp->phylink, &kset);
4332 if (unlikely(err)) {
4333 netdev_err(ndev, "Failed to get link settings: %d\n", err);
4334 return err;
4335 }
4336
4337 speed = kset.base.speed;
4338 if (unlikely(speed <= 0)) {
4339 netdev_err(ndev, "Invalid speed: %d\n", speed);
4340 return -EINVAL;
4341 }
4342
4343 enst_queue = kcalloc(conf->num_entries, sizeof(*enst_queue), GFP_KERNEL);
4344 if (unlikely(!enst_queue))
4345 return -ENOMEM;
4346
4347 /* Pre-validate all entries before making any hardware changes */
4348 for (i = 0; i < conf->num_entries; i++) {
4349 entry = &conf->entries[i];
4350
4351 if (entry->command != TC_TAPRIO_CMD_SET_GATES) {
4352 netdev_err(ndev, "Entry %zu: unsupported command %d\n",
4353 i, entry->command);
4354 err = -EOPNOTSUPP;
4355 goto cleanup;
4356 }
4357
4358 /* Validate gate_mask: must be nonzero, single queue, and within range */
4359 if (!is_power_of_2(entry->gate_mask)) {
4360 netdev_err(ndev, "Entry %zu: gate_mask 0x%x is not a power of 2 (only one queue per entry allowed)\n",
4361 i, entry->gate_mask);
4362 err = -EINVAL;
4363 goto cleanup;
4364 }
4365
4366 /* gate_mask must not select queues outside the valid queues */
4367 queue_id = order_base_2(entry->gate_mask);
4368 if (queue_id >= bp->num_queues) {
4369 netdev_err(ndev, "Entry %zu: gate_mask 0x%x exceeds queue range (max_queues=%d)\n",
4370 i, entry->gate_mask, bp->num_queues);
4371 err = -EINVAL;
4372 goto cleanup;
4373 }
4374
4375 /* Check for start time limits */
4376 start_time_sec = start_time;
4377 start_time_nsec = do_div(start_time_sec, NSEC_PER_SEC);
4378 if (start_time_sec > GENMASK(GEM_START_TIME_SEC_SIZE - 1, 0)) {
4379 netdev_err(ndev, "Entry %zu: Start time %llu s exceeds hardware limit\n",
4380 i, start_time_sec);
4381 err = -ERANGE;
4382 goto cleanup;
4383 }
4384
4385 /* Check for on time limit */
4386 if (entry->interval > enst_max_hw_interval(speed)) {
4387 netdev_err(ndev, "Entry %zu: interval %u ns exceeds hardware limit %llu ns\n",
4388 i, entry->interval, enst_max_hw_interval(speed));
4389 err = -ERANGE;
4390 goto cleanup;
4391 }
4392
4393 /* Check for off time limit */
4394 if ((conf->cycle_time - entry->interval) > enst_max_hw_interval(speed)) {
4395 netdev_err(ndev, "Entry %zu: off_time %llu ns exceeds hardware limit %llu ns\n",
4396 i, conf->cycle_time - entry->interval,
4397 enst_max_hw_interval(speed));
4398 err = -ERANGE;
4399 goto cleanup;
4400 }
4401
4402 enst_queue[i].queue_id = queue_id;
4403 enst_queue[i].start_time_mask =
4404 (start_time_sec << GEM_START_TIME_SEC_OFFSET) |
4405 start_time_nsec;
4406 enst_queue[i].on_time_bytes =
4407 enst_ns_to_hw_units(entry->interval, speed);
4408 enst_queue[i].off_time_bytes =
4409 enst_ns_to_hw_units(conf->cycle_time - entry->interval, speed);
4410
4411 configured_queues |= entry->gate_mask;
4412 total_on_time += entry->interval;
4413 start_time += entry->interval;
4414 }
4415
4416 /* Check total interval doesn't exceed cycle time */
4417 if (total_on_time > conf->cycle_time) {
4418 netdev_err(ndev, "Total ON %llu ns exceeds cycle time %llu ns\n",
4419 total_on_time, conf->cycle_time);
4420 err = -EINVAL;
4421 goto cleanup;
4422 }
4423
4424 netdev_dbg(ndev, "TAPRIO setup: %zu entries, base_time=%lld ns, cycle_time=%llu ns\n",
4425 conf->num_entries, conf->base_time, conf->cycle_time);
4426
4427 /* All validations passed - proceed with hardware configuration */
4428 scoped_guard(spinlock_irqsave, &bp->lock) {
4429 /* Disable ENST queues if running before configuring */
4430 queue_mask = BIT_U32(bp->num_queues) - 1;
4431 gem_writel(bp, ENST_CONTROL,
4432 queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
4433
4434 for (i = 0; i < conf->num_entries; i++) {
4435 queue = &bp->queues[enst_queue[i].queue_id];
4436 /* Configure queue timing registers */
4437 queue_writel(queue, ENST_START_TIME,
4438 enst_queue[i].start_time_mask);
4439 queue_writel(queue, ENST_ON_TIME,
4440 enst_queue[i].on_time_bytes);
4441 queue_writel(queue, ENST_OFF_TIME,
4442 enst_queue[i].off_time_bytes);
4443 }
4444
4445 /* Enable ENST for all configured queues in one write */
4446 gem_writel(bp, ENST_CONTROL, configured_queues);
4447 }
4448
4449 netdev_info(ndev, "TAPRIO configuration completed successfully: %zu entries, %d queues configured\n",
4450 conf->num_entries, hweight32(configured_queues));
4451
4452 cleanup:
4453 kfree(enst_queue);
4454 return err;
4455 }
4456
4457 static void macb_taprio_destroy(struct net_device *ndev)
4458 {
4459 struct macb *bp = netdev_priv(ndev);
4460 struct macb_queue *queue;
4461 u32 queue_mask;
4462 unsigned int q;
4463
4464 netdev_reset_tc(ndev);
4465 queue_mask = BIT_U32(bp->num_queues) - 1;
4466
4467 scoped_guard(spinlock_irqsave, &bp->lock) {
4468 /* Single disable command for all queues */
4469 gem_writel(bp, ENST_CONTROL,
4470 queue_mask << GEM_ENST_DISABLE_QUEUE_OFFSET);
4471
4472 /* Clear all queue ENST registers in batch */
4473 for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
4474 queue_writel(queue, ENST_START_TIME, 0);
4475 queue_writel(queue, ENST_ON_TIME, 0);
4476 queue_writel(queue, ENST_OFF_TIME, 0);
4477 }
4478 }
4479 netdev_info(ndev, "TAPRIO destroy: All gates disabled\n");
4480 }
4481
4482 static int macb_setup_taprio(struct net_device *ndev,
4483 struct tc_taprio_qopt_offload *taprio)
4484 {
4485 struct macb *bp = netdev_priv(ndev);
4486 int err = 0;
4487
4488 if (unlikely(!(ndev->hw_features & NETIF_F_HW_TC)))
4489 return -EOPNOTSUPP;
4490
4491 /* Check if the device is in runtime suspend */
4492 if (unlikely(pm_runtime_suspended(&bp->pdev->dev))) {
4493 netdev_err(ndev, "Device is in runtime suspend\n");
4494 return -EOPNOTSUPP;
4495 }
4496
4497 switch (taprio->cmd) {
4498 case TAPRIO_CMD_REPLACE:
4499 err = macb_taprio_setup_replace(ndev, taprio);
4500 break;
4501 case TAPRIO_CMD_DESTROY:
4502 macb_taprio_destroy(ndev);
4503 break;
4504 default:
4505 err = -EOPNOTSUPP;
4506 }
4507
4508 return err;
4509 }
4510
4511 static int macb_setup_tc(struct net_device *dev, enum tc_setup_type type,
4512 void *type_data)
4513 {
4514 if (!dev || !type_data)
4515 return -EINVAL;
4516
4517 switch (type) {
4518 case TC_SETUP_QDISC_TAPRIO:
4519 return macb_setup_taprio(dev, type_data);
4520 default:
4521 return -EOPNOTSUPP;
4522 }
4523 }
4524
4525 static const struct net_device_ops macb_netdev_ops = {
4526 .ndo_open = macb_open,
4527 .ndo_stop = macb_close,
4528 .ndo_start_xmit = macb_start_xmit,
4529 .ndo_set_rx_mode = macb_set_rx_mode,
4530 .ndo_get_stats64 = macb_get_stats,
4531 .ndo_eth_ioctl = macb_ioctl,
4532 .ndo_validate_addr = eth_validate_addr,
4533 .ndo_change_mtu = macb_change_mtu,
4534 .ndo_set_mac_address = macb_set_mac_addr,
4535 #ifdef CONFIG_NET_POLL_CONTROLLER
4536 .ndo_poll_controller = macb_poll_controller,
4537 #endif
4538 .ndo_set_features = macb_set_features,
4539 .ndo_features_check = macb_features_check,
4540 .ndo_hwtstamp_set = macb_hwtstamp_set,
4541 .ndo_hwtstamp_get = macb_hwtstamp_get,
4542 .ndo_setup_tc = macb_setup_tc,
4543 };
4544
4545 /* Configure peripheral capabilities according to device tree
4546 * and integration options used
4547 */
4548 static void macb_configure_caps(struct macb *bp,
4549 const struct macb_config *dt_conf)
4550 {
4551 u32 dcfg;
4552
4553 bp->caps = dt_conf->caps;
4554
4555 if (!dt_conf->usrio)
4556 bp->caps |= MACB_CAPS_USRIO_DISABLED;
4557
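/* On GEM, refine the static capabilities with what the design
 * configuration registers report was actually synthesized in.
 */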
4558 if (hw_is_gem(bp->regs, bp->native_io)) {
4559 bp->caps |= MACB_CAPS_MACB_IS_GEM;
4560
4561 dcfg = gem_readl(bp, DCFG1);
4562 if (GEM_BFEXT(IRQCOR, dcfg) == 0)
4563 bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
4564 if (GEM_BFEXT(NO_PCS, dcfg) == 0)
4565 bp->caps |= MACB_CAPS_PCS;
4566 if (!(dcfg & GEM_BIT(USERIO)))
4567 bp->caps |= MACB_CAPS_USRIO_DISABLED;
4568 dcfg = gem_readl(bp, DCFG12);
4569 if (GEM_BFEXT(HIGH_SPEED, dcfg) == 1)
4570 bp->caps |= MACB_CAPS_HIGH_SPEED;
4571 dcfg = gem_readl(bp, DCFG2);
4572 if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
4573 bp->caps |= MACB_CAPS_FIFO_MODE;
4574 if (GEM_BFEXT(PBUF_RSC, gem_readl(bp, DCFG6)))
4575 bp->caps |= MACB_CAPS_RSC;
4576 if (gem_has_ptp(bp)) {
4577 if (!GEM_BFEXT(TSU, gem_readl(bp, DCFG5)))
4578 dev_err(&bp->pdev->dev,
4579 "GEM doesn't support hardware ptp.\n");
4580 else {
4581 #ifdef CONFIG_MACB_USE_HWSTAMP
4582 bp->caps |= MACB_CAPS_DMA_PTP;
4583 bp->ptp_info = &gem_ptp_info;
4584 #endif
4585 }
4586 }
4587 }
4588
4589 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
4590 }
4591
4592 static int macb_probe_queues(struct device *dev, void __iomem *mem, bool native_io)
4593 {
4594 /* BIT(0) is never set but queue 0 always exists. */
4595 unsigned int queue_mask = 0x1;
4596
4597 /* Use hw_is_gem() as MACB_CAPS_MACB_IS_GEM is not yet positioned. */
4598 if (hw_is_gem(mem, native_io)) {
4599 if (native_io)
4600 queue_mask |= __raw_readl(mem + GEM_DCFG6) & 0xFF;
4601 else
4602 queue_mask |= readl_relaxed(mem + GEM_DCFG6) & 0xFF;
4603
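/* A valid mask is contiguous from bit 0, e.g. 0x7, where fls() and
 * ffz() both return 3; a hole such as in 0x5 makes them differ.
 */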
4604 if (fls(queue_mask) != ffz(queue_mask)) {
4605 dev_err(dev, "queue mask %#x has a hole\n", queue_mask);
4606 return -EINVAL;
4607 }
4608 }
4609
4610 return hweight32(queue_mask);
4611 }
4612
4613 static void macb_clks_disable(struct clk *pclk, struct clk *hclk, struct clk *tx_clk,
4614 struct clk *rx_clk, struct clk *tsu_clk)
4615 {
4616 struct clk_bulk_data clks[] = {
4617 { .clk = tsu_clk, },
4618 { .clk = rx_clk, },
4619 { .clk = pclk, },
4620 { .clk = hclk, },
4621 { .clk = tx_clk },
4622 };
4623
4624 clk_bulk_disable_unprepare(ARRAY_SIZE(clks), clks);
4625 }
4626
4627 static int macb_clk_init_dflt(struct platform_device *pdev, struct clk **pclk,
4628 struct clk **hclk, struct clk **tx_clk,
4629 struct clk **rx_clk, struct clk **tsu_clk)
4630 {
4631 struct macb_platform_data *pdata;
4632 int err;
4633
4634 pdata = dev_get_platdata(&pdev->dev);
4635 if (pdata) {
4636 *pclk = pdata->pclk;
4637 *hclk = pdata->hclk;
4638 } else {
4639 *pclk = devm_clk_get(&pdev->dev, "pclk");
4640 *hclk = devm_clk_get(&pdev->dev, "hclk");
4641 }
4642
4643 if (IS_ERR_OR_NULL(*pclk))
4644 return dev_err_probe(&pdev->dev,
4645 IS_ERR(*pclk) ? PTR_ERR(*pclk) : -ENODEV,
4646 "failed to get pclk\n");
4647
4648 if (IS_ERR_OR_NULL(*hclk))
4649 return dev_err_probe(&pdev->dev,
4650 IS_ERR(*hclk) ? PTR_ERR(*hclk) : -ENODEV,
4651 "failed to get hclk\n");
4652
4653 *tx_clk = devm_clk_get_optional(&pdev->dev, "tx_clk");
4654 if (IS_ERR(*tx_clk))
4655 return PTR_ERR(*tx_clk);
4656
4657 *rx_clk = devm_clk_get_optional(&pdev->dev, "rx_clk");
4658 if (IS_ERR(*rx_clk))
4659 return PTR_ERR(*rx_clk);
4660
4661 *tsu_clk = devm_clk_get_optional(&pdev->dev, "tsu_clk");
4662 if (IS_ERR(*tsu_clk))
4663 return PTR_ERR(*tsu_clk);
4664
4665 err = clk_prepare_enable(*pclk);
4666 if (err) {
4667 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
4668 return err;
4669 }
4670
4671 err = clk_prepare_enable(*hclk);
4672 if (err) {
4673 dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
4674 goto err_disable_pclk;
4675 }
4676
4677 err = clk_prepare_enable(*tx_clk);
4678 if (err) {
4679 dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
4680 goto err_disable_hclk;
4681 }
4682
4683 err = clk_prepare_enable(*rx_clk);
4684 if (err) {
4685 dev_err(&pdev->dev, "failed to enable rx_clk (%d)\n", err);
4686 goto err_disable_txclk;
4687 }
4688
4689 err = clk_prepare_enable(*tsu_clk);
4690 if (err) {
4691 dev_err(&pdev->dev, "failed to enable tsu_clk (%d)\n", err);
4692 goto err_disable_rxclk;
4693 }
4694
4695 return 0;
4696
4697 err_disable_rxclk:
4698 clk_disable_unprepare(*rx_clk);
4699
4700 err_disable_txclk:
4701 clk_disable_unprepare(*tx_clk);
4702
4703 err_disable_hclk:
4704 clk_disable_unprepare(*hclk);
4705
4706 err_disable_pclk:
4707 clk_disable_unprepare(*pclk);
4708
4709 return err;
4710 }
4711
4712 static int macb_clk_init(struct platform_device *pdev, struct clk **pclk,
4713 struct clk **hclk, struct clk **tx_clk,
4714 struct clk **rx_clk, struct clk **tsu_clk,
4715 const struct macb_config *config)
4716 {
4717 if (config->clk_init)
4718 return config->clk_init(pdev, pclk, hclk, tx_clk, rx_clk,
4719 tsu_clk);
4720 else
4721 return macb_clk_init_dflt(pdev, pclk, hclk, tx_clk, rx_clk,
4722 tsu_clk);
4723 }
4724
4725 static int macb_init_dflt(struct platform_device *pdev)
4726 {
4727 struct net_device *dev = platform_get_drvdata(pdev);
4728 unsigned int hw_q, q;
4729 struct macb *bp = netdev_priv(dev);
4730 struct macb_queue *queue;
4731 int err;
4732 u32 val, reg;
4733
4734 bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
4735 bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
4736
4737 /* set the queue register mapping once and for all: queue0 has a special
4738 * register mapping but we don't want to test the queue index and then
4739 * compute the corresponding register offset at run time.
4740 */
4741 for (hw_q = 0, q = 0; hw_q < bp->num_queues; ++hw_q) {
4742 queue = &bp->queues[q];
4743 queue->bp = bp;
4744 spin_lock_init(&queue->tx_ptr_lock);
4745 netif_napi_add(dev, &queue->napi_rx, macb_rx_poll);
4746 netif_napi_add_tx(dev, &queue->napi_tx, macb_tx_poll);
4747 if (hw_q) {
4748 queue->ISR = GEM_ISR(hw_q - 1);
4749 queue->IER = GEM_IER(hw_q - 1);
4750 queue->IDR = GEM_IDR(hw_q - 1);
4751 queue->IMR = GEM_IMR(hw_q - 1);
4752 queue->TBQP = GEM_TBQP(hw_q - 1);
4753 queue->RBQP = GEM_RBQP(hw_q - 1);
4754 queue->RBQS = GEM_RBQS(hw_q - 1);
4755 } else {
4756 /* queue0 uses legacy registers */
4757 queue->ISR = MACB_ISR;
4758 queue->IER = MACB_IER;
4759 queue->IDR = MACB_IDR;
4760 queue->IMR = MACB_IMR;
4761 queue->TBQP = MACB_TBQP;
4762 queue->RBQP = MACB_RBQP;
4763 }
4764
4765 queue->ENST_START_TIME = GEM_ENST_START_TIME(hw_q);
4766 queue->ENST_ON_TIME = GEM_ENST_ON_TIME(hw_q);
4767 queue->ENST_OFF_TIME = GEM_ENST_OFF_TIME(hw_q);
4768
4769 /* get irq: here we use the linux queue index, not the hardware
4770 * queue index. the queue irq definitions in the device tree
4771 * must remove the optional gaps that could exist in the
4772 * hardware queue mask.
4773 */
4774 queue->irq = platform_get_irq(pdev, q);
4775 err = devm_request_irq(&pdev->dev, queue->irq, macb_interrupt,
4776 IRQF_SHARED, dev->name, queue);
4777 if (err) {
4778 dev_err(&pdev->dev,
4779 "Unable to request IRQ %d (error %d)\n",
4780 queue->irq, err);
4781 return err;
4782 }
4783
4784 INIT_WORK(&queue->tx_error_task, macb_tx_error_task);
4785 q++;
4786 }
4787
4788 dev->netdev_ops = &macb_netdev_ops;
4789
4790 /* setup appropriate routines according to adapter type */
4791 if (macb_is_gem(bp)) {
4792 bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
4793 bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
4794 bp->macbgem_ops.mog_init_rings = gem_init_rings;
4795 bp->macbgem_ops.mog_rx = gem_rx;
4796 dev->ethtool_ops = &gem_ethtool_ops;
4797 } else {
4798 bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
4799 bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
4800 bp->macbgem_ops.mog_init_rings = macb_init_rings;
4801 bp->macbgem_ops.mog_rx = macb_rx;
4802 dev->ethtool_ops = &macb_ethtool_ops;
4803 }
4804
4805 netdev_sw_irq_coalesce_default_on(dev);
4806
4807 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
4808
4809 /* Set features */
4810 dev->hw_features = NETIF_F_SG;
4811
4812 /* Check LSO capability; runtime detection can be overridden by a cap
4813 * flag if the hardware is known to be buggy
4814 */
4815 if (!(bp->caps & MACB_CAPS_NO_LSO) &&
4816 GEM_BFEXT(PBUF_LSO, gem_readl(bp, DCFG6)))
4817 dev->hw_features |= MACB_NETIF_LSO;
4818
4819 /* Checksum offload is only available on gem with packet buffer */
4820 if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
4821 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
4822 if (bp->caps & MACB_CAPS_SG_DISABLED)
4823 dev->hw_features &= ~NETIF_F_SG;
4824 /* Enable HW_TC if hardware supports QBV */
4825 if (bp->caps & MACB_CAPS_QBV)
4826 dev->hw_features |= NETIF_F_HW_TC;
4827
4828 dev->features = dev->hw_features;
4829
4830 /* Check RX Flow Filters support.
4831 * Max Rx flows set by availability of screeners & compare regs:
4832 * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
4833 */
4834 reg = gem_readl(bp, DCFG8);
4835 bp->max_tuples = umin((GEM_BFEXT(SCR2CMP, reg) / 3),
4836 GEM_BFEXT(T2SCR, reg));
4837 INIT_LIST_HEAD(&bp->rx_fs_list.list);
4838 if (bp->max_tuples > 0) {
4839 /* also needs one ethtype match to check IPv4 */
4840 if (GEM_BFEXT(SCR2ETH, reg) > 0) {
4841 /* program this reg now */
4842 reg = 0;
4843 reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
4844 gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
4845 /* Filtering is supported in hw, but don't enable it in the kernel yet */
4846 dev->hw_features |= NETIF_F_NTUPLE;
4847 /* init Rx flow definitions */
4848 bp->rx_fs_list.count = 0;
4849 spin_lock_init(&bp->rx_fs_lock);
4850 } else
4851 bp->max_tuples = 0;
4852 }
4853
4854 if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
4855 val = 0;
4856 if (bp->caps & MACB_CAPS_USRIO_HAS_MII) {
4857 if (phy_interface_mode_is_rgmii(bp->phy_interface))
4858 val = bp->usrio->rgmii;
4859 else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII &&
4860 (bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4861 val = bp->usrio->rmii;
4862 else if (!(bp->caps & MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII))
4863 val = bp->usrio->mii;
4864 }
4865
4866 if (bp->caps & MACB_CAPS_USRIO_HAS_CLKEN)
4867 val |= bp->usrio->clken;
4868
4869 if (bp->caps & MACB_CAPS_USRIO_HAS_REFCLK_SOURCE) {
4870 const char *prop;
4871 bool refclk_ext;
4872 int ret;
4873
4874 /* Default to whatever was set in the match data for
4875 * this device. There are two properties for refclk
4876 * control, but the boolean one is deprecated, so it
4877 * has lower priority to check; no device should have
4878 * both.
4879 */
4880 refclk_ext = bp->usrio->refclk_default_external;
4881
4882 ret = of_property_read_string(pdev->dev.of_node,
4883 "cdns,refclk-source", &prop);
4884 if (!ret) {
4885 if (!strcmp(prop, "external"))
4886 refclk_ext = true;
4887 else
4888 refclk_ext = false;
4889 } else {
4890 ret = of_property_read_bool(pdev->dev.of_node,
4891 "cdns,refclk-ext");
4892 if (ret)
4893 refclk_ext = true;
4894 }
4895
4896 if (refclk_ext)
4897 val |= bp->usrio->refclk;
4898 }
4899
4900 if (bp->caps & MACB_CAPS_USRIO_HAS_TSUCLK_SOURCE)
4901 val |= bp->usrio->tsu_source;
4902
4903 macb_or_gem_writel(bp, USRIO, val);
4904 }
4905
4906 /* Set MII management clock divider */
4907 val = macb_mdc_clk_div(bp);
4908 val |= macb_dbw(bp);
4909 if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII)
4910 val |= GEM_BIT(SGMIIEN) | GEM_BIT(PCSSEL);
4911 macb_writel(bp, NCFGR, val);
4912
4913 return 0;
4914 }
4915
4916 static int macb_init(struct platform_device *pdev,
4917 const struct macb_config *config)
4918 {
4919 if (config->init)
4920 return config->init(pdev);
4921 else
4922 return macb_init_dflt(pdev);
4923 }
4924
4925 static const struct macb_usrio_config at91_default_usrio = {
4926 .mii = MACB_BIT(MII),
4927 .rmii = MACB_BIT(RMII),
4928 .rgmii = GEM_BIT(RGMII),
4929 .clken = MACB_BIT(CLKEN),
4930 };
4931
4932 #if defined(CONFIG_OF)
4933 /* 1518 rounded up */
4934 #define AT91ETHER_MAX_RBUFF_SZ 0x600
4935 /* max number of receive buffers */
4936 #define AT91ETHER_MAX_RX_DESCR 9
4937
4938 static struct sifive_fu540_macb_mgmt *mgmt;
4939
4940 static int at91ether_alloc_coherent(struct macb *lp)
4941 {
4942 struct macb_queue *q = &lp->queues[0];
4943
4944 q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
4945 (AT91ETHER_MAX_RX_DESCR *
4946 macb_dma_desc_get_size(lp)),
4947 &q->rx_ring_dma, GFP_KERNEL);
4948 if (!q->rx_ring)
4949 return -ENOMEM;
4950
4951 q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
4952 AT91ETHER_MAX_RX_DESCR *
4953 AT91ETHER_MAX_RBUFF_SZ,
4954 &q->rx_buffers_dma, GFP_KERNEL);
4955 if (!q->rx_buffers) {
4956 dma_free_coherent(&lp->pdev->dev,
4957 AT91ETHER_MAX_RX_DESCR *
4958 macb_dma_desc_get_size(lp),
4959 q->rx_ring, q->rx_ring_dma);
4960 q->rx_ring = NULL;
4961 return -ENOMEM;
4962 }
4963
4964 return 0;
4965 }
4966
4967 static void at91ether_free_coherent(struct macb *lp)
4968 {
4969 struct macb_queue *q = &lp->queues[0];
4970
4971 if (q->rx_ring) {
4972 dma_free_coherent(&lp->pdev->dev,
4973 AT91ETHER_MAX_RX_DESCR *
4974 macb_dma_desc_get_size(lp),
4975 q->rx_ring, q->rx_ring_dma);
4976 q->rx_ring = NULL;
4977 }
4978
4979 if (q->rx_buffers) {
4980 dma_free_coherent(&lp->pdev->dev,
4981 AT91ETHER_MAX_RX_DESCR *
4982 AT91ETHER_MAX_RBUFF_SZ,
4983 q->rx_buffers, q->rx_buffers_dma);
4984 q->rx_buffers = NULL;
4985 }
4986 }
4987
4988 /* Initialize and start the Receiver and Transmit subsystems */
4989 static int at91ether_start(struct macb *lp)
4990 {
4991 struct macb_queue *q = &lp->queues[0];
4992 struct macb_dma_desc *desc;
4993 dma_addr_t addr;
4994 u32 ctl;
4995 int i, ret;
4996
4997 ret = at91ether_alloc_coherent(lp);
4998 if (ret)
4999 return ret;
5000
5001 addr = q->rx_buffers_dma;
5002 for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
5003 desc = macb_rx_desc(q, i);
5004 macb_set_addr(lp, desc, addr);
5005 desc->ctrl = 0;
5006 addr += AT91ETHER_MAX_RBUFF_SZ;
5007 }
5008
5009 /* Set the Wrap bit on the last descriptor */
5010 desc->addr |= MACB_BIT(RX_WRAP);
5011
5012 /* Reset buffer index */
5013 q->rx_tail = 0;
5014
5015 /* Program address of descriptor list in Rx Buffer Queue register */
5016 macb_writel(lp, RBQP, q->rx_ring_dma);
5017
5018 /* Enable Receive and Transmit */
5019 ctl = macb_readl(lp, NCR);
5020 macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
5021
5022 /* Enable MAC interrupts */
5023 macb_writel(lp, IER, MACB_BIT(RCOMP) |
5024 MACB_BIT(RXUBR) |
5025 MACB_BIT(ISR_TUND) |
5026 MACB_BIT(ISR_RLE) |
5027 MACB_BIT(TCOMP) |
5028 MACB_BIT(ISR_ROVR) |
5029 MACB_BIT(HRESP));
5030
5031 return 0;
5032 }
5033
5034 static void at91ether_stop(struct macb *lp)
5035 {
5036 u32 ctl;
5037
5038 /* Disable MAC interrupts */
5039 macb_writel(lp, IDR, MACB_BIT(RCOMP) |
5040 MACB_BIT(RXUBR) |
5041 MACB_BIT(ISR_TUND) |
5042 MACB_BIT(ISR_RLE) |
5043 MACB_BIT(TCOMP) |
5044 MACB_BIT(ISR_ROVR) |
5045 MACB_BIT(HRESP));
5046
5047 /* Disable Receiver and Transmitter */
5048 ctl = macb_readl(lp, NCR);
5049 macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
5050
5051 /* Free resources. */
5052 at91ether_free_coherent(lp);
5053 }
5054
5055 /* Open the ethernet interface */
5056 static int at91ether_open(struct net_device *dev)
5057 {
5058 struct macb *lp = netdev_priv(dev);
5059 u32 ctl;
5060 int ret;
5061
5062 ret = pm_runtime_resume_and_get(&lp->pdev->dev);
5063 if (ret < 0)
5064 return ret;
5065
5066 /* Clear internal statistics */
5067 ctl = macb_readl(lp, NCR);
5068 macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
5069
5070 macb_set_hwaddr(lp);
5071
5072 ret = at91ether_start(lp);
5073 if (ret)
5074 goto pm_exit;
5075
5076 ret = macb_phylink_connect(lp);
5077 if (ret)
5078 goto stop;
5079
5080 netif_start_queue(dev);
5081
5082 return 0;
5083
5084 stop:
5085 at91ether_stop(lp);
5086 pm_exit:
5087 pm_runtime_put_sync(&lp->pdev->dev);
5088 return ret;
5089 }
5090
5091 /* Close the interface */
5092 static int at91ether_close(struct net_device *dev)
5093 {
5094 struct macb *lp = netdev_priv(dev);
5095
5096 netif_stop_queue(dev);
5097
5098 phylink_stop(lp->phylink);
5099 phylink_disconnect_phy(lp->phylink);
5100
5101 at91ether_stop(lp);
5102
5103 pm_runtime_put(&lp->pdev->dev);
5104
5105 return 0;
5106 }
5107
5108 /* Transmit packet */
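/* The RM9200 EMAC has no TX descriptor ring: a single in-flight frame
 * is handed to the TAR/TCR registers and the queue is stopped until
 * the TCOMP interrupt releases the slot.
 */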
5109 static netdev_tx_t at91ether_start_xmit(struct sk_buff *skb,
5110 struct net_device *dev)
5111 {
5112 struct macb *lp = netdev_priv(dev);
5113
5114 if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
5115 int desc = 0;
5116
5117 netif_stop_queue(dev);
5118
5119 /* Store packet information (to free when Tx completed) */
5120 lp->rm9200_txq[desc].skb = skb;
5121 lp->rm9200_txq[desc].size = skb->len;
5122 lp->rm9200_txq[desc].mapping = dma_map_single(&lp->pdev->dev, skb->data,
5123 skb->len, DMA_TO_DEVICE);
5124 if (dma_mapping_error(&lp->pdev->dev, lp->rm9200_txq[desc].mapping)) {
5125 dev_kfree_skb_any(skb);
5126 dev->stats.tx_dropped++;
5127 netdev_err(dev, "%s: DMA mapping error\n", __func__);
5128 return NETDEV_TX_OK;
5129 }
5130
5131 /* Set address of the data in the Transmit Address register */
5132 macb_writel(lp, TAR, lp->rm9200_txq[desc].mapping);
5133 /* Set length of the packet in the Transmit Control register */
5134 macb_writel(lp, TCR, skb->len);
5135
5136 } else {
5137 netdev_err(dev, "%s called, but device is busy!\n", __func__);
5138 return NETDEV_TX_BUSY;
5139 }
5140
5141 return NETDEV_TX_OK;
5142 }
5143
5144 /* Extract received frames from the buffer descriptors and send them to the upper layers.
5145 * (Called from interrupt context)
5146 */
5147 static void at91ether_rx(struct net_device *dev)
5148 {
5149 struct macb *lp = netdev_priv(dev);
5150 struct macb_queue *q = &lp->queues[0];
5151 struct macb_dma_desc *desc;
5152 unsigned char *p_recv;
5153 struct sk_buff *skb;
5154 unsigned int pktlen;
5155
5156 desc = macb_rx_desc(q, q->rx_tail);
5157 while (desc->addr & MACB_BIT(RX_USED)) {
5158 p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
5159 pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
5160 skb = netdev_alloc_skb(dev, pktlen + 2);
5161 if (skb) {
5162 skb_reserve(skb, 2);
5163 skb_put_data(skb, p_recv, pktlen);
5164
5165 skb->protocol = eth_type_trans(skb, dev);
5166 dev->stats.rx_packets++;
5167 dev->stats.rx_bytes += pktlen;
5168 netif_rx(skb);
5169 } else {
5170 dev->stats.rx_dropped++;
5171 }
5172
5173 if (desc->ctrl & MACB_BIT(RX_MHASH_MATCH))
5174 dev->stats.multicast++;
5175
5176 /* reset ownership bit */
5177 desc->addr &= ~MACB_BIT(RX_USED);
5178
5179 /* wrap after last buffer */
5180 if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
5181 q->rx_tail = 0;
5182 else
5183 q->rx_tail++;
5184
5185 desc = macb_rx_desc(q, q->rx_tail);
5186 }
5187 }
5188
5189 /* MAC interrupt handler */
5190 static irqreturn_t at91ether_interrupt(int irq, void *dev_id)
5191 {
5192 struct net_device *dev = dev_id;
5193 struct macb *lp = netdev_priv(dev);
5194 u32 intstatus, ctl;
5195 unsigned int desc;
5196
5197 /* MAC Interrupt Status register indicates what interrupts are pending.
5198 * It is automatically cleared once read.
5199 */
5200 intstatus = macb_readl(lp, ISR);
5201
5202 /* Receive complete */
5203 if (intstatus & MACB_BIT(RCOMP))
5204 at91ether_rx(dev);
5205
5206 /* Transmit complete */
5207 if (intstatus & MACB_BIT(TCOMP)) {
5208 /* The TCOM bit is set even if the transmission failed */
5209 if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
5210 dev->stats.tx_errors++;
5211
5212 desc = 0;
5213 if (lp->rm9200_txq[desc].skb) {
5214 dev_consume_skb_irq(lp->rm9200_txq[desc].skb);
5215 lp->rm9200_txq[desc].skb = NULL;
5216 dma_unmap_single(&lp->pdev->dev, lp->rm9200_txq[desc].mapping,
5217 lp->rm9200_txq[desc].size, DMA_TO_DEVICE);
5218 dev->stats.tx_packets++;
5219 dev->stats.tx_bytes += lp->rm9200_txq[desc].size;
5220 }
5221 netif_wake_queue(dev);
5222 }
5223
5224 /* Work-around for EMAC Errata section 41.3.1 */
5225 if (intstatus & MACB_BIT(RXUBR)) {
5226 ctl = macb_readl(lp, NCR);
5227 macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
5228 wmb();
5229 macb_writel(lp, NCR, ctl | MACB_BIT(RE));
5230 }
5231
5232 if (intstatus & MACB_BIT(ISR_ROVR))
5233 netdev_err(dev, "ROVR error\n");
5234
5235 return IRQ_HANDLED;
5236 }
5237
5238 #ifdef CONFIG_NET_POLL_CONTROLLER
5239 static void at91ether_poll_controller(struct net_device *dev)
5240 {
5241 unsigned long flags;
5242
5243 local_irq_save(flags);
5244 at91ether_interrupt(dev->irq, dev);
5245 local_irq_restore(flags);
5246 }
5247 #endif
5248
5249 static const struct net_device_ops at91ether_netdev_ops = {
5250 .ndo_open = at91ether_open,
5251 .ndo_stop = at91ether_close,
5252 .ndo_start_xmit = at91ether_start_xmit,
5253 .ndo_get_stats64 = macb_get_stats,
5254 .ndo_set_rx_mode = macb_set_rx_mode,
5255 .ndo_set_mac_address = eth_mac_addr,
5256 .ndo_eth_ioctl = macb_ioctl,
5257 .ndo_validate_addr = eth_validate_addr,
5258 #ifdef CONFIG_NET_POLL_CONTROLLER
5259 .ndo_poll_controller = at91ether_poll_controller,
5260 #endif
5261 .ndo_hwtstamp_set = macb_hwtstamp_set,
5262 .ndo_hwtstamp_get = macb_hwtstamp_get,
5263 };
5264
5265 static int at91ether_clk_init(struct platform_device *pdev, struct clk **pclk,
5266 struct clk **hclk, struct clk **tx_clk,
5267 struct clk **rx_clk, struct clk **tsu_clk)
5268 {
5269 int err;
5270
5271 *hclk = NULL;
5272 *tx_clk = NULL;
5273 *rx_clk = NULL;
5274 *tsu_clk = NULL;
5275
5276 *pclk = devm_clk_get(&pdev->dev, "ether_clk");
5277 if (IS_ERR(*pclk))
5278 return PTR_ERR(*pclk);
5279
5280 err = clk_prepare_enable(*pclk);
5281 if (err) {
5282 dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
5283 return err;
5284 }
5285
5286 return 0;
5287 }
5288
5289 static int at91ether_init(struct platform_device *pdev)
5290 {
5291 struct net_device *dev = platform_get_drvdata(pdev);
5292 struct macb *bp = netdev_priv(dev);
5293 int err;
5294
5295 bp->queues[0].bp = bp;
5296
5297 dev->netdev_ops = &at91ether_netdev_ops;
5298 dev->ethtool_ops = &macb_ethtool_ops;
5299
5300 err = devm_request_irq(&pdev->dev, dev->irq, at91ether_interrupt,
5301 0, dev->name, dev);
5302 if (err)
5303 return err;
5304
5305 macb_writel(bp, NCR, 0);
5306
5307 macb_writel(bp, NCFGR, MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG));
5308
5309 return 0;
5310 }
5311
5312 static unsigned long fu540_macb_tx_recalc_rate(struct clk_hw *hw,
5313 unsigned long parent_rate)
5314 {
5315 return mgmt->rate;
5316 }
5317
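/* Quantize the requested TX clock rate to the three rates the GEMGXL
 * supports (2.5, 25 and 125 MHz for 10/100/1000 Mbps links), warning
 * on out-of-band requests.
 */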
5318 static int fu540_macb_tx_determine_rate(struct clk_hw *hw,
5319 struct clk_rate_request *req)
5320 {
5321 if (WARN_ON(req->rate < 2500000))
5322 req->rate = 2500000;
5323 else if (req->rate == 2500000)
5324 req->rate = 2500000;
5325 else if (WARN_ON(req->rate < 13750000))
5326 req->rate = 2500000;
5327 else if (WARN_ON(req->rate < 25000000))
5328 req->rate = 25000000;
5329 else if (req->rate == 25000000)
5330 req->rate = 25000000;
5331 else if (WARN_ON(req->rate < 75000000))
5332 req->rate = 25000000;
5333 else if (WARN_ON(req->rate < 125000000))
5334 req->rate = 125000000;
5335 else if (req->rate == 125000000)
5336 req->rate = 125000000;
5337 else if (WARN_ON(req->rate > 125000000))
5338 req->rate = 125000000;
5339 else
5340 req->rate = 125000000;
5341
5342 return 0;
5343 }
5344
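/* The single management register appears to act as a speed select: the
 * code below writes 1 for the low-speed rates (2.5/25 MHz) and 0 for
 * 125 MHz. The cached rate is what recalc_rate() reports back to the
 * clock framework.
 */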
static int fu540_macb_tx_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	struct clk_rate_request req;
	int ret;

	clk_hw_init_rate_request(hw, &req, rate);
	ret = fu540_macb_tx_determine_rate(hw, &req);
	if (ret != 0)
		return ret;

	if (req.rate != 125000000)
		iowrite32(1, mgmt->reg);
	else
		iowrite32(0, mgmt->reg);
	mgmt->rate = rate;

	return 0;
}

static const struct clk_ops fu540_c000_ops = {
	.recalc_rate = fu540_macb_tx_recalc_rate,
	.determine_rate = fu540_macb_tx_determine_rate,
	.set_rate = fu540_macb_tx_set_rate,
};

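/* After the default clocks are acquired, register the GEMGXL management
 * register as a synthetic TX clock ("sifive-gemgxl-mgmt") so the common
 * code can adjust the TX rate through the normal clk API.
 */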
static int fu540_c000_clk_init(struct platform_device *pdev, struct clk **pclk,
			       struct clk **hclk, struct clk **tx_clk,
			       struct clk **rx_clk, struct clk **tsu_clk)
{
	struct clk_init_data init;
	int err = 0;

	err = macb_clk_init_dflt(pdev, pclk, hclk, tx_clk, rx_clk, tsu_clk);
	if (err)
		return err;

	mgmt = devm_kzalloc(&pdev->dev, sizeof(*mgmt), GFP_KERNEL);
	if (!mgmt) {
		err = -ENOMEM;
		goto err_disable_clks;
	}

	init.name = "sifive-gemgxl-mgmt";
	init.ops = &fu540_c000_ops;
	init.flags = 0;
	init.num_parents = 0;

	mgmt->rate = 0;
	mgmt->hw.init = &init;

	*tx_clk = devm_clk_register(&pdev->dev, &mgmt->hw);
	if (IS_ERR(*tx_clk)) {
		err = PTR_ERR(*tx_clk);
		goto err_disable_clks;
	}

	err = clk_prepare_enable(*tx_clk);
	if (err) {
		dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n", err);
		*tx_clk = NULL;
		goto err_disable_clks;
	} else {
		dev_info(&pdev->dev, "Registered clk switch '%s'\n", init.name);
	}

	return 0;

err_disable_clks:
	macb_clks_disable(*pclk, *hclk, *tx_clk, *rx_clk, *tsu_clk);

	return err;
}

static int fu540_c000_init(struct platform_device *pdev)
{
	mgmt->reg = devm_platform_ioremap_resource(pdev, 1);
	if (IS_ERR(mgmt->reg))
		return PTR_ERR(mgmt->reg);

	return macb_init_dflt(pdev);
}

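/* Shared init for ZynqMP, Versal, MPFS and PIC64HPSC: bring up the optional
 * SGMII PHY, let the platform firmware fix the GEM configuration where that
 * interface exists, then issue an optional controller-level reset before the
 * default MACB init.
 */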
static int init_reset_optional(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(dev);
	int ret;

	if (bp->phy_interface == PHY_INTERFACE_MODE_SGMII) {
		/* Ensure PHY device used in SGMII mode is ready */
		bp->phy = devm_phy_optional_get(&pdev->dev, NULL);

		if (IS_ERR(bp->phy))
			return dev_err_probe(&pdev->dev, PTR_ERR(bp->phy),
					     "failed to get SGMII PHY\n");

		ret = phy_init(bp->phy);
		if (ret)
			return dev_err_probe(&pdev->dev, ret,
					     "failed to init SGMII PHY\n");

		ret = zynqmp_pm_is_function_supported(PM_IOCTL, IOCTL_SET_GEM_CONFIG);
		if (!ret) {
			u32 pm_info[2];

			ret = of_property_read_u32_array(pdev->dev.of_node, "power-domains",
							 pm_info, ARRAY_SIZE(pm_info));
			if (ret) {
				dev_err(&pdev->dev, "Failed to read power management information\n");
				goto err_out_phy_exit;
			}
			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_FIXED, 0);
			if (ret)
				goto err_out_phy_exit;

			ret = zynqmp_pm_set_gem_config(pm_info[1], GEM_CONFIG_SGMII_MODE, 1);
			if (ret)
				goto err_out_phy_exit;
		}
	}

	/* Fully reset controller at hardware level if mapped in device tree */
	ret = device_reset_optional(&pdev->dev);
	if (ret) {
		phy_exit(bp->phy);
		return dev_err_probe(&pdev->dev, ret, "failed to reset controller");
	}

	ret = macb_init_dflt(pdev);

err_out_phy_exit:
	if (ret)
		phy_exit(bp->phy);

	return ret;
}

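/* Unlike the optional SGMII PHY above, the EyeQ5 PHY is mandatory:
 * probing fails if it cannot be acquired and initialized.
 */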
static int eyeq5_init(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);
	struct macb *bp = netdev_priv(netdev);
	struct device *dev = &pdev->dev;
	int ret;

	bp->phy = devm_phy_get(dev, NULL);
	if (IS_ERR(bp->phy))
		return dev_err_probe(dev, PTR_ERR(bp->phy),
				     "failed to get PHY\n");

	ret = phy_init(bp->phy);
	if (ret)
		return dev_err_probe(dev, ret, "failed to init PHY\n");

	ret = macb_init_dflt(pdev);
	if (ret)
		phy_exit(bp->phy);
	return ret;
}

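/* Per-SoC layouts of the USRIO register: which values select MII/RMII/RGMII,
 * where the reference-clock and half-duplex flow-control bits live, and the
 * TSU clock source where applicable.
 */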
static const struct macb_usrio_config mpfs_usrio = {
	.tsu_source = 0,
};

static const struct macb_usrio_config sama7g5_gem_usrio = {
	.mii = 0,
	.rmii = 1,
	.rgmii = 2,
	.refclk = BIT(2),
	.refclk_default_external = false,
	.hdfctlen = BIT(6),
};

static const struct macb_usrio_config sama7g5_emac_usrio = {
	.mii = 0,
	.rmii = 1,
	.rgmii = 2,
	.refclk = BIT(2),
	.refclk_default_external = true,
	.hdfctlen = BIT(6),
};

static const struct macb_config fu540_c000_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.clk_init = fu540_c000_clk_init,
	.init = fu540_c000_init,
	.jumbo_max_len = 10240,
	.usrio = &at91_default_usrio,
};

static const struct macb_config at91sam9260_config = {
	.caps = MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_USRIO_HAS_MII,
	.usrio = &at91_default_usrio,
};

static const struct macb_config sama5d3macb_config = {
	.caps = MACB_CAPS_SG_DISABLED |
		MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_USRIO_HAS_MII,
	.usrio = &at91_default_usrio,
};

static const struct macb_config pc302gem_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.usrio = &at91_default_usrio,
};

static const struct macb_config sama5d2_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.jumbo_max_len = 10240,
	.usrio = &at91_default_usrio,
};

static const struct macb_config sama5d29_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_GEM_HAS_PTP |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.usrio = &at91_default_usrio,
};

static const struct macb_config sama5d3_config = {
	.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII | MACB_CAPS_JUMBO |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.jumbo_max_len = 10240,
	.usrio = &at91_default_usrio,
};

static const struct macb_config sama5d4_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 4,
	.usrio = &at91_default_usrio,
};

static const struct macb_config emac_config = {
	.caps = MACB_CAPS_NEEDS_RSTONUBR | MACB_CAPS_MACB_IS_EMAC |
		MACB_CAPS_USRIO_HAS_MII,
	.clk_init = at91ether_clk_init,
	.init = at91ether_init,
	.usrio = &at91_default_usrio,
};

static const struct macb_config np4_config = {
	.caps = MACB_CAPS_USRIO_DISABLED,
};

static const struct macb_config zynqmp_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.init = init_reset_optional,
	.jumbo_max_len = 10240,
	.usrio = &at91_default_usrio,
};

static const struct macb_config zynq_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
		MACB_CAPS_NEEDS_RSTONUBR |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.usrio = &at91_default_usrio,
};

static const struct macb_config mpfs_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP |
		MACB_CAPS_USRIO_HAS_TSUCLK_SOURCE,
	.dma_burst_length = 16,
	.init = init_reset_optional,
	.usrio = &mpfs_usrio,
	.max_tx_length = 4040, /* Cadence Erratum 1686 */
	.jumbo_max_len = 4040,
};

static const struct macb_config sama7g5_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
		MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_USRIO_HAS_REFCLK_SOURCE |
		MACB_CAPS_MIIONRGMII | MACB_CAPS_GEM_HAS_PTP |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.usrio = &sama7g5_gem_usrio,
};

static const struct macb_config sama7g5_emac_config = {
	.caps = MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII |
		MACB_CAPS_MIIONRGMII |
		MACB_CAPS_USRIO_HAS_REFCLK_SOURCE |
		MACB_CAPS_GEM_HAS_PTP |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.usrio = &sama7g5_emac_usrio,
};

static const struct macb_config versal_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_BD_RD_PREFETCH |
		MACB_CAPS_NEED_TSUCLK | MACB_CAPS_QUEUE_DISABLE |
		MACB_CAPS_QBV |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.init = init_reset_optional,
	.jumbo_max_len = 10240,
	.usrio = &at91_default_usrio,
};

static const struct macb_config eyeq5_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_QUEUE_DISABLE |
		MACB_CAPS_NO_LSO | MACB_CAPS_EEE,
	.dma_burst_length = 16,
	.init = eyeq5_init,
	.jumbo_max_len = 10240,
};

static const struct macb_config raspberrypi_rp1_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_CLK_HW_CHG |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP |
		MACB_CAPS_EEE |
		MACB_CAPS_USRIO_HAS_MII,
	.dma_burst_length = 16,
	.usrio = &at91_default_usrio,
	.jumbo_max_len = 10240,
};

static const struct macb_config pic64hpsc_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP | MACB_CAPS_USRIO_DISABLED,
	.dma_burst_length = 16,
	.init = init_reset_optional,
	.jumbo_max_len = 16383,
};

static const struct of_device_id macb_dt_ids[] = {
	{ .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
	{ .compatible = "cdns,macb" },
	{ .compatible = "cdns,np4-macb", .data = &np4_config },
	{ .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
	{ .compatible = "cdns,gem", .data = &pc302gem_config },
	{ .compatible = "cdns,sam9x60-macb", .data = &at91sam9260_config },
	{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
	{ .compatible = "atmel,sama5d29-gem", .data = &sama5d29_config },
	{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
	{ .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
	{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
	{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
	{ .compatible = "cdns,emac", .data = &emac_config },
	{ .compatible = "cdns,zynqmp-gem", .data = &zynqmp_config }, /* deprecated */
	{ .compatible = "cdns,zynq-gem", .data = &zynq_config }, /* deprecated */
	{ .compatible = "sifive,fu540-c000-gem", .data = &fu540_c000_config },
	{ .compatible = "microchip,mpfs-macb", .data = &mpfs_config },
	{ .compatible = "microchip,pic64hpsc-gem", .data = &pic64hpsc_config },
	{ .compatible = "microchip,sama7g5-gem", .data = &sama7g5_gem_config },
	{ .compatible = "microchip,sama7g5-emac", .data = &sama7g5_emac_config },
	{ .compatible = "mobileye,eyeq5-gem", .data = &eyeq5_config },
	{ .compatible = "raspberrypi,rp1-gem", .data = &raspberrypi_rp1_config },
	{ .compatible = "xlnx,zynqmp-gem", .data = &zynqmp_config },
	{ .compatible = "xlnx,zynq-gem", .data = &zynq_config },
	{ .compatible = "xlnx,versal-gem", .data = &versal_config },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, macb_dt_ids);
#endif /* CONFIG_OF */

static const struct macb_config default_gem_config = {
	.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE |
		MACB_CAPS_JUMBO |
		MACB_CAPS_GEM_HAS_PTP,
	.dma_burst_length = 16,
	.usrio = &at91_default_usrio,
	.jumbo_max_len = 10240,
};

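/* Probe order matters here: clocks must be running before any register
 * access (including macb_probe_queues() and the capability reads below),
 * runtime PM is primed before the netdev is allocated, and the MDIO bus
 * is only brought up after the IP-specific init hook has run.
 */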
static int macb_probe(struct platform_device *pdev)
{
	struct clk *pclk, *hclk = NULL, *tx_clk = NULL, *rx_clk = NULL;
	struct device_node *np = pdev->dev.of_node;
	const struct macb_config *macb_config;
	struct clk *tsu_clk = NULL;
	phy_interface_t interface;
	struct net_device *dev;
	struct resource *regs;
	u32 wtrmrk_rst_val;
	void __iomem *mem;
	struct macb *bp;
	int num_queues;
	bool native_io;
	int err, val;

	mem = devm_platform_get_and_ioremap_resource(pdev, 0, &regs);
	if (IS_ERR(mem))
		return PTR_ERR(mem);

	macb_config = of_device_get_match_data(&pdev->dev);
	if (!macb_config)
		macb_config = &default_gem_config;

	err = macb_clk_init(pdev, &pclk, &hclk, &tx_clk, &rx_clk, &tsu_clk,
			    macb_config);
	if (err)
		return err;

	pm_runtime_set_autosuspend_delay(&pdev->dev, MACB_PM_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_get_noresume(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);
	native_io = hw_is_native_io(mem);

	num_queues = macb_probe_queues(&pdev->dev, mem, native_io);
	if (num_queues < 0) {
		err = num_queues;
		goto err_disable_clocks;
	}

	dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
	if (!dev) {
		err = -ENOMEM;
		goto err_disable_clocks;
	}

	dev->base_addr = regs->start;

	SET_NETDEV_DEV(dev, &pdev->dev);

	bp = netdev_priv(dev);
	bp->pdev = pdev;
	bp->dev = dev;
	bp->regs = mem;
	bp->native_io = native_io;
	if (native_io) {
		bp->macb_reg_readl = hw_readl_native;
		bp->macb_reg_writel = hw_writel_native;
	} else {
		bp->macb_reg_readl = hw_readl;
		bp->macb_reg_writel = hw_writel;
	}
	bp->num_queues = num_queues;
	bp->dma_burst_length = macb_config->dma_burst_length;
	bp->pclk = pclk;
	bp->hclk = hclk;
	bp->tx_clk = tx_clk;
	bp->rx_clk = rx_clk;
	bp->tsu_clk = tsu_clk;
	bp->jumbo_max_len = macb_config->jumbo_max_len;

	if (!hw_is_gem(bp->regs, bp->native_io))
		bp->max_tx_length = MACB_MAX_TX_LEN;
	else if (macb_config->max_tx_length)
		bp->max_tx_length = macb_config->max_tx_length;
	else
		bp->max_tx_length = GEM_MAX_TX_LEN;

	bp->wol = 0;
	device_set_wakeup_capable(&pdev->dev, 1);

	bp->usrio = macb_config->usrio;

	if (of_property_read_bool(bp->pdev->dev.of_node, "cdns,timer-adjust") &&
	    IS_ENABLED(CONFIG_MACB_USE_HWSTAMP)) {
		dev_err(&pdev->dev, "Timer adjust mode is not supported\n");
		err = -EINVAL;
		goto err_out_free_netdev;
	}

	/* By default we set to partial store and forward mode for zynqmp.
	 * Disable if not set in devicetree.
	 */
	if (GEM_BFEXT(PBUF_CUTTHRU, gem_readl(bp, DCFG6))) {
		err = of_property_read_u32(bp->pdev->dev.of_node,
					   "cdns,rx-watermark",
					   &bp->rx_watermark);

		if (!err) {
			/* Disable partial store and forward in case of error or
			 * invalid watermark value
			 */
			wtrmrk_rst_val = (1 << (GEM_BFEXT(RX_PBUF_ADDR, gem_readl(bp, DCFG2)))) - 1;
			if (bp->rx_watermark > wtrmrk_rst_val || !bp->rx_watermark) {
				dev_info(&bp->pdev->dev, "Invalid watermark value\n");
				bp->rx_watermark = 0;
			}
		}
	}
	spin_lock_init(&bp->lock);
	spin_lock_init(&bp->stats_lock);

	/* setup capabilities */
	macb_configure_caps(bp, macb_config);

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	if (GEM_BFEXT(DAW64, gem_readl(bp, DCFG6))) {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		if (err) {
			dev_err(&pdev->dev, "failed to set DMA mask\n");
			goto err_out_free_netdev;
		}
		bp->caps |= MACB_CAPS_DMA_64B;
	}
#endif
	platform_set_drvdata(pdev, dev);

	dev->irq = platform_get_irq(pdev, 0);
	if (dev->irq < 0) {
		err = dev->irq;
		goto err_out_free_netdev;
	}

	/* MTU range: 68 - 1518 or 10240 */
	dev->min_mtu = GEM_MTU_MIN_SIZE;
	if ((bp->caps & MACB_CAPS_JUMBO) && bp->jumbo_max_len)
		dev->max_mtu = MIN(bp->jumbo_max_len, RX_BUFFER_MAX) -
			       ETH_HLEN - ETH_FCS_LEN;
	else
		dev->max_mtu = 1536 - ETH_HLEN - ETH_FCS_LEN;

	if (bp->caps & MACB_CAPS_BD_RD_PREFETCH) {
		val = GEM_BFEXT(RXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->rx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);

		val = GEM_BFEXT(TXBD_RDBUFF, gem_readl(bp, DCFG10));
		if (val)
			bp->tx_bd_rd_prefetch = (2 << (val - 1)) *
						macb_dma_desc_get_size(bp);
	}

	bp->rx_intr_mask = MACB_RX_INT_FLAGS;
	if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
		bp->rx_intr_mask |= MACB_BIT(RXUBR);

	err = of_get_ethdev_address(np, bp->dev);
	if (err == -EPROBE_DEFER)
		goto err_out_free_netdev;
	else if (err)
		macb_get_hwaddr(bp);

	err = of_get_phy_mode(np, &interface);
	if (err)
		/* not found in DT, MII by default */
		bp->phy_interface = PHY_INTERFACE_MODE_MII;
	else
		bp->phy_interface = interface;

	/* IP specific init */
	err = macb_init(pdev, macb_config);
	if (err)
		goto err_out_free_netdev;

	err = macb_mii_init(bp);
	if (err)
		goto err_out_phy_exit;

	netif_carrier_off(dev);

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
		goto err_out_unregister_mdio;
	}

	INIT_WORK(&bp->hresp_err_bh_work, macb_hresp_error_task);
	INIT_DELAYED_WORK(&bp->tx_lpi_work, macb_tx_lpi_work_fn);

	netdev_info(dev, "Cadence %s rev 0x%08x at 0x%08lx irq %d (%pM)\n",
		    macb_is_gem(bp) ? "GEM" : "MACB", macb_readl(bp, MID),
		    dev->base_addr, dev->irq, dev->dev_addr);

	pm_runtime_put_autosuspend(&bp->pdev->dev);

	return 0;

err_out_unregister_mdio:
	mdiobus_unregister(bp->mii_bus);
	mdiobus_free(bp->mii_bus);

err_out_phy_exit:
	phy_exit(bp->phy);

err_out_free_netdev:
	free_netdev(dev);

err_disable_clocks:
	macb_clks_disable(pclk, hclk, tx_clk, rx_clk, tsu_clk);
	pm_runtime_disable(&pdev->dev);
	pm_runtime_set_suspended(&pdev->dev);
	pm_runtime_dont_use_autosuspend(&pdev->dev);

	return err;
}

static void macb_remove(struct platform_device *pdev)
{
	struct net_device *dev;
	struct macb *bp;

	dev = platform_get_drvdata(pdev);

	if (dev) {
		bp = netdev_priv(dev);
		unregister_netdev(dev);
		phy_exit(bp->phy);
		mdiobus_unregister(bp->mii_bus);
		mdiobus_free(bp->mii_bus);

		device_set_wakeup_enable(&bp->pdev->dev, 0);
		cancel_delayed_work_sync(&bp->tx_lpi_work);
		cancel_work_sync(&bp->hresp_err_bh_work);
		pm_runtime_disable(&pdev->dev);
		pm_runtime_dont_use_autosuspend(&pdev->dev);
		pm_runtime_set_suspended(&pdev->dev);
		phylink_destroy(bp->phylink);
		free_netdev(dev);
	}
}

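/* System suspend has two paths. With WoL armed, the receiver is kept
 * running with only the wake sources (magic packet and/or ARP match)
 * enabled and the queue 0 IRQ set as a wakeup source; otherwise the link
 * is stopped via phylink and the controller is fully reset.
 */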
static int __maybe_unused macb_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct in_ifaddr *ifa = NULL;
	struct macb_queue *queue;
	struct in_device *idev;
	unsigned long flags;
	u32 tmp, ifa_local;
	unsigned int q;

	if (!device_may_wakeup(&bp->dev->dev))
		phy_exit(bp->phy);

	if (!netif_running(netdev))
		return 0;

	if (bp->wol & MACB_WOL_ENABLED) {
		if (bp->wolopts & WAKE_ARP) {
			/* Check for IP address in WOL ARP mode */
			rcu_read_lock();
			idev = __in_dev_get_rcu(bp->dev);
			if (idev)
				ifa = rcu_dereference(idev->ifa_list);
			if (!ifa) {
				rcu_read_unlock();
				netdev_err(netdev, "IP address not assigned as required by WoL ARP wake\n");
				return -EOPNOTSUPP;
			}
			ifa_local = be32_to_cpu(ifa->ifa_local);
			rcu_read_unlock();
		}

		spin_lock_irqsave(&bp->lock, flags);

		/* Disable Tx and Rx engines before disabling the queues,
		 * this is mandatory as per the IP spec sheet
		 */
		tmp = macb_readl(bp, NCR);
		macb_writel(bp, NCR, tmp & ~(MACB_BIT(TE) | MACB_BIT(RE)));
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
		if (!(bp->caps & MACB_CAPS_QUEUE_DISABLE))
			macb_writel(bp, RBQPH,
				    upper_32_bits(bp->rx_ring_tieoff_dma));
#endif
		for (q = 0, queue = bp->queues; q < bp->num_queues;
		     ++q, ++queue) {
			/* Disable RX queues */
			if (bp->caps & MACB_CAPS_QUEUE_DISABLE) {
				queue_writel(queue, RBQP, MACB_BIT(QUEUE_DISABLE));
			} else {
				/* Tie off RX queues */
				queue_writel(queue, RBQP,
					     lower_32_bits(bp->rx_ring_tieoff_dma));
			}
			/* Disable all interrupts */
			queue_writel(queue, IDR, -1);
			queue_readl(queue, ISR);
			macb_queue_isr_clear(bp, queue, -1);
		}
		/* Enable Receive engine */
		macb_writel(bp, NCR, tmp | MACB_BIT(RE));
		/* Flush all status bits */
		macb_writel(bp, TSR, -1);
		macb_writel(bp, RSR, -1);

		tmp = (bp->wolopts & WAKE_MAGIC) ? MACB_BIT(MAG) : 0;
		if (bp->wolopts & WAKE_ARP) {
			tmp |= MACB_BIT(ARP);
			/* write IP address into register */
			tmp |= MACB_BFEXT(IP, ifa_local);
		}

		if (macb_is_gem(bp)) {
			queue_writel(bp->queues, IER, GEM_BIT(WOL));
			gem_writel(bp, WOL, tmp);
		} else {
			queue_writel(bp->queues, IER, MACB_BIT(WOL));
			macb_writel(bp, WOL, tmp);
		}
		spin_unlock_irqrestore(&bp->lock, flags);

		enable_irq_wake(bp->queues[0].irq);
	}

	netif_device_detach(netdev);
	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue) {
		napi_disable(&queue->napi_rx);
		napi_disable(&queue->napi_tx);
	}

	if (!(bp->wol & MACB_WOL_ENABLED)) {
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
		spin_lock_irqsave(&bp->lock, flags);
		macb_reset_hw(bp);
		spin_unlock_irqrestore(&bp->lock, flags);
	}

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);

	if (netdev->hw_features & NETIF_F_NTUPLE)
		bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);

	if (bp->ptp_info)
		bp->ptp_info->ptp_remove(netdev);
	if (!device_may_wakeup(dev))
		pm_runtime_force_suspend(dev);

	return 0;
}

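/* Resume mirrors suspend: disarm and clear any WoL state first, stop
 * phylink so both paths rejoin a common restore sequence, then rebuild
 * the rings, re-apply saved USRIO/screener state and restart the link.
 */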
static int __maybe_unused macb_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);
	struct macb_queue *queue;
	unsigned long flags;
	unsigned int q;

	if (!device_may_wakeup(&bp->dev->dev))
		phy_init(bp->phy);

	if (!netif_running(netdev))
		return 0;

	if (!device_may_wakeup(dev))
		pm_runtime_force_resume(dev);

	if (bp->wol & MACB_WOL_ENABLED) {
		spin_lock_irqsave(&bp->lock, flags);
		/* Disable WoL */
		if (macb_is_gem(bp)) {
			queue_writel(bp->queues, IDR, GEM_BIT(WOL));
			gem_writel(bp, WOL, 0);
		} else {
			queue_writel(bp->queues, IDR, MACB_BIT(WOL));
			macb_writel(bp, WOL, 0);
		}
		/* Clear ISR on queue 0 */
		queue_readl(bp->queues, ISR);
		macb_queue_isr_clear(bp, bp->queues, -1);
		spin_unlock_irqrestore(&bp->lock, flags);

		disable_irq_wake(bp->queues[0].irq);

		/* Now make sure we disable phy before moving
		 * to common restore path
		 */
		rtnl_lock();
		phylink_stop(bp->phylink);
		rtnl_unlock();
	}

	if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC))
		macb_init_buffers(bp);

	for (q = 0, queue = bp->queues; q < bp->num_queues;
	     ++q, ++queue) {
		if (!(bp->caps & MACB_CAPS_MACB_IS_EMAC)) {
			if (macb_is_gem(bp))
				gem_init_rx_ring(queue);
			else
				macb_init_rx_ring(queue);
		}

		napi_enable(&queue->napi_rx);
		napi_enable(&queue->napi_tx);
	}

	if (netdev->hw_features & NETIF_F_NTUPLE)
		gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);

	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
		macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);

	macb_writel(bp, NCR, MACB_BIT(MPE));
	macb_init_hw(bp);
	macb_set_rx_mode(netdev);
	macb_restore_features(bp);
	rtnl_lock();

	phylink_start(bp->phylink);
	rtnl_unlock();

	netif_device_attach(netdev);
	if (bp->ptp_info)
		bp->ptp_info->ptp_init(netdev);

	return 0;
}

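/* Runtime PM clock policy: with wakeup disarmed, all clocks are gated.
 * With wakeup armed, the core clocks must stay running for WoL matching,
 * and only the TSU clock is gated, unless the IP needs it (NEED_TSUCLK).
 */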
static int __maybe_unused macb_runtime_suspend(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev)))
		macb_clks_disable(bp->pclk, bp->hclk, bp->tx_clk, bp->rx_clk, bp->tsu_clk);
	else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK))
		macb_clks_disable(NULL, NULL, NULL, NULL, bp->tsu_clk);

	return 0;
}

static int __maybe_unused macb_runtime_resume(struct device *dev)
{
	struct net_device *netdev = dev_get_drvdata(dev);
	struct macb *bp = netdev_priv(netdev);

	if (!(device_may_wakeup(dev))) {
		clk_prepare_enable(bp->pclk);
		clk_prepare_enable(bp->hclk);
		clk_prepare_enable(bp->tx_clk);
		clk_prepare_enable(bp->rx_clk);
		clk_prepare_enable(bp->tsu_clk);
	} else if (!(bp->caps & MACB_CAPS_NEED_TSUCLK)) {
		clk_prepare_enable(bp->tsu_clk);
	}

	return 0;
}

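/* Bring the interface down cleanly on shutdown/kexec so the controller's
 * DMA is quiesced before control is handed over.
 */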
static void macb_shutdown(struct platform_device *pdev)
{
	struct net_device *netdev = platform_get_drvdata(pdev);

	rtnl_lock();

	if (netif_running(netdev))
		dev_close(netdev);

	netif_device_detach(netdev);

	rtnl_unlock();
}

static const struct dev_pm_ops macb_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(macb_suspend, macb_resume)
	SET_RUNTIME_PM_OPS(macb_runtime_suspend, macb_runtime_resume, NULL)
};

static struct platform_driver macb_driver = {
	.probe = macb_probe,
	.remove = macb_remove,
	.driver = {
		.name = "macb",
		.of_match_table = of_match_ptr(macb_dt_ids),
		.pm = &macb_pm_ops,
	},
	.shutdown = macb_shutdown,
};

module_platform_driver(macb_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
MODULE_ALIAS("platform:macb");