// SPDX-License-Identifier: GPL-2.0-or-later
/* drivers/net/ethernet/freescale/gianfar.c
 *
 * Gianfar Ethernet Driver
 * This driver is designed for the non-CPM ethernet controllers
 * on the 85xx and 83xx family of integrated processors
 * Based on 8260_io/fcc_enet.c
 *
 * Author: Andy Fleming
 * Maintainer: Kumar Gala
 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
 *
 * Copyright 2002-2009, 2011-2013 Freescale Semiconductor, Inc.
 * Copyright 2007 MontaVista Software, Inc.
 *
 * Gianfar: AKA Lambda Draconis, "Dragon"
 * RA 11 31 24.2
 * Dec +69 19 52
 * V 3.84
 * B-V +1.62
 *
 * Theory of operation
 *
 * The driver is initialized through of_device. Configuration information
 * is therefore conveyed through an OF-style device tree.
 *
 * The Gianfar Ethernet Controller uses a ring of buffer
 * descriptors. The beginning is indicated by a register
 * pointing to the physical address of the start of the ring.
 * The end is determined by a "wrap" bit being set in the
 * last descriptor of the ring.
 *
 * When a packet is received, the RXF bit in the
 * IEVENT register is set, triggering an interrupt when the
 * corresponding bit in the IMASK register is also set (if
 * interrupt coalescing is active, then the interrupt may not
 * happen immediately, but will wait until either a set number
 * of frames or amount of time have passed). In NAPI, the
 * interrupt handler will signal there is work to be done, and
 * exit. This method will start at the last known empty
 * descriptor, and process every subsequent descriptor until there
 * are none left with data (NAPI will stop after a set number of
 * packets to give time to other tasks, but will eventually
 * process all the packets). The data arrives inside a
 * pre-allocated skb, and so after the skb is passed up to the
 * stack, a new skb must be allocated, and the address field in
 * the buffer descriptor must be updated to indicate this new
 * skb.
 *
 * When the kernel requests that a packet be transmitted, the
 * driver starts where it left off last time, and points the
 * descriptor at the buffer which was passed in. The driver
 * then informs the DMA engine that there are packets ready to
 * be transmitted. Once the controller is finished transmitting
 * the packet, an interrupt may be triggered (under the same
 * conditions as for reception, but depending on the TXF bit).
 * The driver then cleans up the buffer.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_vlan.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_mdio.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/net_tstamp.h>

#include <asm/io.h>
#ifdef CONFIG_PPC
#include <asm/reg.h>
#include <asm/mpc85xx.h>
#endif
#include <asm/irq.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/dma-mapping.h>
#include <linux/crc32.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/phy_fixed.h>
#include <linux/of.h>
#include <linux/of_net.h>
#include <linux/property.h>

#include "gianfar.h"

#define TX_TIMEOUT (5*HZ)

MODULE_AUTHOR("Freescale Semiconductor, Inc");
MODULE_DESCRIPTION("Gianfar Ethernet Driver");
MODULE_LICENSE("GPL");

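/* Initialize a single Rx buffer descriptor: set its buffer address,
 * mark it empty with interrupt enabled, and set the WRAP flag if it
 * is the last descriptor of the ring.
 */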
static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
			    dma_addr_t buf)
{
	u32 lstatus;

	bdp->bufPtr = cpu_to_be32(buf);

	lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
	if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
		lstatus |= BD_LFLAG(RXBD_WRAP);

	gfar_wmb();

	bdp->lstatus = cpu_to_be32(lstatus);
}

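/* Program the Tx/Rx descriptor ring base registers (TBASEn/RBASEn)
 * with the DMA addresses of the buffer descriptor rings.
 */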
static void gfar_init_tx_rx_base(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->tbase0;
	for (i = 0; i < priv->num_tx_queues; i++) {
		gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
		baddr += 2;
	}

	baddr = &regs->rbase0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
		baddr += 2;
	}
}

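/* Program the per-queue Rx parameter registers (RQPRMn) with the ring
 * size and the default Rx flow control threshold.
 */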
static void gfar_init_rqprm(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;
	int i;

	baddr = &regs->rqprm0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		gfar_write(baddr, priv->rx_queue[i]->rx_ring_size |
			   (DEFAULT_RX_LFC_THR << FBTHR_SHIFT));
		baddr++;
	}
}

static void gfar_rx_offload_en(struct gfar_private *priv)
{
	/* set this when rx hw offload (TOE) functions are being used */
	priv->uses_rxfcb = 0;

	if (priv->ndev->features & (NETIF_F_RXCSUM | NETIF_F_HW_VLAN_CTAG_RX))
		priv->uses_rxfcb = 1;

	if (priv->hwts_rx_en || priv->rx_filer_enable)
		priv->uses_rxfcb = 1;
}

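/* Configure RCTRL according to the current features: filer, promiscuous
 * mode, Rx checksumming, extended hash, padding, timestamping and VLAN
 * extraction, then set up the flow control thresholds.
 */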
static void gfar_mac_rx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 rctrl = 0;

	if (priv->rx_filer_enable) {
		rctrl |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
		/* Program the RIR0 reg with the required distribution */
		gfar_write(&regs->rir0, DEFAULT_2RXQ_RIR0);
	}

	/* Restore PROMISC mode */
	if (priv->ndev->flags & IFF_PROMISC)
		rctrl |= RCTRL_PROM;

	if (priv->ndev->features & NETIF_F_RXCSUM)
		rctrl |= RCTRL_CHECKSUMMING;

	if (priv->extended_hash)
		rctrl |= RCTRL_EXTHASH | RCTRL_EMEN;

	if (priv->padding) {
		rctrl &= ~RCTRL_PAL_MASK;
		rctrl |= RCTRL_PADDING(priv->padding);
	}

	/* Enable HW time stamping if requested from user space */
	if (priv->hwts_rx_en)
		rctrl |= RCTRL_PRSDEP_INIT | RCTRL_TS_ENABLE;

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_RX)
		rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;

	/* Clear the LFC bit */
	gfar_write(&regs->rctrl, rctrl);
	/* Init flow control threshold values */
	gfar_init_rqprm(priv);
	gfar_write(&regs->ptv, DEFAULT_LFC_PTVVAL);
	rctrl |= RCTRL_LFC;

	/* Init rctrl based on our settings */
	gfar_write(&regs->rctrl, rctrl);
}

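/* Configure TCTRL: Tx checksum offload, priority-based or weighted
 * round-robin queue scheduling, and VLAN tag insertion.
 */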
static void gfar_mac_tx_config(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tctrl = 0;

	if (priv->ndev->features & NETIF_F_IP_CSUM)
		tctrl |= TCTRL_INIT_CSUM;

	if (priv->prio_sched_en)
		tctrl |= TCTRL_TXSCHED_PRIO;
	else {
		tctrl |= TCTRL_TXSCHED_WRRS;
		gfar_write(&regs->tr03wt, DEFAULT_WRRS_WEIGHT);
		gfar_write(&regs->tr47wt, DEFAULT_WRRS_WEIGHT);
	}

	if (priv->ndev->features & NETIF_F_HW_VLAN_CTAG_TX)
		tctrl |= TCTRL_VLINS;

	gfar_write(&regs->tctrl, tctrl);
}

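/* Program the interrupt coalescing registers for the Tx/Rx queues
 * selected by tx_mask/rx_mask: per-queue registers in multi-group
 * mode, the single txic/rxic pair otherwise.
 */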
static void gfar_configure_coalescing(struct gfar_private *priv,
				      unsigned long tx_mask, unsigned long rx_mask)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 __iomem *baddr;

	if (priv->mode == MQ_MG_MODE) {
		int i = 0;

		baddr = &regs->txic0;
		for_each_set_bit(i, &tx_mask, priv->num_tx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->tx_queue[i]->txcoalescing))
				gfar_write(baddr + i, priv->tx_queue[i]->txic);
		}

		baddr = &regs->rxic0;
		for_each_set_bit(i, &rx_mask, priv->num_rx_queues) {
			gfar_write(baddr + i, 0);
			if (likely(priv->rx_queue[i]->rxcoalescing))
				gfar_write(baddr + i, priv->rx_queue[i]->rxic);
		}
	} else {
		/* Backward compatible case -- even if we enable
		 * multiple queues, there's only single reg to program
		 */
		gfar_write(&regs->txic, 0);
		if (likely(priv->tx_queue[0]->txcoalescing))
			gfar_write(&regs->txic, priv->tx_queue[0]->txic);

		gfar_write(&regs->rxic, 0);
		if (unlikely(priv->rx_queue[0]->rxcoalescing))
			gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
	}
}

static void gfar_configure_coalescing_all(struct gfar_private *priv)
{
	gfar_configure_coalescing(priv, 0xFF, 0xFF);
}

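/* Accumulate the software per-queue counters into the rtnl stats and,
 * if an RMON block is present, fold in the hardware Rx drop counter
 * together with its overflow accumulator.
 */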
static void gfar_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct gfar_private *priv = netdev_priv(dev);
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		stats->rx_packets += priv->rx_queue[i]->stats.rx_packets;
		stats->rx_bytes += priv->rx_queue[i]->stats.rx_bytes;
		stats->rx_dropped += priv->rx_queue[i]->stats.rx_dropped;
	}

	for (i = 0; i < priv->num_tx_queues; i++) {
		stats->tx_bytes += priv->tx_queue[i]->stats.tx_bytes;
		stats->tx_packets += priv->tx_queue[i]->stats.tx_packets;
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
		unsigned long flags;
		u32 rdrp, car, car_before;
		u64 rdrp_offset;

		spin_lock_irqsave(&priv->rmon_overflow.lock, flags);
		car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		do {
			car_before = car;
			rdrp = gfar_read(&rmon->rdrp);
			car = gfar_read(&rmon->car1) & CAR1_C1RDR;
		} while (car != car_before);
		if (car) {
			priv->rmon_overflow.rdrp++;
			gfar_write(&rmon->car1, car);
		}
		rdrp_offset = priv->rmon_overflow.rdrp;
		spin_unlock_irqrestore(&priv->rmon_overflow.lock, flags);

		stats->rx_missed_errors = rdrp + (rdrp_offset << 16);
	}
}

/* Set the appropriate hash bit for the given addr */
/* The algorithm works like so:
 * 1) Take the Destination Address (ie the multicast address), and
 *    do a CRC on it (little endian), and reverse the bits of the
 *    result.
 * 2) Use the 8 most significant bits as a hash into a 256-entry
 *    table. The table is controlled through 8 32-bit registers:
 *    gaddr0-7. gaddr0's MSB is entry 0, and gaddr7's LSB is
 *    entry 255. This means that the 3 most significant bits of the
 *    hash index select which gaddr register to use, and the 5 other
 *    bits indicate which bit (assuming an IBM numbering scheme, which
 *    for PowerPC (tm) is usually the case) in the register holds
 *    the entry.
 */
static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
{
	u32 tempval;
	struct gfar_private *priv = netdev_priv(dev);
	u32 result = ether_crc(ETH_ALEN, addr);
	int width = priv->hash_width;
	u8 whichbit = (result >> (32 - width)) & 0x1f;
	u8 whichreg = result >> (32 - width + 5);
	u32 value = (1 << (31-whichbit));

	tempval = gfar_read(priv->hash_regs[whichreg]);
	tempval |= value;
	gfar_write(priv->hash_regs[whichreg], tempval);
}

/* There are multiple MAC Address register pairs on some controllers
 * This function sets the numth pair to a given address
 */
static void gfar_set_mac_for_addr(struct net_device *dev, int num,
				  const u8 *addr)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	u32 __iomem *macptr = &regs->macstnaddr1;

	macptr += num*2;

	/* For a station address of 0x12345678ABCD in transmission
	 * order (BE), MACnADDR1 is set to 0xCDAB7856 and
	 * MACnADDR2 is set to 0x34120000.
	 */
	tempval = (addr[5] << 24) | (addr[4] << 16) |
		  (addr[3] << 8) | addr[2];

	gfar_write(macptr, tempval);

	tempval = (addr[1] << 24) | (addr[0] << 16);

	gfar_write(macptr+1, tempval);
}

static int gfar_set_mac_addr(struct net_device *dev, void *p)
{
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	gfar_set_mac_for_addr(dev, 0, dev->dev_addr);

	return 0;
}

static void gfar_ints_disable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Clear IEVENT */
		gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);

		/* Initialize IMASK */
		gfar_write(&regs->imask, IMASK_INIT_CLEAR);
	}
}

static void gfar_ints_enable(struct gfar_private *priv)
{
	int i;
	for (i = 0; i < priv->num_grps; i++) {
		struct gfar __iomem *regs = priv->gfargrp[i].regs;
		/* Unmask the interrupts we look for */
		gfar_write(&regs->imask,
			   IMASK_DEFAULT | priv->rmon_overflow.imask);
	}
}

static int gfar_alloc_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++) {
		priv->tx_queue[i] = kzalloc(sizeof(struct gfar_priv_tx_q),
					    GFP_KERNEL);
		if (!priv->tx_queue[i])
			return -ENOMEM;

		priv->tx_queue[i]->tx_skbuff = NULL;
		priv->tx_queue[i]->qindex = i;
		priv->tx_queue[i]->dev = priv->ndev;
		spin_lock_init(&(priv->tx_queue[i]->txlock));
	}
	return 0;
}

static int gfar_alloc_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++) {
		priv->rx_queue[i] = kzalloc(sizeof(struct gfar_priv_rx_q),
					    GFP_KERNEL);
		if (!priv->rx_queue[i])
			return -ENOMEM;

		priv->rx_queue[i]->qindex = i;
		priv->rx_queue[i]->ndev = priv->ndev;
	}
	return 0;
}

static void gfar_free_tx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_tx_queues; i++)
		kfree(priv->tx_queue[i]);
}

static void gfar_free_rx_queues(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_rx_queues; i++)
		kfree(priv->rx_queue[i]);
}

static void unmap_group_regs(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < MAXGROUPS; i++)
		if (priv->gfargrp[i].regs)
			iounmap(priv->gfargrp[i].regs);
}

static void free_gfar_dev(struct gfar_private *priv)
{
	int i, j;

	for (i = 0; i < priv->num_grps; i++)
		for (j = 0; j < GFAR_NUM_IRQS; j++) {
			kfree(priv->gfargrp[i].irqinfo[j]);
			priv->gfargrp[i].irqinfo[j] = NULL;
		}

	free_netdev(priv->ndev);
}

static void disable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_disable(&priv->gfargrp[i].napi_rx);
		napi_disable(&priv->gfargrp[i].napi_tx);
	}
}

static void enable_napi(struct gfar_private *priv)
{
	int i;

	for (i = 0; i < priv->num_grps; i++) {
		napi_enable(&priv->gfargrp[i].napi_rx);
		napi_enable(&priv->gfargrp[i].napi_tx);
	}
}

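/* Map the registers and interrupts of one interrupt group described by
 * the device tree node @np, and assign Rx/Tx queues to the group.
 */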
static int gfar_parse_group(struct device_node *np,
			    struct gfar_private *priv, const char *model)
{
	struct gfar_priv_grp *grp = &priv->gfargrp[priv->num_grps];
	int i;

	for (i = 0; i < GFAR_NUM_IRQS; i++) {
		grp->irqinfo[i] = kzalloc(sizeof(struct gfar_irqinfo),
					  GFP_KERNEL);
		if (!grp->irqinfo[i])
			return -ENOMEM;
	}

	grp->regs = of_iomap(np, 0);
	if (!grp->regs)
		return -ENOMEM;

	gfar_irq(grp, TX)->irq = irq_of_parse_and_map(np, 0);

	/* If we aren't the FEC we have multiple interrupts */
	if (model && strcasecmp(model, "FEC")) {
		gfar_irq(grp, RX)->irq = irq_of_parse_and_map(np, 1);
		gfar_irq(grp, ER)->irq = irq_of_parse_and_map(np, 2);
		if (!gfar_irq(grp, TX)->irq ||
		    !gfar_irq(grp, RX)->irq ||
		    !gfar_irq(grp, ER)->irq)
			return -EINVAL;
	}

	grp->priv = priv;
	spin_lock_init(&grp->grplock);
	if (priv->mode == MQ_MG_MODE) {
		/* One Q per interrupt group: Q0 to G0, Q1 to G1 */
		grp->rx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
		grp->tx_bit_map = (DEFAULT_MAPPING >> priv->num_grps);
	} else {
		grp->rx_bit_map = 0xFF;
		grp->tx_bit_map = 0xFF;
	}

	/* bit_map's MSB is q0 (from q0 to q7) but, for_each_set_bit parses
	 * right to left, so we need to reverse the 8 bits to get the q index
	 */
	grp->rx_bit_map = bitrev8(grp->rx_bit_map);
	grp->tx_bit_map = bitrev8(grp->tx_bit_map);

	/* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
	 * also assign queues to groups
	 */
	for_each_set_bit(i, &grp->rx_bit_map, priv->num_rx_queues) {
		if (!grp->rx_queue)
			grp->rx_queue = priv->rx_queue[i];
		grp->num_rx_queues++;
		grp->rstat |= (RSTAT_CLEAR_RHALT >> i);
		priv->rqueue |= ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
		priv->rx_queue[i]->grp = grp;
	}

	for_each_set_bit(i, &grp->tx_bit_map, priv->num_tx_queues) {
		if (!grp->tx_queue)
			grp->tx_queue = priv->tx_queue[i];
		grp->num_tx_queues++;
		grp->tstat |= (TSTAT_CLEAR_THALT >> i);
		priv->tqueue |= (TQUEUE_EN0 >> i);
		priv->tx_queue[i]->grp = grp;
	}

	priv->num_grps++;

	return 0;
}


/* Reads the controller's registers to determine what interface
 * connects it to the PHY.
 */
static phy_interface_t gfar_get_interface(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 ecntrl;

	ecntrl = gfar_read(&regs->ecntrl);

	if (ecntrl & ECNTRL_SGMII_MODE)
		return PHY_INTERFACE_MODE_SGMII;

	if (ecntrl & ECNTRL_TBI_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MODE)
			return PHY_INTERFACE_MODE_RTBI;
		else
			return PHY_INTERFACE_MODE_TBI;
	}

	if (ecntrl & ECNTRL_REDUCED_MODE) {
		if (ecntrl & ECNTRL_REDUCED_MII_MODE) {
			return PHY_INTERFACE_MODE_RMII;
		}
		else {
			phy_interface_t interface = priv->interface;

			/* This isn't autodetected right now, so it must
			 * be set by the device tree or platform code.
			 */
			if (interface == PHY_INTERFACE_MODE_RGMII_ID)
				return PHY_INTERFACE_MODE_RGMII_ID;

			return PHY_INTERFACE_MODE_RGMII;
		}
	}

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT)
		return PHY_INTERFACE_MODE_GMII;

	return PHY_INTERFACE_MODE_MII;
}

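/* Allocate the net_device and parse the device tree node: queue/group
 * layout, stashing properties, MAC address, PHY connection type and
 * PHY/TBI handles.
 */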
static int gfar_of_init(struct platform_device *ofdev, struct net_device **pdev)
{
	const char *model;
	int err = 0, i;
	phy_interface_t interface;
	struct net_device *dev = NULL;
	struct gfar_private *priv = NULL;
	struct device_node *np = ofdev->dev.of_node;
	struct device_node *child = NULL;
	u32 stash_len = 0;
	u32 stash_idx = 0;
	unsigned int num_tx_qs, num_rx_qs;
	unsigned short mode;

	if (!np)
		return -ENODEV;

	if (of_device_is_compatible(np, "fsl,etsec2"))
		mode = MQ_MG_MODE;
	else
		mode = SQ_SG_MODE;

	if (mode == SQ_SG_MODE) {
		num_tx_qs = 1;
		num_rx_qs = 1;
	} else { /* MQ_MG_MODE */
		/* get the actual number of supported groups */
		unsigned int num_grps;

		num_grps = device_get_named_child_node_count(&ofdev->dev,
							     "queue-group");
		if (num_grps == 0 || num_grps > MAXGROUPS) {
			dev_err(&ofdev->dev, "Invalid # of int groups(%d)\n",
				num_grps);
			pr_err("Cannot do alloc_etherdev, aborting\n");
			return -EINVAL;
		}

		num_tx_qs = num_grps; /* one txq per int group */
		num_rx_qs = num_grps; /* one rxq per int group */
	}

	if (num_tx_qs > MAX_TX_QS) {
		pr_err("num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
		       num_tx_qs, MAX_TX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	if (num_rx_qs > MAX_RX_QS) {
		pr_err("num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
		       num_rx_qs, MAX_RX_QS);
		pr_err("Cannot do alloc_etherdev, aborting\n");
		return -EINVAL;
	}

	*pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
	dev = *pdev;
	if (NULL == dev)
		return -ENOMEM;

	priv = netdev_priv(dev);
	priv->ndev = dev;

	priv->mode = mode;

	priv->num_tx_queues = num_tx_qs;
	netif_set_real_num_rx_queues(dev, num_rx_qs);
	priv->num_rx_queues = num_rx_qs;

	err = gfar_alloc_tx_queues(priv);
	if (err)
		goto tx_alloc_failed;

	err = gfar_alloc_rx_queues(priv);
	if (err)
		goto rx_alloc_failed;

	err = of_property_read_string(np, "model", &model);
	if (err) {
		pr_err("Device model property missing, aborting\n");
		goto rx_alloc_failed;
	}

	/* Init Rx queue filer rule set linked list */
	INIT_LIST_HEAD(&priv->rx_list.list);
	priv->rx_list.count = 0;
	mutex_init(&priv->rx_queue_access);

	for (i = 0; i < MAXGROUPS; i++)
		priv->gfargrp[i].regs = NULL;

	/* Parse and initialize group specific information */
	if (priv->mode == MQ_MG_MODE) {
		for_each_available_child_of_node(np, child) {
			if (!of_node_name_eq(child, "queue-group"))
				continue;

			err = gfar_parse_group(child, priv, model);
			if (err) {
				of_node_put(child);
				goto err_grp_init;
			}
		}
	} else { /* SQ_SG_MODE */
		err = gfar_parse_group(np, priv, model);
		if (err)
			goto err_grp_init;
	}

	if (of_property_read_bool(np, "bd-stash")) {
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
		priv->bd_stash_en = 1;
	}

	err = of_property_read_u32(np, "rx-stash-len", &stash_len);

	if (err == 0)
		priv->rx_stash_size = stash_len;

	err = of_property_read_u32(np, "rx-stash-idx", &stash_idx);

	if (err == 0)
		priv->rx_stash_index = stash_idx;

	if (stash_len || stash_idx)
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_BUF_STASHING;

	err = of_get_ethdev_address(np, dev);
	if (err == -EPROBE_DEFER)
		goto err_grp_init;
	if (err) {
		eth_hw_addr_random(dev);
		dev_info(&ofdev->dev, "Using random MAC address: %pM\n", dev->dev_addr);
	}

	if (model && !strcasecmp(model, "TSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR;

	if (model && !strcasecmp(model, "eTSEC"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_GIGABIT |
				      FSL_GIANFAR_DEV_HAS_COALESCE |
				      FSL_GIANFAR_DEV_HAS_RMON |
				      FSL_GIANFAR_DEV_HAS_MULTI_INTR |
				      FSL_GIANFAR_DEV_HAS_CSUM |
				      FSL_GIANFAR_DEV_HAS_VLAN |
				      FSL_GIANFAR_DEV_HAS_MAGIC_PACKET |
				      FSL_GIANFAR_DEV_HAS_EXTENDED_HASH |
				      FSL_GIANFAR_DEV_HAS_TIMER |
				      FSL_GIANFAR_DEV_HAS_RX_FILER;

	/* Use PHY connection type from the DT node if one is specified there.
	 * rgmii-id really needs to be specified. Other types can be
	 * detected by hardware
	 */
	err = of_get_phy_mode(np, &interface);
	if (!err)
		priv->interface = interface;
	else
		priv->interface = gfar_get_interface(dev);

	if (of_property_read_bool(np, "fsl,magic-packet"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_MAGIC_PACKET;

	if (of_property_read_bool(np, "fsl,wake-on-filer"))
		priv->device_flags |= FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER;

	priv->phy_node = of_parse_phandle(np, "phy-handle", 0);

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_node && of_phy_is_fixed_link(np)) {
		err = of_phy_register_fixed_link(np);
		if (err)
			goto err_grp_init;

		priv->phy_node = of_node_get(np);
	}

	/* Find the TBI PHY. If it's not there, we don't support SGMII */
	priv->tbi_node = of_parse_phandle(np, "tbi-handle", 0);

	return 0;

err_grp_init:
	unmap_group_regs(priv);
rx_alloc_failed:
	gfar_free_rx_queues(priv);
tx_alloc_failed:
	gfar_free_tx_queues(priv);
	free_gfar_dev(priv);
	return err;
}

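/* Write a cluster of filer rules matching one protocol class, filling
 * the table from the end towards the start, and return the next free
 * filer index.
 */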
static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
				   u32 class)
{
	u32 rqfpr = FPR_FILER_MASK;
	u32 rqfcr = 0x0;

	rqfar--;
	rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_NOMATCH;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar--;
	rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
	rqfpr = class;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	return rqfar;
}

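/* Install the default filer table: a catch-all rule, per-class cluster
 * entries for IPv4/IPv6 with TCP/UDP, and no-match rules for the
 * remaining entries.
 */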
static void gfar_init_filer_table(struct gfar_private *priv)
{
	int i = 0x0;
	u32 rqfar = MAX_FILER_IDX;
	u32 rqfcr = 0x0;
	u32 rqfpr = FPR_FILER_MASK;

	/* Default rule */
	rqfcr = RQFCR_CMP_MATCH;
	priv->ftp_rqfcr[rqfar] = rqfcr;
	priv->ftp_rqfpr[rqfar] = rqfpr;
	gfar_write_filer(priv, rqfar, rqfcr, rqfpr);

	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
	rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);

	/* cur_filer_idx indicates the first non-masked rule */
	priv->cur_filer_idx = rqfar;

	/* Rest are masked rules */
	rqfcr = RQFCR_CMP_NOMATCH;
	for (i = 0; i < rqfar; i++) {
		priv->ftp_rqfcr[i] = rqfcr;
		priv->ftp_rqfpr[i] = rqfpr;
		gfar_write_filer(priv, i, rqfcr, rqfpr);
	}
}

#ifdef CONFIG_PPC
static void __gfar_detect_errata_83xx(struct gfar_private *priv)
{
	unsigned int pvr = mfspr(SPRN_PVR);
	unsigned int svr = mfspr(SPRN_SVR);
	unsigned int mod = (svr >> 16) & 0xfff6; /* w/o E suffix */
	unsigned int rev = svr & 0xffff;

	/* MPC8313 Rev 2.0 and higher; All MPC837x */
	if ((pvr == 0x80850010 && mod == 0x80b0 && rev >= 0x0020) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_74;

	/* MPC8313 and MPC837x all rev */
	if ((pvr == 0x80850010 && mod == 0x80b0) ||
	    (pvr == 0x80861010 && (mod & 0xfff9) == 0x80c0))
		priv->errata |= GFAR_ERRATA_76;

	/* MPC8313 Rev < 2.0 */
	if (pvr == 0x80850010 && mod == 0x80b0 && rev < 0x0020)
		priv->errata |= GFAR_ERRATA_12;
}

static void __gfar_detect_errata_85xx(struct gfar_private *priv)
{
	unsigned int svr = mfspr(SPRN_SVR);

	if ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) == 0x20))
		priv->errata |= GFAR_ERRATA_12;
	/* P2020/P1010 Rev 1; MPC8548 Rev 2 */
	if (((SVR_SOC_VER(svr) == SVR_P2020) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_P2010) && (SVR_REV(svr) < 0x20)) ||
	    ((SVR_SOC_VER(svr) == SVR_8548) && (SVR_REV(svr) < 0x31)))
		priv->errata |= GFAR_ERRATA_76; /* aka eTSEC 20 */
}
#endif

static void gfar_detect_errata(struct gfar_private *priv)
{
	struct device *dev = &priv->ofdev->dev;

	/* no plans to fix */
	priv->errata |= GFAR_ERRATA_A002;

#ifdef CONFIG_PPC
	if (pvr_version_is(PVR_VER_E500V1) || pvr_version_is(PVR_VER_E500V2))
		__gfar_detect_errata_85xx(priv);
	else /* non-mpc85xx parts, i.e. e300 core based */
		__gfar_detect_errata_83xx(priv);
#endif

	if (priv->errata)
		dev_info(dev, "enabled errata workarounds, flags: 0x%x\n",
			 priv->errata);
}

static void gfar_init_addr_hash_table(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;

	if (priv->device_flags & FSL_GIANFAR_DEV_HAS_EXTENDED_HASH) {
		priv->extended_hash = 1;
		priv->hash_width = 9;

		priv->hash_regs[0] = &regs->igaddr0;
		priv->hash_regs[1] = &regs->igaddr1;
		priv->hash_regs[2] = &regs->igaddr2;
		priv->hash_regs[3] = &regs->igaddr3;
		priv->hash_regs[4] = &regs->igaddr4;
		priv->hash_regs[5] = &regs->igaddr5;
		priv->hash_regs[6] = &regs->igaddr6;
		priv->hash_regs[7] = &regs->igaddr7;
		priv->hash_regs[8] = &regs->gaddr0;
		priv->hash_regs[9] = &regs->gaddr1;
		priv->hash_regs[10] = &regs->gaddr2;
		priv->hash_regs[11] = &regs->gaddr3;
		priv->hash_regs[12] = &regs->gaddr4;
		priv->hash_regs[13] = &regs->gaddr5;
		priv->hash_regs[14] = &regs->gaddr6;
		priv->hash_regs[15] = &regs->gaddr7;

	} else {
		priv->extended_hash = 0;
		priv->hash_width = 8;

		priv->hash_regs[0] = &regs->gaddr0;
		priv->hash_regs[1] = &regs->gaddr1;
		priv->hash_regs[2] = &regs->gaddr2;
		priv->hash_regs[3] = &regs->gaddr3;
		priv->hash_regs[4] = &regs->gaddr4;
		priv->hash_regs[5] = &regs->gaddr5;
		priv->hash_regs[6] = &regs->gaddr6;
		priv->hash_regs[7] = &regs->gaddr7;
	}
}

static int __gfar_is_rx_idle(struct gfar_private *priv)
{
	u32 res;

	/* Normally TSEC should not hang on GRS commands, so we should
	 * actually wait for IEVENT_GRSC flag.
	 */
	if (!gfar_has_errata(priv, GFAR_ERRATA_A002))
		return 0;

	/* Read the eTSEC register at offset 0xD1C. If bits 7-14 are
	 * the same as bits 23-30, the eTSEC Rx is assumed to be idle
	 * and the Rx can be safely reset.
	 */
	res = gfar_read((void __iomem *)priv->gfargrp[0].regs + 0xd1c);
	res &= 0x7f807f80;
	if ((res & 0xffff) == (res >> 16))
		return 1;

	return 0;
}

/* Halt the receive and transmit queues */
static void gfar_halt_nodisable(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	unsigned int timeout;
	int stopped;

	gfar_ints_disable(priv);

	if (gfar_is_dma_stopped(priv))
		return;

	/* Stop the DMA, and wait for it to stop */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= (DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

retry:
	timeout = 1000;
	while (!(stopped = gfar_is_dma_stopped(priv)) && timeout) {
		cpu_relax();
		timeout--;
	}

	if (!timeout)
		stopped = gfar_is_dma_stopped(priv);

	if (!stopped && !gfar_is_rx_dma_stopped(priv) &&
	    !__gfar_is_rx_idle(priv))
		goto retry;
}

/* Halt the receive and transmit queues */
static void gfar_halt(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;

	/* Disable the Rx/Tx hw queues */
	gfar_write(&regs->rqueue, 0);
	gfar_write(&regs->tqueue, 0);

	mdelay(10);

	gfar_halt_nodisable(priv);

	/* Disable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval &= ~(MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);
}

static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
{
	struct txbd8 *txbdp;
	struct gfar_private *priv = netdev_priv(tx_queue->dev);
	int i, j;

	txbdp = tx_queue->tx_bd_base;

	for (i = 0; i < tx_queue->tx_ring_size; i++) {
		if (!tx_queue->tx_skbuff[i])
			continue;

		dma_unmap_single(priv->dev, be32_to_cpu(txbdp->bufPtr),
				 be16_to_cpu(txbdp->length), DMA_TO_DEVICE);
		txbdp->lstatus = 0;
		for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
		     j++) {
			txbdp++;
			dma_unmap_page(priv->dev, be32_to_cpu(txbdp->bufPtr),
				       be16_to_cpu(txbdp->length),
				       DMA_TO_DEVICE);
		}
		txbdp++;
		dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
		tx_queue->tx_skbuff[i] = NULL;
	}
	kfree(tx_queue->tx_skbuff);
	tx_queue->tx_skbuff = NULL;
}

static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
{
	int i;

	struct rxbd8 *rxbdp = rx_queue->rx_bd_base;

	dev_kfree_skb(rx_queue->skb);

	for (i = 0; i < rx_queue->rx_ring_size; i++) {
		struct gfar_rx_buff *rxb = &rx_queue->rx_buff[i];

		rxbdp->lstatus = 0;
		rxbdp->bufPtr = 0;
		rxbdp++;

		if (!rxb->page)
			continue;

		dma_unmap_page(rx_queue->dev, rxb->dma,
			       PAGE_SIZE, DMA_FROM_DEVICE);
		__free_page(rxb->page);

		rxb->page = NULL;
	}

	kfree(rx_queue->rx_buff);
	rx_queue->rx_buff = NULL;
}

/* If there are any tx skbs or rx skbs still around, free them.
 * Then free tx_skbuff and rx_skbuff
 */
static void free_skb_resources(struct gfar_private *priv)
{
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	/* Go through all the buffer descriptors and free their data buffers */
	for (i = 0; i < priv->num_tx_queues; i++) {
		struct netdev_queue *txq;

		tx_queue = priv->tx_queue[i];
		txq = netdev_get_tx_queue(tx_queue->dev, tx_queue->qindex);
		if (tx_queue->tx_skbuff)
			free_skb_tx_queue(tx_queue);
		netdev_tx_reset_queue(txq);
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		if (rx_queue->rx_buff)
			free_skb_rx_queue(rx_queue);
	}

	dma_free_coherent(priv->dev,
			  sizeof(struct txbd8) * priv->total_tx_ring_size +
			  sizeof(struct rxbd8) * priv->total_rx_ring_size,
			  priv->tx_queue[0]->tx_bd_base,
			  priv->tx_queue[0]->tx_bd_dma_base);
}

void stop_gfar(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);

	netif_tx_stop_all_queues(dev);

	smp_mb__before_atomic();
	set_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	disable_napi(priv);

	/* disable ints and gracefully shut down Rx/Tx DMA */
	gfar_halt(priv);

	phy_stop(dev->phydev);

	free_skb_resources(priv);
}

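/* Re-enable the Rx/Tx hw queues and DMA, clear the halt bits so the
 * DMA starts polling again, and unmask interrupts; counterpart of
 * gfar_halt().
 */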
static void gfar_start(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	u32 tempval;
	int i = 0;

	/* Enable Rx/Tx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
	gfar_write(&regs->tqueue, priv->tqueue);

	/* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
	gfar_write(&regs->dmactrl, tempval);

	for (i = 0; i < priv->num_grps; i++) {
		regs = priv->gfargrp[i].regs;
		/* Clear THLT/RHLT, so that the DMA starts polling now */
		gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
	}

	/* Enable Rx/Tx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= (MACCFG1_RX_EN | MACCFG1_TX_EN);
	gfar_write(&regs->maccfg1, tempval);

	gfar_ints_enable(priv);

	netif_trans_update(priv->ndev); /* prevent tx timeout */
}

static bool gfar_new_page(struct gfar_priv_rx_q *rxq, struct gfar_rx_buff *rxb)
{
	struct page *page;
	dma_addr_t addr;

	page = dev_alloc_page();
	if (unlikely(!page))
		return false;

	addr = dma_map_page(rxq->dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
	if (unlikely(dma_mapping_error(rxq->dev, addr))) {
		__free_page(page);

		return false;
	}

	rxb->dma = addr;
	rxb->page = page;
	rxb->page_offset = 0;

	return true;
}

static void gfar_rx_alloc_err(struct gfar_priv_rx_q *rx_queue)
{
	struct gfar_private *priv = netdev_priv(rx_queue->ndev);
	struct gfar_extra_stats *estats = &priv->extra_stats;

	netdev_err(rx_queue->ndev, "Can't alloc RX buffers\n");
	atomic64_inc(&estats->rx_alloc_err);
}

static void gfar_alloc_rx_buffs(struct gfar_priv_rx_q *rx_queue,
				int alloc_cnt)
{
	struct rxbd8 *bdp;
	struct gfar_rx_buff *rxb;
	int i;

	i = rx_queue->next_to_use;
	bdp = &rx_queue->rx_bd_base[i];
	rxb = &rx_queue->rx_buff[i];

	while (alloc_cnt--) {
		/* try reuse page */
		if (unlikely(!rxb->page)) {
			if (unlikely(!gfar_new_page(rx_queue, rxb))) {
				gfar_rx_alloc_err(rx_queue);
				break;
			}
		}

		/* Setup the new RxBD */
		gfar_init_rxbdp(rx_queue, bdp,
				rxb->dma + rxb->page_offset + RXBUF_ALIGNMENT);

		/* Update to the next pointer */
		bdp++;
		rxb++;

		if (unlikely(++i == rx_queue->rx_ring_size)) {
			i = 0;
			bdp = rx_queue->rx_bd_base;
			rxb = rx_queue->rx_buff;
		}
	}

	rx_queue->next_to_use = i;
	rx_queue->next_to_alloc = i;
}

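/* Initialize the Tx descriptor rings to an empty state and fill the
 * Rx rings with freshly mapped buffers.
 */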
static void gfar_init_bds(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;
	struct txbd8 *txbdp;
	u32 __iomem *rfbptr;
	int i, j;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		/* Initialize some variables in our dev structure */
		tx_queue->num_txbdfree = tx_queue->tx_ring_size;
		tx_queue->dirty_tx = tx_queue->tx_bd_base;
		tx_queue->cur_tx = tx_queue->tx_bd_base;
		tx_queue->skb_curtx = 0;
		tx_queue->skb_dirtytx = 0;

		/* Initialize Transmit Descriptor Ring */
		txbdp = tx_queue->tx_bd_base;
		for (j = 0; j < tx_queue->tx_ring_size; j++) {
			txbdp->lstatus = 0;
			txbdp->bufPtr = 0;
			txbdp++;
		}

		/* Set the last descriptor in the ring to indicate wrap */
		txbdp--;
		txbdp->status = cpu_to_be16(be16_to_cpu(txbdp->status) |
					    TXBD_WRAP);
	}

	rfbptr = &regs->rfbptr0;
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];

		rx_queue->next_to_clean = 0;
		rx_queue->next_to_use = 0;
		rx_queue->next_to_alloc = 0;

		/* make sure next_to_clean != next_to_use after this
		 * by leaving at least 1 unused descriptor
		 */
		gfar_alloc_rx_buffs(rx_queue, gfar_rxbd_unused(rx_queue));

		rx_queue->rfbptr = rfbptr;
		rfbptr += 2;
	}
}

static int gfar_alloc_skb_resources(struct net_device *ndev)
{
	void *vaddr;
	dma_addr_t addr;
	int i, j;
	struct gfar_private *priv = netdev_priv(ndev);
	struct device *dev = priv->dev;
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct gfar_priv_rx_q *rx_queue = NULL;

	priv->total_tx_ring_size = 0;
	for (i = 0; i < priv->num_tx_queues; i++)
		priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;

	priv->total_rx_ring_size = 0;
	for (i = 0; i < priv->num_rx_queues; i++)
		priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;

	/* Allocate memory for the buffer descriptors */
	vaddr = dma_alloc_coherent(dev,
				   (priv->total_tx_ring_size *
				    sizeof(struct txbd8)) +
				   (priv->total_rx_ring_size *
				    sizeof(struct rxbd8)),
				   &addr, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_bd_base = vaddr;
		tx_queue->tx_bd_dma_base = addr;
		tx_queue->dev = ndev;
		/* enet DMA only understands physical addresses */
		addr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
		vaddr += sizeof(struct txbd8) * tx_queue->tx_ring_size;
	}

	/* Start the rx descriptor ring where the tx ring leaves off */
	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_bd_base = vaddr;
		rx_queue->rx_bd_dma_base = addr;
		rx_queue->ndev = ndev;
		rx_queue->dev = dev;
		addr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
		vaddr += sizeof(struct rxbd8) * rx_queue->rx_ring_size;
	}

	/* Setup the skbuff rings */
	for (i = 0; i < priv->num_tx_queues; i++) {
		tx_queue = priv->tx_queue[i];
		tx_queue->tx_skbuff =
			kmalloc_array(tx_queue->tx_ring_size,
				      sizeof(*tx_queue->tx_skbuff),
				      GFP_KERNEL);
		if (!tx_queue->tx_skbuff)
			goto cleanup;

		for (j = 0; j < tx_queue->tx_ring_size; j++)
			tx_queue->tx_skbuff[j] = NULL;
	}

	for (i = 0; i < priv->num_rx_queues; i++) {
		rx_queue = priv->rx_queue[i];
		rx_queue->rx_buff = kcalloc(rx_queue->rx_ring_size,
					    sizeof(*rx_queue->rx_buff),
					    GFP_KERNEL);
		if (!rx_queue->rx_buff)
			goto cleanup;
	}

	gfar_init_bds(ndev);

	return 0;

cleanup:
	free_skb_resources(priv);
	return -ENOMEM;
}

/* Bring the controller up and running */
int startup_gfar(struct net_device *ndev)
{
	struct gfar_private *priv = netdev_priv(ndev);
	int err;

	gfar_mac_reset(priv);

	err = gfar_alloc_skb_resources(ndev);
	if (err)
		return err;

	gfar_init_tx_rx_base(priv);

	smp_mb__before_atomic();
	clear_bit(GFAR_DOWN, &priv->state);
	smp_mb__after_atomic();

	/* Start Rx/Tx DMA and enable the interrupts */
	gfar_start(priv);

	/* force link state update after mac reset */
	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phy_start(ndev->phydev);

	enable_napi(priv);

	netif_tx_wake_all_queues(ndev);

	return 0;
}

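/* Compute the MACCFG1 flow control bits, either from the user-forced
 * pause settings or from the result of pause autonegotiation.
 */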
static u32 gfar_get_flowctrl_cfg(struct gfar_private *priv)
{
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	u32 val = 0;

	if (!phydev->duplex)
		return val;

	if (!priv->pause_aneg_en) {
		if (priv->tx_pause_en)
			val |= MACCFG1_TX_FLOW;
		if (priv->rx_pause_en)
			val |= MACCFG1_RX_FLOW;
	} else {
		u16 lcl_adv, rmt_adv;
		u8 flowctrl;
		/* get link partner capabilities */
		rmt_adv = 0;
		if (phydev->pause)
			rmt_adv = LPA_PAUSE_CAP;
		if (phydev->asym_pause)
			rmt_adv |= LPA_PAUSE_ASYM;

		lcl_adv = linkmode_adv_to_lcl_adv_t(phydev->advertising);
		flowctrl = mii_resolve_flowctrl_fdx(lcl_adv, rmt_adv);
		if (flowctrl & FLOW_CTRL_TX)
			val |= MACCFG1_TX_FLOW;
		if (flowctrl & FLOW_CTRL_RX)
			val |= MACCFG1_RX_FLOW;
	}

	return val;
}

static noinline void gfar_update_link_state(struct gfar_private *priv)
{
	struct gfar __iomem *regs = priv->gfargrp[0].regs;
	struct net_device *ndev = priv->ndev;
	struct phy_device *phydev = ndev->phydev;
	struct gfar_priv_rx_q *rx_queue = NULL;
	int i;

	if (unlikely(test_bit(GFAR_RESETTING, &priv->state)))
		return;

	if (phydev->link) {
		u32 tempval1 = gfar_read(&regs->maccfg1);
		u32 tempval = gfar_read(&regs->maccfg2);
		u32 ecntrl = gfar_read(&regs->ecntrl);
		u32 tx_flow_oldval = (tempval1 & MACCFG1_TX_FLOW);

		if (phydev->duplex != priv->oldduplex) {
			if (!(phydev->duplex))
				tempval &= ~(MACCFG2_FULL_DUPLEX);
			else
				tempval |= MACCFG2_FULL_DUPLEX;

			priv->oldduplex = phydev->duplex;
		}

		if (phydev->speed != priv->oldspeed) {
			switch (phydev->speed) {
			case 1000:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_GMII);

				ecntrl &= ~(ECNTRL_R100);
				break;
			case 100:
			case 10:
				tempval =
				    ((tempval & ~(MACCFG2_IF)) | MACCFG2_MII);

				/* Reduced mode distinguishes
				 * between 10 and 100
				 */
				if (phydev->speed == SPEED_100)
					ecntrl |= ECNTRL_R100;
				else
					ecntrl &= ~(ECNTRL_R100);
				break;
			default:
				netif_warn(priv, link, priv->ndev,
					   "Ack!  Speed (%d) is not 10/100/1000!\n",
					   phydev->speed);
				break;
			}

			priv->oldspeed = phydev->speed;
		}

		tempval1 &= ~(MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
		tempval1 |= gfar_get_flowctrl_cfg(priv);

		/* Turn last free buffer recording on */
		if ((tempval1 & MACCFG1_TX_FLOW) && !tx_flow_oldval) {
			for (i = 0; i < priv->num_rx_queues; i++) {
				u32 bdp_dma;

				rx_queue = priv->rx_queue[i];
				bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
				gfar_write(rx_queue->rfbptr, bdp_dma);
			}

			priv->tx_actual_en = 1;
		}

		if (unlikely(!(tempval1 & MACCFG1_TX_FLOW) && tx_flow_oldval))
			priv->tx_actual_en = 0;

		gfar_write(&regs->maccfg1, tempval1);
		gfar_write(&regs->maccfg2, tempval);
		gfar_write(&regs->ecntrl, ecntrl);

		if (!priv->oldlink)
			priv->oldlink = 1;

	} else if (priv->oldlink) {
		priv->oldlink = 0;
		priv->oldspeed = 0;
		priv->oldduplex = -1;
	}

	if (netif_msg_link(priv))
		phy_print_status(phydev);
}

/* Called every time the controller might need to be made
 * aware of new link state.  The PHY code conveys this
 * information through variables in the phydev structure, and this
 * function converts those variables into the appropriate
 * register values, and can bring down the device if needed.
 */
static void adjust_link(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *phydev = dev->phydev;

	if (unlikely(phydev->link != priv->oldlink ||
		     (phydev->link && (phydev->duplex != priv->oldduplex ||
				       phydev->speed != priv->oldspeed))))
		gfar_update_link_state(priv);
}

/* Initialize TBI PHY interface for communicating with the
 * SERDES lynx PHY on the chip.  We communicate with this PHY
 * through the MDIO bus on each controller, treating it as a
 * "normal" PHY at the address found in the TBIPA register.  We assume
 * that the TBIPA register is valid.  Either the MDIO bus code will set
 * it to a value that doesn't conflict with other PHYs on the bus, or the
 * value doesn't matter, as there are no other PHYs on the bus.
 */
static void gfar_configure_serdes(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct phy_device *tbiphy;

	if (!priv->tbi_node) {
		dev_warn(&dev->dev, "error: SGMII mode requires that the "
				    "device tree specify a tbi-handle\n");
		return;
	}

	tbiphy = of_phy_find_device(priv->tbi_node);
	if (!tbiphy) {
		dev_err(&dev->dev, "error: Could not get TBI device\n");
		return;
	}

	/* If the link is already up, we must already be ok, and don't need to
	 * configure and reset the TBI<->SerDes link.  Maybe U-Boot configured
	 * everything for us?  Resetting it takes the link down and requires
	 * several seconds for it to come back.
	 */
	if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) {
		put_device(&tbiphy->mdio.dev);
		return;
	}

	/* Single clk mode, mii mode off(for serdes communication) */
	phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT);

	phy_write(tbiphy, MII_ADVERTISE,
		  ADVERTISE_1000XFULL | ADVERTISE_1000XPAUSE |
		  ADVERTISE_1000XPSE_ASYM);

	phy_write(tbiphy, MII_BMCR,
		  BMCR_ANENABLE | BMCR_ANRESTART | BMCR_FULLDPLX |
		  BMCR_SPEED1000);

	put_device(&tbiphy->mdio.dev);
}

/* Initializes driver's PHY state, and attaches to the PHY.
 * Returns 0 on success.
 */
static int init_phy(struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	phy_interface_t interface = priv->interface;
	struct phy_device *phydev;
	struct ethtool_keee edata;

	priv->oldlink = 0;
	priv->oldspeed = 0;
	priv->oldduplex = -1;

	phydev = of_phy_connect(dev, priv->phy_node, &adjust_link, 0,
				interface);
	if (!phydev) {
		dev_err(&dev->dev, "could not attach to PHY\n");
		return -ENODEV;
	}

	if (interface == PHY_INTERFACE_MODE_SGMII)
		gfar_configure_serdes(dev);

	if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_GIGABIT))
		phy_set_max_speed(phydev, SPEED_100);

	/* Add support for flow control */
	phy_support_asym_pause(phydev);

	/* disable EEE autoneg, EEE not supported by eTSEC */
	memset(&edata, 0, sizeof(struct ethtool_keee));
	phy_ethtool_set_eee(phydev, &edata);

	return 0;
}

static inline struct txfcb *gfar_add_fcb(struct sk_buff *skb)
{
	struct txfcb *fcb = skb_push(skb, GMAC_FCB_LEN);

	memset(fcb, 0, GMAC_FCB_LEN);

	return fcb;
}

static inline void gfar_tx_checksum(struct sk_buff *skb, struct txfcb *fcb,
				    int fcb_length)
{
	/* If we're here, it's a IP packet with a TCP or UDP
	 * payload.  We set it to checksum, using a pseudo-header
	 * we provide
	 */
	u8 flags = TXFCB_DEFAULT;

	/* Tell the controller what the protocol is
	 * And provide the already calculated phcs
	 */
	if (ip_hdr(skb)->protocol == IPPROTO_UDP) {
		flags |= TXFCB_UDP;
		fcb->phcs = (__force __be16)(udp_hdr(skb)->check);
	} else
		fcb->phcs = (__force __be16)(tcp_hdr(skb)->check);

	/* l3os is the distance between the start of the
	 * frame (skb->data) and the start of the IP hdr.
	 * l4os is the distance between the start of the
	 * l3 hdr and the l4 hdr
	 */
	fcb->l3os = (u8)(skb_network_offset(skb) - fcb_length);
	fcb->l4os = skb_network_header_len(skb);

	fcb->flags = flags;
}

static inline void gfar_tx_vlan(struct sk_buff *skb, struct txfcb *fcb)
{
	fcb->flags |= TXFCB_VLN;
	fcb->vlctl = cpu_to_be16(skb_vlan_tag_get(skb));
}

static inline struct txbd8 *skip_txbd(struct txbd8 *bdp, int stride,
				      struct txbd8 *base, int ring_size)
{
	struct txbd8 *new_bd = bdp + stride;

	return (new_bd >= (base + ring_size)) ? (new_bd - ring_size) : new_bd;
}

static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
				      int ring_size)
{
	return skip_txbd(bdp, 1, base, ring_size);
}

/* eTSEC12: csum generation not supported for some fcb offsets */
static inline bool gfar_csum_errata_12(struct gfar_private *priv,
				       unsigned long fcb_addr)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_12) &&
	       (fcb_addr % 0x20) > 0x18);
}

/* eTSEC76: csum generation for frames larger than 2500 may
 * cause excess delays before start of transmission
 */
static inline bool gfar_csum_errata_76(struct gfar_private *priv,
				       unsigned int len)
{
	return (gfar_has_errata(priv, GFAR_ERRATA_76) &&
	       (len > 2500));
}

/* This is called by the kernel when a frame is ready for transmission.
 * It is pointed to by the dev->hard_start_xmit function pointer
 */
static netdev_tx_t gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gfar_private *priv = netdev_priv(dev);
	struct gfar_priv_tx_q *tx_queue = NULL;
	struct netdev_queue *txq;
	struct gfar __iomem *regs = NULL;
	struct txfcb *fcb = NULL;
	struct txbd8 *txbdp, *txbdp_start, *base, *txbdp_tstamp = NULL;
	u32 lstatus;
	skb_frag_t *frag;
	int i, rq = 0;
	int do_tstamp, do_csum, do_vlan;
	u32 bufaddr;
	unsigned int nr_frags, nr_txbds, bytes_sent, fcb_len = 0;

	rq = skb->queue_mapping;
	tx_queue = priv->tx_queue[rq];
	txq = netdev_get_tx_queue(dev, rq);
	base = tx_queue->tx_bd_base;
	regs = tx_queue->grp->regs;

	do_csum = (CHECKSUM_PARTIAL == skb->ip_summed);
	do_vlan = skb_vlan_tag_present(skb);
	do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		    priv->hwts_tx_en;

	if (do_csum || do_vlan)
		fcb_len = GMAC_FCB_LEN;

	/* check if time stamp should be generated */
	if (unlikely(do_tstamp))
		fcb_len = GMAC_FCB_LEN + GMAC_TXPAL_LEN;

	/* make space for additional header when fcb is needed */
	if (fcb_len) {
		if (unlikely(skb_cow_head(skb, fcb_len))) {
			dev->stats.tx_errors++;
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}

	/* total number of fragments in the SKB */
	nr_frags = skb_shinfo(skb)->nr_frags;

	/* calculate the required number of TxBDs for this skb */
	if (unlikely(do_tstamp))
		nr_txbds = nr_frags + 2;
	else
		nr_txbds = nr_frags + 1;

	/* check if there is space to queue this packet */
	if (nr_txbds > tx_queue->num_txbdfree) {
		/* no space, stop the queue */
		netif_tx_stop_queue(txq);
		dev->stats.tx_fifo_errors++;
		return NETDEV_TX_BUSY;
	}

1811 /* Update transmit stats */
1812 bytes_sent = skb->len;
1813 tx_queue->stats.tx_bytes += bytes_sent;
1814 /* keep Tx bytes on wire for BQL accounting */
1815 GFAR_CB(skb)->bytes_sent = bytes_sent;
1816 tx_queue->stats.tx_packets++;
1817
1818 txbdp = txbdp_start = tx_queue->cur_tx;
1819 lstatus = be32_to_cpu(txbdp->lstatus);
1820
1821 /* Add TxPAL between FCB and frame if required */
1822 if (unlikely(do_tstamp)) {
1823 skb_push(skb, GMAC_TXPAL_LEN);
1824 memset(skb->data, 0, GMAC_TXPAL_LEN);
1825 }
1826
1827 /* Add TxFCB if required */
1828 if (fcb_len) {
1829 fcb = gfar_add_fcb(skb);
1830 lstatus |= BD_LFLAG(TXBD_TOE);
1831 }
1832
1833 /* Set up checksumming */
1834 if (do_csum) {
1835 gfar_tx_checksum(skb, fcb, fcb_len);
1836
1837 if (unlikely(gfar_csum_errata_12(priv, (unsigned long)fcb)) ||
1838 unlikely(gfar_csum_errata_76(priv, skb->len))) {
1839 __skb_pull(skb, GMAC_FCB_LEN);
1840 skb_checksum_help(skb);
1841 if (do_vlan || do_tstamp) {
1842 /* put back a new fcb for vlan/tstamp TOE */
1843 fcb = gfar_add_fcb(skb);
1844 } else {
1845 /* Tx TOE not used */
1846 lstatus &= ~(BD_LFLAG(TXBD_TOE));
1847 fcb = NULL;
1848 }
1849 }
1850 }
1851
1852 if (do_vlan)
1853 gfar_tx_vlan(skb, fcb);
1854
1855 bufaddr = dma_map_single(priv->dev, skb->data, skb_headlen(skb),
1856 DMA_TO_DEVICE);
1857 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1858 goto dma_map_err;
1859
1860 txbdp_start->bufPtr = cpu_to_be32(bufaddr);
1861
1862 /* Time stamp insertion requires one additional TxBD */
1863 if (unlikely(do_tstamp))
1864 txbdp_tstamp = txbdp = next_txbd(txbdp, base,
1865 tx_queue->tx_ring_size);
1866
1867 if (likely(!nr_frags)) {
1868 if (likely(!do_tstamp))
1869 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1870 } else {
1871 u32 lstatus_start = lstatus;
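		/* The start BD's lstatus is written back only after the
		 * wmb() below, so the controller never sees TXBD_READY on
		 * a partially built descriptor chain.
		 */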
1872
1873 /* Place the fragment addresses and lengths into the TxBDs */
1874 frag = &skb_shinfo(skb)->frags[0];
1875 for (i = 0; i < nr_frags; i++, frag++) {
1876 unsigned int size;
1877
1878 /* Point at the next BD, wrapping as needed */
1879 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1880
1881 size = skb_frag_size(frag);
1882
1883 lstatus = be32_to_cpu(txbdp->lstatus) | size |
1884 BD_LFLAG(TXBD_READY);
1885
1886 /* Handle the last BD specially */
1887 if (i == nr_frags - 1)
1888 lstatus |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1889
1890 bufaddr = skb_frag_dma_map(priv->dev, frag, 0,
1891 size, DMA_TO_DEVICE);
1892 if (unlikely(dma_mapping_error(priv->dev, bufaddr)))
1893 goto dma_map_err;
1894
1895 /* set the TxBD length and buffer pointer */
1896 txbdp->bufPtr = cpu_to_be32(bufaddr);
1897 txbdp->lstatus = cpu_to_be32(lstatus);
1898 }
1899
1900 lstatus = lstatus_start;
1901 }
1902
1903 /* If time stamping is requested one additional TxBD must be set up. The
1904 * first TxBD points to the FCB and must have a data length of
1905 * GMAC_FCB_LEN. The second TxBD points to the actual frame data with
1906 * the full frame length.
1907 */
1908 if (unlikely(do_tstamp)) {
1909 u32 lstatus_ts = be32_to_cpu(txbdp_tstamp->lstatus);
1910
1911 bufaddr = be32_to_cpu(txbdp_start->bufPtr);
1912 bufaddr += fcb_len;
1913
1914 lstatus_ts |= BD_LFLAG(TXBD_READY) |
1915 (skb_headlen(skb) - fcb_len);
1916 if (!nr_frags)
1917 lstatus_ts |= BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
1918
1919 txbdp_tstamp->bufPtr = cpu_to_be32(bufaddr);
1920 txbdp_tstamp->lstatus = cpu_to_be32(lstatus_ts);
1921 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | GMAC_FCB_LEN;
1922
1923 /* Setup tx hardware time stamping */
1924 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1925 fcb->ptp = 1;
1926 } else {
1927 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1928 }
1929
1930 skb_tx_timestamp(skb);
1931 netdev_tx_sent_queue(txq, bytes_sent);
1932
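	/* Make sure the FCB and all fragment BDs are visible in memory
	 * before the first BD is marked ready below.
	 */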
1933 gfar_wmb();
1934
1935 txbdp_start->lstatus = cpu_to_be32(lstatus);
1936
1937 gfar_wmb(); /* force lstatus write before tx_skbuff */
1938
1939 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1940
1941 /* Update the current skb pointer to the next entry we will use
1942 * (wrapping if necessary)
1943 */
1944 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1945 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1946
1947 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1948
	/* We can work in parallel with gfar_clean_tx_ring(), except
	 * when modifying num_txbdfree. Note that we did not take the lock
	 * when reading num_txbdfree to check for available space, because
	 * outside of this function it can only grow.
	 */
1954 spin_lock_bh(&tx_queue->txlock);
1955 /* reduce TxBD free count */
1956 tx_queue->num_txbdfree -= (nr_txbds);
1957 spin_unlock_bh(&tx_queue->txlock);
1958
1959 /* If the next BD still needs to be cleaned up, then the bds
1960 * are full. We need to tell the kernel to stop sending us stuff.
1961 */
1962 if (!tx_queue->num_txbdfree) {
1963 netif_tx_stop_queue(txq);
1964
1965 dev->stats.tx_fifo_errors++;
1966 }
1967
1968 /* Tell the DMA to go go go */
	gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1970
1971 return NETDEV_TX_OK;
1972
1973 dma_map_err:
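	/* Unwind: clear TXBD_READY on any fragment BDs we already set up
	 * and unmap their pages before dropping the skb.
	 */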
1974 txbdp = next_txbd(txbdp_start, base, tx_queue->tx_ring_size);
1975 if (do_tstamp)
1976 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1977 for (i = 0; i < nr_frags; i++) {
1978 lstatus = be32_to_cpu(txbdp->lstatus);
1979 if (!(lstatus & BD_LFLAG(TXBD_READY)))
1980 break;
1981
1982 lstatus &= ~BD_LFLAG(TXBD_READY);
1983 txbdp->lstatus = cpu_to_be32(lstatus);
1984 bufaddr = be32_to_cpu(txbdp->bufPtr);
1985 dma_unmap_page(priv->dev, bufaddr, be16_to_cpu(txbdp->length),
1986 DMA_TO_DEVICE);
1987 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1988 }
1989 gfar_wmb();
1990 dev_kfree_skb_any(skb);
1991 return NETDEV_TX_OK;
1992 }
1993
1994 /* Changes the mac address if the controller is not running. */
static int gfar_set_mac_address(struct net_device *dev)
1996 {
1997 gfar_set_mac_for_addr(dev, 0, dev->dev_addr);
1998
1999 return 0;
2000 }
2001
static int gfar_change_mtu(struct net_device *dev, int new_mtu)
2003 {
2004 struct gfar_private *priv = netdev_priv(dev);
2005
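	/* Serialize with any concurrent reset/reconfiguration of the
	 * device (same GFAR_RESETTING protocol as reset_gfar()).
	 */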
2006 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2007 cpu_relax();
2008
2009 if (dev->flags & IFF_UP)
2010 stop_gfar(dev);
2011
2012 WRITE_ONCE(dev->mtu, new_mtu);
2013
2014 if (dev->flags & IFF_UP)
2015 startup_gfar(dev);
2016
2017 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2018
2019 return 0;
2020 }
2021
static void reset_gfar(struct net_device *ndev)
2023 {
2024 struct gfar_private *priv = netdev_priv(ndev);
2025
2026 while (test_and_set_bit_lock(GFAR_RESETTING, &priv->state))
2027 cpu_relax();
2028
2029 stop_gfar(ndev);
2030 startup_gfar(ndev);
2031
2032 clear_bit_unlock(GFAR_RESETTING, &priv->state);
2033 }
2034
2035 /* gfar_reset_task gets scheduled when a packet has not been
2036 * transmitted after a set amount of time.
2037 * For now, assume that clearing out all the structures, and
2038 * starting over will fix the problem.
2039 */
static void gfar_reset_task(struct work_struct *work)
2041 {
2042 struct gfar_private *priv = container_of(work, struct gfar_private,
2043 reset_task);
2044 reset_gfar(priv->ndev);
2045 }
2046
static void gfar_timeout(struct net_device *dev, unsigned int txqueue)
2048 {
2049 struct gfar_private *priv = netdev_priv(dev);
2050
2051 dev->stats.tx_errors++;
2052 schedule_work(&priv->reset_task);
2053 }
2054
static int gfar_hwtstamp_set(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config,
			     struct netlink_ext_ack *extack)
2058 {
2059 struct gfar_private *priv = netdev_priv(netdev);
2060
2061 switch (config->tx_type) {
2062 case HWTSTAMP_TX_OFF:
2063 priv->hwts_tx_en = 0;
2064 break;
2065 case HWTSTAMP_TX_ON:
2066 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2067 return -ERANGE;
2068 priv->hwts_tx_en = 1;
2069 break;
2070 default:
2071 return -ERANGE;
2072 }
2073
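	/* Changing the Rx timestamping state is assumed to change the Rx
	 * frame layout (FCB/padding), so the controller is reset below to
	 * apply it.
	 */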
2074 switch (config->rx_filter) {
2075 case HWTSTAMP_FILTER_NONE:
2076 if (priv->hwts_rx_en) {
2077 priv->hwts_rx_en = 0;
2078 reset_gfar(netdev);
2079 }
2080 break;
2081 default:
2082 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER))
2083 return -ERANGE;
2084 if (!priv->hwts_rx_en) {
2085 priv->hwts_rx_en = 1;
2086 reset_gfar(netdev);
2087 }
2088 config->rx_filter = HWTSTAMP_FILTER_ALL;
2089 break;
2090 }
2091
2092 return 0;
2093 }
2094
static int gfar_hwtstamp_get(struct net_device *netdev,
			     struct kernel_hwtstamp_config *config)
2097 {
2098 struct gfar_private *priv = netdev_priv(netdev);
2099
2100 config->tx_type = priv->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
2101 config->rx_filter = priv->hwts_rx_en ? HWTSTAMP_FILTER_ALL :
2102 HWTSTAMP_FILTER_NONE;
2103
2104 return 0;
2105 }
2106
/* Reclaim completed Tx descriptors; called from the Tx NAPI poll */
static void gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
2109 {
2110 struct net_device *dev = tx_queue->dev;
2111 struct netdev_queue *txq;
2112 struct gfar_private *priv = netdev_priv(dev);
2113 struct txbd8 *bdp, *next = NULL;
2114 struct txbd8 *lbdp = NULL;
2115 struct txbd8 *base = tx_queue->tx_bd_base;
2116 struct sk_buff *skb;
2117 int skb_dirtytx;
2118 int tx_ring_size = tx_queue->tx_ring_size;
2119 int frags = 0, nr_txbds = 0;
2120 int i;
2121 int howmany = 0;
2122 int tqi = tx_queue->qindex;
2123 unsigned int bytes_sent = 0;
2124 u32 lstatus;
2125 size_t buflen;
2126
2127 txq = netdev_get_tx_queue(dev, tqi);
2128 bdp = tx_queue->dirty_tx;
2129 skb_dirtytx = tx_queue->skb_dirtytx;
2130
2131 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2132 bool do_tstamp;
2133
2134 do_tstamp = (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
2135 priv->hwts_tx_en;
2136
2137 frags = skb_shinfo(skb)->nr_frags;
2138
2139 /* When time stamping, one additional TxBD must be freed.
2140 * Also, we need to dma_unmap_single() the TxPAL.
2141 */
2142 if (unlikely(do_tstamp))
2143 nr_txbds = frags + 2;
2144 else
2145 nr_txbds = frags + 1;
2146
2147 lbdp = skip_txbd(bdp, nr_txbds - 1, base, tx_ring_size);
2148
2149 lstatus = be32_to_cpu(lbdp->lstatus);
2150
2151 /* Only clean completed frames */
2152 if ((lstatus & BD_LFLAG(TXBD_READY)) &&
2153 (lstatus & BD_LENGTH_MASK))
2154 break;
2155
2156 if (unlikely(do_tstamp)) {
2157 next = next_txbd(bdp, base, tx_ring_size);
2158 buflen = be16_to_cpu(next->length) +
2159 GMAC_FCB_LEN + GMAC_TXPAL_LEN;
2160 } else
2161 buflen = be16_to_cpu(bdp->length);
2162
2163 dma_unmap_single(priv->dev, be32_to_cpu(bdp->bufPtr),
2164 buflen, DMA_TO_DEVICE);
2165
2166 if (unlikely(do_tstamp)) {
2167 struct skb_shared_hwtstamps shhwtstamps;
2168 __be64 *ns;
2169
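			/* The hardware is assumed to place the Tx timestamp
			 * in the frame buffer; locate it 16 bytes in,
			 * aligned down to an 8-byte boundary.
			 */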
2170 ns = (__be64 *)(((uintptr_t)skb->data + 0x10) & ~0x7UL);
2171
2172 memset(&shhwtstamps, 0, sizeof(shhwtstamps));
2173 shhwtstamps.hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2174 skb_pull(skb, GMAC_FCB_LEN + GMAC_TXPAL_LEN);
2175 skb_tstamp_tx(skb, &shhwtstamps);
2176 gfar_clear_txbd_status(bdp);
2177 bdp = next;
2178 }
2179
2180 gfar_clear_txbd_status(bdp);
2181 bdp = next_txbd(bdp, base, tx_ring_size);
2182
2183 for (i = 0; i < frags; i++) {
2184 dma_unmap_page(priv->dev, be32_to_cpu(bdp->bufPtr),
2185 be16_to_cpu(bdp->length),
2186 DMA_TO_DEVICE);
2187 gfar_clear_txbd_status(bdp);
2188 bdp = next_txbd(bdp, base, tx_ring_size);
2189 }
2190
2191 bytes_sent += GFAR_CB(skb)->bytes_sent;
2192
2193 dev_kfree_skb_any(skb);
2194
2195 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
2196
2197 skb_dirtytx = (skb_dirtytx + 1) &
2198 TX_RING_MOD_MASK(tx_ring_size);
2199
2200 howmany++;
2201 spin_lock(&tx_queue->txlock);
2202 tx_queue->num_txbdfree += nr_txbds;
2203 spin_unlock(&tx_queue->txlock);
2204 }
2205
2206 /* If we freed a buffer, we can restart transmission, if necessary */
2207 if (tx_queue->num_txbdfree &&
2208 netif_tx_queue_stopped(txq) &&
2209 !(test_bit(GFAR_DOWN, &priv->state)))
2210 netif_wake_subqueue(priv->ndev, tqi);
2211
2212 /* Update dirty indicators */
2213 tx_queue->skb_dirtytx = skb_dirtytx;
2214 tx_queue->dirty_tx = bdp;
2215
2216 netdev_tx_completed_queue(txq, howmany, bytes_sent);
2217 }
2218
static void count_errors(u32 lstatus, struct net_device *ndev)
2220 {
2221 struct gfar_private *priv = netdev_priv(ndev);
2222 struct net_device_stats *stats = &ndev->stats;
2223 struct gfar_extra_stats *estats = &priv->extra_stats;
2224
2225 /* If the packet was truncated, none of the other errors matter */
2226 if (lstatus & BD_LFLAG(RXBD_TRUNCATED)) {
2227 stats->rx_length_errors++;
2228
2229 atomic64_inc(&estats->rx_trunc);
2230
2231 return;
2232 }
2233 /* Count the errors, if there were any */
2234 if (lstatus & BD_LFLAG(RXBD_LARGE | RXBD_SHORT)) {
2235 stats->rx_length_errors++;
2236
2237 if (lstatus & BD_LFLAG(RXBD_LARGE))
2238 atomic64_inc(&estats->rx_large);
2239 else
2240 atomic64_inc(&estats->rx_short);
2241 }
2242 if (lstatus & BD_LFLAG(RXBD_NONOCTET)) {
2243 stats->rx_frame_errors++;
2244 atomic64_inc(&estats->rx_nonoctet);
2245 }
2246 if (lstatus & BD_LFLAG(RXBD_CRCERR)) {
2247 atomic64_inc(&estats->rx_crcerr);
2248 stats->rx_crc_errors++;
2249 }
2250 if (lstatus & BD_LFLAG(RXBD_OVERRUN)) {
2251 atomic64_inc(&estats->rx_overrun);
2252 stats->rx_over_errors++;
2253 }
2254 }
2255
static irqreturn_t gfar_receive(int irq, void *grp_id)
2257 {
2258 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2259 unsigned long flags;
2260 u32 imask, ievent;
2261
2262 ievent = gfar_read(&grp->regs->ievent);
2263
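	/* FGPI is the filer general purpose interrupt used for
	 * wake-on-filer (see gfar_start_wol_filer()); just acknowledge it,
	 * there is no Rx ring work to schedule for it.
	 */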
2264 if (unlikely(ievent & IEVENT_FGPI)) {
2265 gfar_write(&grp->regs->ievent, IEVENT_FGPI);
2266 return IRQ_HANDLED;
2267 }
2268
2269 if (likely(napi_schedule_prep(&grp->napi_rx))) {
2270 spin_lock_irqsave(&grp->grplock, flags);
2271 imask = gfar_read(&grp->regs->imask);
2272 imask &= IMASK_RX_DISABLED | grp->priv->rmon_overflow.imask;
2273 gfar_write(&grp->regs->imask, imask);
2274 spin_unlock_irqrestore(&grp->grplock, flags);
2275 __napi_schedule(&grp->napi_rx);
2276 } else {
2277 /* Clear IEVENT, so interrupts aren't called again
2278 * because of the packets that have already arrived.
2279 */
2280 gfar_write(&grp->regs->ievent, IEVENT_RX_MASK);
2281 }
2282
2283 return IRQ_HANDLED;
2284 }
2285
2286 /* Interrupt Handler for Transmit complete */
static irqreturn_t gfar_transmit(int irq, void *grp_id)
2288 {
2289 struct gfar_priv_grp *grp = (struct gfar_priv_grp *)grp_id;
2290 unsigned long flags;
2291 u32 imask;
2292
2293 if (likely(napi_schedule_prep(&grp->napi_tx))) {
2294 spin_lock_irqsave(&grp->grplock, flags);
2295 imask = gfar_read(&grp->regs->imask);
2296 imask &= IMASK_TX_DISABLED | grp->priv->rmon_overflow.imask;
2297 gfar_write(&grp->regs->imask, imask);
2298 spin_unlock_irqrestore(&grp->grplock, flags);
2299 __napi_schedule(&grp->napi_tx);
2300 } else {
2301 /* Clear IEVENT, so interrupts aren't called again
2302 * because of the packets that have already arrived.
2303 */
2304 gfar_write(&grp->regs->ievent, IEVENT_TX_MASK);
2305 }
2306
2307 return IRQ_HANDLED;
2308 }
2309
static bool gfar_add_rx_frag(struct gfar_rx_buff *rxb, u32 lstatus,
			     struct sk_buff *skb, bool first)
2312 {
2313 int size = lstatus & BD_LENGTH_MASK;
2314 struct page *page = rxb->page;
2315
2316 if (likely(first)) {
2317 skb_put(skb, size);
2318 } else {
		/* the last fragment's length contains the full frame length */
2320 if (lstatus & BD_LFLAG(RXBD_LAST))
2321 size -= skb->len;
2322
2323 WARN(size < 0, "gianfar: rx fragment size underflow");
2324 if (size < 0)
2325 return false;
2326
2327 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
2328 rxb->page_offset + RXBUF_ALIGNMENT,
2329 size, GFAR_RXB_TRUESIZE);
2330 }
2331
2332 /* try reuse page */
2333 if (unlikely(page_count(page) != 1 || page_is_pfmemalloc(page)))
2334 return false;
2335
2336 /* change offset to the other half */
2337 rxb->page_offset ^= GFAR_RXB_TRUESIZE;
2338
2339 page_ref_inc(page);
2340
2341 return true;
2342 }
2343
static void gfar_reuse_rx_page(struct gfar_priv_rx_q *rxq,
			       struct gfar_rx_buff *old_rxb)
2346 {
2347 struct gfar_rx_buff *new_rxb;
2348 u16 nta = rxq->next_to_alloc;
2349
2350 new_rxb = &rxq->rx_buff[nta];
2351
2352 /* find next buf that can reuse a page */
2353 nta++;
2354 rxq->next_to_alloc = (nta < rxq->rx_ring_size) ? nta : 0;
2355
2356 /* copy page reference */
2357 *new_rxb = *old_rxb;
2358
2359 /* sync for use by the device */
2360 dma_sync_single_range_for_device(rxq->dev, old_rxb->dma,
2361 old_rxb->page_offset,
2362 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2363 }
2364
static struct sk_buff *gfar_get_next_rxbuff(struct gfar_priv_rx_q *rx_queue,
					    u32 lstatus, struct sk_buff *skb)
2367 {
2368 struct gfar_rx_buff *rxb = &rx_queue->rx_buff[rx_queue->next_to_clean];
2369 struct page *page = rxb->page;
2370 bool first = false;
2371
2372 if (likely(!skb)) {
2373 void *buff_addr = page_address(page) + rxb->page_offset;
2374
2375 skb = build_skb(buff_addr, GFAR_SKBFRAG_SIZE);
2376 if (unlikely(!skb)) {
2377 gfar_rx_alloc_err(rx_queue);
2378 return NULL;
2379 }
2380 skb_reserve(skb, RXBUF_ALIGNMENT);
2381 first = true;
2382 }
2383
2384 dma_sync_single_range_for_cpu(rx_queue->dev, rxb->dma, rxb->page_offset,
2385 GFAR_RXB_TRUESIZE, DMA_FROM_DEVICE);
2386
2387 if (gfar_add_rx_frag(rxb, lstatus, skb, first)) {
2388 /* reuse the free half of the page */
2389 gfar_reuse_rx_page(rx_queue, rxb);
2390 } else {
2391 /* page cannot be reused, unmap it */
2392 dma_unmap_page(rx_queue->dev, rxb->dma,
2393 PAGE_SIZE, DMA_FROM_DEVICE);
2394 }
2395
2396 /* clear rxb content */
2397 rxb->page = NULL;
2398
2399 return skb;
2400 }
2401
static inline void gfar_rx_checksum(struct sk_buff *skb, struct rxfcb *fcb)
2403 {
	/* If valid headers were found, and valid sums
	 * were verified, then we tell the kernel that no
	 * checksumming is necessary. Otherwise, leave the checksum
	 * unverified so the stack checks it itself.
	 */
2408 if ((be16_to_cpu(fcb->flags) & RXFCB_CSUM_MASK) ==
2409 (RXFCB_CIP | RXFCB_CTU))
2410 skb->ip_summed = CHECKSUM_UNNECESSARY;
2411 else
2412 skb_checksum_none_assert(skb);
2413 }
2414
2415 /* gfar_process_frame() -- handle one incoming packet if skb isn't NULL. */
static void gfar_process_frame(struct net_device *ndev, struct sk_buff *skb)
2417 {
2418 struct gfar_private *priv = netdev_priv(ndev);
2419 struct rxfcb *fcb = NULL;
2420
2421 /* fcb is at the beginning if exists */
2422 fcb = (struct rxfcb *)skb->data;
2423
2424 /* Remove the FCB from the skb
2425 * Remove the padded bytes, if there are any
2426 */
2427 if (priv->uses_rxfcb)
2428 skb_pull(skb, GMAC_FCB_LEN);
2429
2430 /* Get receive timestamp from the skb */
2431 if (priv->hwts_rx_en) {
2432 struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
2433 __be64 *ns = (__be64 *)skb->data;
2434
2435 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
2436 shhwtstamps->hwtstamp = ns_to_ktime(be64_to_cpu(*ns));
2437 }
2438
2439 if (priv->padding)
2440 skb_pull(skb, priv->padding);
2441
2442 /* Trim off the FCS */
2443 pskb_trim(skb, skb->len - ETH_FCS_LEN);
2444
2445 if (ndev->features & NETIF_F_RXCSUM)
2446 gfar_rx_checksum(skb, fcb);
2447
	/* There's a need to check for NETIF_F_HW_VLAN_CTAG_RX here.
	 * Even if vlan rx accel is disabled, on some chips
	 * RXFCB_VLN is pseudo-randomly set.
	 */
2452 if (ndev->features & NETIF_F_HW_VLAN_CTAG_RX &&
2453 be16_to_cpu(fcb->flags) & RXFCB_VLN)
2454 __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
2455 be16_to_cpu(fcb->vlctl));
2456 }
2457
2458 /* gfar_clean_rx_ring() -- Processes each frame in the rx ring
2459 * until the budget/quota has been reached. Returns the number
2460 * of frames handled
2461 */
static int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue,
			      int rx_work_limit)
2464 {
2465 struct net_device *ndev = rx_queue->ndev;
2466 struct gfar_private *priv = netdev_priv(ndev);
2467 struct rxbd8 *bdp;
2468 int i, howmany = 0;
2469 struct sk_buff *skb = rx_queue->skb;
2470 int cleaned_cnt = gfar_rxbd_unused(rx_queue);
2471 unsigned int total_bytes = 0, total_pkts = 0;
2472
2473 /* Get the first full descriptor */
2474 i = rx_queue->next_to_clean;
2475
2476 while (rx_work_limit--) {
2477 u32 lstatus;
2478
2479 if (cleaned_cnt >= GFAR_RX_BUFF_ALLOC) {
2480 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2481 cleaned_cnt = 0;
2482 }
2483
2484 bdp = &rx_queue->rx_bd_base[i];
2485 lstatus = be32_to_cpu(bdp->lstatus);
2486 if (lstatus & BD_LFLAG(RXBD_EMPTY))
2487 break;
2488
2489 /* lost RXBD_LAST descriptor due to overrun */
2490 if (skb &&
2491 (lstatus & BD_LFLAG(RXBD_FIRST))) {
2492 /* discard faulty buffer */
2493 dev_kfree_skb(skb);
2494 skb = NULL;
2495 rx_queue->stats.rx_dropped++;
2496
2497 /* can continue normally */
2498 }
2499
2500 /* order rx buffer descriptor reads */
2501 rmb();
2502
2503 /* fetch next to clean buffer from the ring */
2504 skb = gfar_get_next_rxbuff(rx_queue, lstatus, skb);
2505 if (unlikely(!skb))
2506 break;
2507
2508 cleaned_cnt++;
2509 howmany++;
2510
2511 if (unlikely(++i == rx_queue->rx_ring_size))
2512 i = 0;
2513
2514 rx_queue->next_to_clean = i;
2515
2516 /* fetch next buffer if not the last in frame */
2517 if (!(lstatus & BD_LFLAG(RXBD_LAST)))
2518 continue;
2519
2520 if (unlikely(lstatus & BD_LFLAG(RXBD_ERR))) {
2521 count_errors(lstatus, ndev);
2522
2523 /* discard faulty buffer */
2524 dev_kfree_skb(skb);
2525 skb = NULL;
2526 rx_queue->stats.rx_dropped++;
2527 continue;
2528 }
2529
2530 gfar_process_frame(ndev, skb);
2531
2532 /* Increment the number of packets */
2533 total_pkts++;
2534 total_bytes += skb->len;
2535
2536 skb_record_rx_queue(skb, rx_queue->qindex);
2537
2538 skb->protocol = eth_type_trans(skb, ndev);
2539
2540 /* Send the packet up the stack */
2541 napi_gro_receive(&rx_queue->grp->napi_rx, skb);
2542
2543 skb = NULL;
2544 }
2545
2546 /* Store incomplete frames for completion */
2547 rx_queue->skb = skb;
2548
2549 rx_queue->stats.rx_packets += total_pkts;
2550 rx_queue->stats.rx_bytes += total_bytes;
2551
2552 if (cleaned_cnt)
2553 gfar_alloc_rx_buffs(rx_queue, cleaned_cnt);
2554
2555 /* Update Last Free RxBD pointer for LFC */
2556 if (unlikely(priv->tx_actual_en)) {
2557 u32 bdp_dma = gfar_rxbd_dma_lastfree(rx_queue);
2558
2559 gfar_write(rx_queue->rfbptr, bdp_dma);
2560 }
2561
2562 return howmany;
2563 }
2564
static int gfar_poll_rx_sq(struct napi_struct *napi, int budget)
2566 {
2567 struct gfar_priv_grp *gfargrp =
2568 container_of(napi, struct gfar_priv_grp, napi_rx);
2569 struct gfar __iomem *regs = gfargrp->regs;
2570 struct gfar_priv_rx_q *rx_queue = gfargrp->rx_queue;
2571 int work_done = 0;
2572
2573 /* Clear IEVENT, so interrupts aren't called again
2574 * because of the packets that have already arrived
2575 */
	gfar_write(&regs->ievent, IEVENT_RX_MASK);
2577
2578 work_done = gfar_clean_rx_ring(rx_queue, budget);
2579
2580 if (work_done < budget) {
2581 u32 imask;
2582 napi_complete_done(napi, work_done);
2583 /* Clear the halt bit in RSTAT */
		gfar_write(&regs->rstat, gfargrp->rstat);
2585
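		/* Budget not exhausted: re-enable the Rx interrupts under
		 * the group lock.
		 */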
2586 spin_lock_irq(&gfargrp->grplock);
		imask = gfar_read(&regs->imask);
		imask |= IMASK_RX_DEFAULT;
		gfar_write(&regs->imask, imask);
2590 spin_unlock_irq(&gfargrp->grplock);
2591 }
2592
2593 return work_done;
2594 }
2595
static int gfar_poll_tx_sq(struct napi_struct *napi, int budget)
2597 {
2598 struct gfar_priv_grp *gfargrp =
2599 container_of(napi, struct gfar_priv_grp, napi_tx);
2600 struct gfar __iomem *regs = gfargrp->regs;
2601 struct gfar_priv_tx_q *tx_queue = gfargrp->tx_queue;
2602 u32 imask;
2603
2604 /* Clear IEVENT, so interrupts aren't called again
2605 * because of the packets that have already arrived
2606 */
	gfar_write(&regs->ievent, IEVENT_TX_MASK);
2608
2609 /* run Tx cleanup to completion */
2610 if (tx_queue->tx_skbuff[tx_queue->skb_dirtytx])
2611 gfar_clean_tx_ring(tx_queue);
2612
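	/* Tx-only poll: the cleanup above is not budgeted, so complete NAPI
	 * unconditionally and re-enable the Tx interrupts.
	 */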
2613 napi_complete(napi);
2614
2615 spin_lock_irq(&gfargrp->grplock);
	imask = gfar_read(&regs->imask);
	imask |= IMASK_TX_DEFAULT;
	gfar_write(&regs->imask, imask);
2619 spin_unlock_irq(&gfargrp->grplock);
2620
2621 return 0;
2622 }
2623
2624 /* GFAR error interrupt handler */
static irqreturn_t gfar_error(int irq, void *grp_id)
2626 {
2627 struct gfar_priv_grp *gfargrp = grp_id;
2628 struct gfar __iomem *regs = gfargrp->regs;
	struct gfar_private *priv = gfargrp->priv;
	struct net_device *dev = priv->ndev;

	/* Save ievent for future reference */
	u32 events = gfar_read(&regs->ievent);

	/* Clear IEVENT */
	gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2637
2638 /* Magic Packet is not an error. */
2639 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
2640 (events & IEVENT_MAG))
2641 events &= ~IEVENT_MAG;
2642
	/* Report the error details when error messaging is enabled */
	if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
		netdev_dbg(dev,
			   "error interrupt (ievent=0x%08x imask=0x%08x)\n",
			   events, gfar_read(&regs->imask));
2648
2649 /* Update the error counters */
2650 if (events & IEVENT_TXE) {
2651 dev->stats.tx_errors++;
2652
2653 if (events & IEVENT_LC)
2654 dev->stats.tx_window_errors++;
2655 if (events & IEVENT_CRL)
2656 dev->stats.tx_aborted_errors++;
2657 if (events & IEVENT_XFUN) {
2658 netif_dbg(priv, tx_err, dev,
2659 "TX FIFO underrun, packet dropped\n");
2660 dev->stats.tx_dropped++;
2661 atomic64_inc(&priv->extra_stats.tx_underrun);
2662
2663 schedule_work(&priv->reset_task);
2664 }
2665 netif_dbg(priv, tx_err, dev, "Transmit Error\n");
2666 }
2667 if (events & IEVENT_MSRO) {
		struct rmon_mib __iomem *rmon = &regs->rmon;
2669 u32 car;
2670
2671 spin_lock(&priv->rmon_overflow.lock);
2672 car = gfar_read(&rmon->car1) & CAR1_C1RDR;
2673 if (car) {
2674 priv->rmon_overflow.rdrp++;
2675 gfar_write(&rmon->car1, car);
2676 }
2677 spin_unlock(&priv->rmon_overflow.lock);
2678 }
2679 if (events & IEVENT_BSY) {
2680 dev->stats.rx_over_errors++;
2681 atomic64_inc(&priv->extra_stats.rx_bsy);
2682
2683 netif_dbg(priv, rx_err, dev, "busy error (rstat: %x)\n",
			  gfar_read(&regs->rstat));
2685 }
2686 if (events & IEVENT_BABR) {
2687 dev->stats.rx_errors++;
2688 atomic64_inc(&priv->extra_stats.rx_babr);
2689
2690 netif_dbg(priv, rx_err, dev, "babbling RX error\n");
2691 }
2692 if (events & IEVENT_EBERR) {
2693 atomic64_inc(&priv->extra_stats.eberr);
2694 netif_dbg(priv, rx_err, dev, "bus error\n");
2695 }
2696 if (events & IEVENT_RXC)
2697 netif_dbg(priv, rx_status, dev, "control frame\n");
2698
2699 if (events & IEVENT_BABT) {
2700 atomic64_inc(&priv->extra_stats.tx_babt);
2701 netif_dbg(priv, tx_err, dev, "babbling TX error\n");
2702 }
2703 return IRQ_HANDLED;
2704 }
2705
2706 /* The interrupt handler for devices with one interrupt */
static irqreturn_t gfar_interrupt(int irq, void *grp_id)
2708 {
2709 struct gfar_priv_grp *gfargrp = grp_id;
2710
2711 /* Save ievent for future reference */
2712 u32 events = gfar_read(&gfargrp->regs->ievent);
2713
2714 /* Check for reception */
2715 if (events & IEVENT_RX_MASK)
2716 gfar_receive(irq, grp_id);
2717
2718 /* Check for transmit completion */
2719 if (events & IEVENT_TX_MASK)
2720 gfar_transmit(irq, grp_id);
2721
2722 /* Check for errors */
2723 if (events & IEVENT_ERR_MASK)
2724 gfar_error(irq, grp_id);
2725
2726 return IRQ_HANDLED;
2727 }
2728
2729 #ifdef CONFIG_NET_POLL_CONTROLLER
2730 /* Polling 'interrupt' - used by things like netconsole to send skbs
2731 * without having to re-enable interrupts. It's not called while
2732 * the interrupt routine is executing.
2733 */
static void gfar_netpoll(struct net_device *dev)
2735 {
2736 struct gfar_private *priv = netdev_priv(dev);
2737 int i;
2738
2739 /* If the device has multiple interrupts, run tx/rx */
2740 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2741 for (i = 0; i < priv->num_grps; i++) {
2742 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2743
2744 disable_irq(gfar_irq(grp, TX)->irq);
2745 disable_irq(gfar_irq(grp, RX)->irq);
2746 disable_irq(gfar_irq(grp, ER)->irq);
2747 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2748 enable_irq(gfar_irq(grp, ER)->irq);
2749 enable_irq(gfar_irq(grp, RX)->irq);
2750 enable_irq(gfar_irq(grp, TX)->irq);
2751 }
2752 } else {
2753 for (i = 0; i < priv->num_grps; i++) {
2754 struct gfar_priv_grp *grp = &priv->gfargrp[i];
2755
2756 disable_irq(gfar_irq(grp, TX)->irq);
2757 gfar_interrupt(gfar_irq(grp, TX)->irq, grp);
2758 enable_irq(gfar_irq(grp, TX)->irq);
2759 }
2760 }
2761 }
2762 #endif
2763
static void free_grp_irqs(struct gfar_priv_grp *grp)
2765 {
2766 free_irq(gfar_irq(grp, TX)->irq, grp);
2767 free_irq(gfar_irq(grp, RX)->irq, grp);
2768 free_irq(gfar_irq(grp, ER)->irq, grp);
2769 }
2770
static int register_grp_irqs(struct gfar_priv_grp *grp)
2772 {
2773 struct gfar_private *priv = grp->priv;
2774 struct net_device *dev = priv->ndev;
2775 int err;
2776
2777 /* If the device has multiple interrupts, register for
2778 * them. Otherwise, only register for the one
2779 */
2780 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2781 /* Install our interrupt handlers for Error,
2782 * Transmit, and Receive
2783 */
2784 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0,
2785 gfar_irq(grp, ER)->name, grp);
2786 if (err < 0) {
2787 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2788 gfar_irq(grp, ER)->irq);
2789
2790 goto err_irq_fail;
2791 }
2792 enable_irq_wake(gfar_irq(grp, ER)->irq);
2793
2794 err = request_irq(gfar_irq(grp, TX)->irq, gfar_transmit, 0,
2795 gfar_irq(grp, TX)->name, grp);
2796 if (err < 0) {
2797 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2798 gfar_irq(grp, TX)->irq);
2799 goto tx_irq_fail;
2800 }
2801 err = request_irq(gfar_irq(grp, RX)->irq, gfar_receive, 0,
2802 gfar_irq(grp, RX)->name, grp);
2803 if (err < 0) {
2804 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2805 gfar_irq(grp, RX)->irq);
2806 goto rx_irq_fail;
2807 }
2808 enable_irq_wake(gfar_irq(grp, RX)->irq);
2809
2810 } else {
2811 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0,
2812 gfar_irq(grp, TX)->name, grp);
2813 if (err < 0) {
2814 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
2815 gfar_irq(grp, TX)->irq);
2816 goto err_irq_fail;
2817 }
2818 enable_irq_wake(gfar_irq(grp, TX)->irq);
2819 }
2820
2821 return 0;
2822
2823 rx_irq_fail:
2824 free_irq(gfar_irq(grp, TX)->irq, grp);
2825 tx_irq_fail:
2826 free_irq(gfar_irq(grp, ER)->irq, grp);
2827 err_irq_fail:
2828 return err;
2829
2830 }
2831
static void gfar_free_irq(struct gfar_private *priv)
2833 {
2834 int i;
2835
2836 /* Free the IRQs */
2837 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
2838 for (i = 0; i < priv->num_grps; i++)
2839 free_grp_irqs(&priv->gfargrp[i]);
2840 } else {
2841 for (i = 0; i < priv->num_grps; i++)
2842 free_irq(gfar_irq(&priv->gfargrp[i], TX)->irq,
2843 &priv->gfargrp[i]);
2844 }
2845 }
2846
static int gfar_request_irq(struct gfar_private *priv)
2848 {
2849 int err, i, j;
2850
2851 for (i = 0; i < priv->num_grps; i++) {
2852 err = register_grp_irqs(&priv->gfargrp[i]);
2853 if (err) {
2854 for (j = 0; j < i; j++)
2855 free_grp_irqs(&priv->gfargrp[j]);
2856 return err;
2857 }
2858 }
2859
2860 return 0;
2861 }
2862
2863 /* Called when something needs to use the ethernet device
2864 * Returns 0 for success.
2865 */
static int gfar_enet_open(struct net_device *dev)
2867 {
2868 struct gfar_private *priv = netdev_priv(dev);
2869 int err;
2870
2871 err = init_phy(dev);
2872 if (err)
2873 return err;
2874
2875 err = gfar_request_irq(priv);
2876 if (err)
2877 return err;
2878
2879 err = startup_gfar(dev);
2880 if (err)
2881 return err;
2882
2883 return err;
2884 }
2885
2886 /* Stops the kernel queue, and halts the controller */
static int gfar_close(struct net_device *dev)
2888 {
2889 struct gfar_private *priv = netdev_priv(dev);
2890
2891 cancel_work_sync(&priv->reset_task);
2892 stop_gfar(dev);
2893
2894 /* Disconnect from the PHY */
2895 phy_disconnect(dev->phydev);
2896
2897 gfar_free_irq(priv);
2898
2899 return 0;
2900 }
2901
2902 /* Clears each of the exact match registers to zero, so they
2903 * don't interfere with normal reception
2904 */
static void gfar_clear_exact_match(struct net_device *dev)
2906 {
2907 int idx;
2908 static const u8 zero_arr[ETH_ALEN] = {0, 0, 0, 0, 0, 0};
2909
2910 for (idx = 1; idx < GFAR_EM_NUM + 1; idx++)
2911 gfar_set_mac_for_addr(dev, idx, zero_arr);
2912 }
2913
2914 /* Update the hash table based on the current list of multicast
2915 * addresses we subscribe to. Also, change the promiscuity of
2916 * the device based on the flags (this function is called
 * whenever dev->flags is changed).
2918 */
static void gfar_set_multi(struct net_device *dev)
2920 {
2921 struct netdev_hw_addr *ha;
2922 struct gfar_private *priv = netdev_priv(dev);
2923 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2924 u32 tempval;
2925
2926 if (dev->flags & IFF_PROMISC) {
2927 /* Set RCTRL to PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval |= RCTRL_PROM;
		gfar_write(&regs->rctrl, tempval);
	} else {
		/* Set RCTRL to not PROM */
		tempval = gfar_read(&regs->rctrl);
		tempval &= ~(RCTRL_PROM);
		gfar_write(&regs->rctrl, tempval);
2936 }
2937
2938 if (dev->flags & IFF_ALLMULTI) {
2939 /* Set the hash to rx all multicast frames */
		gfar_write(&regs->igaddr0, 0xffffffff);
		gfar_write(&regs->igaddr1, 0xffffffff);
		gfar_write(&regs->igaddr2, 0xffffffff);
		gfar_write(&regs->igaddr3, 0xffffffff);
		gfar_write(&regs->igaddr4, 0xffffffff);
		gfar_write(&regs->igaddr5, 0xffffffff);
		gfar_write(&regs->igaddr6, 0xffffffff);
		gfar_write(&regs->igaddr7, 0xffffffff);
		gfar_write(&regs->gaddr0, 0xffffffff);
		gfar_write(&regs->gaddr1, 0xffffffff);
		gfar_write(&regs->gaddr2, 0xffffffff);
		gfar_write(&regs->gaddr3, 0xffffffff);
		gfar_write(&regs->gaddr4, 0xffffffff);
		gfar_write(&regs->gaddr5, 0xffffffff);
		gfar_write(&regs->gaddr6, 0xffffffff);
		gfar_write(&regs->gaddr7, 0xffffffff);
2956 } else {
2957 int em_num;
2958 int idx;
2959
2960 /* zero out the hash */
		gfar_write(&regs->igaddr0, 0x0);
		gfar_write(&regs->igaddr1, 0x0);
		gfar_write(&regs->igaddr2, 0x0);
		gfar_write(&regs->igaddr3, 0x0);
		gfar_write(&regs->igaddr4, 0x0);
		gfar_write(&regs->igaddr5, 0x0);
		gfar_write(&regs->igaddr6, 0x0);
		gfar_write(&regs->igaddr7, 0x0);
		gfar_write(&regs->gaddr0, 0x0);
		gfar_write(&regs->gaddr1, 0x0);
		gfar_write(&regs->gaddr2, 0x0);
		gfar_write(&regs->gaddr3, 0x0);
		gfar_write(&regs->gaddr4, 0x0);
		gfar_write(&regs->gaddr5, 0x0);
		gfar_write(&regs->gaddr6, 0x0);
		gfar_write(&regs->gaddr7, 0x0);
2977
2978 /* If we have extended hash tables, we need to
2979 * clear the exact match registers to prepare for
2980 * setting them
2981 */
2982 if (priv->extended_hash) {
2983 em_num = GFAR_EM_NUM + 1;
2984 gfar_clear_exact_match(dev);
2985 idx = 1;
2986 } else {
2987 idx = 0;
2988 em_num = 0;
2989 }
2990
2991 if (netdev_mc_empty(dev))
2992 return;
2993
2994 /* Parse the list, and set the appropriate bits */
2995 netdev_for_each_mc_addr(ha, dev) {
2996 if (idx < em_num) {
2997 gfar_set_mac_for_addr(dev, idx, ha->addr);
2998 idx++;
2999 } else
3000 gfar_set_hash_for_addr(dev, ha->addr);
3001 }
3002 }
3003 }
3004
void gfar_mac_reset(struct gfar_private *priv)
3006 {
3007 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3008 u32 tempval;
3009
3010 /* Reset MAC layer */
	gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
3012
3013 /* We need to delay at least 3 TX clocks */
3014 udelay(3);
3015
3016 /* the soft reset bit is not self-resetting, so we need to
3017 * clear it before resuming normal operation
3018 */
	gfar_write(&regs->maccfg1, 0);
3020
3021 udelay(3);
3022
3023 gfar_rx_offload_en(priv);
3024
3025 /* Initialize the max receive frame/buffer lengths */
	gfar_write(&regs->maxfrm, GFAR_JUMBO_FRAME_SIZE);
	gfar_write(&regs->mrblr, GFAR_RXB_SIZE);
3028
3029 /* Initialize the Minimum Frame Length Register */
	gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
3031
3032 /* Initialize MACCFG2. */
3033 tempval = MACCFG2_INIT_SETTINGS;
3034
3035 /* eTSEC74 erratum: Rx frames of length MAXFRM or MAXFRM-1
3036 * are marked as truncated. Avoid this by MACCFG2[Huge Frame]=1,
3037 * and by checking RxBD[LG] and discarding larger than MAXFRM.
3038 */
3039 if (gfar_has_errata(priv, GFAR_ERRATA_74))
3040 tempval |= MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK;
3041
	gfar_write(&regs->maccfg2, tempval);
3043
3044 /* Clear mac addr hash registers */
	gfar_write(&regs->igaddr0, 0);
	gfar_write(&regs->igaddr1, 0);
	gfar_write(&regs->igaddr2, 0);
	gfar_write(&regs->igaddr3, 0);
	gfar_write(&regs->igaddr4, 0);
	gfar_write(&regs->igaddr5, 0);
	gfar_write(&regs->igaddr6, 0);
	gfar_write(&regs->igaddr7, 0);

	gfar_write(&regs->gaddr0, 0);
	gfar_write(&regs->gaddr1, 0);
	gfar_write(&regs->gaddr2, 0);
	gfar_write(&regs->gaddr3, 0);
	gfar_write(&regs->gaddr4, 0);
	gfar_write(&regs->gaddr5, 0);
	gfar_write(&regs->gaddr6, 0);
	gfar_write(&regs->gaddr7, 0);
3062
3063 if (priv->extended_hash)
3064 gfar_clear_exact_match(priv->ndev);
3065
3066 gfar_mac_rx_config(priv);
3067
3068 gfar_mac_tx_config(priv);
3069
3070 gfar_set_mac_address(priv->ndev);
3071
3072 gfar_set_multi(priv->ndev);
3073
3074 /* clear ievent and imask before configuring coalescing */
3075 gfar_ints_disable(priv);
3076
3077 /* Configure the coalescing support */
3078 gfar_configure_coalescing_all(priv);
3079 }
3080
static void gfar_hw_init(struct gfar_private *priv)
3082 {
3083 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3084 u32 attrs;
3085
3086 /* Stop the DMA engine now, in case it was running before
3087 * (The firmware could have used it, and left it running).
3088 */
3089 gfar_halt(priv);
3090
3091 gfar_mac_reset(priv);
3092
3093 /* Zero out the rmon mib registers if it has them */
3094 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
		memset_io(&regs->rmon, 0, offsetof(struct rmon_mib, car1));
3096
3097 /* Mask off the CAM interrupts */
		gfar_write(&regs->rmon.cam1, 0xffffffff);
		gfar_write(&regs->rmon.cam2, 0xffffffff);
		/* Clear the CAR registers (w1c style) */
		gfar_write(&regs->rmon.car1, 0xffffffff);
		gfar_write(&regs->rmon.car2, 0xffffffff);
3103 }
3104
3105 /* Initialize ECNTRL */
	gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
3107
3108 /* Set the extraction length and index */
3109 attrs = ATTRELI_EL(priv->rx_stash_size) |
3110 ATTRELI_EI(priv->rx_stash_index);
3111
	gfar_write(&regs->attreli, attrs);
3113
3114 /* Start with defaults, and add stashing
3115 * depending on driver parameters
3116 */
3117 attrs = ATTR_INIT_SETTINGS;
3118
3119 if (priv->bd_stash_en)
3120 attrs |= ATTR_BDSTASH;
3121
3122 if (priv->rx_stash_size != 0)
3123 attrs |= ATTR_BUFSTASH;
3124
	gfar_write(&regs->attr, attrs);
3126
3127 /* FIFO configs */
	gfar_write(&regs->fifo_tx_thr, DEFAULT_FIFO_TX_THR);
	gfar_write(&regs->fifo_tx_starve, DEFAULT_FIFO_TX_STARVE);
	gfar_write(&regs->fifo_tx_starve_shutoff, DEFAULT_FIFO_TX_STARVE_OFF);
3131
3132 /* Program the interrupt steering regs, only for MG devices */
3133 if (priv->num_grps > 1)
3134 gfar_write_isrg(priv);
3135 }
3136
3137 static const struct net_device_ops gfar_netdev_ops = {
3138 .ndo_open = gfar_enet_open,
3139 .ndo_start_xmit = gfar_start_xmit,
3140 .ndo_stop = gfar_close,
3141 .ndo_change_mtu = gfar_change_mtu,
3142 .ndo_set_features = gfar_set_features,
3143 .ndo_set_rx_mode = gfar_set_multi,
3144 .ndo_tx_timeout = gfar_timeout,
3145 .ndo_eth_ioctl = phy_do_ioctl_running,
3146 .ndo_get_stats64 = gfar_get_stats64,
3147 .ndo_change_carrier = fixed_phy_change_carrier,
3148 .ndo_set_mac_address = gfar_set_mac_addr,
3149 .ndo_validate_addr = eth_validate_addr,
3150 #ifdef CONFIG_NET_POLL_CONTROLLER
3151 .ndo_poll_controller = gfar_netpoll,
3152 #endif
3153 .ndo_hwtstamp_get = gfar_hwtstamp_get,
3154 .ndo_hwtstamp_set = gfar_hwtstamp_set,
3155 };
3156
3157 /* Set up the ethernet device structure, private data,
3158 * and anything else we need before we start
3159 */
static int gfar_probe(struct platform_device *ofdev)
3161 {
3162 struct device_node *np = ofdev->dev.of_node;
3163 struct net_device *dev = NULL;
3164 struct gfar_private *priv = NULL;
3165 int err = 0, i;
3166
3167 err = gfar_of_init(ofdev, &dev);
3168
3169 if (err)
3170 return err;
3171
3172 priv = netdev_priv(dev);
3173 priv->ndev = dev;
3174 priv->ofdev = ofdev;
3175 priv->dev = &ofdev->dev;
3176 SET_NETDEV_DEV(dev, &ofdev->dev);
3177
3178 INIT_WORK(&priv->reset_task, gfar_reset_task);
3179
3180 platform_set_drvdata(ofdev, priv);
3181
3182 gfar_detect_errata(priv);
3183
3184 /* Set the dev->base_addr to the gfar reg region */
3185 dev->base_addr = (unsigned long) priv->gfargrp[0].regs;
3186
3187 /* Fill in the dev structure */
3188 dev->watchdog_timeo = TX_TIMEOUT;
3189 /* MTU range: 50 - 9586 */
3190 dev->mtu = 1500;
3191 dev->min_mtu = 50;
3192 dev->max_mtu = GFAR_JUMBO_FRAME_SIZE - ETH_HLEN;
3193 dev->netdev_ops = &gfar_netdev_ops;
3194 dev->ethtool_ops = &gfar_ethtool_ops;
3195
	/* Register an Rx and a Tx NAPI context for each interrupt group */
3197 for (i = 0; i < priv->num_grps; i++) {
3198 netif_napi_add(dev, &priv->gfargrp[i].napi_rx,
3199 gfar_poll_rx_sq);
3200 netif_napi_add_tx_weight(dev, &priv->gfargrp[i].napi_tx,
3201 gfar_poll_tx_sq, 2);
3202 }
3203
3204 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
3205 dev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG |
3206 NETIF_F_RXCSUM;
3207 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG |
3208 NETIF_F_RXCSUM | NETIF_F_HIGHDMA;
3209 }
3210
3211 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_VLAN) {
3212 dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX |
3213 NETIF_F_HW_VLAN_CTAG_RX;
3214 dev->features |= NETIF_F_HW_VLAN_CTAG_RX;
3215 }
3216
3217 dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
3218
3219 gfar_init_addr_hash_table(priv);
3220
	/* Insert receive time stamps into padding alignment bytes, plus
	 * 2 bytes of padding to ensure CPU alignment.
	 */
3224 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3225 priv->padding = 8 + DEFAULT_PADDING;
3226
3227 if (dev->features & NETIF_F_IP_CSUM ||
3228 priv->device_flags & FSL_GIANFAR_DEV_HAS_TIMER)
3229 dev->needed_headroom = GMAC_FCB_LEN + GMAC_TXPAL_LEN;
3230
3231 /* Initializing some of the rx/tx queue level parameters */
3232 for (i = 0; i < priv->num_tx_queues; i++) {
3233 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
3234 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
3235 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
3236 priv->tx_queue[i]->txic = DEFAULT_TXIC;
3237 }
3238
3239 for (i = 0; i < priv->num_rx_queues; i++) {
3240 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
3241 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
3242 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
3243 }
3244
3245 /* Always enable rx filer if available */
3246 priv->rx_filer_enable =
3247 (priv->device_flags & FSL_GIANFAR_DEV_HAS_RX_FILER) ? 1 : 0;
3248 /* Enable most messages by default */
	priv->msg_enable = (NETIF_MSG_IFUP << 1) - 1;
	/* use priority h/w tx queue scheduling for single queue devices */
	if (priv->num_tx_queues == 1)
		priv->prio_sched_en = 1;
3253
3254 set_bit(GFAR_DOWN, &priv->state);
3255
3256 gfar_hw_init(priv);
3257
3258 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
3259 struct rmon_mib __iomem *rmon = &priv->gfargrp[0].regs->rmon;
3260
3261 spin_lock_init(&priv->rmon_overflow.lock);
3262 priv->rmon_overflow.imask = IMASK_MSRO;
3263 gfar_write(&rmon->cam1, gfar_read(&rmon->cam1) & ~CAM1_M1RDR);
3264 }
3265
3266 /* Carrier starts down, phylib will bring it up */
3267 netif_carrier_off(dev);
3268
3269 err = register_netdev(dev);
3270
3271 if (err) {
3272 pr_err("%s: Cannot register net device, aborting\n", dev->name);
3273 goto register_fail;
3274 }
3275
3276 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET)
3277 priv->wol_supported |= GFAR_WOL_MAGIC;
3278
3279 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_WAKE_ON_FILER) &&
3280 priv->rx_filer_enable)
3281 priv->wol_supported |= GFAR_WOL_FILER_UCAST;
3282
3283 device_set_wakeup_capable(&ofdev->dev, priv->wol_supported);
3284
3285 /* fill out IRQ number and name fields */
3286 for (i = 0; i < priv->num_grps; i++) {
3287 struct gfar_priv_grp *grp = &priv->gfargrp[i];
3288 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
3289 sprintf(gfar_irq(grp, TX)->name, "%s%s%c%s",
3290 dev->name, "_g", '0' + i, "_tx");
3291 sprintf(gfar_irq(grp, RX)->name, "%s%s%c%s",
3292 dev->name, "_g", '0' + i, "_rx");
3293 sprintf(gfar_irq(grp, ER)->name, "%s%s%c%s",
3294 dev->name, "_g", '0' + i, "_er");
3295 } else
3296 strcpy(gfar_irq(grp, TX)->name, dev->name);
3297 }
3298
3299 /* Initialize the filer table */
3300 gfar_init_filer_table(priv);
3301
3302 /* Print out the device info */
3303 netdev_info(dev, "mac: %pM\n", dev->dev_addr);
3304
3305 /* Even more device info helps when determining which kernel
3306 * provided which set of benchmarks.
3307 */
3308 netdev_info(dev, "Running with NAPI enabled\n");
3309 for (i = 0; i < priv->num_rx_queues; i++)
3310 netdev_info(dev, "RX BD ring size for Q[%d]: %d\n",
3311 i, priv->rx_queue[i]->rx_ring_size);
3312 for (i = 0; i < priv->num_tx_queues; i++)
3313 netdev_info(dev, "TX BD ring size for Q[%d]: %d\n",
3314 i, priv->tx_queue[i]->tx_ring_size);
3315
3316 return 0;
3317
3318 register_fail:
3319 if (of_phy_is_fixed_link(np))
3320 of_phy_deregister_fixed_link(np);
3321 unmap_group_regs(priv);
3322 gfar_free_rx_queues(priv);
3323 gfar_free_tx_queues(priv);
3324 of_node_put(priv->phy_node);
3325 of_node_put(priv->tbi_node);
3326 free_gfar_dev(priv);
3327 return err;
3328 }
3329
static void gfar_remove(struct platform_device *ofdev)
3331 {
3332 struct gfar_private *priv = platform_get_drvdata(ofdev);
3333 struct device_node *np = ofdev->dev.of_node;
3334
3335 of_node_put(priv->phy_node);
3336 of_node_put(priv->tbi_node);
3337
3338 unregister_netdev(priv->ndev);
3339
3340 if (of_phy_is_fixed_link(np))
3341 of_phy_deregister_fixed_link(np);
3342
3343 unmap_group_regs(priv);
3344 gfar_free_rx_queues(priv);
3345 gfar_free_tx_queues(priv);
3346 free_gfar_dev(priv);
3347 }
3348
3349 #ifdef CONFIG_PM
3350
static void __gfar_filer_disable(struct gfar_private *priv)
3352 {
3353 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3354 u32 temp;
3355
	temp = gfar_read(&regs->rctrl);
	temp &= ~(RCTRL_FILREN | RCTRL_PRSDEP_INIT);
	gfar_write(&regs->rctrl, temp);
3359 }
3360
static void __gfar_filer_enable(struct gfar_private *priv)
3362 {
3363 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3364 u32 temp;
3365
	temp = gfar_read(&regs->rctrl);
	temp |= RCTRL_FILREN | RCTRL_PRSDEP_INIT;
	gfar_write(&regs->rctrl, temp);
3369 }
3370
3371 /* Filer rules implementing wol capabilities */
static void gfar_filer_config_wol(struct gfar_private *priv)
3373 {
3374 unsigned int i;
3375 u32 rqfcr;
3376
3377 __gfar_filer_disable(priv);
3378
3379 /* clear the filer table, reject any packet by default */
3380 rqfcr = RQFCR_RJE | RQFCR_CMP_MATCH;
3381 for (i = 0; i <= MAX_FILER_IDX; i++)
3382 gfar_write_filer(priv, i, rqfcr, 0);
3383
3384 i = 0;
3385 if (priv->wol_opts & GFAR_WOL_FILER_UCAST) {
3386 /* unicast packet, accept it */
3387 struct net_device *ndev = priv->ndev;
3388 /* get the default rx queue index */
3389 u8 qindex = (u8)priv->gfargrp[0].rx_queue->qindex;
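		/* Match the station MAC address: the upper three bytes go
		 * into the DAH rule and the lower three into the DAL rule;
		 * RQFCR_GPI presumably raises FGPI on a match for wake-up.
		 */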
3390 u32 dest_mac_addr = (ndev->dev_addr[0] << 16) |
3391 (ndev->dev_addr[1] << 8) |
3392 ndev->dev_addr[2];
3393
3394 rqfcr = (qindex << 10) | RQFCR_AND |
3395 RQFCR_CMP_EXACT | RQFCR_PID_DAH;
3396
3397 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3398
3399 dest_mac_addr = (ndev->dev_addr[3] << 16) |
3400 (ndev->dev_addr[4] << 8) |
3401 ndev->dev_addr[5];
3402 rqfcr = (qindex << 10) | RQFCR_GPI |
3403 RQFCR_CMP_EXACT | RQFCR_PID_DAL;
3404 gfar_write_filer(priv, i++, rqfcr, dest_mac_addr);
3405 }
3406
3407 __gfar_filer_enable(priv);
3408 }
3409
static void gfar_filer_restore_table(struct gfar_private *priv)
3411 {
3412 u32 rqfcr, rqfpr;
3413 unsigned int i;
3414
3415 __gfar_filer_disable(priv);
3416
3417 for (i = 0; i <= MAX_FILER_IDX; i++) {
3418 rqfcr = priv->ftp_rqfcr[i];
3419 rqfpr = priv->ftp_rqfpr[i];
3420 gfar_write_filer(priv, i, rqfcr, rqfpr);
3421 }
3422
3423 __gfar_filer_enable(priv);
3424 }
3425
3426 /* gfar_start() for Rx only and with the FGPI filer interrupt enabled */
static void gfar_start_wol_filer(struct gfar_private *priv)
3428 {
3429 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3430 u32 tempval;
3431 int i = 0;
3432
3433 /* Enable Rx hw queues */
	gfar_write(&regs->rqueue, priv->rqueue);
3435
3436 /* Initialize DMACTRL to have WWR and WOP */
	tempval = gfar_read(&regs->dmactrl);
	tempval |= DMACTRL_INIT_SETTINGS;
	gfar_write(&regs->dmactrl, tempval);

	/* Make sure we aren't stopped */
	tempval = gfar_read(&regs->dmactrl);
	tempval &= ~DMACTRL_GRS;
	gfar_write(&regs->dmactrl, tempval);
3445
3446 for (i = 0; i < priv->num_grps; i++) {
3447 regs = priv->gfargrp[i].regs;
3448 /* Clear RHLT, so that the DMA starts polling now */
		gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
3450 /* enable the Filer General Purpose Interrupt */
		gfar_write(&regs->imask, IMASK_FGPI);
3452 }
3453
3454 /* Enable Rx DMA */
	tempval = gfar_read(&regs->maccfg1);
	tempval |= MACCFG1_RX_EN;
	gfar_write(&regs->maccfg1, tempval);
3458 }
3459
static int gfar_suspend(struct device *dev)
3461 {
3462 struct gfar_private *priv = dev_get_drvdata(dev);
3463 struct net_device *ndev = priv->ndev;
3464 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3465 u32 tempval;
3466 u16 wol = priv->wol_opts;
3467
3468 if (!netif_running(ndev))
3469 return 0;
3470
3471 disable_napi(priv);
3472 netif_tx_lock(ndev);
3473 netif_device_detach(ndev);
3474 netif_tx_unlock(ndev);
3475
3476 gfar_halt(priv);
3477
3478 if (wol & GFAR_WOL_MAGIC) {
3479 /* Enable interrupt on Magic Packet */
		gfar_write(&regs->imask, IMASK_MAG);
3481
3482 /* Enable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval |= MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);

		/* re-enable the Rx block */
		tempval = gfar_read(&regs->maccfg1);
		tempval |= MACCFG1_RX_EN;
		gfar_write(&regs->maccfg1, tempval);
3491
3492 } else if (wol & GFAR_WOL_FILER_UCAST) {
3493 gfar_filer_config_wol(priv);
3494 gfar_start_wol_filer(priv);
3495
3496 } else {
3497 phy_stop(ndev->phydev);
3498 }
3499
3500 return 0;
3501 }
3502
static int gfar_resume(struct device *dev)
3504 {
3505 struct gfar_private *priv = dev_get_drvdata(dev);
3506 struct net_device *ndev = priv->ndev;
3507 struct gfar __iomem *regs = priv->gfargrp[0].regs;
3508 u32 tempval;
3509 u16 wol = priv->wol_opts;
3510
3511 if (!netif_running(ndev))
3512 return 0;
3513
3514 if (wol & GFAR_WOL_MAGIC) {
3515 /* Disable Magic Packet mode */
		tempval = gfar_read(&regs->maccfg2);
		tempval &= ~MACCFG2_MPEN;
		gfar_write(&regs->maccfg2, tempval);
3519
3520 } else if (wol & GFAR_WOL_FILER_UCAST) {
3521 /* need to stop rx only, tx is already down */
3522 gfar_halt(priv);
3523 gfar_filer_restore_table(priv);
3524
3525 } else {
3526 phy_start(ndev->phydev);
3527 }
3528
3529 gfar_start(priv);
3530
3531 netif_device_attach(ndev);
3532 enable_napi(priv);
3533
3534 return 0;
3535 }
3536
static int gfar_restore(struct device *dev)
3538 {
3539 struct gfar_private *priv = dev_get_drvdata(dev);
3540 struct net_device *ndev = priv->ndev;
3541
3542 if (!netif_running(ndev)) {
3543 netif_device_attach(ndev);
3544
3545 return 0;
3546 }
3547
3548 gfar_init_bds(ndev);
3549
3550 gfar_mac_reset(priv);
3551
3552 gfar_init_tx_rx_base(priv);
3553
3554 gfar_start(priv);
3555
3556 priv->oldlink = 0;
3557 priv->oldspeed = 0;
3558 priv->oldduplex = -1;
3559
3560 if (ndev->phydev)
3561 phy_start(ndev->phydev);
3562
3563 netif_device_attach(ndev);
3564 enable_napi(priv);
3565
3566 return 0;
3567 }
3568
3569 static const struct dev_pm_ops gfar_pm_ops = {
3570 .suspend = gfar_suspend,
3571 .resume = gfar_resume,
3572 .freeze = gfar_suspend,
3573 .thaw = gfar_resume,
3574 .restore = gfar_restore,
3575 };
3576
3577 #define GFAR_PM_OPS (&gfar_pm_ops)
3578
3579 #else
3580
3581 #define GFAR_PM_OPS NULL
3582
3583 #endif
3584
3585 static const struct of_device_id gfar_match[] =
3586 {
3587 {
3588 .type = "network",
3589 .compatible = "gianfar",
3590 },
3591 {
3592 .compatible = "fsl,etsec2",
3593 },
3594 {},
3595 };
3596 MODULE_DEVICE_TABLE(of, gfar_match);
3597
3598 /* Structure for a device driver */
3599 static struct platform_driver gfar_driver = {
3600 .driver = {
3601 .name = "fsl-gianfar",
3602 .pm = GFAR_PM_OPS,
3603 .of_match_table = gfar_match,
3604 },
3605 .probe = gfar_probe,
3606 .remove = gfar_remove,
3607 };
3608
3609 module_platform_driver(gfar_driver);
3610