// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2015 Microchip Technology
 */
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/phylink.h>
#include <linux/usb.h>
#include <linux/crc32.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/if_vlan.h>
#include <linux/uaccess.h>
#include <linux/linkmode.h>
#include <linux/list.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/mdio.h>
#include <linux/phy.h>
#include <net/ip6_checksum.h>
#include <net/vxlan.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/microchipphy.h>
#include <linux/of_mdio.h>
#include <linux/of_net.h>
#include "lan78xx.h"

#define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
#define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
#define DRIVER_NAME	"lan78xx"

#define TX_TIMEOUT_JIFFIES		(5 * HZ)
#define THROTTLE_JIFFIES		(HZ / 8)
#define UNLINK_TIMEOUT_MS		3

#define RX_MAX_QUEUE_MEMORY		(60 * 1518)

#define SS_USB_PKT_SIZE			(1024)
#define HS_USB_PKT_SIZE			(512)
#define FS_USB_PKT_SIZE			(64)

#define MAX_RX_FIFO_SIZE		(12 * 1024)
#define MAX_TX_FIFO_SIZE		(12 * 1024)

#define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
#define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
					 (FLOW_THRESHOLD(off) << 8))

/* Flow control turned on when Rx FIFO level rises above this level (bytes) */
#define FLOW_ON_SS			9216
#define FLOW_ON_HS			8704

/* Flow control turned off when Rx FIFO level falls below this level (bytes) */
#define FLOW_OFF_SS			4096
#define FLOW_OFF_HS			1024
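/* Worked example (derived from the macros above): the thresholds are
 * programmed in 512-byte units, so FLOW_CTRL_THRESHOLD(FLOW_ON_SS,
 * FLOW_OFF_SS) packs 9216 / 512 = 18 (0x12) into bits 6:0 and
 * 4096 / 512 = 8 into bits 14:8, i.e. 0x0812.
 */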

#define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
#define DEFAULT_BULK_IN_DELAY		(0x0800)
#define MAX_SINGLE_PACKET_SIZE		(9000)
#define DEFAULT_TX_CSUM_ENABLE		(true)
#define DEFAULT_RX_CSUM_ENABLE		(true)
#define DEFAULT_TSO_CSUM_ENABLE		(true)
#define DEFAULT_VLAN_FILTER_ENABLE	(true)
#define DEFAULT_VLAN_RX_OFFLOAD		(true)
#define TX_ALIGNMENT			(4)
#define RXW_PADDING			2

#define LAN78XX_USB_VENDOR_ID		(0x0424)
#define LAN7800_USB_PRODUCT_ID		(0x7800)
#define LAN7850_USB_PRODUCT_ID		(0x7850)
#define LAN7801_USB_PRODUCT_ID		(0x7801)
#define LAN78XX_EEPROM_MAGIC		(0x78A5)
#define LAN78XX_OTP_MAGIC		(0x78F3)
#define AT29M2AF_USB_VENDOR_ID		(0x07C9)
#define AT29M2AF_USB_PRODUCT_ID		(0x0012)

#define MII_READ			1
#define MII_WRITE			0

#define EEPROM_INDICATOR		(0xA5)
#define EEPROM_MAC_OFFSET		(0x01)
#define MAX_EEPROM_SIZE			512
#define OTP_INDICATOR_1			(0xF3)
#define OTP_INDICATOR_2			(0xF7)

#define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
					 WAKE_MCAST | WAKE_BCAST | \
					 WAKE_ARP | WAKE_MAGIC)

#define TX_URB_NUM			10
#define TX_SS_URB_NUM			TX_URB_NUM
#define TX_HS_URB_NUM			TX_URB_NUM
#define TX_FS_URB_NUM			TX_URB_NUM

/* A single URB buffer must be large enough to hold a complete jumbo packet
 */
#define TX_SS_URB_SIZE			(32 * 1024)
#define TX_HS_URB_SIZE			(16 * 1024)
#define TX_FS_URB_SIZE			(10 * 1024)

#define RX_SS_URB_NUM			30
#define RX_HS_URB_NUM			10
#define RX_FS_URB_NUM			10
#define RX_SS_URB_SIZE			TX_SS_URB_SIZE
#define RX_HS_URB_SIZE			TX_HS_URB_SIZE
#define RX_FS_URB_SIZE			TX_FS_URB_SIZE

#define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
#define SS_BULK_IN_DELAY		0x2000
#define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
#define HS_BULK_IN_DELAY		0x2000
#define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
#define FS_BULK_IN_DELAY		0x2000

#define TX_CMD_LEN			8
#define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
#define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
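/* Worked example: with the SuperSpeed URB size of 32 * 1024 bytes,
 * LAN78XX_TSO_SIZE() allows up to 32768 - (8 + 14) = 32746 bytes of TSO
 * payload, so the Tx command words plus the Ethernet header still fit in
 * a single URB buffer.
 */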

#define RX_CMD_LEN			10
#define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
#define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
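/* Framing note: each Tx frame handed to the device is prefixed with an
 * 8-byte command header (two 32-bit Tx command words) and each received
 * frame with a 10-byte one, which is what TX_CMD_LEN and RX_CMD_LEN above
 * account for on top of the Ethernet header.
 */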

/* USB related defines */
#define BULK_IN_PIPE			1
#define BULK_OUT_PIPE			2
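/* These are raw endpoint numbers; the driver turns them into usable pipe
 * handles (outside this excerpt) via usb_rcvbulkpipe()/usb_sndbulkpipe().
 */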

/* default autosuspend delay (mSec) */
#define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)

/* statistic update interval (mSec) */
#define STAT_UPDATE_TIMER		(1 * 1000)

/* time to wait for MAC or FCT to stop (jiffies) */
#define HW_DISABLE_TIMEOUT		(HZ / 10)

/* time to wait between polling MAC or FCT state (ms) */
#define HW_DISABLE_DELAY_MS		1

/* defines interrupts from interrupt EP */
#define MAX_INT_EP			(32)
#define INT_EP_INTEP			(31)
#define INT_EP_OTP_WR_DONE		(28)
#define INT_EP_EEE_TX_LPI_START		(26)
#define INT_EP_EEE_TX_LPI_STOP		(25)
#define INT_EP_EEE_RX_LPI		(24)
#define INT_EP_MAC_RESET_TIMEOUT	(23)
#define INT_EP_RDFO			(22)
#define INT_EP_TXE			(21)
#define INT_EP_USB_STATUS		(20)
#define INT_EP_TX_DIS			(19)
#define INT_EP_RX_DIS			(18)
#define INT_EP_PHY			(17)
#define INT_EP_DP			(16)
#define INT_EP_MAC_ERR			(15)
#define INT_EP_TDFU			(14)
#define INT_EP_TDFO			(13)
#define INT_EP_UTX			(12)
#define INT_EP_GPIO_11			(11)
#define INT_EP_GPIO_10			(10)
#define INT_EP_GPIO_9			(9)
#define INT_EP_GPIO_8			(8)
#define INT_EP_GPIO_7			(7)
#define INT_EP_GPIO_6			(6)
#define INT_EP_GPIO_5			(5)
#define INT_EP_GPIO_4			(4)
#define INT_EP_GPIO_3			(3)
#define INT_EP_GPIO_2			(2)
#define INT_EP_GPIO_1			(1)
#define INT_EP_GPIO_0			(0)

static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
	"RX FCS Errors",
	"RX Alignment Errors",
179 "Rx Fragment Errors",
180 "RX Jabber Errors",
181 "RX Undersize Frame Errors",
182 "RX Oversize Frame Errors",
183 "RX Dropped Frames",
184 "RX Unicast Byte Count",
185 "RX Broadcast Byte Count",
186 "RX Multicast Byte Count",
187 "RX Unicast Frames",
188 "RX Broadcast Frames",
189 "RX Multicast Frames",
190 "RX Pause Frames",
191 "RX 64 Byte Frames",
192 "RX 65 - 127 Byte Frames",
193 "RX 128 - 255 Byte Frames",
194 "RX 256 - 511 Bytes Frames",
195 "RX 512 - 1023 Byte Frames",
196 "RX 1024 - 1518 Byte Frames",
197 "RX Greater 1518 Byte Frames",
198 "EEE RX LPI Transitions",
199 "EEE RX LPI Time",
200 "TX FCS Errors",
201 "TX Excess Deferral Errors",
202 "TX Carrier Errors",
203 "TX Bad Byte Count",
204 "TX Single Collisions",
205 "TX Multiple Collisions",
206 "TX Excessive Collision",
207 "TX Late Collisions",
208 "TX Unicast Byte Count",
209 "TX Broadcast Byte Count",
210 "TX Multicast Byte Count",
211 "TX Unicast Frames",
212 "TX Broadcast Frames",
213 "TX Multicast Frames",
214 "TX Pause Frames",
215 "TX 64 Byte Frames",
216 "TX 65 - 127 Byte Frames",
217 "TX 128 - 255 Byte Frames",
218 "TX 256 - 511 Bytes Frames",
219 "TX 512 - 1023 Byte Frames",
220 "TX 1024 - 1518 Byte Frames",
221 "TX Greater 1518 Byte Frames",
222 "EEE TX LPI Transitions",
223 "EEE TX LPI Time",
224 };
225
226 struct lan78xx_statstage {
227 u32 rx_fcs_errors;
228 u32 rx_alignment_errors;
229 u32 rx_fragment_errors;
230 u32 rx_jabber_errors;
231 u32 rx_undersize_frame_errors;
232 u32 rx_oversize_frame_errors;
233 u32 rx_dropped_frames;
234 u32 rx_unicast_byte_count;
235 u32 rx_broadcast_byte_count;
236 u32 rx_multicast_byte_count;
237 u32 rx_unicast_frames;
238 u32 rx_broadcast_frames;
239 u32 rx_multicast_frames;
240 u32 rx_pause_frames;
241 u32 rx_64_byte_frames;
242 u32 rx_65_127_byte_frames;
243 u32 rx_128_255_byte_frames;
244 u32 rx_256_511_bytes_frames;
245 u32 rx_512_1023_byte_frames;
246 u32 rx_1024_1518_byte_frames;
247 u32 rx_greater_1518_byte_frames;
248 u32 eee_rx_lpi_transitions;
249 u32 eee_rx_lpi_time;
250 u32 tx_fcs_errors;
251 u32 tx_excess_deferral_errors;
252 u32 tx_carrier_errors;
253 u32 tx_bad_byte_count;
254 u32 tx_single_collisions;
255 u32 tx_multiple_collisions;
256 u32 tx_excessive_collision;
257 u32 tx_late_collisions;
258 u32 tx_unicast_byte_count;
259 u32 tx_broadcast_byte_count;
260 u32 tx_multicast_byte_count;
261 u32 tx_unicast_frames;
262 u32 tx_broadcast_frames;
263 u32 tx_multicast_frames;
264 u32 tx_pause_frames;
265 u32 tx_64_byte_frames;
266 u32 tx_65_127_byte_frames;
267 u32 tx_128_255_byte_frames;
268 u32 tx_256_511_bytes_frames;
269 u32 tx_512_1023_byte_frames;
270 u32 tx_1024_1518_byte_frames;
271 u32 tx_greater_1518_byte_frames;
272 u32 eee_tx_lpi_transitions;
273 u32 eee_tx_lpi_time;
274 };
275
276 struct lan78xx_statstage64 {
277 u64 rx_fcs_errors;
278 u64 rx_alignment_errors;
279 u64 rx_fragment_errors;
280 u64 rx_jabber_errors;
281 u64 rx_undersize_frame_errors;
282 u64 rx_oversize_frame_errors;
283 u64 rx_dropped_frames;
284 u64 rx_unicast_byte_count;
285 u64 rx_broadcast_byte_count;
286 u64 rx_multicast_byte_count;
287 u64 rx_unicast_frames;
288 u64 rx_broadcast_frames;
289 u64 rx_multicast_frames;
290 u64 rx_pause_frames;
291 u64 rx_64_byte_frames;
292 u64 rx_65_127_byte_frames;
293 u64 rx_128_255_byte_frames;
294 u64 rx_256_511_bytes_frames;
295 u64 rx_512_1023_byte_frames;
296 u64 rx_1024_1518_byte_frames;
297 u64 rx_greater_1518_byte_frames;
298 u64 eee_rx_lpi_transitions;
299 u64 eee_rx_lpi_time;
300 u64 tx_fcs_errors;
301 u64 tx_excess_deferral_errors;
302 u64 tx_carrier_errors;
303 u64 tx_bad_byte_count;
304 u64 tx_single_collisions;
305 u64 tx_multiple_collisions;
306 u64 tx_excessive_collision;
307 u64 tx_late_collisions;
308 u64 tx_unicast_byte_count;
309 u64 tx_broadcast_byte_count;
310 u64 tx_multicast_byte_count;
311 u64 tx_unicast_frames;
312 u64 tx_broadcast_frames;
313 u64 tx_multicast_frames;
314 u64 tx_pause_frames;
315 u64 tx_64_byte_frames;
316 u64 tx_65_127_byte_frames;
317 u64 tx_128_255_byte_frames;
318 u64 tx_256_511_bytes_frames;
319 u64 tx_512_1023_byte_frames;
320 u64 tx_1024_1518_byte_frames;
321 u64 tx_greater_1518_byte_frames;
322 u64 eee_tx_lpi_transitions;
323 u64 eee_tx_lpi_time;
324 };
325
326 static u32 lan78xx_regs[] = {
327 ID_REV,
328 INT_STS,
329 HW_CFG,
330 PMT_CTL,
331 E2P_CMD,
332 E2P_DATA,
333 USB_STATUS,
334 VLAN_TYPE,
335 MAC_CR,
336 MAC_RX,
337 MAC_TX,
338 FLOW,
339 ERR_STS,
340 MII_ACC,
341 MII_DATA,
342 EEE_TX_LPI_REQ_DLY,
343 EEE_TW_TX_SYS,
344 EEE_TX_LPI_REM_DLY,
345 WUCSR
346 };
347
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349
350 struct lan78xx_net;
351
352 struct lan78xx_priv {
353 struct lan78xx_net *dev;
354 u32 rfe_ctl;
355 u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
356 u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
357 u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
358 struct mutex dataport_mutex; /* for dataport access */
359 spinlock_t rfe_ctl_lock; /* for rfe register access */
360 struct work_struct set_multicast;
361 struct work_struct set_vlan;
362 u32 wol;
363 };
364
365 enum skb_state {
366 illegal = 0,
367 tx_start,
368 tx_done,
369 rx_start,
370 rx_done,
371 rx_cleanup,
372 unlink_start
373 };
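/* Buffer lifecycle, as the state names suggest: Tx buffers move from
 * tx_start to tx_done, Rx buffers from rx_start through rx_done to
 * rx_cleanup, and unlink_start marks URBs that are being cancelled.
 */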

struct skb_data {	/* skb->cb is one of these */
	struct urb *urb;
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};

#define EVENT_TX_HALT			0
#define EVENT_RX_HALT			1
#define EVENT_RX_MEMORY			2
#define EVENT_STS_SPLIT			3
#define EVENT_PHY_INT_ACK		4
#define EVENT_RX_PAUSED			5
#define EVENT_DEV_WAKING		6
#define EVENT_DEV_ASLEEP		7
#define EVENT_DEV_OPEN			8
#define EVENT_STAT_UPDATE		9
#define EVENT_DEV_DISCONNECT		10

struct statstage {
	struct mutex access_lock;	/* for stats access */
	struct lan78xx_statstage saved;
	struct lan78xx_statstage rollover_count;
	struct lan78xx_statstage rollover_max;
	struct lan78xx_statstage64 curr_stat;
};

struct irq_domain_data {
	struct irq_domain *irqdomain;
	unsigned int phyirq;
	struct irq_chip *irqchip;
	irq_flow_handler_t irq_handler;
	u32 irqenable;
	struct mutex irq_lock;		/* for irq bus access */
};

struct lan78xx_net {
	struct net_device *net;
	struct usb_device *udev;
	struct usb_interface *intf;

	unsigned int tx_pend_data_len;
	size_t n_tx_urbs;
	size_t n_rx_urbs;
	size_t tx_urb_size;
	size_t rx_urb_size;

	struct sk_buff_head rxq_free;
	struct sk_buff_head rxq;
	struct sk_buff_head rxq_done;
	struct sk_buff_head rxq_overflow;
	struct sk_buff_head txq_free;
	struct sk_buff_head txq;
	struct sk_buff_head txq_pend;

	struct napi_struct napi;

	struct delayed_work wq;

	int msg_enable;

	struct urb *urb_intr;
	struct usb_anchor deferred;

	struct mutex dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex mdiobus_mutex; /* for MDIO bus access */
	unsigned int pipe_in, pipe_out, pipe_intr;

	unsigned int bulk_in_delay;
	unsigned int burst_cap;

	unsigned long flags;

	wait_queue_head_t *wait;

	unsigned int maxpacket;
	struct timer_list stat_monitor;

	unsigned long data[5];

	u32 chipid;
	u32 chiprev;
	struct mii_bus *mdiobus;
	phy_interface_t interface;

	int delta;
	struct statstage stats;

	struct irq_domain_data domain_data;

	struct phylink *phylink;
	struct phylink_config phylink_config;
};

/* use ethtool to change the level for any given device */
static int msg_level = -1;
module_param(msg_level, int, 0);
MODULE_PARM_DESC(msg_level, "Override default message level");

static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	if (skb_queue_empty(buf_pool))
		return NULL;

	return skb_dequeue(buf_pool);
}

static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
				struct sk_buff *buf)
{
	buf->data = buf->head;
	skb_reset_tail_pointer(buf);

	buf->len = 0;
	buf->data_len = 0;

	skb_queue_tail(buf_pool, buf);
}

static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
{
	struct skb_data *entry;
	struct sk_buff *buf;

	while (!skb_queue_empty(buf_pool)) {
		buf = skb_dequeue(buf_pool);
		if (buf) {
			entry = (struct skb_data *)buf->cb;
			usb_free_urb(entry->urb);
			dev_kfree_skb_any(buf);
		}
	}
}

static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
				  size_t n_urbs, size_t urb_size,
				  struct lan78xx_net *dev)
{
	struct skb_data *entry;
	struct sk_buff *buf;
	struct urb *urb;
	int i;

	skb_queue_head_init(buf_pool);

	for (i = 0; i < n_urbs; i++) {
		buf = alloc_skb(urb_size, GFP_ATOMIC);
		if (!buf)
			goto error;

		if (skb_linearize(buf) != 0) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (!urb) {
			dev_kfree_skb_any(buf);
			goto error;
		}

		entry = (struct skb_data *)buf->cb;
		entry->urb = urb;
		entry->dev = dev;
		entry->length = 0;
		entry->num_of_packet = 0;

		skb_queue_tail(buf_pool, buf);
	}

	return 0;

error:
	lan78xx_free_buf_pool(buf_pool);

	return -ENOMEM;
}

static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}

static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}

static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}

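/* Read-modify-write helper: only the bits set in @mask are replaced by the
 * corresponding bits of @data. For example, passing the same bit for both
 * arguments sets it, and passing a zero @data clears every bit in @mask;
 * lan78xx_start_hw() and lan78xx_flush_fifo() below rely on the former.
 */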
static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
			      u32 data)
{
	int ret;
	u32 buf;

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	buf &= ~mask;
	buf |= (mask & data);

	return lan78xx_write_reg(dev, reg, buf);
}

static int lan78xx_read_stats(struct lan78xx_net *dev,
			      struct lan78xx_statstage *data)
{
	int ret = 0;
	int i;
	struct lan78xx_statstage *stats;
	u32 *src;
	u32 *dst;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev,
			      usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_GET_STATS,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0,
			      0,
			      (void *)stats,
			      sizeof(*stats),
			      USB_CTRL_SET_TIMEOUT);
	if (likely(ret >= 0)) {
		src = (u32 *)stats;
		dst = (u32 *)data;
		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
			le32_to_cpus(&src[i]);
			dst[i] = src[i];
		}
	} else {
		netdev_warn(dev->net,
			    "Failed to read stat ret = %d", ret);
	}

	kfree(stats);

	return ret;
}

#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)
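/* The hardware statistics counters are 32-bit and wrap around; whenever a
 * freshly read counter is smaller than the previously saved value, a wrap
 * is assumed and rollover_count is bumped. lan78xx_update_stats() then
 * reconstructs a 64-bit total as curr + rollover_count * (rollover_max + 1).
 */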

static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}

static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}

static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}

static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}

static int lan78xx_start_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start tx path");

	/* Start the MAC transmitter */

	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
	if (ret < 0)
		return ret;

	/* Start the Tx FIFO */

	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop tx path");

	/* Stop the Tx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
	if (ret < 0)
		return ret;

	/* Stop the MAC transmitter */

	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}

static int lan78xx_start_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "start rx path");

	/* Start the Rx FIFO */

	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
	if (ret < 0)
		return ret;

	/* Start the MAC receiver */

	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
	if (ret < 0)
		return ret;

	return 0;
}

static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
{
	int ret;

	netif_dbg(dev, drv, dev->net, "stop rx path");

	/* Stop the MAC receiver */

	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
	if (ret < 0)
		return ret;

	/* Stop the Rx FIFO */

	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
	if (ret < 0)
		return ret;

	return 0;
}

/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}

/* Loop until the MII access completes, with a timeout. Must be called with
 * mdiobus_mutex held.
 */
static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, MII_ACC, &val);
		if (ret < 0)
			return ret;

		if (!(val & MII_ACC_MII_BUSY_))
			return 0;
	} while (!time_after(jiffies, start_time + HZ));

	return -ETIMEDOUT;
}

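/* Builds an MII_ACC register value: PHY address and register index in
 * their fields, the direction flag, and the BUSY bit that kicks off the
 * transaction. E.g. mii_access(phy_id, MII_BMSR, MII_READ) starts a read
 * of the PHY's status register.
 */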
static inline u32 mii_access(int id, int index, int read)
{
	u32 ret;

	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
	if (read)
		ret |= MII_ACC_MII_READ_;
	else
		ret |= MII_ACC_MII_WRITE_;
	ret |= MII_ACC_MII_BUSY_;

	return ret;
}

static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}

static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_))
			return 0;

		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	netdev_warn(dev->net, "EEPROM is busy");
	return -ETIMEDOUT;
}

static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like a non-USB-specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
			       u32 length, u8 *data)
{
	int ret;
	u8 sig;

	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
	if (ret < 0)
		return ret;

	if (sig != EEPROM_INDICATOR)
		return -ENODATA;

	return lan78xx_read_raw_eeprom(dev, offset, length, data);
}

static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				    u32 length, u8 *data)
{
	u32 val;
	u32 saved;
	int i, ret;

	/* Depending on the chip, some EEPROM pins are muxed with the LED
	 * function. Disable and restore the LED function to access the
	 * EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	/* Looks like a non-USB-specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* Issue write/erase enable command */
	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
	ret = lan78xx_write_reg(dev, E2P_CMD, val);
	if (ret < 0)
		return ret;

	ret = lan78xx_wait_eeprom(dev);
	/* Looks like a non-USB-specific error, try to recover */
	if (ret == -ETIMEDOUT)
		goto write_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Fill data register */
		val = data[i];
		ret = lan78xx_write_reg(dev, E2P_DATA, val);
		if (ret < 0)
			return ret;

		/* Send "write" command */
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like a non-USB-specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto write_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		offset++;
	}

write_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}

static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}

static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}

static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
			    u32 length, u8 *data)
{
	u8 sig;
	int ret;

	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);

	if (ret == 0) {
		/* OTP_INDICATOR_2 means the image lives in the second
		 * 256-byte OTP block, so shift the offset accordingly.
		 */
		if (sig == OTP_INDICATOR_2)
			offset += 0x100;
		else if (sig != OTP_INDICATOR_1)
			ret = -EINVAL;
		if (!ret)
			ret = lan78xx_read_raw_otp(dev, offset, length, data);
	}

	return ret;
}

static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
{
	int i, ret;

	for (i = 0; i < 100; i++) {
		u32 dp_sel;

		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
		if (unlikely(ret < 0))
			return ret;

		if (dp_sel & DP_SEL_DPRDY_)
			return 0;

		usleep_range(40, 100);
	}

	netdev_warn(dev->net, "%s timed out", __func__);

	return -ETIMEDOUT;
}

static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
				    int index, u8 addr[ETH_ALEN])
{
	u32 temp;

	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
		temp = addr[3];
		temp = addr[2] | (temp << 8);
		temp = addr[1] | (temp << 8);
		temp = addr[0] | (temp << 8);
		pdata->pfilter_table[index][1] = temp;
		temp = addr[5];
		temp = addr[4] | (temp << 8);
		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
		pdata->pfilter_table[index][0] = temp;
	}
}

/* returns hash bit number for given MAC address */
static inline u32 lan78xx_hash(char addr[ETH_ALEN])
{
	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
}
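/* The top nine bits of the CRC select one of 512 multicast hash bits;
 * lan78xx_set_multicast() below uses bitnum / 32 to pick the mchash_table
 * word and bitnum % 32 to pick the bit within it.
 */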

static void lan78xx_deferred_multicast_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_multicast);
	struct lan78xx_net *dev = pdata->dev;
	int i, ret;

	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
		  pdata->rfe_ctl);

	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
				     DP_SEL_VHF_VLAN_LEN,
				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
	if (ret < 0)
		goto multicast_write_done;

	for (i = 1; i < NUM_OF_MAF; i++) {
		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_LO(i),
					pdata->pfilter_table[i][1]);
		if (ret < 0)
			goto multicast_write_done;

		ret = lan78xx_write_reg(dev, MAF_HI(i),
					pdata->pfilter_table[i][0]);
		if (ret < 0)
			goto multicast_write_done;
	}

	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);

multicast_write_done:
	if (ret < 0)
		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
	return;
}

static void lan78xx_set_multicast(struct net_device *netdev)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);

	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
		pdata->mchash_table[i] = 0;

	/* pfilter_table[0] holds our own HW address */
	for (i = 1; i < NUM_OF_MAF; i++) {
		pdata->pfilter_table[i][0] = 0;
		pdata->pfilter_table[i][1] = 0;
	}

	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;

	if (dev->net->flags & IFF_PROMISC) {
		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
	} else {
		if (dev->net->flags & IFF_ALLMULTI) {
			netif_dbg(dev, drv, dev->net,
				  "receive all multicast enabled");
			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
		}
	}

	if (netdev_mc_count(dev->net)) {
		struct netdev_hw_addr *ha;
		int i;

		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");

		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;

		i = 1;
		netdev_for_each_mc_addr(ha, netdev) {
			/* set first 32 into Perfect Filter */
			if (i < 33) {
				lan78xx_set_addr_filter(pdata, i, ha->addr);
			} else {
				u32 bitnum = lan78xx_hash(ha->addr);

				pdata->mchash_table[bitnum / 32] |=
						(1 << (bitnum % 32));
				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
			}
			i++;
		}
	}

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	/* defer register writes to a sleepable context */
	schedule_work(&pdata->set_multicast);
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);

static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}

/**
 * lan78xx_phy_int_ack - Acknowledge PHY interrupt
 * @dev: pointer to the LAN78xx device structure
 *
 * This function acknowledges the PHY interrupt by setting the
 * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
{
	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
}

/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE: annoying asymmetry: if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't. hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}

static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
{
	u32 intdata;

	if (urb->actual_length != 4) {
		netdev_warn(dev->net,
			    "unexpected urb length %d", urb->actual_length);
		return;
	}

	intdata = get_unaligned_le32(urb->transfer_buffer);

	if (intdata & INT_ENP_PHY_INT) {
		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
		lan78xx_defer_kevent(dev, EVENT_PHY_INT_ACK);

		if (dev->domain_data.phyirq > 0)
			generic_handle_irq_safe(dev->domain_data.phyirq);
	} else {
		netdev_warn(dev->net,
			    "unexpected interrupt: 0x%08x\n", intdata);
	}
}

static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}

static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	ee->magic = LAN78XX_EEPROM_MAGIC;

	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
				      struct ethtool_eeprom *ee, u8 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret)
		return ret;

	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
	 * to load data from EEPROM
	 */
	if (ee->magic == LAN78XX_EEPROM_MAGIC)
		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
		 (ee->offset == 0) &&
		 (ee->len == 512) &&
		 (data[0] == OTP_INDICATOR_1))
		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);

	usb_autopm_put_interface(dev->intf);

	return ret;
}

static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
				u8 *data)
{
	if (stringset == ETH_SS_STATS)
		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
}

static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
{
	if (sset == ETH_SS_STATS)
		return ARRAY_SIZE(lan78xx_gstrings);
	else
		return -EOPNOTSUPP;
}

static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}

static void lan78xx_get_wol(struct net_device *netdev,
			    struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	int ret;
	u32 buf;
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
	if (unlikely(ret < 0)) {
		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		if (buf & USB_CFG_RMT_WKP_) {
			wol->supported = WAKE_ALL;
			wol->wolopts = pdata->wol;
		} else {
			wol->supported = 0;
			wol->wolopts = 0;
		}
	}

	usb_autopm_put_interface(dev->intf);
}

static int lan78xx_set_wol(struct net_device *netdev,
			   struct ethtool_wolinfo *wol)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int ret;

	if (wol->wolopts & ~WAKE_ALL)
		return -EINVAL;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	pdata->wol = wol->wolopts;

	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
	if (ret < 0)
		goto exit_pm_put;

	ret = phy_ethtool_set_wol(netdev->phydev, wol);

exit_pm_put:
	usb_autopm_put_interface(dev->intf);

	return ret;
}

static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_get_eee(dev->phylink, edata);
}

static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_eee(dev->phylink, edata);
}

static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}

static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}

static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}

static int lan78xx_get_link_ksettings(struct net_device *net,
				      struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_get(dev->phylink, cmd);
}

static int lan78xx_set_link_ksettings(struct net_device *net,
				      const struct ethtool_link_ksettings *cmd)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_ksettings_set(dev->phylink, cmd);
}

static void lan78xx_get_pause(struct net_device *net,
			      struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	phylink_ethtool_get_pauseparam(dev->phylink, pause);
}

static int lan78xx_set_pause(struct net_device *net,
			     struct ethtool_pauseparam *pause)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return phylink_ethtool_set_pauseparam(dev->phylink, pause);
}

static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}

static void
lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
		 void *buf)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	unsigned int data_count = 0;
	u32 *data = buf;
	int i, ret;

	/* Read Device/MAC registers */
	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
		if (ret < 0) {
			netdev_warn(dev->net,
				    "failed to read register 0x%08x\n",
				    lan78xx_regs[i]);
			goto clean_data;
		}

		data_count++;
	}

	return;

clean_data:
	memset(data, 0, data_count * sizeof(u32));
}

static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= ethtool_op_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len	= lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count	= lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};

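/* MAC address selection order: keep the address already programmed into
 * RX_ADDRL/RX_ADDRH if it is valid, otherwise try Device Tree/platform
 * data, then EEPROM/OTP, and finally fall back to a random address.
 */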
lan78xx_init_mac_address(struct lan78xx_net * dev)1911 static int lan78xx_init_mac_address(struct lan78xx_net *dev)
1912 {
1913 u32 addr_lo, addr_hi;
1914 u8 addr[6];
1915 int ret;
1916
1917 ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
1918 if (ret < 0)
1919 return ret;
1920
1921 ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
1922 if (ret < 0)
1923 return ret;
1924
1925 addr[0] = addr_lo & 0xFF;
1926 addr[1] = (addr_lo >> 8) & 0xFF;
1927 addr[2] = (addr_lo >> 16) & 0xFF;
1928 addr[3] = (addr_lo >> 24) & 0xFF;
1929 addr[4] = addr_hi & 0xFF;
1930 addr[5] = (addr_hi >> 8) & 0xFF;
1931
1932 if (!is_valid_ether_addr(addr)) {
1933 if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
1934 /* valid address present in Device Tree */
1935 netif_dbg(dev, ifup, dev->net,
1936 "MAC address read from Device Tree");
1937 } else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
1938 ETH_ALEN, addr) == 0) ||
1939 (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
1940 ETH_ALEN, addr) == 0)) &&
1941 is_valid_ether_addr(addr)) {
1942 /* eeprom values are valid so use them */
1943 netif_dbg(dev, ifup, dev->net,
1944 "MAC address read from EEPROM");
1945 } else {
1946 /* generate random MAC */
1947 eth_random_addr(addr);
1948 netif_dbg(dev, ifup, dev->net,
1949 "MAC address set to random addr");
1950 }
1951
1952 addr_lo = addr[0] | (addr[1] << 8) |
1953 (addr[2] << 16) | (addr[3] << 24);
1954 addr_hi = addr[4] | (addr[5] << 8);
1955
1956 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
1957 if (ret < 0)
1958 return ret;
1959
1960 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
1961 if (ret < 0)
1962 return ret;
1963 }
1964
1965 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
1966 if (ret < 0)
1967 return ret;
1968
1969 ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
1970 if (ret < 0)
1971 return ret;
1972
1973 eth_hw_addr_set(dev->net, addr);
1974
1975 return 0;
1976 }
1977
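/* Worked example of the little-endian packing above (hypothetical
 * address): 00:80:0f:12:34:56 stored as addr[0]..addr[5] yields
 * RX_ADDRL = 0x120f8000 and RX_ADDRH = 0x5634.
 */
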
1978 /* MDIO read and write wrappers for phylib */
1979 static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
1980 {
1981 struct lan78xx_net *dev = bus->priv;
1982 u32 val, addr;
1983 int ret;
1984
1985 ret = usb_autopm_get_interface(dev->intf);
1986 if (ret < 0)
1987 return ret;
1988
1989 mutex_lock(&dev->mdiobus_mutex);
1990
1991 /* confirm MII not busy */
1992 ret = lan78xx_mdiobus_wait_not_busy(dev);
1993 if (ret < 0)
1994 goto done;
1995
1996 /* set the address, index & direction (read from PHY) */
1997 addr = mii_access(phy_id, idx, MII_READ);
1998 ret = lan78xx_write_reg(dev, MII_ACC, addr);
1999 if (ret < 0)
2000 goto done;
2001
2002 ret = lan78xx_mdiobus_wait_not_busy(dev);
2003 if (ret < 0)
2004 goto done;
2005
2006 ret = lan78xx_read_reg(dev, MII_DATA, &val);
2007 if (ret < 0)
2008 goto done;
2009
2010 ret = (int)(val & 0xFFFF);
2011
2012 done:
2013 mutex_unlock(&dev->mdiobus_mutex);
2014 usb_autopm_put_interface(dev->intf);
2015
2016 return ret;
2017 }
2018
2019 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2020 u16 regval)
2021 {
2022 struct lan78xx_net *dev = bus->priv;
2023 u32 val, addr;
2024 int ret;
2025
2026 ret = usb_autopm_get_interface(dev->intf);
2027 if (ret < 0)
2028 return ret;
2029
2030 mutex_lock(&dev->mdiobus_mutex);
2031
2032 /* confirm MII not busy */
2033 ret = lan78xx_mdiobus_wait_not_busy(dev);
2034 if (ret < 0)
2035 goto done;
2036
2037 val = (u32)regval;
2038 ret = lan78xx_write_reg(dev, MII_DATA, val);
2039 if (ret < 0)
2040 goto done;
2041
2042 /* set the address, index & direction (write to PHY) */
2043 addr = mii_access(phy_id, idx, MII_WRITE);
2044 ret = lan78xx_write_reg(dev, MII_ACC, addr);
2045 if (ret < 0)
2046 goto done;
2047
2048 ret = lan78xx_mdiobus_wait_not_busy(dev);
2049 if (ret < 0)
2050 goto done;
2051
2052 done:
2053 mutex_unlock(&dev->mdiobus_mutex);
2054 usb_autopm_put_interface(dev->intf);
2055 return ret;
2056 }
2057
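/* Illustrative only, not part of the driver: phylib reaches these
 * wrappers through the generic accessors, so a raw Clause 22 read of
 * the internal PHY (address 1, see lan78xx_mdio_init below) would be:
 *
 *	int id1 = mdiobus_read(dev->mdiobus, 1, MII_PHYSID1);
 *
 * The wrappers themselves only add USB autoresume handling and
 * serialisation via mdiobus_mutex around the MII_ACC/MII_DATA sequence.
 */
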
2058 static int lan78xx_mdio_init(struct lan78xx_net *dev)
2059 {
2060 struct device_node *node;
2061 int ret;
2062
2063 dev->mdiobus = mdiobus_alloc();
2064 if (!dev->mdiobus) {
2065 netdev_err(dev->net, "can't allocate MDIO bus\n");
2066 return -ENOMEM;
2067 }
2068
2069 dev->mdiobus->priv = (void *)dev;
2070 dev->mdiobus->read = lan78xx_mdiobus_read;
2071 dev->mdiobus->write = lan78xx_mdiobus_write;
2072 dev->mdiobus->name = "lan78xx-mdiobus";
2073 dev->mdiobus->parent = &dev->udev->dev;
2074
2075 snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
2076 dev->udev->bus->busnum, dev->udev->devnum);
2077
2078 switch (dev->chipid) {
2079 case ID_REV_CHIP_ID_7800_:
2080 case ID_REV_CHIP_ID_7850_:
2081 /* set to internal PHY id */
2082 dev->mdiobus->phy_mask = ~(1 << 1);
2083 break;
2084 case ID_REV_CHIP_ID_7801_:
2085 /* scan through PHYAD[2..0] */
2086 dev->mdiobus->phy_mask = ~(0xFF);
2087 break;
2088 }
2089
2090 node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
2091 ret = of_mdiobus_register(dev->mdiobus, node);
2092 of_node_put(node);
2093 if (ret) {
2094 netdev_err(dev->net, "can't register MDIO bus\n");
2095 goto exit1;
2096 }
2097
2098 netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
2099 return 0;
2100 exit1:
2101 mdiobus_free(dev->mdiobus);
2102 return ret;
2103 }
2104
2105 static void lan78xx_remove_mdio(struct lan78xx_net *dev)
2106 {
2107 mdiobus_unregister(dev->mdiobus);
2108 mdiobus_free(dev->mdiobus);
2109 }
2110
2111 static int irq_map(struct irq_domain *d, unsigned int irq,
2112 irq_hw_number_t hwirq)
2113 {
2114 struct irq_domain_data *data = d->host_data;
2115
2116 irq_set_chip_data(irq, data);
2117 irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2118 irq_set_noprobe(irq);
2119
2120 return 0;
2121 }
2122
2123 static void irq_unmap(struct irq_domain *d, unsigned int irq)
2124 {
2125 irq_set_chip_and_handler(irq, NULL, NULL);
2126 irq_set_chip_data(irq, NULL);
2127 }
2128
2129 static const struct irq_domain_ops chip_domain_ops = {
2130 .map = irq_map,
2131 .unmap = irq_unmap,
2132 };
2133
2134 static void lan78xx_irq_mask(struct irq_data *irqd)
2135 {
2136 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2137
2138 data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
2139 }
2140
2141 static void lan78xx_irq_unmask(struct irq_data *irqd)
2142 {
2143 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2144
2145 data->irqenable |= BIT(irqd_to_hwirq(irqd));
2146 }
2147
2148 static void lan78xx_irq_bus_lock(struct irq_data *irqd)
2149 {
2150 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2151
2152 mutex_lock(&data->irq_lock);
2153 }
2154
2155 static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
2156 {
2157 struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
2158 struct lan78xx_net *dev =
2159 container_of(data, struct lan78xx_net, domain_data);
2160 u32 buf;
2161 int ret;
2162
2163 /* Do the register access here because irq_bus_lock & irq_bus_sync_unlock
2164 * are the only two callbacks executed in non-atomic context.
2165 */
2166 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2167 if (ret < 0)
2168 goto irq_bus_sync_unlock;
2169
2170 if (buf != data->irqenable)
2171 ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);
2172
2173 irq_bus_sync_unlock:
2174 if (ret < 0)
2175 netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
2176 ERR_PTR(ret));
2177
2178 mutex_unlock(&data->irq_lock);
2179 }
2180
2181 static struct irq_chip lan78xx_irqchip = {
2182 .name = "lan78xx-irqs",
2183 .irq_mask = lan78xx_irq_mask,
2184 .irq_unmask = lan78xx_irq_unmask,
2185 .irq_bus_lock = lan78xx_irq_bus_lock,
2186 .irq_bus_sync_unlock = lan78xx_irq_bus_sync_unlock,
2187 };
2188
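/* How the callbacks above fit together (a sketch of the usual slow-bus
 * irq_chip sequence, not extra driver logic): genirq brackets each
 * mask/unmask with the bus lock, e.g.
 *
 *	lan78xx_irq_bus_lock(d);		// mutex_lock(&data->irq_lock)
 *	lan78xx_irq_unmask(d);			// updates data->irqenable only
 *	lan78xx_irq_bus_sync_unlock(d);		// one USB write of INT_EP_CTL
 *
 * so the sleeping USB register access happens exactly once, outside
 * atomic context.
 */
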
2189 static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
2190 {
2191 struct irq_domain *irqdomain;
2192 unsigned int irqmap = 0;
2193 u32 buf;
2194 int ret = 0;
2195
2196 mutex_init(&dev->domain_data.irq_lock);
2197
2198 ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
2199 if (ret < 0)
2200 return ret;
2201
2202 dev->domain_data.irqenable = buf;
2203
2204 dev->domain_data.irqchip = &lan78xx_irqchip;
2205 dev->domain_data.irq_handler = handle_simple_irq;
2206
2207 irqdomain = irq_domain_create_simple(dev_fwnode(dev->udev->dev.parent), MAX_INT_EP, 0,
2208 &chip_domain_ops, &dev->domain_data);
2209 if (irqdomain) {
2210 /* create mapping for PHY interrupt */
2211 irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
2212 if (!irqmap) {
2213 irq_domain_remove(irqdomain);
2214
2215 irqdomain = NULL;
2216 ret = -EINVAL;
2217 }
2218 } else {
2219 ret = -EINVAL;
2220 }
2221
2222 dev->domain_data.irqdomain = irqdomain;
2223 dev->domain_data.phyirq = irqmap;
2224
2225 return ret;
2226 }
2227
2228 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2229 {
2230 if (dev->domain_data.phyirq > 0) {
2231 irq_dispose_mapping(dev->domain_data.phyirq);
2232
2233 if (dev->domain_data.irqdomain)
2234 irq_domain_remove(dev->domain_data.irqdomain);
2235 }
2236 dev->domain_data.phyirq = 0;
2237 dev->domain_data.irqdomain = NULL;
2238 }
2239
2240 static void lan78xx_mac_config(struct phylink_config *config, unsigned int mode,
2241 const struct phylink_link_state *state)
2242 {
2243 struct net_device *net = to_net_dev(config->dev);
2244 struct lan78xx_net *dev = netdev_priv(net);
2245 u32 mac_cr = 0;
2246 int ret;
2247
2248 /* Check if the mode is supported */
2249 if (mode != MLO_AN_FIXED && mode != MLO_AN_PHY) {
2250 netdev_err(net, "Unsupported negotiation mode: %u\n", mode);
2251 return;
2252 }
2253
2254 switch (state->interface) {
2255 case PHY_INTERFACE_MODE_GMII:
2256 mac_cr |= MAC_CR_GMII_EN_;
2257 break;
2258 case PHY_INTERFACE_MODE_RGMII:
2259 case PHY_INTERFACE_MODE_RGMII_ID:
2260 case PHY_INTERFACE_MODE_RGMII_TXID:
2261 case PHY_INTERFACE_MODE_RGMII_RXID:
2262 break;
2263 default:
2264 netdev_warn(net, "Unsupported interface mode: %d\n",
2265 state->interface);
2266 return;
2267 }
2268
2269 ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_GMII_EN_, mac_cr);
2270 if (ret < 0)
2271 netdev_err(net, "Failed to config MAC with error %pe\n",
2272 ERR_PTR(ret));
2273 }
2274
2275 static void lan78xx_mac_link_down(struct phylink_config *config,
2276 unsigned int mode, phy_interface_t interface)
2277 {
2278 struct net_device *net = to_net_dev(config->dev);
2279 struct lan78xx_net *dev = netdev_priv(net);
2280 int ret;
2281
2282 netif_stop_queue(net);
2283
2284 /* MAC reset will not de-assert TXEN/RXEN; stop the TX and RX paths
2285 * manually before resetting. TX and RX must be disabled before the
2286 * link_up sequence runs.
2287 */
2288 ret = lan78xx_stop_tx_path(dev);
2289 if (ret < 0)
2290 goto link_down_fail;
2291
2292 ret = lan78xx_stop_rx_path(dev);
2293 if (ret < 0)
2294 goto link_down_fail;
2295
2296 /* A MAC reset does not appear to affect the MAC configuration, and it
2297 * is unclear whether it is really needed, but previous driver versions
2298 * performed one here, so keep it.
2299 */
2300 ret = lan78xx_mac_reset(dev);
2301 if (ret < 0)
2302 goto link_down_fail;
2303
2304 return;
2305
2306 link_down_fail:
2307 netdev_err(dev->net, "Failed to set MAC down with error %pe\n",
2308 ERR_PTR(ret));
2309 }
2310
2311 /**
2312 * lan78xx_configure_usb - Configure USB link power settings
2313 * @dev: pointer to the LAN78xx device structure
2314 * @speed: negotiated Ethernet link speed (in Mbps)
2315 *
2316 * This function configures U1/U2 link power management for SuperSpeed
2317 * USB devices based on the current Ethernet link speed. It uses the
2318 * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2319 *
2320 * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2321 * LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2322 *
2323 * Return: 0 on success or a negative error code on failure.
2324 */
2325 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2326 {
2327 u32 mask, val;
2328 int ret;
2329
2330 /* Only configure USB settings for SuperSpeed devices */
2331 if (dev->udev->speed != USB_SPEED_SUPER)
2332 return 0;
2333
2334 /* LAN7850 does not support USB 3.x */
2335 if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2336 netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2337 return 0;
2338 }
2339
2340 switch (speed) {
2341 case SPEED_1000:
2342 /* Disable U2, enable U1 */
2343 ret = lan78xx_update_reg(dev, USB_CFG1,
2344 USB_CFG1_DEV_U2_INIT_EN_, 0);
2345 if (ret < 0)
2346 return ret;
2347
2348 return lan78xx_update_reg(dev, USB_CFG1,
2349 USB_CFG1_DEV_U1_INIT_EN_,
2350 USB_CFG1_DEV_U1_INIT_EN_);
2351
2352 case SPEED_100:
2353 case SPEED_10:
2354 /* Enable both U1 and U2 */
2355 mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2356 val = mask;
2357 return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2358
2359 default:
2360 netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2361 return -EINVAL;
2362 }
2363 }
2364
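/* Net effect of the policy above (summary only, no extra driver logic):
 *
 *	link speed	U1	U2
 *	1000 Mbps	on	off
 *	100/10 Mbps	on	on
 *
 * U2 has the longer exit latency of the two states; disallowing it at
 * gigabit speed presumably keeps wake-up time from starving the data
 * path, though that rationale is inferred rather than documented here.
 */
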
2365 /**
2366 * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2367 * @dev: pointer to the LAN78xx device structure
2368 * @tx_pause: enable transmission of pause frames
2369 * @rx_pause: enable reception of pause frames
2370 *
2371 * This function configures the LAN78xx flow control settings by writing
2372 * to the FLOW and FCT_FLOW registers. The pause time is set to the
2373 * maximum allowed value (65535 quanta). FIFO thresholds are selected
2374 * based on USB speed.
2375 *
2376 * The Pause Time field is measured in units of 512-bit times (quanta):
2377 * - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2378 * - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2379 * - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2380 *
2381 * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2382 * - RXUSED is the number of bytes used in the RX FIFO
2383 * - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2384 * - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2385 * - Both thresholds are encoded in units of 512 bytes (rounded up)
2386 *
2387 * Thresholds differ by USB speed because available USB bandwidth
2388 * affects how fast packets can be drained from the RX FIFO:
2389 * - USB 3.x (SuperSpeed):
2390 * FLOW_ON = 9216 bytes → 18 units
2391 * FLOW_OFF = 4096 bytes → 8 units
2392 * - USB 2.0 (High-Speed):
2393 * FLOW_ON = 8704 bytes → 17 units
2394 * FLOW_OFF = 1024 bytes → 2 units
2395 *
2396 * Note: The FCT_FLOW register must be configured before enabling TX pause
2397 * (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2398 *
2399 * Return: 0 on success or a negative error code on failure.
2400 */
2401 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2402 bool tx_pause, bool rx_pause)
2403 {
2404 /* Use maximum pause time: 65535 quanta (512-bit times) */
2405 const u32 pause_time_quanta = 65535;
2406 u32 fct_flow = 0;
2407 u32 flow = 0;
2408 int ret;
2409
2410 /* Prepare MAC flow control bits */
2411 if (tx_pause)
2412 flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2413
2414 if (rx_pause)
2415 flow |= FLOW_CR_RX_FCEN_;
2416
2417 /* Select RX FIFO thresholds based on USB speed
2418 *
2419 * FCT_FLOW layout:
2420 * bits [6:0] FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2421 * bits [14:8] FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2422 * thresholds are expressed in units of 512 bytes
2423 */
2424 switch (dev->udev->speed) {
2425 case USB_SPEED_SUPER:
2426 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2427 break;
2428 case USB_SPEED_HIGH:
2429 fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2430 break;
2431 default:
2432 netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2433 dev->udev->speed);
2434 return -EINVAL;
2435 }
2436
2437 /* Step 1: Write FIFO thresholds before enabling pause frames */
2438 ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2439 if (ret < 0)
2440 return ret;
2441
2442 /* Step 2: Enable MAC pause functionality */
2443 return lan78xx_write_reg(dev, FLOW, flow);
2444 }
2445
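/* Worked example (illustrative): with the SuperSpeed thresholds above,
 * FLOW_ON = 9216 bytes and FLOW_OFF = 4096 bytes become
 *
 *	on  = 9216 / 512 = 18 units
 *	off = 4096 / 512 =  8 units
 *	fct_flow = (18 << 0) | (8 << 8) = 0x0812
 *
 * and the High-Speed pair (8704/1024 bytes) becomes 17 and 2 units,
 * i.e. fct_flow = 0x0211.
 */
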
2446 static void lan78xx_mac_link_up(struct phylink_config *config,
2447 struct phy_device *phy,
2448 unsigned int mode, phy_interface_t interface,
2449 int speed, int duplex,
2450 bool tx_pause, bool rx_pause)
2451 {
2452 struct net_device *net = to_net_dev(config->dev);
2453 struct lan78xx_net *dev = netdev_priv(net);
2454 u32 mac_cr = 0;
2455 int ret;
2456
2457 switch (speed) {
2458 case SPEED_1000:
2459 mac_cr |= MAC_CR_SPEED_1000_;
2460 break;
2461 case SPEED_100:
2462 mac_cr |= MAC_CR_SPEED_100_;
2463 break;
2464 case SPEED_10:
2465 mac_cr |= MAC_CR_SPEED_10_;
2466 break;
2467 default:
2468 netdev_err(dev->net, "Unsupported speed %d\n", speed);
2469 return;
2470 }
2471
2472 if (duplex == DUPLEX_FULL)
2473 mac_cr |= MAC_CR_FULL_DUPLEX_;
2474
2475 /* make sure TXEN and RXEN are disabled before reconfiguring MAC */
2476 ret = lan78xx_update_reg(dev, MAC_CR, MAC_CR_SPEED_MASK_ |
2477 MAC_CR_FULL_DUPLEX_ | MAC_CR_EEE_EN_, mac_cr);
2478 if (ret < 0)
2479 goto link_up_fail;
2480
2481 ret = lan78xx_configure_flowcontrol(dev, tx_pause, rx_pause);
2482 if (ret < 0)
2483 goto link_up_fail;
2484
2485 ret = lan78xx_configure_usb(dev, speed);
2486 if (ret < 0)
2487 goto link_up_fail;
2488
2489 lan78xx_rx_urb_submit_all(dev);
2490
2491 ret = lan78xx_flush_rx_fifo(dev);
2492 if (ret < 0)
2493 goto link_up_fail;
2494
2495 ret = lan78xx_flush_tx_fifo(dev);
2496 if (ret < 0)
2497 goto link_up_fail;
2498
2499 ret = lan78xx_start_tx_path(dev);
2500 if (ret < 0)
2501 goto link_up_fail;
2502
2503 ret = lan78xx_start_rx_path(dev);
2504 if (ret < 0)
2505 goto link_up_fail;
2506
2507 netif_start_queue(net);
2508
2509 return;
2510
2511 link_up_fail:
2512 netdev_err(dev->net, "Failed to set MAC up with error %pe\n",
2513 ERR_PTR(ret));
2514 }
2515
2516 /**
2517 * lan78xx_mac_eee_enable - Enable or disable MAC-side EEE support
2518 * @dev: LAN78xx device
2519 * @enable: true to enable EEE, false to disable
2520 *
2521 * This function sets or clears the MAC_CR_EEE_EN_ bit to control Energy
2522 * Efficient Ethernet (EEE) operation. According to current understanding
2523 * of the LAN7800 documentation, this bit can be modified while TX and RX
2524 * are enabled. No explicit requirement was found to disable data paths
2525 * before changing this bit.
2526 *
2527 * Return: 0 on success or a negative error code
2528 */
2529 static int lan78xx_mac_eee_enable(struct lan78xx_net *dev, bool enable)
2530 {
2531 u32 mac_cr = 0;
2532
2533 if (enable)
2534 mac_cr |= MAC_CR_EEE_EN_;
2535
2536 return lan78xx_update_reg(dev, MAC_CR, MAC_CR_EEE_EN_, mac_cr);
2537 }
2538
2539 static void lan78xx_mac_disable_tx_lpi(struct phylink_config *config)
2540 {
2541 struct net_device *net = to_net_dev(config->dev);
2542 struct lan78xx_net *dev = netdev_priv(net);
2543
2544 lan78xx_mac_eee_enable(dev, false);
2545 }
2546
2547 static int lan78xx_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
2548 bool tx_clk_stop)
2549 {
2550 struct net_device *net = to_net_dev(config->dev);
2551 struct lan78xx_net *dev = netdev_priv(net);
2552 int ret;
2553
2554 /* Software should only change this field when Energy Efficient
2555 * Ethernet Enable (EEEEN) is cleared. We ensure that by clearing
2556 * EEEEN during probe, and phylink itself guarantees that
2557 * mac_disable_tx_lpi() will have been previously called.
2558 */
2559 ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, timer);
2560 if (ret < 0)
2561 return ret;
2562
2563 return lan78xx_mac_eee_enable(dev, true);
2564 }
2565
2566 static const struct phylink_mac_ops lan78xx_phylink_mac_ops = {
2567 .mac_config = lan78xx_mac_config,
2568 .mac_link_down = lan78xx_mac_link_down,
2569 .mac_link_up = lan78xx_mac_link_up,
2570 .mac_disable_tx_lpi = lan78xx_mac_disable_tx_lpi,
2571 .mac_enable_tx_lpi = lan78xx_mac_enable_tx_lpi,
2572 };
2573
2574 /**
2575 * lan78xx_set_fixed_link() - Set fixed link configuration for LAN7801
2576 * @dev: LAN78xx device
2577 *
2578 * Use fixed link configuration with 1 Gbps full duplex. This is used in special
2579 * cases like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface
2580 * to a switch without a visible PHY.
2581 *
2582 * Return: 0 on success or a negative error code.
2583 */
2584 static int lan78xx_set_fixed_link(struct lan78xx_net *dev)
2585 {
2586 static const struct phylink_link_state state = {
2587 .speed = SPEED_1000,
2588 .duplex = DUPLEX_FULL,
2589 };
2590
2591 netdev_info(dev->net,
2592 "No PHY found on LAN7801 – using fixed link instead (e.g. EVB-KSZ9897-1)\n");
2593
2594 return phylink_set_fixed_link(dev->phylink, &state);
2595 }
2596
2597 /**
2598 * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2599 * @dev: LAN78xx device structure
2600 *
2601 * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2602 * and the chip is LAN7801, it returns NULL so that the caller can fall back
2603 * to a fixed link. It also sets dev->interface based on chip ID and PHY type.
2604 *
2605 * Return: PHY device pointer, NULL if no PHY found (LAN7801 only), or ERR_PTR().
2606 */
2607 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2608 {
2609 struct phy_device *phydev;
2610
2611 /* Attempt to locate a PHY on the MDIO bus */
2612 phydev = phy_find_first(dev->mdiobus);
2613
2614 switch (dev->chipid) {
2615 case ID_REV_CHIP_ID_7801_:
2616 if (phydev) {
2617 /* External RGMII PHY detected */
2618 dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2619 phydev->is_internal = false;
2620
2621 if (!phydev->drv)
2622 netdev_warn(dev->net,
2623 "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2624
2625 return phydev;
2626 }
2627
2628 dev->interface = PHY_INTERFACE_MODE_RGMII;
2629 /* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
2630 return NULL;
2631
2632 case ID_REV_CHIP_ID_7800_:
2633 case ID_REV_CHIP_ID_7850_:
2634 if (!phydev)
2635 return ERR_PTR(-ENODEV);
2636
2637 /* These use internal GMII-connected PHY */
2638 dev->interface = PHY_INTERFACE_MODE_GMII;
2639 phydev->is_internal = true;
2640 return phydev;
2641
2642 default:
2643 netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2644 return ERR_PTR(-ENODEV);
2645 }
2646 }
2647
2648 /**
2649 * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2650 * @dev: LAN78xx device
2651 *
2652 * Configure MAC-side registers according to dev->interface, which should be
2653 * set by lan78xx_get_phy().
2654 *
2655 * - For PHY_INTERFACE_MODE_RGMII:
2656 * Enable MAC-side TXC delay. This mode seems to be used in a special setup
2657 * without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2658 * connected to the KSZ9897 switch, and the link timing is expected to be
2659 * hardwired (e.g. via strapping or board layout). No devicetree support is
2660 * assumed here.
2661 *
2662 * - For PHY_INTERFACE_MODE_RGMII_ID:
2663 * Disable MAC-side delay and rely on the PHY driver to provide delay.
2664 *
2665 * - For GMII, no MAC-specific config is needed.
2666 *
2667 * Return: 0 on success or a negative error code.
2668 */
2669 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2670 {
2671 int ret;
2672
2673 switch (dev->interface) {
2674 case PHY_INTERFACE_MODE_RGMII:
2675 /* Enable MAC-side TX clock delay */
2676 ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2677 MAC_RGMII_ID_TXC_DELAY_EN_);
2678 if (ret < 0)
2679 return ret;
2680
2681 ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2682 if (ret < 0)
2683 return ret;
2684
2685 ret = lan78xx_update_reg(dev, HW_CFG,
2686 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2687 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2688 if (ret < 0)
2689 return ret;
2690
2691 break;
2692
2693 case PHY_INTERFACE_MODE_RGMII_ID:
2694 /* Disable MAC-side TXC delay, PHY provides it */
2695 ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2696 if (ret < 0)
2697 return ret;
2698
2699 break;
2700
2701 case PHY_INTERFACE_MODE_GMII:
2702 /* No MAC-specific configuration required */
2703 break;
2704
2705 default:
2706 netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2707 dev->interface);
2708 break;
2709 }
2710
2711 return 0;
2712 }
2713
2714 /**
2715 * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2716 * @dev: LAN78xx device
2717 * @phydev: PHY device (must be valid)
2718 *
2719 * Reads "microchip,led-modes" property from the PHY's DT node and enables
2720 * the corresponding number of LEDs by writing to HW_CFG.
2721 *
2722 * This helper preserves the original logic, enabling up to 4 LEDs.
2723 * If the property is not present, this function does nothing.
2724 *
2725 * Return: 0 on success or a negative error code.
2726 */
2727 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2728 struct phy_device *phydev)
2729 {
2730 struct device_node *np = phydev->mdio.dev.of_node;
2731 u32 reg;
2732 int len, ret;
2733
2734 if (!np)
2735 return 0;
2736
2737 len = of_property_count_elems_of_size(np, "microchip,led-modes",
2738 sizeof(u32));
2739 if (len < 0)
2740 return 0;
2741
2742 ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2743 if (ret < 0)
2744 return ret;
2745
2746 reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2747 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2748
2749 reg |= (len > 0) * HW_CFG_LED0_EN_ |
2750 (len > 1) * HW_CFG_LED1_EN_ |
2751 (len > 2) * HW_CFG_LED2_EN_ |
2752 (len > 3) * HW_CFG_LED3_EN_;
2753
2754 return lan78xx_write_reg(dev, HW_CFG, reg);
2755 }
2756
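/* Hypothetical devicetree fragment for the PHY node (the LED mode
 * values are examples, not a recommendation); listing three entries
 * enables LED0-LED2 and leaves LED3 disabled:
 *
 *	ethernet-phy@1 {
 *		reg = <1>;
 *		microchip,led-modes = <1 6 2>;
 *	};
 */
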
2757 static int lan78xx_phylink_setup(struct lan78xx_net *dev)
2758 {
2759 struct phylink_config *pc = &dev->phylink_config;
2760 struct phylink *phylink;
2761
2762 pc->dev = &dev->net->dev;
2763 pc->type = PHYLINK_NETDEV;
2764 pc->mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | MAC_10 |
2765 MAC_100 | MAC_1000FD;
2766 pc->mac_managed_pm = true;
2767 pc->lpi_capabilities = MAC_100FD | MAC_1000FD;
2768 /*
2769 * Default TX LPI (Low Power Idle) request delay count is set to 50us.
2770 *
2771 * Source: LAN7800 Documentation, DS00001992H, Section 15.1.57, Page 204.
2772 *
2773 * Reasoning:
2774 * According to the application note in the LAN7800 documentation, a
2775 * zero delay may negatively impact the TX data path’s ability to
2776 * support Gigabit operation. A value of 50us is recommended as a
2777 * reasonable default when the part operates at Gigabit speeds,
2778 * balancing stability and power efficiency in EEE mode. This delay can
2779 * be increased based on performance testing, as EEE is designed for
2780 * scenarios with mostly idle links and occasional bursts of full
2781 * bandwidth transmission. The goal is to ensure reliable Gigabit
2782 * performance without overly aggressive power optimization during
2783 * inactive periods.
2784 */
2785 pc->lpi_timer_default = 50;
2786 pc->eee_enabled_default = true;
2787
2788 if (dev->chipid == ID_REV_CHIP_ID_7801_)
2789 phy_interface_set_rgmii(pc->supported_interfaces);
2790 else
2791 __set_bit(PHY_INTERFACE_MODE_GMII, pc->supported_interfaces);
2792
2793 memcpy(dev->phylink_config.lpi_interfaces,
2794 dev->phylink_config.supported_interfaces,
2795 sizeof(dev->phylink_config.lpi_interfaces));
2796
2797 phylink = phylink_create(pc, dev->net->dev.fwnode,
2798 dev->interface, &lan78xx_phylink_mac_ops);
2799 if (IS_ERR(phylink))
2800 return PTR_ERR(phylink);
2801
2802 dev->phylink = phylink;
2803
2804 return 0;
2805 }
2806
2807 static void lan78xx_phy_uninit(struct lan78xx_net *dev)
2808 {
2809 if (dev->phylink) {
2810 phylink_disconnect_phy(dev->phylink);
2811 phylink_destroy(dev->phylink);
2812 dev->phylink = NULL;
2813 }
2814 }
2815
2816 static int lan78xx_phy_init(struct lan78xx_net *dev)
2817 {
2818 struct phy_device *phydev;
2819 int ret;
2820
2821 phydev = lan78xx_get_phy(dev);
2822 /* phydev can be NULL if no PHY is found and the chip is LAN7801,
2823 * which will use a fixed link later.
2824 * If an error occurs, return the error code immediately.
2825 */
2826 if (IS_ERR(phydev))
2827 return PTR_ERR(phydev);
2828
2829 ret = lan78xx_phylink_setup(dev);
2830 if (ret < 0)
2831 return ret;
2832
2833 ret = lan78xx_mac_prepare_for_phy(dev);
2834 if (ret < 0)
2835 goto phylink_uninit;
2836
2837 /* If no PHY is found, set up a fixed link. It is very specific to
2838 * the LAN7801 and is used in special cases like EVB-KSZ9897-1 where
2839 * LAN7801 acts as a USB-to-Ethernet interface to a switch without
2840 * a visible PHY.
2841 */
2842 if (!phydev) {
2843 ret = lan78xx_set_fixed_link(dev);
2844 if (ret < 0)
2845 goto phylink_uninit;
2846
2847 /* No PHY found, so set up a fixed link and return early.
2848 * No need to configure PHY IRQ or attach to phylink.
2849 */
2850 return 0;
2851 }
2852
2853 /* if phyirq is not set, use polling mode in phylib */
2854 if (dev->domain_data.phyirq > 0)
2855 phydev->irq = dev->domain_data.phyirq;
2856 else
2857 phydev->irq = PHY_POLL;
2858 netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);
2859
2860 ret = phylink_connect_phy(dev->phylink, phydev);
2861 if (ret) {
2862 netdev_err(dev->net, "can't attach PHY to %s, error %pe\n",
2863 dev->mdiobus->id, ERR_PTR(ret));
2864 goto phylink_uninit;
2865 }
2866
2867 ret = lan78xx_configure_leds_from_dt(dev, phydev);
2868 if (ret < 0)
2869 goto phylink_uninit;
2870
2871 return 0;
2872
2873 phylink_uninit:
2874 lan78xx_phy_uninit(dev);
2875
2876 return ret;
2877 }
2878
2879 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2880 {
2881 bool rxenabled;
2882 u32 buf;
2883 int ret;
2884
2885 ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2886 if (ret < 0)
2887 return ret;
2888
2889 rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2890
2891 if (rxenabled) {
2892 buf &= ~MAC_RX_RXEN_;
2893 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2894 if (ret < 0)
2895 return ret;
2896 }
2897
2898 /* add 4 to size for FCS */
2899 buf &= ~MAC_RX_MAX_SIZE_MASK_;
2900 buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2901
2902 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2903 if (ret < 0)
2904 return ret;
2905
2906 if (rxenabled) {
2907 buf |= MAC_RX_RXEN_;
2908 ret = lan78xx_write_reg(dev, MAC_RX, buf);
2909 if (ret < 0)
2910 return ret;
2911 }
2912
2913 return 0;
2914 }
2915
2916 static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
2917 {
2918 struct sk_buff *skb;
2919 unsigned long flags;
2920 int count = 0;
2921
2922 spin_lock_irqsave(&q->lock, flags);
2923 while (!skb_queue_empty(q)) {
2924 struct skb_data *entry;
2925 struct urb *urb;
2926 int ret;
2927
2928 skb_queue_walk(q, skb) {
2929 entry = (struct skb_data *)skb->cb;
2930 if (entry->state != unlink_start)
2931 goto found;
2932 }
2933 break;
2934 found:
2935 entry->state = unlink_start;
2936 urb = entry->urb;
2937
2938 /* Take a reference on the URB so it cannot be freed
2939 * while usb_unlink_urb is in progress, which could
2940 * otherwise trigger a use-after-free inside
2941 * usb_unlink_urb, since usb_unlink_urb always races
2942 * with the .complete handler (including defer_bh).
2943 */
2944 usb_get_urb(urb);
2945 spin_unlock_irqrestore(&q->lock, flags);
2946 /* during some PM-driven resume scenarios,
2947 * these (async) unlinks complete immediately
2948 */
2949 ret = usb_unlink_urb(urb);
2950 if (ret != -EINPROGRESS && ret != 0)
2951 netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
2952 else
2953 count++;
2954 usb_put_urb(urb);
2955 spin_lock_irqsave(&q->lock, flags);
2956 }
2957 spin_unlock_irqrestore(&q->lock, flags);
2958 return count;
2959 }
2960
2961 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2962 {
2963 struct lan78xx_net *dev = netdev_priv(netdev);
2964 int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2965 int ret;
2966
2967 /* no second zero-length packet read wanted after mtu-sized packets */
2968 if ((max_frame_len % dev->maxpacket) == 0)
2969 return -EDOM;
2970
2971 ret = usb_autopm_get_interface(dev->intf);
2972 if (ret < 0)
2973 return ret;
2974
2975 ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2976 if (ret < 0)
2977 netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2978 new_mtu, netdev->mtu, ERR_PTR(ret));
2979 else
2980 WRITE_ONCE(netdev->mtu, new_mtu);
2981
2982 usb_autopm_put_interface(dev->intf);
2983
2984 return ret;
2985 }
2986
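/* Example of the -EDOM check above: a High-Speed bulk endpoint uses
 * 512-byte packets, so any max_frame_len that is an exact multiple of
 * 512 would end exactly on a packet boundary and require a zero-length
 * packet as terminator; such MTUs are rejected instead.
 */
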
2987 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2988 {
2989 struct lan78xx_net *dev = netdev_priv(netdev);
2990 struct sockaddr *addr = p;
2991 u32 addr_lo, addr_hi;
2992 int ret;
2993
2994 if (netif_running(netdev))
2995 return -EBUSY;
2996
2997 if (!is_valid_ether_addr(addr->sa_data))
2998 return -EADDRNOTAVAIL;
2999
3000 eth_hw_addr_set(netdev, addr->sa_data);
3001
3002 addr_lo = netdev->dev_addr[0] |
3003 netdev->dev_addr[1] << 8 |
3004 netdev->dev_addr[2] << 16 |
3005 netdev->dev_addr[3] << 24;
3006 addr_hi = netdev->dev_addr[4] |
3007 netdev->dev_addr[5] << 8;
3008
3009 ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3010 if (ret < 0)
3011 return ret;
3012
3013 ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3014 if (ret < 0)
3015 return ret;
3016
3017 /* Added to support MAC address changes */
3018 ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3019 if (ret < 0)
3020 return ret;
3021
3022 return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3023 }
3024
3025 /* Enable or disable Rx checksum offload engine */
3026 static int lan78xx_set_features(struct net_device *netdev,
3027 netdev_features_t features)
3028 {
3029 struct lan78xx_net *dev = netdev_priv(netdev);
3030 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3031 unsigned long flags;
3032
3033 spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
3034
3035 if (features & NETIF_F_RXCSUM) {
3036 pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
3037 pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
3038 } else {
3039 pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
3040 pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
3041 }
3042
3043 if (features & NETIF_F_HW_VLAN_CTAG_RX)
3044 pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
3045 else
3046 pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;
3047
3048 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
3049 pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
3050 else
3051 pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;
3052
3053 spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
3054
3055 return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3056 }
3057
3058 static void lan78xx_deferred_vlan_write(struct work_struct *param)
3059 {
3060 struct lan78xx_priv *pdata =
3061 container_of(param, struct lan78xx_priv, set_vlan);
3062 struct lan78xx_net *dev = pdata->dev;
3063
3064 lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
3065 DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
3066 }
3067
3068 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3069 __be16 proto, u16 vid)
3070 {
3071 struct lan78xx_net *dev = netdev_priv(netdev);
3072 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3073 u16 vid_bit_index;
3074 u16 vid_dword_index;
3075
3076 vid_dword_index = (vid >> 5) & 0x7F;
3077 vid_bit_index = vid & 0x1F;
3078
3079 pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3080
3081 /* defer register writes to a sleepable context */
3082 schedule_work(&pdata->set_vlan);
3083
3084 return 0;
3085 }
3086
3087 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3088 __be16 proto, u16 vid)
3089 {
3090 struct lan78xx_net *dev = netdev_priv(netdev);
3091 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3092 u16 vid_bit_index;
3093 u16 vid_dword_index;
3094
3095 vid_dword_index = (vid >> 5) & 0x7F;
3096 vid_bit_index = vid & 0x1F;
3097
3098 pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3099
3100 /* defer register writes to a sleepable context */
3101 schedule_work(&pdata->set_vlan);
3102
3103 return 0;
3104 }
3105
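/* Worked example of the bitmap maths above: VID 100 gives
 * vid_dword_index = (100 >> 5) & 0x7F = 3 and vid_bit_index =
 * 100 & 0x1F = 4, i.e. bit 4 of vlan_table[3]. The table holds one bit
 * per possible VID, packed into 32-bit words.
 */
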
3106 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3107 {
3108 u32 regs[6] = { 0 };
3109 int ret;
3110 u32 buf;
3111
3112 ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3113 if (ret < 0)
3114 goto init_ltm_failed;
3115
3116 if (buf & USB_CFG1_LTM_ENABLE_) {
3117 u8 temp[2];
3118 /* Get values from EEPROM first */
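/* The two bytes at offset 0x3F appear to describe an LTM parameter
 * blob: temp[0] is its length (must be 24 bytes, i.e. the six 32-bit
 * LTM registers written below) and temp[1] a word offset, hence the
 * temp[1] * 2 byte address.
 */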
3119 if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3120 if (temp[0] == 24) {
3121 ret = lan78xx_read_raw_eeprom(dev,
3122 temp[1] * 2,
3123 24,
3124 (u8 *)regs);
3125 if (ret < 0)
3126 return ret;
3127 }
3128 } else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3129 if (temp[0] == 24) {
3130 ret = lan78xx_read_raw_otp(dev,
3131 temp[1] * 2,
3132 24,
3133 (u8 *)regs);
3134 if (ret < 0)
3135 return ret;
3136 }
3137 }
3138 }
3139
3140 ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3141 if (ret < 0)
3142 goto init_ltm_failed;
3143
3144 ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3145 if (ret < 0)
3146 goto init_ltm_failed;
3147
3148 ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3149 if (ret < 0)
3150 goto init_ltm_failed;
3151
3152 ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3153 if (ret < 0)
3154 goto init_ltm_failed;
3155
3156 ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3157 if (ret < 0)
3158 goto init_ltm_failed;
3159
3160 ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3161 if (ret < 0)
3162 goto init_ltm_failed;
3163
3164 return 0;
3165
3166 init_ltm_failed:
3167 netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3168 return ret;
3169 }
3170
3171 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3172 {
3173 int result = 0;
3174
3175 switch (dev->udev->speed) {
3176 case USB_SPEED_SUPER:
3177 dev->rx_urb_size = RX_SS_URB_SIZE;
3178 dev->tx_urb_size = TX_SS_URB_SIZE;
3179 dev->n_rx_urbs = RX_SS_URB_NUM;
3180 dev->n_tx_urbs = TX_SS_URB_NUM;
3181 dev->bulk_in_delay = SS_BULK_IN_DELAY;
3182 dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3183 break;
3184 case USB_SPEED_HIGH:
3185 dev->rx_urb_size = RX_HS_URB_SIZE;
3186 dev->tx_urb_size = TX_HS_URB_SIZE;
3187 dev->n_rx_urbs = RX_HS_URB_NUM;
3188 dev->n_tx_urbs = TX_HS_URB_NUM;
3189 dev->bulk_in_delay = HS_BULK_IN_DELAY;
3190 dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3191 break;
3192 case USB_SPEED_FULL:
3193 dev->rx_urb_size = RX_FS_URB_SIZE;
3194 dev->tx_urb_size = TX_FS_URB_SIZE;
3195 dev->n_rx_urbs = RX_FS_URB_NUM;
3196 dev->n_tx_urbs = TX_FS_URB_NUM;
3197 dev->bulk_in_delay = FS_BULK_IN_DELAY;
3198 dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3199 break;
3200 default:
3201 netdev_warn(dev->net, "USB bus speed not supported\n");
3202 result = -EIO;
3203 break;
3204 }
3205
3206 return result;
3207 }
3208
3209 static int lan78xx_reset(struct lan78xx_net *dev)
3210 {
3211 struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3212 unsigned long timeout;
3213 int ret;
3214 u32 buf;
3215
3216 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3217 if (ret < 0)
3218 return ret;
3219
3220 buf |= HW_CFG_LRST_;
3221
3222 ret = lan78xx_write_reg(dev, HW_CFG, buf);
3223 if (ret < 0)
3224 return ret;
3225
3226 timeout = jiffies + HZ;
3227 do {
3228 mdelay(1);
3229 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3230 if (ret < 0)
3231 return ret;
3232
3233 if (time_after(jiffies, timeout)) {
3234 netdev_warn(dev->net,
3235 "timeout on completion of LiteReset");
3236 ret = -ETIMEDOUT;
3237 return ret;
3238 }
3239 } while (buf & HW_CFG_LRST_);
3240
3241 ret = lan78xx_init_mac_address(dev);
3242 if (ret < 0)
3243 return ret;
3244
3245 /* save DEVID for later usage */
3246 ret = lan78xx_read_reg(dev, ID_REV, &buf);
3247 if (ret < 0)
3248 return ret;
3249
3250 dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3251 dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3252
3253 /* Respond to the IN token with a NAK */
3254 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3255 if (ret < 0)
3256 return ret;
3257
3258 buf |= USB_CFG_BIR_;
3259
3260 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3261 if (ret < 0)
3262 return ret;
3263
3264 /* Init LTM */
3265 ret = lan78xx_init_ltm(dev);
3266 if (ret < 0)
3267 return ret;
3268
3269 ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3270 if (ret < 0)
3271 return ret;
3272
3273 ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3274 if (ret < 0)
3275 return ret;
3276
3277 ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3278 if (ret < 0)
3279 return ret;
3280
3281 buf |= HW_CFG_MEF_;
3282 buf |= HW_CFG_CLK125_EN_;
3283 buf |= HW_CFG_REFCLK25_EN_;
3284
3285 ret = lan78xx_write_reg(dev, HW_CFG, buf);
3286 if (ret < 0)
3287 return ret;
3288
3289 ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3290 if (ret < 0)
3291 return ret;
3292
3293 buf |= USB_CFG_BCE_;
3294
3295 ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3296 if (ret < 0)
3297 return ret;
3298
3299 /* set FIFO sizes */
3300 buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3301
3302 ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3303 if (ret < 0)
3304 return ret;
3305
3306 buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3307
3308 ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3309 if (ret < 0)
3310 return ret;
3311
3312 ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3313 if (ret < 0)
3314 return ret;
3315
3316 ret = lan78xx_write_reg(dev, FLOW, 0);
3317 if (ret < 0)
3318 return ret;
3319
3320 ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3321 if (ret < 0)
3322 return ret;
3323
3324 /* Don't need rfe_ctl_lock during initialisation */
3325 ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3326 if (ret < 0)
3327 return ret;
3328
3329 pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3330
3331 ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3332 if (ret < 0)
3333 return ret;
3334
3335 /* Enable or disable checksum offload engines */
3336 ret = lan78xx_set_features(dev->net, dev->net->features);
3337 if (ret < 0)
3338 return ret;
3339
3340 lan78xx_set_multicast(dev->net);
3341
3342 /* reset PHY */
3343 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3344 if (ret < 0)
3345 return ret;
3346
3347 buf |= PMT_CTL_PHY_RST_;
3348
3349 ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3350 if (ret < 0)
3351 return ret;
3352
3353 timeout = jiffies + HZ;
3354 do {
3355 mdelay(1);
3356 ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3357 if (ret < 0)
3358 return ret;
3359
3360 if (time_after(jiffies, timeout)) {
3361 netdev_warn(dev->net, "timeout waiting for PHY Reset");
3362 ret = -ETIMEDOUT;
3363 return ret;
3364 }
3365 } while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3366
3367 ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3368 if (ret < 0)
3369 return ret;
3370
3371 buf &= ~(MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_ | MAC_CR_EEE_EN_);
3372
3373 /* LAN7801 only has RGMII mode */
3374 if (dev->chipid == ID_REV_CHIP_ID_7801_)
3375 buf &= ~MAC_CR_GMII_EN_;
3376
3377 ret = lan78xx_write_reg(dev, MAC_CR, buf);
3378 if (ret < 0)
3379 return ret;
3380
3381 ret = lan78xx_set_rx_max_frame_length(dev,
3382 RX_MAX_FRAME_LEN(dev->net->mtu));
3383
3384 return ret;
3385 }
3386
3387 static void lan78xx_init_stats(struct lan78xx_net *dev)
3388 {
3389 u32 *p;
3390 int i;
3391
3392 /* initialize for stats update
3393 * some counters are 20 bits and some are 32 bits
3394 */
3395 p = (u32 *)&dev->stats.rollover_max;
3396 for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3397 p[i] = 0xFFFFF;
3398
3399 dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3400 dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3401 dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3402 dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3403 dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3404 dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3405 dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3406 dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3407 dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3408 dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3409
3410 set_bit(EVENT_STAT_UPDATE, &dev->flags);
3411 }
3412
3413 static int lan78xx_open(struct net_device *net)
3414 {
3415 struct lan78xx_net *dev = netdev_priv(net);
3416 int ret;
3417
3418 netif_dbg(dev, ifup, dev->net, "open device");
3419
3420 ret = usb_autopm_get_interface(dev->intf);
3421 if (ret < 0)
3422 return ret;
3423
3424 mutex_lock(&dev->dev_mutex);
3425
3426 lan78xx_init_stats(dev);
3427
3428 napi_enable(&dev->napi);
3429
3430 set_bit(EVENT_DEV_OPEN, &dev->flags);
3431
3432 /* for Link Check */
3433 if (dev->urb_intr) {
3434 ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
3435 if (ret < 0) {
3436 netif_err(dev, ifup, dev->net,
3437 "intr submit %d\n", ret);
3438 goto done;
3439 }
3440 }
3441
3442 phylink_start(dev->phylink);
3443
3444 done:
3445 mutex_unlock(&dev->dev_mutex);
3446
3447 if (ret < 0)
3448 usb_autopm_put_interface(dev->intf);
3449
3450 return ret;
3451 }
3452
3453 static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
3454 {
3455 DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
3456 DECLARE_WAITQUEUE(wait, current);
3457 int temp;
3458
3459 /* ensure there are no more active urbs */
3460 add_wait_queue(&unlink_wakeup, &wait);
3461 set_current_state(TASK_UNINTERRUPTIBLE);
3462 dev->wait = &unlink_wakeup;
3463 temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);
3464
3465 /* maybe wait for deletions to finish. */
3466 while (!skb_queue_empty(&dev->rxq) ||
3467 !skb_queue_empty(&dev->txq)) {
3468 schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
3469 set_current_state(TASK_UNINTERRUPTIBLE);
3470 netif_dbg(dev, ifdown, dev->net,
3471 "waited for %d urb completions", temp);
3472 }
3473 set_current_state(TASK_RUNNING);
3474 dev->wait = NULL;
3475 remove_wait_queue(&unlink_wakeup, &wait);
3476
3477 /* empty Rx done, Rx overflow and Tx pend queues
3478 */
3479 while (!skb_queue_empty(&dev->rxq_done)) {
3480 struct sk_buff *skb = skb_dequeue(&dev->rxq_done);
3481
3482 lan78xx_release_rx_buf(dev, skb);
3483 }
3484
3485 skb_queue_purge(&dev->rxq_overflow);
3486 skb_queue_purge(&dev->txq_pend);
3487 }
3488
3489 static int lan78xx_stop(struct net_device *net)
3490 {
3491 struct lan78xx_net *dev = netdev_priv(net);
3492
3493 netif_dbg(dev, ifup, dev->net, "stop device");
3494
3495 mutex_lock(&dev->dev_mutex);
3496
3497 if (timer_pending(&dev->stat_monitor))
3498 timer_delete_sync(&dev->stat_monitor);
3499
3500 clear_bit(EVENT_DEV_OPEN, &dev->flags);
3501 napi_disable(&dev->napi);
3502
3503 lan78xx_terminate_urbs(dev);
3504
3505 netif_info(dev, ifdown, dev->net,
3506 "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
3507 net->stats.rx_packets, net->stats.tx_packets,
3508 net->stats.rx_errors, net->stats.tx_errors);
3509
3510 phylink_stop(dev->phylink);
3511
3512 usb_kill_urb(dev->urb_intr);
3513
3514 /* deferred work (task, timer, softirq) must also stop.
3515 * can't flush_scheduled_work() until we drop rtnl (later),
3516 * else workers could deadlock; so make workers a NOP.
3517 */
3518 clear_bit(EVENT_TX_HALT, &dev->flags);
3519 clear_bit(EVENT_RX_HALT, &dev->flags);
3520 clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
3521 clear_bit(EVENT_STAT_UPDATE, &dev->flags);
3522
3523 cancel_delayed_work_sync(&dev->wq);
3524
3525 usb_autopm_put_interface(dev->intf);
3526
3527 mutex_unlock(&dev->dev_mutex);
3528
3529 return 0;
3530 }
3531
3532 static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
3533 struct sk_buff_head *list, enum skb_state state)
3534 {
3535 unsigned long flags;
3536 enum skb_state old_state;
3537 struct skb_data *entry = (struct skb_data *)skb->cb;
3538
3539 spin_lock_irqsave(&list->lock, flags);
3540 old_state = entry->state;
3541 entry->state = state;
3542
3543 __skb_unlink(skb, list);
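/* Hand the skb over without re-enabling interrupts: drop the source
 * queue lock, keep irqs disabled, then take the rxq_done lock; the
 * flags saved above are restored when that lock is released below.
 */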
3544 spin_unlock(&list->lock);
3545 spin_lock(&dev->rxq_done.lock);
3546
3547 __skb_queue_tail(&dev->rxq_done, skb);
3548 if (skb_queue_len(&dev->rxq_done) == 1)
3549 napi_schedule(&dev->napi);
3550
3551 spin_unlock_irqrestore(&dev->rxq_done.lock, flags);
3552
3553 return old_state;
3554 }
3555
3556 static void tx_complete(struct urb *urb)
3557 {
3558 struct sk_buff *skb = (struct sk_buff *)urb->context;
3559 struct skb_data *entry = (struct skb_data *)skb->cb;
3560 struct lan78xx_net *dev = entry->dev;
3561
3562 if (urb->status == 0) {
3563 dev->net->stats.tx_packets += entry->num_of_packet;
3564 dev->net->stats.tx_bytes += entry->length;
3565 } else {
3566 dev->net->stats.tx_errors += entry->num_of_packet;
3567
3568 switch (urb->status) {
3569 case -EPIPE:
3570 lan78xx_defer_kevent(dev, EVENT_TX_HALT);
3571 break;
3572
3573 /* software-driven interface shutdown */
3574 case -ECONNRESET:
3575 case -ESHUTDOWN:
3576 netif_dbg(dev, tx_err, dev->net,
3577 "tx err interface gone %d\n",
3578 entry->urb->status);
3579 break;
3580
3581 case -EPROTO:
3582 case -ETIME:
3583 case -EILSEQ:
3584 netif_stop_queue(dev->net);
3585 netif_dbg(dev, tx_err, dev->net,
3586 "tx err queue stopped %d\n",
3587 entry->urb->status);
3588 break;
3589 default:
3590 netif_dbg(dev, tx_err, dev->net,
3591 "unknown tx err %d\n",
3592 entry->urb->status);
3593 break;
3594 }
3595 }
3596
3597 usb_autopm_put_interface_async(dev->intf);
3598
3599 skb_unlink(skb, &dev->txq);
3600
3601 lan78xx_release_tx_buf(dev, skb);
3602
3603 /* Re-schedule NAPI if Tx data pending but no URBs in progress.
3604 */
3605 if (skb_queue_empty(&dev->txq) &&
3606 !skb_queue_empty(&dev->txq_pend))
3607 napi_schedule(&dev->napi);
3608 }
3609
3610 static void lan78xx_queue_skb(struct sk_buff_head *list,
3611 struct sk_buff *newsk, enum skb_state state)
3612 {
3613 struct skb_data *entry = (struct skb_data *)newsk->cb;
3614
3615 __skb_queue_tail(list, newsk);
3616 entry->state = state;
3617 }
3618
3619 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3620 {
3621 return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3622 }
3623
3624 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3625 {
3626 return dev->tx_pend_data_len;
3627 }
3628
3629 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3630 struct sk_buff *skb,
3631 unsigned int *tx_pend_data_len)
3632 {
3633 unsigned long flags;
3634
3635 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3636
3637 __skb_queue_tail(&dev->txq_pend, skb);
3638
3639 dev->tx_pend_data_len += skb->len;
3640 *tx_pend_data_len = dev->tx_pend_data_len;
3641
3642 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3643 }
3644
3645 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3646 struct sk_buff *skb,
3647 unsigned int *tx_pend_data_len)
3648 {
3649 unsigned long flags;
3650
3651 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3652
3653 __skb_queue_head(&dev->txq_pend, skb);
3654
3655 dev->tx_pend_data_len += skb->len;
3656 *tx_pend_data_len = dev->tx_pend_data_len;
3657
3658 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3659 }
3660
3661 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3662 struct sk_buff **skb,
3663 unsigned int *tx_pend_data_len)
3664 {
3665 unsigned long flags;
3666
3667 spin_lock_irqsave(&dev->txq_pend.lock, flags);
3668
3669 *skb = __skb_dequeue(&dev->txq_pend);
3670 if (*skb)
3671 dev->tx_pend_data_len -= (*skb)->len;
3672 *tx_pend_data_len = dev->tx_pend_data_len;
3673
3674 spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3675 }
3676
3677 static netdev_tx_t
3678 lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
3679 {
3680 struct lan78xx_net *dev = netdev_priv(net);
3681 unsigned int tx_pend_data_len;
3682
3683 if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
3684 schedule_delayed_work(&dev->wq, 0);
3685
3686 skb_tx_timestamp(skb);
3687
3688 lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);
3689
3690 /* Set up a Tx URB if none is in progress */
3691
3692 if (skb_queue_empty(&dev->txq))
3693 napi_schedule(&dev->napi);
3694
3695 /* Stop stack Tx queue if we have enough data to fill
3696 * all the free Tx URBs.
3697 */
3698 if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
3699 netif_stop_queue(net);
3700
3701 netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
3702 tx_pend_data_len, lan78xx_tx_urb_space(dev));
3703
3704 /* Kick off transmission of pending data */
3705
3706 if (!skb_queue_empty(&dev->txq_free))
3707 napi_schedule(&dev->napi);
3708 }
3709
3710 return NETDEV_TX_OK;
3711 }
3712
3713 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3714 {
3715 struct lan78xx_priv *pdata = NULL;
3716 int ret;
3717 int i;
3718
3719 dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3720
3721 pdata = (struct lan78xx_priv *)(dev->data[0]);
3722 if (!pdata) {
3723 netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3724 return -ENOMEM;
3725 }
3726
3727 pdata->dev = dev;
3728
3729 spin_lock_init(&pdata->rfe_ctl_lock);
3730 mutex_init(&pdata->dataport_mutex);
3731
3732 INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3733
3734 for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3735 pdata->vlan_table[i] = 0;
3736
3737 INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3738
3739 dev->net->features = 0;
3740
3741 if (DEFAULT_TX_CSUM_ENABLE)
3742 dev->net->features |= NETIF_F_HW_CSUM;
3743
3744 if (DEFAULT_RX_CSUM_ENABLE)
3745 dev->net->features |= NETIF_F_RXCSUM;
3746
3747 if (DEFAULT_TSO_CSUM_ENABLE)
3748 dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3749
3750 if (DEFAULT_VLAN_RX_OFFLOAD)
3751 dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3752
3753 if (DEFAULT_VLAN_FILTER_ENABLE)
3754 dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3755
3756 dev->net->hw_features = dev->net->features;
3757
3758 ret = lan78xx_setup_irq_domain(dev);
3759 if (ret < 0) {
3760 netdev_warn(dev->net,
3761 "lan78xx_setup_irq_domain() failed : %d", ret);
3762 goto out1;
3763 }
3764
3765 /* Init all registers */
3766 ret = lan78xx_reset(dev);
3767 if (ret) {
3768 netdev_warn(dev->net, "Registers INIT FAILED....");
3769 goto out2;
3770 }
3771
3772 ret = lan78xx_mdio_init(dev);
3773 if (ret) {
3774 netdev_warn(dev->net, "MDIO INIT FAILED.....");
3775 goto out2;
3776 }
3777
3778 dev->net->flags |= IFF_MULTICAST;
3779
3780 pdata->wol = WAKE_MAGIC;
3781
3782 return ret;
3783
3784 out2:
3785 lan78xx_remove_irq_domain(dev);
3786
3787 out1:
3788 netdev_warn(dev->net, "Bind routine FAILED");
3789 cancel_work_sync(&pdata->set_multicast);
3790 cancel_work_sync(&pdata->set_vlan);
3791 kfree(pdata);
3792 return ret;
3793 }
3794
static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);

	lan78xx_remove_irq_domain(dev);

	lan78xx_remove_mdio(dev);

	if (pdata) {
		cancel_work_sync(&pdata->set_multicast);
		cancel_work_sync(&pdata->set_vlan);
		netif_dbg(dev, ifdown, dev->net, "free pdata");
		kfree(pdata);
		pdata = NULL;
		dev->data[0] = 0;
	}
}

static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	/* HW Checksum offload appears to be flawed if used when not stripping
	 * VLAN headers. Drop back to S/W checksums under these conditions.
	 */
	if (!(dev->net->features & NETIF_F_RXCSUM) ||
	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
		skb->ip_summed = CHECKSUM_NONE;
	} else {
		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
		skb->ip_summed = CHECKSUM_COMPLETE;
	}
}

static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
				    struct sk_buff *skb,
				    u32 rx_cmd_a, u32 rx_cmd_b)
{
	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (rx_cmd_a & RX_CMD_A_FVTG_))
		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
				       (rx_cmd_b & 0xffff));
}

static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}

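/* Each bulk-in URB may carry several Ethernet frames. Every frame is
 * preceded by three little-endian command words: RX_CMD_A (4 bytes,
 * status flags and frame length), RX_CMD_B (4 bytes, checksum and
 * VLAN tag) and RX_CMD_C (2 bytes), and frames are padded so the next
 * header starts on a 4-byte boundary. As an illustrative example with
 * RXW_PADDING == 2: a 60-byte frame gives (4 - ((60 + 2) % 4)) % 4 == 2,
 * so two padding bytes are skipped before the next command words.
 */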
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}

static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
			      int budget, int *work_done)
{
	if (!lan78xx_rx(dev, skb, budget, work_done)) {
		netif_dbg(dev, rx_err, dev->net, "drop\n");
		dev->net->stats.rx_errors++;
	}
}

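/* Bulk-in URB completion handler. Runs in interrupt context, so it
 * only classifies the result and defers the actual frame processing
 * to the NAPI bottom half via defer_bh(). A stalled endpoint (-EPIPE)
 * is handed to the kevent worker to clear the halt.
 */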
static void rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	skb_put(skb, urb->actual_length);
	state = rx_done;

	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:	/* async unlink */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}

static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data *entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}

static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
{
	struct sk_buff *rx_buf;

	/* Ensure the maximum number of Rx URBs is submitted */
	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
			break;
	}
}

static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
				    struct sk_buff *rx_buf)
{
	/* reset SKB data pointers */

	rx_buf->data = rx_buf->head;
	skb_reset_tail_pointer(rx_buf);
	rx_buf->len = 0;
	rx_buf->data_len = 0;

	rx_submit(dev, rx_buf, GFP_ATOMIC);
}

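/* Build the two little-endian command words that precede every frame
 * in a Tx URB: TX_CMD_A carries the payload length, FCS insertion and
 * checksum-offload flags (plus LSO for GSO frames); TX_CMD_B carries
 * the MSS for LSO and the VLAN tag to insert, when present.
 */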
static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
{
	u32 tx_cmd_a;
	u32 tx_cmd_b;

	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;

	tx_cmd_b = 0;
	if (skb_is_gso(skb)) {
		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);

		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;

		tx_cmd_a |= TX_CMD_A_LSO_;
	}

	if (skb_vlan_tag_present(skb)) {
		tx_cmd_a |= TX_CMD_A_IVTG_;
		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
	}

	put_unaligned_le32(tx_cmd_a, buffer);
	put_unaligned_le32(tx_cmd_b, buffer + 4);
}

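/* Aggregate pending SKBs into a single URB buffer. Each frame is
 * aligned to TX_ALIGNMENT, prefixed with its command words and then
 * copied in, until the buffer cannot hold the next frame or the
 * pending queue empties. entry->num_of_packet counts GSO segments so
 * Tx statistics stay accurate for LSO frames.
 */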
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there is room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}

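/* NAPI-context transmit bottom half: wakes the stack queue when URB
 * space is available again, then drains the pending queue into free
 * Tx URBs and submits them, honouring USB autosuspend (URBs built
 * while the device is asleep are anchored for submission at resume).
 */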
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send a zero-length packet (URB_ZERO_PACKET) */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}

static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}

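/* NAPI poll callback. Skips all work while the device is suspended;
 * otherwise processes completed URBs and, if the budget was not
 * exhausted, completes NAPI and re-arms itself when more Rx data is
 * already queued or Tx data is waiting on a live link.
 */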
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}

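/* Deferred event worker: clears halted bulk endpoints, acknowledges
 * PHY interrupts and refreshes statistics. The statistics timer uses
 * an exponential back-off (dev->delta doubles, capped at 50).
 */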
static void lan78xx_delayedwork(struct work_struct *work)
{
	int status;
	struct lan78xx_net *dev;

	dev = container_of(work, struct lan78xx_net, wq.work);

	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->txq);

		status = usb_clear_halt(dev->udev, dev->pipe_out);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				netdev_err(dev->net,
					   "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}

	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->pipe_in);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				netdev_err(dev->net,
					   "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			napi_schedule(&dev->napi);
		}
	}

	if (test_bit(EVENT_PHY_INT_ACK, &dev->flags)) {
		int ret = 0;

		clear_bit(EVENT_PHY_INT_ACK, &dev->flags);
		ret = lan78xx_phy_int_ack(dev);
		if (ret)
			netdev_info(dev->net, "PHY INT ack failed (%pe)\n",
				    ERR_PTR(ret));
	}

	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
		lan78xx_update_stats(dev);

		clear_bit(EVENT_STAT_UPDATE, &dev->flags);

		mod_timer(&dev->stat_monitor,
			  jiffies + (STAT_UPDATE_TIMER * dev->delta));

		dev->delta = min((dev->delta * 2), 50);
	}

	usb_autopm_put_interface(dev->intf);
}

static void intr_complete(struct urb *urb)
{
	struct lan78xx_net *dev = urb->context;
	int status = urb->status;

	switch (status) {
	/* success */
	case 0:
		lan78xx_status(dev, urb);
		break;

	/* software-driven interface shutdown */
	case -ENOENT:		/* urb killed */
	case -ENODEV:		/* hardware gone */
	case -ESHUTDOWN:	/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "intr shutdown, code %d\n", status);
		return;

	/* NOTE: not throttling like RX/TX, since this endpoint
	 * already polls infrequently
	 */
	default:
		netdev_dbg(dev->net, "intr status %d\n", status);
		break;
	}

	if (!netif_device_present(dev->net) ||
	    !netif_running(dev->net)) {
		netdev_warn(dev->net, "not submitting new status URB");
		return;
	}

	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
	status = usb_submit_urb(urb, GFP_ATOMIC);

	switch (status) {
	case 0:
		break;
	case -ENODEV:
	case -ENOENT:
		netif_dbg(dev, timer, dev->net,
			  "intr resubmit %d (disconnect?)", status);
		netif_device_detach(dev->net);
		break;
	default:
		netif_err(dev, timer, dev->net,
			  "intr resubmit --> %d\n", status);
		break;
	}
}

static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	udev = interface_to_usbdev(intf);
	net = dev->net;

	rtnl_lock();
	phylink_stop(dev->phylink);
	phylink_disconnect_phy(dev->phylink);
	rtnl_unlock();

	netif_napi_del(&dev->napi);

	unregister_netdev(net);

	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	phylink_destroy(dev->phylink);

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}

static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}

static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
						struct net_device *netdev,
						netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	if (skb->len > LAN78XX_TSO_SIZE(dev))
		features &= ~NETIF_F_GSO_MASK;

	features = vlan_features_check(skb, features);
	features = vxlan_features_check(skb, features);

	return features;
}

static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};

static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = timer_container_of(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}

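/* USB probe entry point: allocates the net_device, URB pools and
 * bookkeeping, validates that the interface exposes the expected
 * bulk-in, bulk-out and interrupt-in endpoints, binds and resets the
 * hardware, sets up the interrupt URB and PHY, then registers the
 * network interface.
 */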
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					 | NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto phy_uninit;
	}

	usb_set_intfdata(intf, dev);

	ret = device_set_wakeup_enable(&udev->dev, true);

	/* Default delay of 2sec has more overhead than advantage.
	 * Set to 10sec as default.
	 */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

phy_uninit:
	lan78xx_phy_uninit(dev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}

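/* Bit-serial CRC-16 (polynomial 0x8005, initial value 0xFFFF) over a
 * wake-up frame pattern, consuming each data byte LSB first. The
 * result is programmed into a WUF_CFGx register so the MAC can match
 * wake frames in hardware.
 */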
static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
{
	const u16 crc16poly = 0x8005;
	int i;
	u16 bit, crc, msb;
	u8 data;

	crc = 0xFFFF;
	for (i = 0; i < len; i++) {
		data = *buf++;
		for (bit = 0; bit < 8; bit++) {
			msb = crc >> 15;
			crc <<= 1;

			if (msb ^ (u16)(data & 1)) {
				crc ^= crc16poly;
				crc |= (u16)0x0001U;
			}
			data >>= 1;
		}
	}

	return crc;
}

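/* Configure the chip for USB selective (auto) suspend: stop both data
 * paths, reset the wake-source registers, enable good-frame and PHY
 * wake, select suspend mode 3, write the WUPS field back (which, by
 * all appearances, clears latched wake status) and restart the Rx
 * path so matching traffic can wake the device.
 */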
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}

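/* Configure Wake-on-LAN for system suspend. Each requested WoL mode
 * (PHY, magic packet, broadcast, multicast, unicast, ARP) selects the
 * matching WUCSR wake source and suspend mode; multicast and ARP wake
 * additionally program wake-up frame filters (WUF_CFG/WUF_MASK) with
 * a CRC-16 over the matched header bytes.
 */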
static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
{
	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
	const u8 arp_type[2] = { 0x08, 0x06 };
	u32 temp_pmt_ctl;
	int mask_index;
	u32 temp_wucsr;
	u32 buf;
	u16 crc;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;
	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	temp_wucsr = 0;

	temp_pmt_ctl = 0;

	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
	if (ret < 0)
		return ret;

	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;

	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
		if (ret < 0)
			return ret;
	}

	mask_index = 0;
	if (wol & WAKE_PHY) {
		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MAGIC) {
		temp_wucsr |= WUCSR_MPEN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
	}
	if (wol & WAKE_BCAST) {
		temp_wucsr |= WUCSR_BCST_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_MCAST) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		/* for IPv6 Multicast */
		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_MCAST_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_UCAST) {
		temp_wucsr |= WUCSR_PFDA_EN_;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	if (wol & WAKE_ARP) {
		temp_wucsr |= WUCSR_WAKE_EN_;

		/* set WUF_CFG & WUF_MASK
		 * for packettype (offset 12,13) = ARP (0x0806)
		 */
		crc = lan78xx_wakeframe_crc16(arp_type, 2);
		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
					WUF_CFGX_EN_ |
					WUF_CFGX_TYPE_ALL_ |
					(0 << WUF_CFGX_OFFSET_SHIFT_) |
					(crc & WUF_CFGX_CRC16_MASK_));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
		if (ret < 0)
			return ret;
		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
		if (ret < 0)
			return ret;

		mask_index++;

		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}

	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
	if (ret < 0)
		return ret;

	/* when multiple WOL bits are set */
	if (hweight_long((unsigned long)wol) > 1) {
		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
	}
	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
	if (ret < 0)
		return ret;

	/* clear WUPS */
	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_start_rx_path(dev);

	return ret;
}

static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		rtnl_lock();
		phylink_suspend(dev->phylink, false);
		rtnl_unlock();

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

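/* Resubmit Tx URBs that were anchored while the device was asleep.
 * Called with txq.lock held; returns true if the bulk-out pipe
 * stalled so the caller can schedule EVENT_TX_HALT handling.
 */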
static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
{
	bool pipe_halted = false;
	struct urb *urb;

	while ((urb = usb_get_from_anchor(&dev->deferred))) {
		struct sk_buff *skb = urb->context;
		int ret;

		if (!netif_device_present(dev->net) ||
		    !netif_carrier_ok(dev->net) ||
		    pipe_halted) {
			lan78xx_release_tx_buf(dev, skb);
			continue;
		}

		ret = usb_submit_urb(urb, GFP_ATOMIC);

		if (ret == 0) {
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, skb, tx_start);
		} else {
			if (ret == -EPIPE) {
				netif_stop_queue(dev->net);
				pipe_halted = true;
			} else if (ret == -ENODEV) {
				netif_device_detach(dev->net);
			}

			lan78xx_release_tx_buf(dev, skb);
		}
	}

	return pipe_halted;
}

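/* Resume handler: flushes the Tx FIFO, resubmits the interrupt URB
 * and any Tx URBs deferred during suspend, restarts the Tx path and
 * NAPI, then resets the wake-source registers and writes back the
 * recorded-wake status bits (write-one-to-clear, by all appearances).
 */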
static int lan78xx_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifup, dev->net, "resuming device");

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		bool pipe_halted = false;

		ret = lan78xx_flush_tx_fifo(dev);
		if (ret < 0)
			goto out;

		if (dev->urb_intr) {
			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);

			if (ret < 0) {
				if (ret == -ENODEV)
					netif_device_detach(dev->net);
				netdev_warn(dev->net, "Failed to submit intr URB");
			}
		}

		spin_lock_irq(&dev->txq.lock);

		if (netif_device_present(dev->net)) {
			pipe_halted = lan78xx_submit_deferred_urbs(dev);

			if (pipe_halted)
				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
		}

		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);

		spin_unlock_irq(&dev->txq.lock);

		if (!pipe_halted &&
		    netif_device_present(dev->net) &&
		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
			netif_start_queue(dev->net);

		ret = lan78xx_start_tx_path(dev);
		if (ret < 0)
			goto out;

		napi_schedule(&dev->napi);

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

	} else {
		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
	}

	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		goto out;
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
					     WUCSR2_ARP_RCD_ |
					     WUCSR2_IPV6_TCPSYN_RCD_ |
					     WUCSR2_IPV4_TCPSYN_RCD_);
	if (ret < 0)
		goto out;

	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
					    WUCSR_EEE_RX_WAKE_ |
					    WUCSR_PFDA_FR_ |
					    WUCSR_RFE_WAKE_FR_ |
					    WUCSR_WUFR_ |
					    WUCSR_MPR_ |
					    WUCSR_BCST_FR_);
	if (ret < 0)
		goto out;

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}

static int lan78xx_reset_resume(struct usb_interface *intf)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	int ret;

	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");

	ret = lan78xx_reset(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_resume(intf);
	if (ret < 0)
		return ret;

	rtnl_lock();
	phylink_resume(dev->phylink);
	rtnl_unlock();

	return 0;
}

static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);

static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");