1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Copyright (C) 2015 Microchip Technology
4  */
5 #include <linux/module.h>
6 #include <linux/netdevice.h>
7 #include <linux/etherdevice.h>
8 #include <linux/ethtool.h>
9 #include <linux/usb.h>
10 #include <linux/crc32.h>
11 #include <linux/signal.h>
12 #include <linux/slab.h>
13 #include <linux/if_vlan.h>
14 #include <linux/uaccess.h>
15 #include <linux/linkmode.h>
16 #include <linux/list.h>
17 #include <linux/ip.h>
18 #include <linux/ipv6.h>
19 #include <linux/mdio.h>
20 #include <linux/phy.h>
21 #include <net/ip6_checksum.h>
22 #include <net/vxlan.h>
23 #include <linux/interrupt.h>
24 #include <linux/irqdomain.h>
25 #include <linux/irq.h>
26 #include <linux/irqchip/chained_irq.h>
27 #include <linux/microchipphy.h>
28 #include <linux/phy_fixed.h>
29 #include <linux/of_mdio.h>
30 #include <linux/of_net.h>
31 #include "lan78xx.h"
32 
33 #define DRIVER_AUTHOR	"WOOJUNG HUH <woojung.huh@microchip.com>"
34 #define DRIVER_DESC	"LAN78XX USB 3.0 Gigabit Ethernet Devices"
35 #define DRIVER_NAME	"lan78xx"
36 
37 #define TX_TIMEOUT_JIFFIES		(5 * HZ)
38 #define THROTTLE_JIFFIES		(HZ / 8)
39 #define UNLINK_TIMEOUT_MS		3
40 
41 #define RX_MAX_QUEUE_MEMORY		(60 * 1518)
42 
43 #define SS_USB_PKT_SIZE			(1024)
44 #define HS_USB_PKT_SIZE			(512)
45 #define FS_USB_PKT_SIZE			(64)
46 
47 #define MAX_RX_FIFO_SIZE		(12 * 1024)
48 #define MAX_TX_FIFO_SIZE		(12 * 1024)
49 
50 #define FLOW_THRESHOLD(n)		((((n) + 511) / 512) & 0x7F)
51 #define FLOW_CTRL_THRESHOLD(on, off)	((FLOW_THRESHOLD(on)  << 0) | \
52 					 (FLOW_THRESHOLD(off) << 8))
53 
54 /* Flow control turned on when Rx FIFO level rises above this level (bytes) */
55 #define FLOW_ON_SS			9216
56 #define FLOW_ON_HS			8704
57 
58 /* Flow control turned off when Rx FIFO level falls below this level (bytes) */
59 #define FLOW_OFF_SS			4096
60 #define FLOW_OFF_HS			1024
61 
62 #define DEFAULT_BURST_CAP_SIZE		(MAX_TX_FIFO_SIZE)
63 #define DEFAULT_BULK_IN_DELAY		(0x0800)
64 #define MAX_SINGLE_PACKET_SIZE		(9000)
65 #define DEFAULT_TX_CSUM_ENABLE		(true)
66 #define DEFAULT_RX_CSUM_ENABLE		(true)
67 #define DEFAULT_TSO_CSUM_ENABLE		(true)
68 #define DEFAULT_VLAN_FILTER_ENABLE	(true)
69 #define DEFAULT_VLAN_RX_OFFLOAD		(true)
70 #define TX_ALIGNMENT			(4)
71 #define RXW_PADDING			2
72 
73 #define LAN78XX_USB_VENDOR_ID		(0x0424)
74 #define LAN7800_USB_PRODUCT_ID		(0x7800)
75 #define LAN7850_USB_PRODUCT_ID		(0x7850)
76 #define LAN7801_USB_PRODUCT_ID		(0x7801)
77 #define LAN78XX_EEPROM_MAGIC		(0x78A5)
78 #define LAN78XX_OTP_MAGIC		(0x78F3)
79 #define AT29M2AF_USB_VENDOR_ID		(0x07C9)
80 #define AT29M2AF_USB_PRODUCT_ID	(0x0012)
81 
82 #define	MII_READ			1
83 #define	MII_WRITE			0
84 
85 #define EEPROM_INDICATOR		(0xA5)
86 #define EEPROM_MAC_OFFSET		(0x01)
87 #define MAX_EEPROM_SIZE			512
88 #define OTP_INDICATOR_1			(0xF3)
89 #define OTP_INDICATOR_2			(0xF7)
90 
91 #define WAKE_ALL			(WAKE_PHY | WAKE_UCAST | \
92 					 WAKE_MCAST | WAKE_BCAST | \
93 					 WAKE_ARP | WAKE_MAGIC)
94 
95 #define TX_URB_NUM			10
96 #define TX_SS_URB_NUM			TX_URB_NUM
97 #define TX_HS_URB_NUM			TX_URB_NUM
98 #define TX_FS_URB_NUM			TX_URB_NUM
99 
100 /* A single URB buffer must be large enough to hold a complete jumbo packet
101  */
102 #define TX_SS_URB_SIZE			(32 * 1024)
103 #define TX_HS_URB_SIZE			(16 * 1024)
104 #define TX_FS_URB_SIZE			(10 * 1024)
105 
106 #define RX_SS_URB_NUM			30
107 #define RX_HS_URB_NUM			10
108 #define RX_FS_URB_NUM			10
109 #define RX_SS_URB_SIZE			TX_SS_URB_SIZE
110 #define RX_HS_URB_SIZE			TX_HS_URB_SIZE
111 #define RX_FS_URB_SIZE			TX_FS_URB_SIZE
112 
113 #define SS_BURST_CAP_SIZE		RX_SS_URB_SIZE
114 #define SS_BULK_IN_DELAY		0x2000
115 #define HS_BURST_CAP_SIZE		RX_HS_URB_SIZE
116 #define HS_BULK_IN_DELAY		0x2000
117 #define FS_BURST_CAP_SIZE		RX_FS_URB_SIZE
118 #define FS_BULK_IN_DELAY		0x2000
119 
120 #define TX_CMD_LEN			8
121 #define TX_SKB_MIN_LEN			(TX_CMD_LEN + ETH_HLEN)
122 #define LAN78XX_TSO_SIZE(dev)		((dev)->tx_urb_size - TX_SKB_MIN_LEN)
123 
124 #define RX_CMD_LEN			10
125 #define RX_SKB_MIN_LEN			(RX_CMD_LEN + ETH_HLEN)
126 #define RX_MAX_FRAME_LEN(mtu)		((mtu) + ETH_HLEN + VLAN_HLEN)
127 
128 /* USB related defines */
129 #define BULK_IN_PIPE			1
130 #define BULK_OUT_PIPE			2
131 
132 /* default autosuspend delay (mSec)*/
133 #define DEFAULT_AUTOSUSPEND_DELAY	(10 * 1000)
134 
135 /* statistic update interval (mSec) */
136 #define STAT_UPDATE_TIMER		(1 * 1000)
137 
138 /* time to wait for MAC or FCT to stop (jiffies) */
139 #define HW_DISABLE_TIMEOUT		(HZ / 10)
140 
141 /* time to wait between polling MAC or FCT state (ms) */
142 #define HW_DISABLE_DELAY_MS		1
143 
144 /* defines interrupts from interrupt EP */
145 #define MAX_INT_EP			(32)
146 #define INT_EP_INTEP			(31)
147 #define INT_EP_OTP_WR_DONE		(28)
148 #define INT_EP_EEE_TX_LPI_START		(26)
149 #define INT_EP_EEE_TX_LPI_STOP		(25)
150 #define INT_EP_EEE_RX_LPI		(24)
151 #define INT_EP_MAC_RESET_TIMEOUT	(23)
152 #define INT_EP_RDFO			(22)
153 #define INT_EP_TXE			(21)
154 #define INT_EP_USB_STATUS		(20)
155 #define INT_EP_TX_DIS			(19)
156 #define INT_EP_RX_DIS			(18)
157 #define INT_EP_PHY			(17)
158 #define INT_EP_DP			(16)
159 #define INT_EP_MAC_ERR			(15)
160 #define INT_EP_TDFU			(14)
161 #define INT_EP_TDFO			(13)
162 #define INT_EP_UTX			(12)
163 #define INT_EP_GPIO_11			(11)
164 #define INT_EP_GPIO_10			(10)
165 #define INT_EP_GPIO_9			(9)
166 #define INT_EP_GPIO_8			(8)
167 #define INT_EP_GPIO_7			(7)
168 #define INT_EP_GPIO_6			(6)
169 #define INT_EP_GPIO_5			(5)
170 #define INT_EP_GPIO_4			(4)
171 #define INT_EP_GPIO_3			(3)
172 #define INT_EP_GPIO_2			(2)
173 #define INT_EP_GPIO_1			(1)
174 #define INT_EP_GPIO_0			(0)
175 
176 static const char lan78xx_gstrings[][ETH_GSTRING_LEN] = {
177 	"RX FCS Errors",
178 	"RX Alignment Errors",
179 	"Rx Fragment Errors",
180 	"RX Jabber Errors",
181 	"RX Undersize Frame Errors",
182 	"RX Oversize Frame Errors",
183 	"RX Dropped Frames",
184 	"RX Unicast Byte Count",
185 	"RX Broadcast Byte Count",
186 	"RX Multicast Byte Count",
187 	"RX Unicast Frames",
188 	"RX Broadcast Frames",
189 	"RX Multicast Frames",
190 	"RX Pause Frames",
191 	"RX 64 Byte Frames",
192 	"RX 65 - 127 Byte Frames",
193 	"RX 128 - 255 Byte Frames",
194 	"RX 256 - 511 Bytes Frames",
195 	"RX 512 - 1023 Byte Frames",
196 	"RX 1024 - 1518 Byte Frames",
197 	"RX Greater 1518 Byte Frames",
198 	"EEE RX LPI Transitions",
199 	"EEE RX LPI Time",
200 	"TX FCS Errors",
201 	"TX Excess Deferral Errors",
202 	"TX Carrier Errors",
203 	"TX Bad Byte Count",
204 	"TX Single Collisions",
205 	"TX Multiple Collisions",
206 	"TX Excessive Collision",
207 	"TX Late Collisions",
208 	"TX Unicast Byte Count",
209 	"TX Broadcast Byte Count",
210 	"TX Multicast Byte Count",
211 	"TX Unicast Frames",
212 	"TX Broadcast Frames",
213 	"TX Multicast Frames",
214 	"TX Pause Frames",
215 	"TX 64 Byte Frames",
216 	"TX 65 - 127 Byte Frames",
217 	"TX 128 - 255 Byte Frames",
218 	"TX 256 - 511 Bytes Frames",
219 	"TX 512 - 1023 Byte Frames",
220 	"TX 1024 - 1518 Byte Frames",
221 	"TX Greater 1518 Byte Frames",
222 	"EEE TX LPI Transitions",
223 	"EEE TX LPI Time",
224 };
225 
/* Raw 32-bit hardware statistics block, as returned verbatim (little
 * endian) by the USB_VENDOR_REQUEST_GET_STATS control request.
 *
 * Field order is load-bearing: lan78xx_read_stats() and
 * lan78xx_update_stats() walk this struct as a flat u32 array, and the
 * order must also match lan78xx_gstrings and struct lan78xx_statstage64.
 */
struct lan78xx_statstage {
	u32 rx_fcs_errors;
	u32 rx_alignment_errors;
	u32 rx_fragment_errors;
	u32 rx_jabber_errors;
	u32 rx_undersize_frame_errors;
	u32 rx_oversize_frame_errors;
	u32 rx_dropped_frames;
	u32 rx_unicast_byte_count;
	u32 rx_broadcast_byte_count;
	u32 rx_multicast_byte_count;
	u32 rx_unicast_frames;
	u32 rx_broadcast_frames;
	u32 rx_multicast_frames;
	u32 rx_pause_frames;
	u32 rx_64_byte_frames;
	u32 rx_65_127_byte_frames;
	u32 rx_128_255_byte_frames;
	u32 rx_256_511_bytes_frames;
	u32 rx_512_1023_byte_frames;
	u32 rx_1024_1518_byte_frames;
	u32 rx_greater_1518_byte_frames;
	u32 eee_rx_lpi_transitions;
	u32 eee_rx_lpi_time;
	u32 tx_fcs_errors;
	u32 tx_excess_deferral_errors;
	u32 tx_carrier_errors;
	u32 tx_bad_byte_count;
	u32 tx_single_collisions;
	u32 tx_multiple_collisions;
	u32 tx_excessive_collision;
	u32 tx_late_collisions;
	u32 tx_unicast_byte_count;
	u32 tx_broadcast_byte_count;
	u32 tx_multicast_byte_count;
	u32 tx_unicast_frames;
	u32 tx_broadcast_frames;
	u32 tx_multicast_frames;
	u32 tx_pause_frames;
	u32 tx_64_byte_frames;
	u32 tx_65_127_byte_frames;
	u32 tx_128_255_byte_frames;
	u32 tx_256_511_bytes_frames;
	u32 tx_512_1023_byte_frames;
	u32 tx_1024_1518_byte_frames;
	u32 tx_greater_1518_byte_frames;
	u32 eee_tx_lpi_transitions;
	u32 eee_tx_lpi_time;
};
275 
/* 64-bit accumulated statistics reconstructed in software from the
 * 32-bit hardware counters plus rollover counts (see
 * lan78xx_update_stats()).  Field order must mirror
 * struct lan78xx_statstage exactly — the structs are walked in lockstep
 * as flat u32/u64 arrays.
 */
struct lan78xx_statstage64 {
	u64 rx_fcs_errors;
	u64 rx_alignment_errors;
	u64 rx_fragment_errors;
	u64 rx_jabber_errors;
	u64 rx_undersize_frame_errors;
	u64 rx_oversize_frame_errors;
	u64 rx_dropped_frames;
	u64 rx_unicast_byte_count;
	u64 rx_broadcast_byte_count;
	u64 rx_multicast_byte_count;
	u64 rx_unicast_frames;
	u64 rx_broadcast_frames;
	u64 rx_multicast_frames;
	u64 rx_pause_frames;
	u64 rx_64_byte_frames;
	u64 rx_65_127_byte_frames;
	u64 rx_128_255_byte_frames;
	u64 rx_256_511_bytes_frames;
	u64 rx_512_1023_byte_frames;
	u64 rx_1024_1518_byte_frames;
	u64 rx_greater_1518_byte_frames;
	u64 eee_rx_lpi_transitions;
	u64 eee_rx_lpi_time;
	u64 tx_fcs_errors;
	u64 tx_excess_deferral_errors;
	u64 tx_carrier_errors;
	u64 tx_bad_byte_count;
	u64 tx_single_collisions;
	u64 tx_multiple_collisions;
	u64 tx_excessive_collision;
	u64 tx_late_collisions;
	u64 tx_unicast_byte_count;
	u64 tx_broadcast_byte_count;
	u64 tx_multicast_byte_count;
	u64 tx_unicast_frames;
	u64 tx_broadcast_frames;
	u64 tx_multicast_frames;
	u64 tx_pause_frames;
	u64 tx_64_byte_frames;
	u64 tx_65_127_byte_frames;
	u64 tx_128_255_byte_frames;
	u64 tx_256_511_bytes_frames;
	u64 tx_512_1023_byte_frames;
	u64 tx_1024_1518_byte_frames;
	u64 tx_greater_1518_byte_frames;
	u64 eee_tx_lpi_transitions;
	u64 eee_tx_lpi_time;
};
325 
/* Device registers exposed as a register dump — presumably consumed by
 * the ethtool get_regs path; the consumer is outside this chunk, so
 * confirm before relying on the exact set/order.
 */
static u32 lan78xx_regs[] = {
	ID_REV,
	INT_STS,
	HW_CFG,
	PMT_CTL,
	E2P_CMD,
	E2P_DATA,
	USB_STATUS,
	VLAN_TYPE,
	MAC_CR,
	MAC_RX,
	MAC_TX,
	FLOW,
	ERR_STS,
	MII_ACC,
	MII_DATA,
	EEE_TX_LPI_REQ_DLY,
	EEE_TW_TX_SYS,
	EEE_TX_LPI_REM_DLY,
	WUCSR
};
347 
348 #define PHY_REG_SIZE (32 * sizeof(u32))
349 
350 struct lan78xx_net;
351 
/* Receive-filter / offload state, kept separately from struct
 * lan78xx_net (presumably reached via lan78xx_net::driver_priv —
 * the assignment is outside this chunk; confirm).
 */
struct lan78xx_priv {
	struct lan78xx_net *dev;
	u32 rfe_ctl; /* shadow of the RFE control register, see rfe_ctl_lock */
	u32 mchash_table[DP_SEL_VHF_HASH_LEN]; /* multicast hash table */
	u32 pfilter_table[NUM_OF_MAF][2]; /* perfect filter table */
	u32 vlan_table[DP_SEL_VHF_VLAN_LEN];
	struct mutex dataport_mutex; /* for dataport access */
	spinlock_t rfe_ctl_lock; /* for rfe register access */
	struct work_struct set_multicast;
	struct work_struct set_vlan;
	u32 wol; /* configured Wake-on-LAN option flags (WAKE_*) */
};
364 
/* Lifecycle state of a URB-carrying skb, stored in skb_data::state. */
enum skb_state {
	illegal = 0,	/* not yet assigned a valid state */
	tx_start,	/* tx URB submitted */
	tx_done,	/* tx URB completed */
	rx_start,	/* rx URB submitted */
	rx_done,	/* rx URB completed */
	rx_cleanup,	/* rx buffer awaiting cleanup */
	unlink_start	/* URB unlink in progress */
};
374 
/* Per-URB bookkeeping, overlaid on skb->cb for every pool buffer (see
 * lan78xx_alloc_buf_pool()).  Must fit within sizeof(skb->cb) —
 * NOTE(review): verify the size constraint if fields are added.
 */
struct skb_data {		/* skb->cb is one of these */
	struct urb *urb;		/* URB paired with this buffer for its lifetime */
	struct lan78xx_net *dev;
	enum skb_state state;
	size_t length;
	int num_of_packet;
};
382 
383 #define EVENT_TX_HALT			0
384 #define EVENT_RX_HALT			1
385 #define EVENT_RX_MEMORY			2
386 #define EVENT_STS_SPLIT			3
387 #define EVENT_LINK_RESET		4
388 #define EVENT_RX_PAUSED			5
389 #define EVENT_DEV_WAKING		6
390 #define EVENT_DEV_ASLEEP		7
391 #define EVENT_DEV_OPEN			8
392 #define EVENT_STAT_UPDATE		9
393 #define EVENT_DEV_DISCONNECT		10
394 
/* Statistics bookkeeping: the last raw 32-bit snapshot (saved), how
 * many times each counter has wrapped (rollover_count), the per-counter
 * wrap modulus minus one (rollover_max, used as max+1 by
 * lan78xx_update_stats()) and the reconstructed 64-bit totals
 * (curr_stat).  All of it is protected by access_lock.
 */
struct statstage {
	struct mutex			access_lock;	/* for stats access */
	struct lan78xx_statstage	saved;
	struct lan78xx_statstage	rollover_count;
	struct lan78xx_statstage	rollover_max;
	struct lan78xx_statstage64	curr_stat;
};
402 
/* State for the driver-private IRQ domain — presumably used to fan the
 * interrupt-EP status bits (INT_EP_*) out to Linux IRQs; the handler is
 * outside this chunk, confirm there.
 */
struct irq_domain_data {
	struct irq_domain	*irqdomain;
	unsigned int		phyirq;		/* IRQ number handed to the PHY */
	struct irq_chip		*irqchip;
	irq_flow_handler_t	irq_handler;
	u32			irqenable;	/* cached interrupt enable mask */
	struct mutex		irq_lock;		/* for irq bus access */
};
411 
/* Per-device driver state tying together the netdev, USB and PHY layers. */
struct lan78xx_net {
	struct net_device	*net;
	struct usb_device	*udev;
	struct usb_interface	*intf;
	void			*driver_priv;	/* presumably a struct lan78xx_priv — confirm */

	unsigned int		tx_pend_data_len;	/* presumably bytes queued on txq_pend — confirm */
	size_t			n_tx_urbs;	/* pool sizes, see lan78xx_alloc_*_resources() */
	size_t			n_rx_urbs;
	size_t			tx_urb_size;
	size_t			rx_urb_size;

	/* free-buffer pools and in-flight queues */
	struct sk_buff_head	rxq_free;
	struct sk_buff_head	rxq;
	struct sk_buff_head	rxq_done;
	struct sk_buff_head	rxq_overflow;
	struct sk_buff_head	txq_free;
	struct sk_buff_head	txq;
	struct sk_buff_head	txq_pend;

	struct napi_struct	napi;

	struct delayed_work	wq;

	int			msg_enable;

	struct urb		*urb_intr;
	struct usb_anchor	deferred;

	struct mutex		dev_mutex; /* serialise open/stop wrt suspend/resume */
	struct mutex		mdiobus_mutex; /* for MDIO bus access */
	unsigned int		pipe_in, pipe_out, pipe_intr;

	unsigned int		bulk_in_delay;
	unsigned int		burst_cap;

	unsigned long		flags;	/* EVENT_* bits, see test_bit() users */

	wait_queue_head_t	*wait;
	unsigned char		suspend_count;

	unsigned int		maxpacket;
	struct timer_list	stat_monitor;	/* periodic stats refresh timer */

	unsigned long		data[5];

	int			link_on;
	u8			mdix_ctrl;

	u32			chipid;	/* chip ID from ID_REV (e.g. ID_REV_CHIP_ID_7800_) */
	u32			chiprev;
	struct mii_bus		*mdiobus;
	phy_interface_t		interface;

	int			fc_autoneg;
	u8			fc_request_control;

	int			delta;
	struct statstage	stats;

	struct irq_domain_data	domain_data;
};
474 
475 /* use ethtool to change the level for any given device */
476 static int msg_level = -1;
477 module_param(msg_level, int, 0);
478 MODULE_PARM_DESC(msg_level, "Override default message level");
479 
/* Fetch a pre-allocated buffer from @buf_pool, or NULL if the pool is
 * empty.
 *
 * skb_dequeue() already returns NULL on an empty queue and takes the
 * queue lock internally, so the previous unlocked skb_queue_empty()
 * pre-check was redundant (and racy, though harmlessly so).
 */
static struct sk_buff *lan78xx_get_buf(struct sk_buff_head *buf_pool)
{
	return skb_dequeue(buf_pool);
}
487 
488 static void lan78xx_release_buf(struct sk_buff_head *buf_pool,
489 				struct sk_buff *buf)
490 {
491 	buf->data = buf->head;
492 	skb_reset_tail_pointer(buf);
493 
494 	buf->len = 0;
495 	buf->data_len = 0;
496 
497 	skb_queue_tail(buf_pool, buf);
498 }
499 
500 static void lan78xx_free_buf_pool(struct sk_buff_head *buf_pool)
501 {
502 	struct skb_data *entry;
503 	struct sk_buff *buf;
504 
505 	while (!skb_queue_empty(buf_pool)) {
506 		buf = skb_dequeue(buf_pool);
507 		if (buf) {
508 			entry = (struct skb_data *)buf->cb;
509 			usb_free_urb(entry->urb);
510 			dev_kfree_skb_any(buf);
511 		}
512 	}
513 }
514 
515 static int lan78xx_alloc_buf_pool(struct sk_buff_head *buf_pool,
516 				  size_t n_urbs, size_t urb_size,
517 				  struct lan78xx_net *dev)
518 {
519 	struct skb_data *entry;
520 	struct sk_buff *buf;
521 	struct urb *urb;
522 	int i;
523 
524 	skb_queue_head_init(buf_pool);
525 
526 	for (i = 0; i < n_urbs; i++) {
527 		buf = alloc_skb(urb_size, GFP_ATOMIC);
528 		if (!buf)
529 			goto error;
530 
531 		if (skb_linearize(buf) != 0) {
532 			dev_kfree_skb_any(buf);
533 			goto error;
534 		}
535 
536 		urb = usb_alloc_urb(0, GFP_ATOMIC);
537 		if (!urb) {
538 			dev_kfree_skb_any(buf);
539 			goto error;
540 		}
541 
542 		entry = (struct skb_data *)buf->cb;
543 		entry->urb = urb;
544 		entry->dev = dev;
545 		entry->length = 0;
546 		entry->num_of_packet = 0;
547 
548 		skb_queue_tail(buf_pool, buf);
549 	}
550 
551 	return 0;
552 
553 error:
554 	lan78xx_free_buf_pool(buf_pool);
555 
556 	return -ENOMEM;
557 }
558 
/* Take a free Rx URB buffer from the pool (NULL if exhausted). */
static struct sk_buff *lan78xx_get_rx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->rxq_free);
}

/* Reset an Rx URB buffer and return it to the free pool. */
static void lan78xx_release_rx_buf(struct lan78xx_net *dev,
				   struct sk_buff *rx_buf)
{
	lan78xx_release_buf(&dev->rxq_free, rx_buf);
}

/* Free every Rx pool buffer together with its URB. */
static void lan78xx_free_rx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->rxq_free);
}

/* Pre-allocate the Rx pool, sized by n_rx_urbs / rx_urb_size. */
static int lan78xx_alloc_rx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->rxq_free,
				      dev->n_rx_urbs, dev->rx_urb_size, dev);
}
580 
/* Take a free Tx URB buffer from the pool (NULL if exhausted). */
static struct sk_buff *lan78xx_get_tx_buf(struct lan78xx_net *dev)
{
	return lan78xx_get_buf(&dev->txq_free);
}

/* Reset a Tx URB buffer and return it to the free pool. */
static void lan78xx_release_tx_buf(struct lan78xx_net *dev,
				   struct sk_buff *tx_buf)
{
	lan78xx_release_buf(&dev->txq_free, tx_buf);
}

/* Free every Tx pool buffer together with its URB. */
static void lan78xx_free_tx_resources(struct lan78xx_net *dev)
{
	lan78xx_free_buf_pool(&dev->txq_free);
}

/* Pre-allocate the Tx pool, sized by n_tx_urbs / tx_urb_size. */
static int lan78xx_alloc_tx_resources(struct lan78xx_net *dev)
{
	return lan78xx_alloc_buf_pool(&dev->txq_free,
				      dev->n_tx_urbs, dev->tx_urb_size, dev);
}
602 
/* Read the 32-bit device register at @index over the USB control
 * endpoint.
 *
 * On success the little-endian value is converted to CPU order and
 * stored in *data; returns 0 on success or a negative errno.
 */
static int lan78xx_read_reg(struct lan78xx_net *dev, u32 index, u32 *data)
{
	u32 *buf;
	int ret;

	/* No point issuing control transfers once the device is gone */
	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* USB transfer buffers must be heap-allocated (DMA-able), not stack */
	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_READ_REGISTER,
			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_GET_TIMEOUT);
	if (likely(ret >= 0)) {
		le32_to_cpus(buf);
		*data = *buf;
	} else if (net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to read register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}
632 
/* Write @data to the 32-bit device register at @index over the USB
 * control endpoint.  Returns 0 on success or a negative errno.
 */
static int lan78xx_write_reg(struct lan78xx_net *dev, u32 index, u32 data)
{
	u32 *buf;
	int ret;

	/* No point issuing control transfers once the device is gone */
	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
		return -ENODEV;

	/* USB transfer buffers must be heap-allocated (DMA-able), not stack */
	buf = kmalloc(sizeof(u32), GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* Device expects little-endian register values on the wire */
	*buf = data;
	cpu_to_le32s(buf);

	ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
			      USB_VENDOR_REQUEST_WRITE_REGISTER,
			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
			      0, index, buf, 4, USB_CTRL_SET_TIMEOUT);
	if (unlikely(ret < 0) &&
	    net_ratelimit()) {
		netdev_warn(dev->net,
			    "Failed to write register index 0x%08x. ret = %pe",
			    index, ERR_PTR(ret));
	}

	kfree(buf);

	return ret < 0 ? ret : 0;
}
663 
664 static int lan78xx_update_reg(struct lan78xx_net *dev, u32 reg, u32 mask,
665 			      u32 data)
666 {
667 	int ret;
668 	u32 buf;
669 
670 	ret = lan78xx_read_reg(dev, reg, &buf);
671 	if (ret < 0)
672 		return ret;
673 
674 	buf &= ~mask;
675 	buf |= (mask & data);
676 
677 	return lan78xx_write_reg(dev, reg, buf);
678 }
679 
680 static int lan78xx_read_stats(struct lan78xx_net *dev,
681 			      struct lan78xx_statstage *data)
682 {
683 	int ret = 0;
684 	int i;
685 	struct lan78xx_statstage *stats;
686 	u32 *src;
687 	u32 *dst;
688 
689 	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
690 	if (!stats)
691 		return -ENOMEM;
692 
693 	ret = usb_control_msg(dev->udev,
694 			      usb_rcvctrlpipe(dev->udev, 0),
695 			      USB_VENDOR_REQUEST_GET_STATS,
696 			      USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
697 			      0,
698 			      0,
699 			      (void *)stats,
700 			      sizeof(*stats),
701 			      USB_CTRL_SET_TIMEOUT);
702 	if (likely(ret >= 0)) {
703 		src = (u32 *)stats;
704 		dst = (u32 *)data;
705 		for (i = 0; i < sizeof(*stats) / sizeof(u32); i++) {
706 			le32_to_cpus(&src[i]);
707 			dst[i] = src[i];
708 		}
709 	} else {
710 		netdev_warn(dev->net,
711 			    "Failed to read stat ret = %d", ret);
712 	}
713 
714 	kfree(stats);
715 
716 	return ret;
717 }
718 
/* If the freshly read 32-bit counter is smaller than the previously
 * saved value, the hardware counter has wrapped: bump the per-counter
 * rollover count so lan78xx_update_stats() can reconstruct a monotonic
 * 64-bit total.
 */
#define check_counter_rollover(struct1, dev_stats, member)		\
	do {								\
		if ((struct1)->member < (dev_stats).saved.member)	\
			(dev_stats).rollover_count.member++;		\
	} while (0)

/* Detect counter wraps in @stats against the last saved snapshot, then
 * record @stats as the new reference snapshot.  Caller must hold
 * dev->stats.access_lock (see lan78xx_update_stats()).
 */
static void lan78xx_check_stat_rollover(struct lan78xx_net *dev,
					struct lan78xx_statstage *stats)
{
	check_counter_rollover(stats, dev->stats, rx_fcs_errors);
	check_counter_rollover(stats, dev->stats, rx_alignment_errors);
	check_counter_rollover(stats, dev->stats, rx_fragment_errors);
	check_counter_rollover(stats, dev->stats, rx_jabber_errors);
	check_counter_rollover(stats, dev->stats, rx_undersize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_oversize_frame_errors);
	check_counter_rollover(stats, dev->stats, rx_dropped_frames);
	check_counter_rollover(stats, dev->stats, rx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, rx_unicast_frames);
	check_counter_rollover(stats, dev->stats, rx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, rx_multicast_frames);
	check_counter_rollover(stats, dev->stats, rx_pause_frames);
	check_counter_rollover(stats, dev->stats, rx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, rx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, rx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_rx_lpi_time);
	check_counter_rollover(stats, dev->stats, tx_fcs_errors);
	check_counter_rollover(stats, dev->stats, tx_excess_deferral_errors);
	check_counter_rollover(stats, dev->stats, tx_carrier_errors);
	check_counter_rollover(stats, dev->stats, tx_bad_byte_count);
	check_counter_rollover(stats, dev->stats, tx_single_collisions);
	check_counter_rollover(stats, dev->stats, tx_multiple_collisions);
	check_counter_rollover(stats, dev->stats, tx_excessive_collision);
	check_counter_rollover(stats, dev->stats, tx_late_collisions);
	check_counter_rollover(stats, dev->stats, tx_unicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_broadcast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_multicast_byte_count);
	check_counter_rollover(stats, dev->stats, tx_unicast_frames);
	check_counter_rollover(stats, dev->stats, tx_broadcast_frames);
	check_counter_rollover(stats, dev->stats, tx_multicast_frames);
	check_counter_rollover(stats, dev->stats, tx_pause_frames);
	check_counter_rollover(stats, dev->stats, tx_64_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_65_127_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_128_255_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_256_511_bytes_frames);
	check_counter_rollover(stats, dev->stats, tx_512_1023_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_1024_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, tx_greater_1518_byte_frames);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_transitions);
	check_counter_rollover(stats, dev->stats, eee_tx_lpi_time);

	memcpy(&dev->stats.saved, stats, sizeof(struct lan78xx_statstage));
}
778 
/* Refresh the reconstructed 64-bit statistics from the hardware's
 * 32-bit counters.  Resumes the device if autosuspended; silently skips
 * the update if that fails.
 */
static void lan78xx_update_stats(struct lan78xx_net *dev)
{
	u32 *p, *count, *max;
	u64 *data;
	int i;
	struct lan78xx_statstage lan78xx_stats;

	if (usb_autopm_get_interface(dev->intf) < 0)
		return;

	/* The stats structs are flat, identically ordered counter arrays,
	 * so they are walked here as raw u32/u64 arrays.
	 */
	p = (u32 *)&lan78xx_stats;
	count = (u32 *)&dev->stats.rollover_count;
	max = (u32 *)&dev->stats.rollover_max;
	data = (u64 *)&dev->stats.curr_stat;

	mutex_lock(&dev->stats.access_lock);

	/* lan78xx_read_stats() returns the byte count on success (> 0) */
	if (lan78xx_read_stats(dev, &lan78xx_stats) > 0)
		lan78xx_check_stat_rollover(dev, &lan78xx_stats);

	/* 64-bit total = current snapshot + rollovers * (wrap modulus) */
	for (i = 0; i < (sizeof(lan78xx_stats) / (sizeof(u32))); i++)
		data[i] = (u64)p[i] + ((u64)count[i] * ((u64)max[i] + 1));

	mutex_unlock(&dev->stats.access_lock);

	usb_autopm_put_interface(dev->intf);
}
806 
/* Start a hardware block by setting its enable bit(s) @hw_enable in
 * @reg.  Returns 0 on success or a negative errno.
 */
static int lan78xx_start_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enable)
{
	return lan78xx_update_reg(dev, reg, hw_enable, hw_enable);
}
811 
/* Stop a hardware block: clear @hw_enabled in @reg, then poll @reg
 * until the block reports @hw_disabled, for at most HW_DISABLE_TIMEOUT.
 *
 * Returns 0 once stopped (or if it was already stopped), -ETIMEDOUT if
 * the block did not stop in time, or a negative errno on a register
 * access failure.
 */
static int lan78xx_stop_hw(struct lan78xx_net *dev, u32 reg, u32 hw_enabled,
			   u32 hw_disabled)
{
	unsigned long timeout;
	bool stopped = true;
	int ret;
	u32 buf;

	/* Stop the h/w block (if not already stopped) */

	ret = lan78xx_read_reg(dev, reg, &buf);
	if (ret < 0)
		return ret;

	if (buf & hw_enabled) {
		buf &= ~hw_enabled;

		ret = lan78xx_write_reg(dev, reg, buf);
		if (ret < 0)
			return ret;

		/* Poll until the hardware confirms the disabled state */
		stopped = false;
		timeout = jiffies + HW_DISABLE_TIMEOUT;
		do  {
			ret = lan78xx_read_reg(dev, reg, &buf);
			if (ret < 0)
				return ret;

			if (buf & hw_disabled)
				stopped = true;
			else
				msleep(HW_DISABLE_DELAY_MS);
		} while (!stopped && !time_after(jiffies, timeout));
	}

	return stopped ? 0 : -ETIMEDOUT;
}
849 
/* Flush a FIFO by asserting its reset/flush bit(s) @fifo_flush in @reg.
 * Returns 0 on success or a negative errno.
 */
static int lan78xx_flush_fifo(struct lan78xx_net *dev, u32 reg, u32 fifo_flush)
{
	return lan78xx_update_reg(dev, reg, fifo_flush, fifo_flush);
}
854 
855 static int lan78xx_start_tx_path(struct lan78xx_net *dev)
856 {
857 	int ret;
858 
859 	netif_dbg(dev, drv, dev->net, "start tx path");
860 
861 	/* Start the MAC transmitter */
862 
863 	ret = lan78xx_start_hw(dev, MAC_TX, MAC_TX_TXEN_);
864 	if (ret < 0)
865 		return ret;
866 
867 	/* Start the Tx FIFO */
868 
869 	ret = lan78xx_start_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_);
870 	if (ret < 0)
871 		return ret;
872 
873 	return 0;
874 }
875 
876 static int lan78xx_stop_tx_path(struct lan78xx_net *dev)
877 {
878 	int ret;
879 
880 	netif_dbg(dev, drv, dev->net, "stop tx path");
881 
882 	/* Stop the Tx FIFO */
883 
884 	ret = lan78xx_stop_hw(dev, FCT_TX_CTL, FCT_TX_CTL_EN_, FCT_TX_CTL_DIS_);
885 	if (ret < 0)
886 		return ret;
887 
888 	/* Stop the MAC transmitter */
889 
890 	ret = lan78xx_stop_hw(dev, MAC_TX, MAC_TX_TXEN_, MAC_TX_TXD_);
891 	if (ret < 0)
892 		return ret;
893 
894 	return 0;
895 }
896 
/* The caller must ensure the Tx path is stopped before calling
 * lan78xx_flush_tx_fifo().
 */
static int lan78xx_flush_tx_fifo(struct lan78xx_net *dev)
{
	/* Assert the Tx FIFO reset bit to discard any queued data */
	return lan78xx_flush_fifo(dev, FCT_TX_CTL, FCT_TX_CTL_RST_);
}
904 
905 static int lan78xx_start_rx_path(struct lan78xx_net *dev)
906 {
907 	int ret;
908 
909 	netif_dbg(dev, drv, dev->net, "start rx path");
910 
911 	/* Start the Rx FIFO */
912 
913 	ret = lan78xx_start_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_);
914 	if (ret < 0)
915 		return ret;
916 
917 	/* Start the MAC receiver*/
918 
919 	ret = lan78xx_start_hw(dev, MAC_RX, MAC_RX_RXEN_);
920 	if (ret < 0)
921 		return ret;
922 
923 	return 0;
924 }
925 
926 static int lan78xx_stop_rx_path(struct lan78xx_net *dev)
927 {
928 	int ret;
929 
930 	netif_dbg(dev, drv, dev->net, "stop rx path");
931 
932 	/* Stop the MAC receiver */
933 
934 	ret = lan78xx_stop_hw(dev, MAC_RX, MAC_RX_RXEN_, MAC_RX_RXD_);
935 	if (ret < 0)
936 		return ret;
937 
938 	/* Stop the Rx FIFO */
939 
940 	ret = lan78xx_stop_hw(dev, FCT_RX_CTL, FCT_RX_CTL_EN_, FCT_RX_CTL_DIS_);
941 	if (ret < 0)
942 		return ret;
943 
944 	return 0;
945 }
946 
/* The caller must ensure the Rx path is stopped before calling
 * lan78xx_flush_rx_fifo().
 */
static int lan78xx_flush_rx_fifo(struct lan78xx_net *dev)
{
	/* Assert the Rx FIFO reset bit to discard any queued data */
	return lan78xx_flush_fifo(dev, FCT_RX_CTL, FCT_RX_CTL_RST_);
}
954 
955 /* Loop until the read is completed with timeout called with mdiobus_mutex held */
956 static int lan78xx_mdiobus_wait_not_busy(struct lan78xx_net *dev)
957 {
958 	unsigned long start_time = jiffies;
959 	u32 val;
960 	int ret;
961 
962 	do {
963 		ret = lan78xx_read_reg(dev, MII_ACC, &val);
964 		if (ret < 0)
965 			return ret;
966 
967 		if (!(val & MII_ACC_MII_BUSY_))
968 			return 0;
969 	} while (!time_after(jiffies, start_time + HZ));
970 
971 	return -ETIMEDOUT;
972 }
973 
974 static inline u32 mii_access(int id, int index, int read)
975 {
976 	u32 ret;
977 
978 	ret = ((u32)id << MII_ACC_PHY_ADDR_SHIFT_) & MII_ACC_PHY_ADDR_MASK_;
979 	ret |= ((u32)index << MII_ACC_MIIRINDA_SHIFT_) & MII_ACC_MIIRINDA_MASK_;
980 	if (read)
981 		ret |= MII_ACC_MII_READ_;
982 	else
983 		ret |= MII_ACC_MII_WRITE_;
984 	ret |= MII_ACC_MII_BUSY_;
985 
986 	return ret;
987 }
988 
/* Wait for the current EEPROM command to finish: poll E2P_CMD until the
 * busy bit clears or the controller raises its timeout flag, for at
 * most ~1 second.  Returns 0 on completion, -ETIMEDOUT otherwise, or a
 * negative errno on a register read failure.
 *
 * NOTE(review): the warning text says "read" but this helper is also
 * used on the EEPROM write path.
 */
static int lan78xx_wait_eeprom(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	do {
		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
		if (ret < 0)
			return ret;

		if (!(val & E2P_CMD_EPC_BUSY_) ||
		    (val & E2P_CMD_EPC_TIMEOUT_))
			break;
		usleep_range(40, 100);
	} while (!time_after(jiffies, start_time + HZ));

	/* 'val' holds the last E2P_CMD value read in the loop above */
	if (val & (E2P_CMD_EPC_TIMEOUT_ | E2P_CMD_EPC_BUSY_)) {
		netdev_warn(dev->net, "EEPROM read operation timeout");
		return -ETIMEDOUT;
	}

	return 0;
}
1013 
1014 static int lan78xx_eeprom_confirm_not_busy(struct lan78xx_net *dev)
1015 {
1016 	unsigned long start_time = jiffies;
1017 	u32 val;
1018 	int ret;
1019 
1020 	do {
1021 		ret = lan78xx_read_reg(dev, E2P_CMD, &val);
1022 		if (ret < 0)
1023 			return ret;
1024 
1025 		if (!(val & E2P_CMD_EPC_BUSY_))
1026 			return 0;
1027 
1028 		usleep_range(40, 100);
1029 	} while (!time_after(jiffies, start_time + HZ));
1030 
1031 	netdev_warn(dev->net, "EEPROM is busy");
1032 	return -ETIMEDOUT;
1033 }
1034 
/* Read @length bytes from the EEPROM starting at @offset into @data,
 * one byte per READ command.  Returns 0 on success or a negative errno.
 *
 * Error handling is deliberately asymmetric: on an EEPROM busy/timeout
 * condition the saved LED configuration is restored before returning,
 * whereas a USB register-access failure returns immediately (restoring
 * HW_CFG over a dead USB link would fail anyway).
 */
static int lan78xx_read_raw_eeprom(struct lan78xx_net *dev, u32 offset,
				   u32 length, u8 *data)
{
	u32 val, saved;
	int i, ret;

	/* depends on chip, some EEPROM pins are muxed with LED function.
	 * disable & restore LED function to access EEPROM.
	 */
	ret = lan78xx_read_reg(dev, HW_CFG, &val);
	if (ret < 0)
		return ret;

	saved = val;
	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
		ret = lan78xx_write_reg(dev, HW_CFG, val);
		if (ret < 0)
			return ret;
	}

	ret = lan78xx_eeprom_confirm_not_busy(dev);
	if (ret == -ETIMEDOUT)
		goto read_raw_eeprom_done;
	/* If USB fails, there is nothing to do */
	if (ret < 0)
		return ret;

	/* One READ command per byte: issue, wait, then pick up the data */
	for (i = 0; i < length; i++) {
		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_READ_;
		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
		ret = lan78xx_write_reg(dev, E2P_CMD, val);
		if (ret < 0)
			return ret;

		ret = lan78xx_wait_eeprom(dev);
		/* Looks like not USB specific error, try to recover */
		if (ret == -ETIMEDOUT)
			goto read_raw_eeprom_done;
		/* If USB fails, there is nothing to do */
		if (ret < 0)
			return ret;

		ret = lan78xx_read_reg(dev, E2P_DATA, &val);
		if (ret < 0)
			return ret;

		/* Only the low byte of E2P_DATA is EEPROM data */
		data[i] = val & 0xFF;
		offset++;
	}

read_raw_eeprom_done:
	if (dev->chipid == ID_REV_CHIP_ID_7800_)
		return lan78xx_write_reg(dev, HW_CFG, saved);

	return 0;
}
1092 
1093 static int lan78xx_read_eeprom(struct lan78xx_net *dev, u32 offset,
1094 			       u32 length, u8 *data)
1095 {
1096 	int ret;
1097 	u8 sig;
1098 
1099 	ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
1100 	if (ret < 0)
1101 		return ret;
1102 
1103 	if (sig != EEPROM_INDICATOR)
1104 		return -ENODATA;
1105 
1106 	return lan78xx_read_raw_eeprom(dev, offset, length, data);
1107 }
1108 
1109 static int lan78xx_write_raw_eeprom(struct lan78xx_net *dev, u32 offset,
1110 				    u32 length, u8 *data)
1111 {
1112 	u32 val;
1113 	u32 saved;
1114 	int i, ret;
1115 
1116 	/* depends on chip, some EEPROM pins are muxed with LED function.
1117 	 * disable & restore LED function to access EEPROM.
1118 	 */
1119 	ret = lan78xx_read_reg(dev, HW_CFG, &val);
1120 	if (ret < 0)
1121 		return ret;
1122 
1123 	saved = val;
1124 	if (dev->chipid == ID_REV_CHIP_ID_7800_) {
1125 		val &= ~(HW_CFG_LED1_EN_ | HW_CFG_LED0_EN_);
1126 		ret = lan78xx_write_reg(dev, HW_CFG, val);
1127 		if (ret < 0)
1128 			return ret;
1129 	}
1130 
1131 	ret = lan78xx_eeprom_confirm_not_busy(dev);
1132 	/* Looks like not USB specific error, try to recover */
1133 	if (ret == -ETIMEDOUT)
1134 		goto write_raw_eeprom_done;
1135 	/* If USB fails, there is nothing to do */
1136 	if (ret < 0)
1137 		return ret;
1138 
1139 	/* Issue write/erase enable command */
1140 	val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_EWEN_;
1141 	ret = lan78xx_write_reg(dev, E2P_CMD, val);
1142 	if (ret < 0)
1143 		return ret;
1144 
1145 	ret = lan78xx_wait_eeprom(dev);
1146 	/* Looks like not USB specific error, try to recover */
1147 	if (ret == -ETIMEDOUT)
1148 		goto write_raw_eeprom_done;
1149 	/* If USB fails, there is nothing to do */
1150 	if (ret < 0)
1151 		return ret;
1152 
1153 	for (i = 0; i < length; i++) {
1154 		/* Fill data register */
1155 		val = data[i];
1156 		ret = lan78xx_write_reg(dev, E2P_DATA, val);
1157 		if (ret < 0)
1158 			return ret;
1159 
1160 		/* Send "write" command */
1161 		val = E2P_CMD_EPC_BUSY_ | E2P_CMD_EPC_CMD_WRITE_;
1162 		val |= (offset & E2P_CMD_EPC_ADDR_MASK_);
1163 		ret = lan78xx_write_reg(dev, E2P_CMD, val);
1164 		if (ret < 0)
1165 			return ret;
1166 
1167 		ret = lan78xx_wait_eeprom(dev);
1168 		/* Looks like not USB specific error, try to recover */
1169 		if (ret == -ETIMEDOUT)
1170 			goto write_raw_eeprom_done;
1171 		/* If USB fails, there is nothing to do */
1172 		if (ret < 0)
1173 			return ret;
1174 
1175 		offset++;
1176 	}
1177 
1178 write_raw_eeprom_done:
1179 	if (dev->chipid == ID_REV_CHIP_ID_7800_)
1180 		return lan78xx_write_reg(dev, HW_CFG, saved);
1181 
1182 	return 0;
1183 }
1184 
/* Read @length bytes of raw OTP memory starting at @offset into @data.
 *
 * If the OTP block is powered down, it is powered up first and polled
 * (up to ~1s) until the power-down bit clears. Each byte is then read
 * with an ADDR1/ADDR2 + READ command + GO sequence, polling OTP_STATUS
 * until the busy bit clears. Returns 0 on success or a negative error
 * code (-ETIMEDOUT if a poll loop expires).
 */
static int lan78xx_read_raw_otp(struct lan78xx_net *dev, u32 offset,
				u32 length, u8 *data)
{
	unsigned long timeout;
	int ret, i;
	u32 buf;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			usleep_range(1, 10);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	for (i = 0; i < length; i++) {
		/* Byte address is split across two registers: high bits in
		 * OTP_ADDR1, low bits in OTP_ADDR2.
		 */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		/* Select the read command, then trigger it with GO. */
		ret = lan78xx_write_reg(dev, OTP_FUNC_CMD, OTP_FUNC_CMD_READ_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_STATUS");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);

		/* Only the low byte of the data register is meaningful. */
		ret = lan78xx_read_reg(dev, OTP_RD_DATA, &buf);
		if (ret < 0)
			return ret;

		data[i] = (u8)(buf & 0xFF);
	}

	return 0;
}
1259 
/* Program @length bytes from @data into OTP memory starting at @offset.
 *
 * Mirrors lan78xx_read_raw_otp(): powers up the OTP block if needed,
 * switches to BYTE program mode, then programs one byte at a time
 * (ADDR1/ADDR2 + data + program/verify command + GO), polling
 * OTP_STATUS after each byte. Returns 0 on success or a negative error
 * code (-ETIMEDOUT if a poll loop expires).
 *
 * NOTE(review): OTP is one-time programmable; callers are expected to
 * have validated the data (see lan78xx_ethtool_set_eeprom).
 */
static int lan78xx_write_raw_otp(struct lan78xx_net *dev, u32 offset,
				 u32 length, u8 *data)
{
	int i;
	u32 buf;
	unsigned long timeout;
	int ret;

	ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
	if (ret < 0)
		return ret;

	if (buf & OTP_PWR_DN_PWRDN_N_) {
		/* clear it and wait to be cleared */
		ret = lan78xx_write_reg(dev, OTP_PWR_DN, 0);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_PWR_DN, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "timeout on OTP_PWR_DN completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_PWR_DN_PWRDN_N_);
	}

	/* set to BYTE program mode */
	ret = lan78xx_write_reg(dev, OTP_PRGM_MODE, OTP_PRGM_MODE_BYTE_);
	if (ret < 0)
		return ret;

	for (i = 0; i < length; i++) {
		/* Byte address split across OTP_ADDR1 (high) / OTP_ADDR2 (low). */
		ret = lan78xx_write_reg(dev, OTP_ADDR1,
					((offset + i) >> 8) & OTP_ADDR1_15_11);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_ADDR2,
					((offset + i) & OTP_ADDR2_10_3));
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_PRGM_DATA, data[i]);
		if (ret < 0)
			return ret;

		/* Program-and-verify command, then trigger with GO. */
		ret = lan78xx_write_reg(dev, OTP_TST_CMD, OTP_TST_CMD_PRGVRFY_);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, OTP_CMD_GO, OTP_CMD_GO_GO_);
		if (ret < 0)
			return ret;

		timeout = jiffies + HZ;
		do {
			udelay(1);
			ret = lan78xx_read_reg(dev, OTP_STATUS, &buf);
			if (ret < 0)
				return ret;

			if (time_after(jiffies, timeout)) {
				netdev_warn(dev->net,
					    "Timeout on OTP_STATUS completion");
				return -ETIMEDOUT;
			}
		} while (buf & OTP_STATUS_BUSY_);
	}

	return 0;
}
1338 
1339 static int lan78xx_read_otp(struct lan78xx_net *dev, u32 offset,
1340 			    u32 length, u8 *data)
1341 {
1342 	u8 sig;
1343 	int ret;
1344 
1345 	ret = lan78xx_read_raw_otp(dev, 0, 1, &sig);
1346 
1347 	if (ret == 0) {
1348 		if (sig == OTP_INDICATOR_2)
1349 			offset += 0x100;
1350 		else if (sig != OTP_INDICATOR_1)
1351 			ret = -EINVAL;
1352 		if (!ret)
1353 			ret = lan78xx_read_raw_otp(dev, offset, length, data);
1354 	}
1355 
1356 	return ret;
1357 }
1358 
1359 static int lan78xx_dataport_wait_not_busy(struct lan78xx_net *dev)
1360 {
1361 	int i, ret;
1362 
1363 	for (i = 0; i < 100; i++) {
1364 		u32 dp_sel;
1365 
1366 		ret = lan78xx_read_reg(dev, DP_SEL, &dp_sel);
1367 		if (unlikely(ret < 0))
1368 			return ret;
1369 
1370 		if (dp_sel & DP_SEL_DPRDY_)
1371 			return 0;
1372 
1373 		usleep_range(40, 100);
1374 	}
1375 
1376 	netdev_warn(dev->net, "%s timed out", __func__);
1377 
1378 	return -ETIMEDOUT;
1379 }
1380 
/* Write @length 32-bit words from @buf into the dataport RAM selected
 * by @ram_select, starting at word address @addr.
 *
 * Holds a USB autopm reference and the dataport mutex for the whole
 * sequence. Each word is written as DP_ADDR + DP_DATA + DP_CMD(WRITE),
 * waiting for the dataport to go ready again after each command.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_dataport_write(struct lan78xx_net *dev, u32 ram_select,
				  u32 addr, u32 length, u32 *buf)
{
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	int i, ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&pdata->dataport_mutex);

	ret = lan78xx_dataport_wait_not_busy(dev);
	if (ret < 0)
		goto dataport_write;

	/* Select which internal RAM the dataport addresses. */
	ret = lan78xx_update_reg(dev, DP_SEL, DP_SEL_RSEL_MASK_, ram_select);
	if (ret < 0)
		goto dataport_write;

	for (i = 0; i < length; i++) {
		ret = lan78xx_write_reg(dev, DP_ADDR, addr + i);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_DATA, buf[i]);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_write_reg(dev, DP_CMD, DP_CMD_WRITE_);
		if (ret < 0)
			goto dataport_write;

		ret = lan78xx_dataport_wait_not_busy(dev);
		if (ret < 0)
			goto dataport_write;
	}

dataport_write:
	if (ret < 0)
		netdev_warn(dev->net, "dataport write failed %pe", ERR_PTR(ret));

	mutex_unlock(&pdata->dataport_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
1428 
1429 static void lan78xx_set_addr_filter(struct lan78xx_priv *pdata,
1430 				    int index, u8 addr[ETH_ALEN])
1431 {
1432 	u32 temp;
1433 
1434 	if ((pdata) && (index > 0) && (index < NUM_OF_MAF)) {
1435 		temp = addr[3];
1436 		temp = addr[2] | (temp << 8);
1437 		temp = addr[1] | (temp << 8);
1438 		temp = addr[0] | (temp << 8);
1439 		pdata->pfilter_table[index][1] = temp;
1440 		temp = addr[5];
1441 		temp = addr[4] | (temp << 8);
1442 		temp |= MAF_HI_VALID_ | MAF_HI_TYPE_DST_;
1443 		pdata->pfilter_table[index][0] = temp;
1444 	}
1445 }
1446 
1447 /* returns hash bit number for given MAC address */
1448 static inline u32 lan78xx_hash(char addr[ETH_ALEN])
1449 {
1450 	return (ether_crc(ETH_ALEN, addr) >> 23) & 0x1ff;
1451 }
1452 
1453 static void lan78xx_deferred_multicast_write(struct work_struct *param)
1454 {
1455 	struct lan78xx_priv *pdata =
1456 			container_of(param, struct lan78xx_priv, set_multicast);
1457 	struct lan78xx_net *dev = pdata->dev;
1458 	int i, ret;
1459 
1460 	netif_dbg(dev, drv, dev->net, "deferred multicast write 0x%08x\n",
1461 		  pdata->rfe_ctl);
1462 
1463 	ret = lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_,
1464 				     DP_SEL_VHF_VLAN_LEN,
1465 				     DP_SEL_VHF_HASH_LEN, pdata->mchash_table);
1466 	if (ret < 0)
1467 		goto multicast_write_done;
1468 
1469 	for (i = 1; i < NUM_OF_MAF; i++) {
1470 		ret = lan78xx_write_reg(dev, MAF_HI(i), 0);
1471 		if (ret < 0)
1472 			goto multicast_write_done;
1473 
1474 		ret = lan78xx_write_reg(dev, MAF_LO(i),
1475 					pdata->pfilter_table[i][1]);
1476 		if (ret < 0)
1477 			goto multicast_write_done;
1478 
1479 		ret = lan78xx_write_reg(dev, MAF_HI(i),
1480 					pdata->pfilter_table[i][0]);
1481 		if (ret < 0)
1482 			goto multicast_write_done;
1483 	}
1484 
1485 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
1486 
1487 multicast_write_done:
1488 	if (ret < 0)
1489 		netdev_warn(dev->net, "multicast write failed %pe", ERR_PTR(ret));
1490 	return;
1491 }
1492 
1493 static void lan78xx_set_multicast(struct net_device *netdev)
1494 {
1495 	struct lan78xx_net *dev = netdev_priv(netdev);
1496 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1497 	unsigned long flags;
1498 	int i;
1499 
1500 	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);
1501 
1502 	pdata->rfe_ctl &= ~(RFE_CTL_UCAST_EN_ | RFE_CTL_MCAST_EN_ |
1503 			    RFE_CTL_DA_PERFECT_ | RFE_CTL_MCAST_HASH_);
1504 
1505 	for (i = 0; i < DP_SEL_VHF_HASH_LEN; i++)
1506 		pdata->mchash_table[i] = 0;
1507 
1508 	/* pfilter_table[0] has own HW address */
1509 	for (i = 1; i < NUM_OF_MAF; i++) {
1510 		pdata->pfilter_table[i][0] = 0;
1511 		pdata->pfilter_table[i][1] = 0;
1512 	}
1513 
1514 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_;
1515 
1516 	if (dev->net->flags & IFF_PROMISC) {
1517 		netif_dbg(dev, drv, dev->net, "promiscuous mode enabled");
1518 		pdata->rfe_ctl |= RFE_CTL_MCAST_EN_ | RFE_CTL_UCAST_EN_;
1519 	} else {
1520 		if (dev->net->flags & IFF_ALLMULTI) {
1521 			netif_dbg(dev, drv, dev->net,
1522 				  "receive all multicast enabled");
1523 			pdata->rfe_ctl |= RFE_CTL_MCAST_EN_;
1524 		}
1525 	}
1526 
1527 	if (netdev_mc_count(dev->net)) {
1528 		struct netdev_hw_addr *ha;
1529 		int i;
1530 
1531 		netif_dbg(dev, drv, dev->net, "receive multicast hash filter");
1532 
1533 		pdata->rfe_ctl |= RFE_CTL_DA_PERFECT_;
1534 
1535 		i = 1;
1536 		netdev_for_each_mc_addr(ha, netdev) {
1537 			/* set first 32 into Perfect Filter */
1538 			if (i < 33) {
1539 				lan78xx_set_addr_filter(pdata, i, ha->addr);
1540 			} else {
1541 				u32 bitnum = lan78xx_hash(ha->addr);
1542 
1543 				pdata->mchash_table[bitnum / 32] |=
1544 							(1 << (bitnum % 32));
1545 				pdata->rfe_ctl |= RFE_CTL_MCAST_HASH_;
1546 			}
1547 			i++;
1548 		}
1549 	}
1550 
1551 	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);
1552 
1553 	/* defer register writes to a sleepable context */
1554 	schedule_work(&pdata->set_multicast);
1555 }
1556 
1557 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
1558 					 bool tx_pause, bool rx_pause);
1559 
1560 static int lan78xx_update_flowcontrol(struct lan78xx_net *dev, u8 duplex,
1561 				      u16 lcladv, u16 rmtadv)
1562 {
1563 	u8 cap;
1564 
1565 	if (dev->fc_autoneg)
1566 		cap = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1567 	else
1568 		cap = dev->fc_request_control;
1569 
1570 	netif_dbg(dev, link, dev->net, "rx pause %s, tx pause %s",
1571 		  (cap & FLOW_CTRL_RX ? "enabled" : "disabled"),
1572 		  (cap & FLOW_CTRL_TX ? "enabled" : "disabled"));
1573 
1574 	return lan78xx_configure_flowcontrol(dev,
1575 					     cap & FLOW_CTRL_TX,
1576 					     cap & FLOW_CTRL_RX);
1577 }
1578 
1579 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev);
1580 
/* Reset the MAC via MAC_CR_RST_ and wait (up to ~1s) for the
 * self-clearing reset bit to drop. The whole sequence is serialized
 * against MDIO traffic via mdiobus_mutex. Returns 0 on success,
 * -ETIMEDOUT if the reset never completes, or a USB error code.
 */
static int lan78xx_mac_reset(struct lan78xx_net *dev)
{
	unsigned long start_time = jiffies;
	u32 val;
	int ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* Resetting the device while there is activity on the MDIO
	 * bus can result in the MAC interface locking up and not
	 * completing register access transactions.
	 */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto exit_unlock;

	ret = lan78xx_read_reg(dev, MAC_CR, &val);
	if (ret < 0)
		goto exit_unlock;

	val |= MAC_CR_RST_;
	ret = lan78xx_write_reg(dev, MAC_CR, val);
	if (ret < 0)
		goto exit_unlock;

	/* Wait for the reset to complete before allowing any further
	 * MAC register accesses otherwise the MAC may lock up.
	 */
	do {
		ret = lan78xx_read_reg(dev, MAC_CR, &val);
		if (ret < 0)
			goto exit_unlock;

		/* The reset bit clears itself once the reset is done. */
		if (!(val & MAC_CR_RST_)) {
			ret = 0;
			goto exit_unlock;
		}
	} while (!time_after(jiffies, start_time + HZ));

	ret = -ETIMEDOUT;
exit_unlock:
	mutex_unlock(&dev->mdiobus_mutex);

	return ret;
}
1626 
1627 /**
1628  * lan78xx_phy_int_ack - Acknowledge PHY interrupt
1629  * @dev: pointer to the LAN78xx device structure
1630  *
1631  * This function acknowledges the PHY interrupt by setting the
1632  * INT_STS_PHY_INT_ bit in the interrupt status register (INT_STS).
1633  *
1634  * Return: 0 on success or a negative error code on failure.
1635  */
1636 static int lan78xx_phy_int_ack(struct lan78xx_net *dev)
1637 {
1638 	return lan78xx_write_reg(dev, INT_STS, INT_STS_PHY_INT_);
1639 }
1640 
1641 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed);
1642 
/* Handle a PHY link event: acknowledge the interrupt, re-read the link
 * state, and reconcile the MAC/USB configuration with it.
 *
 * On link down: reset the MAC and stop the statistics timer.
 * On link up: configure the USB side for the negotiated speed, resolve
 * flow control from the advertisement registers, restart statistics
 * monitoring, resubmit RX URBs and kick NAPI.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_link_reset(struct lan78xx_net *dev)
{
	struct phy_device *phydev = dev->net->phydev;
	struct ethtool_link_ksettings ecmd;
	int ladv, radv, ret, link;

	/* clear LAN78xx interrupt status */
	ret = lan78xx_phy_int_ack(dev);
	if (unlikely(ret < 0))
		return ret;

	/* Sample the link state under the PHY lock. */
	mutex_lock(&phydev->lock);
	phy_read_status(phydev);
	link = phydev->link;
	mutex_unlock(&phydev->lock);

	if (!link && dev->link_on) {
		dev->link_on = false;

		/* reset MAC */
		ret = lan78xx_mac_reset(dev);
		if (ret < 0)
			return ret;

		timer_delete(&dev->stat_monitor);
	} else if (link && !dev->link_on) {
		dev->link_on = true;

		phy_ethtool_ksettings_get(phydev, &ecmd);

		ret = lan78xx_configure_usb(dev, ecmd.base.speed);
		if (ret < 0)
			return ret;

		/* Local and link-partner advertisements, used to resolve
		 * the pause configuration below.
		 */
		ladv = phy_read(phydev, MII_ADVERTISE);
		if (ladv < 0)
			return ladv;

		radv = phy_read(phydev, MII_LPA);
		if (radv < 0)
			return radv;

		netif_dbg(dev, link, dev->net,
			  "speed: %u duplex: %d anadv: 0x%04x anlpa: 0x%04x",
			  ecmd.base.speed, ecmd.base.duplex, ladv, radv);

		ret = lan78xx_update_flowcontrol(dev, ecmd.base.duplex, ladv,
						 radv);
		if (ret < 0)
			return ret;

		if (!timer_pending(&dev->stat_monitor)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		lan78xx_rx_urb_submit_all(dev);

		local_bh_disable();
		napi_schedule(&dev->napi);
		local_bh_enable();
	}

	return 0;
}
1709 
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.	hope the failure is rare.
 */
static void lan78xx_defer_kevent(struct lan78xx_net *dev, int work)
{
	/* Record the event before scheduling so the worker always sees
	 * the flag, even if the work item was already queued.
	 */
	set_bit(work, &dev->flags);
	if (!schedule_delayed_work(&dev->wq, 0))
		netdev_err(dev->net, "kevent %d may have been dropped\n", work);
}
1721 
1722 static void lan78xx_status(struct lan78xx_net *dev, struct urb *urb)
1723 {
1724 	u32 intdata;
1725 
1726 	if (urb->actual_length != 4) {
1727 		netdev_warn(dev->net,
1728 			    "unexpected urb length %d", urb->actual_length);
1729 		return;
1730 	}
1731 
1732 	intdata = get_unaligned_le32(urb->transfer_buffer);
1733 
1734 	if (intdata & INT_ENP_PHY_INT) {
1735 		netif_dbg(dev, link, dev->net, "PHY INTR: 0x%08x\n", intdata);
1736 		lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
1737 
1738 		if (dev->domain_data.phyirq > 0)
1739 			generic_handle_irq_safe(dev->domain_data.phyirq);
1740 	} else {
1741 		netdev_warn(dev->net,
1742 			    "unexpected interrupt: 0x%08x\n", intdata);
1743 	}
1744 }
1745 
/* ethtool: size in bytes of the EEPROM exposed via get/set_eeprom. */
static int lan78xx_ethtool_get_eeprom_len(struct net_device *netdev)
{
	return MAX_EEPROM_SIZE;
}
1750 
1751 static int lan78xx_ethtool_get_eeprom(struct net_device *netdev,
1752 				      struct ethtool_eeprom *ee, u8 *data)
1753 {
1754 	struct lan78xx_net *dev = netdev_priv(netdev);
1755 	int ret;
1756 
1757 	ret = usb_autopm_get_interface(dev->intf);
1758 	if (ret)
1759 		return ret;
1760 
1761 	ee->magic = LAN78XX_EEPROM_MAGIC;
1762 
1763 	ret = lan78xx_read_raw_eeprom(dev, ee->offset, ee->len, data);
1764 
1765 	usb_autopm_put_interface(dev->intf);
1766 
1767 	return ret;
1768 }
1769 
1770 static int lan78xx_ethtool_set_eeprom(struct net_device *netdev,
1771 				      struct ethtool_eeprom *ee, u8 *data)
1772 {
1773 	struct lan78xx_net *dev = netdev_priv(netdev);
1774 	int ret;
1775 
1776 	ret = usb_autopm_get_interface(dev->intf);
1777 	if (ret)
1778 		return ret;
1779 
1780 	/* Invalid EEPROM_INDICATOR at offset zero will result in a failure
1781 	 * to load data from EEPROM
1782 	 */
1783 	if (ee->magic == LAN78XX_EEPROM_MAGIC)
1784 		ret = lan78xx_write_raw_eeprom(dev, ee->offset, ee->len, data);
1785 	else if ((ee->magic == LAN78XX_OTP_MAGIC) &&
1786 		 (ee->offset == 0) &&
1787 		 (ee->len == 512) &&
1788 		 (data[0] == OTP_INDICATOR_1))
1789 		ret = lan78xx_write_raw_otp(dev, ee->offset, ee->len, data);
1790 
1791 	usb_autopm_put_interface(dev->intf);
1792 
1793 	return ret;
1794 }
1795 
1796 static void lan78xx_get_strings(struct net_device *netdev, u32 stringset,
1797 				u8 *data)
1798 {
1799 	if (stringset == ETH_SS_STATS)
1800 		memcpy(data, lan78xx_gstrings, sizeof(lan78xx_gstrings));
1801 }
1802 
1803 static int lan78xx_get_sset_count(struct net_device *netdev, int sset)
1804 {
1805 	if (sset == ETH_SS_STATS)
1806 		return ARRAY_SIZE(lan78xx_gstrings);
1807 	else
1808 		return -EOPNOTSUPP;
1809 }
1810 
/* ethtool get_ethtool_stats: trigger a statistics update, then copy the
 * cached snapshot to @data under the stats lock.
 */
static void lan78xx_get_stats(struct net_device *netdev,
			      struct ethtool_stats *stats, u64 *data)
{
	struct lan78xx_net *dev = netdev_priv(netdev);

	lan78xx_update_stats(dev);

	/* curr_stat is also updated from the stat timer; copy atomically. */
	mutex_lock(&dev->stats.access_lock);
	memcpy(data, &dev->stats.curr_stat, sizeof(dev->stats.curr_stat));
	mutex_unlock(&dev->stats.access_lock);
}
1822 
1823 static void lan78xx_get_wol(struct net_device *netdev,
1824 			    struct ethtool_wolinfo *wol)
1825 {
1826 	struct lan78xx_net *dev = netdev_priv(netdev);
1827 	int ret;
1828 	u32 buf;
1829 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1830 
1831 	if (usb_autopm_get_interface(dev->intf) < 0)
1832 		return;
1833 
1834 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
1835 	if (unlikely(ret < 0)) {
1836 		netdev_warn(dev->net, "failed to get WoL %pe", ERR_PTR(ret));
1837 		wol->supported = 0;
1838 		wol->wolopts = 0;
1839 	} else {
1840 		if (buf & USB_CFG_RMT_WKP_) {
1841 			wol->supported = WAKE_ALL;
1842 			wol->wolopts = pdata->wol;
1843 		} else {
1844 			wol->supported = 0;
1845 			wol->wolopts = 0;
1846 		}
1847 	}
1848 
1849 	usb_autopm_put_interface(dev->intf);
1850 }
1851 
1852 static int lan78xx_set_wol(struct net_device *netdev,
1853 			   struct ethtool_wolinfo *wol)
1854 {
1855 	struct lan78xx_net *dev = netdev_priv(netdev);
1856 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
1857 	int ret;
1858 
1859 	if (wol->wolopts & ~WAKE_ALL)
1860 		return -EINVAL;
1861 
1862 	ret = usb_autopm_get_interface(dev->intf);
1863 	if (ret < 0)
1864 		return ret;
1865 
1866 	pdata->wol = wol->wolopts;
1867 
1868 	ret = device_set_wakeup_enable(&dev->udev->dev, (bool)wol->wolopts);
1869 	if (ret < 0)
1870 		goto exit_pm_put;
1871 
1872 	ret = phy_ethtool_set_wol(netdev->phydev, wol);
1873 
1874 exit_pm_put:
1875 	usb_autopm_put_interface(dev->intf);
1876 
1877 	return ret;
1878 }
1879 
1880 static int lan78xx_get_eee(struct net_device *net, struct ethtool_keee *edata)
1881 {
1882 	struct lan78xx_net *dev = netdev_priv(net);
1883 	struct phy_device *phydev = net->phydev;
1884 	int ret;
1885 	u32 buf;
1886 
1887 	ret = usb_autopm_get_interface(dev->intf);
1888 	if (ret < 0)
1889 		return ret;
1890 
1891 	ret = phy_ethtool_get_eee(phydev, edata);
1892 	if (ret < 0)
1893 		goto exit;
1894 
1895 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
1896 	if (buf & MAC_CR_EEE_EN_) {
1897 		/* EEE_TX_LPI_REQ_DLY & tx_lpi_timer are same uSec unit */
1898 		ret = lan78xx_read_reg(dev, EEE_TX_LPI_REQ_DLY, &buf);
1899 		edata->tx_lpi_timer = buf;
1900 	} else {
1901 		edata->tx_lpi_timer = 0;
1902 	}
1903 
1904 	ret = 0;
1905 exit:
1906 	usb_autopm_put_interface(dev->intf);
1907 
1908 	return ret;
1909 }
1910 
1911 static int lan78xx_set_eee(struct net_device *net, struct ethtool_keee *edata)
1912 {
1913 	struct lan78xx_net *dev = netdev_priv(net);
1914 	int ret;
1915 	u32 buf;
1916 
1917 	ret = usb_autopm_get_interface(dev->intf);
1918 	if (ret < 0)
1919 		return ret;
1920 
1921 	ret = phy_ethtool_set_eee(net->phydev, edata);
1922 	if (ret < 0)
1923 		goto out;
1924 
1925 	buf = (u32)edata->tx_lpi_timer;
1926 	ret = lan78xx_write_reg(dev, EEE_TX_LPI_REQ_DLY, buf);
1927 out:
1928 	usb_autopm_put_interface(dev->intf);
1929 
1930 	return ret;
1931 }
1932 
1933 static u32 lan78xx_get_link(struct net_device *net)
1934 {
1935 	u32 link;
1936 
1937 	mutex_lock(&net->phydev->lock);
1938 	phy_read_status(net->phydev);
1939 	link = net->phydev->link;
1940 	mutex_unlock(&net->phydev->lock);
1941 
1942 	return link;
1943 }
1944 
/* ethtool get_drvinfo: report driver name and USB bus path. */
static void lan78xx_get_drvinfo(struct net_device *net,
				struct ethtool_drvinfo *info)
{
	struct lan78xx_net *dev = netdev_priv(net);

	strscpy(info->driver, DRIVER_NAME, sizeof(info->driver));
	usb_make_path(dev->udev, info->bus_info, sizeof(info->bus_info));
}
1953 
/* ethtool: report the current netif message-level bitmap. */
static u32 lan78xx_get_msglevel(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	return dev->msg_enable;
}
1960 
/* ethtool: set the netif message-level bitmap used by netif_dbg() etc. */
static void lan78xx_set_msglevel(struct net_device *net, u32 level)
{
	struct lan78xx_net *dev = netdev_priv(net);

	dev->msg_enable = level;
}
1967 
1968 static int lan78xx_get_link_ksettings(struct net_device *net,
1969 				      struct ethtool_link_ksettings *cmd)
1970 {
1971 	struct lan78xx_net *dev = netdev_priv(net);
1972 	struct phy_device *phydev = net->phydev;
1973 	int ret;
1974 
1975 	ret = usb_autopm_get_interface(dev->intf);
1976 	if (ret < 0)
1977 		return ret;
1978 
1979 	phy_ethtool_ksettings_get(phydev, cmd);
1980 
1981 	usb_autopm_put_interface(dev->intf);
1982 
1983 	return ret;
1984 }
1985 
1986 static int lan78xx_set_link_ksettings(struct net_device *net,
1987 				      const struct ethtool_link_ksettings *cmd)
1988 {
1989 	struct lan78xx_net *dev = netdev_priv(net);
1990 	struct phy_device *phydev = net->phydev;
1991 	int ret = 0;
1992 	int temp;
1993 
1994 	ret = usb_autopm_get_interface(dev->intf);
1995 	if (ret < 0)
1996 		return ret;
1997 
1998 	/* change speed & duplex */
1999 	ret = phy_ethtool_ksettings_set(phydev, cmd);
2000 
2001 	if (!cmd->base.autoneg) {
2002 		/* force link down */
2003 		temp = phy_read(phydev, MII_BMCR);
2004 		phy_write(phydev, MII_BMCR, temp | BMCR_LOOPBACK);
2005 		mdelay(1);
2006 		phy_write(phydev, MII_BMCR, temp);
2007 	}
2008 
2009 	usb_autopm_put_interface(dev->intf);
2010 
2011 	return ret;
2012 }
2013 
2014 static void lan78xx_get_pause(struct net_device *net,
2015 			      struct ethtool_pauseparam *pause)
2016 {
2017 	struct lan78xx_net *dev = netdev_priv(net);
2018 	struct phy_device *phydev = net->phydev;
2019 	struct ethtool_link_ksettings ecmd;
2020 
2021 	phy_ethtool_ksettings_get(phydev, &ecmd);
2022 
2023 	pause->autoneg = dev->fc_autoneg;
2024 
2025 	if (dev->fc_request_control & FLOW_CTRL_TX)
2026 		pause->tx_pause = 1;
2027 
2028 	if (dev->fc_request_control & FLOW_CTRL_RX)
2029 		pause->rx_pause = 1;
2030 }
2031 
2032 static int lan78xx_set_pause(struct net_device *net,
2033 			     struct ethtool_pauseparam *pause)
2034 {
2035 	struct lan78xx_net *dev = netdev_priv(net);
2036 	struct phy_device *phydev = net->phydev;
2037 	struct ethtool_link_ksettings ecmd;
2038 	int ret;
2039 
2040 	phy_ethtool_ksettings_get(phydev, &ecmd);
2041 
2042 	if (pause->autoneg && !ecmd.base.autoneg) {
2043 		ret = -EINVAL;
2044 		goto exit;
2045 	}
2046 
2047 	dev->fc_request_control = 0;
2048 	if (pause->rx_pause)
2049 		dev->fc_request_control |= FLOW_CTRL_RX;
2050 
2051 	if (pause->tx_pause)
2052 		dev->fc_request_control |= FLOW_CTRL_TX;
2053 
2054 	if (ecmd.base.autoneg) {
2055 		__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
2056 		u32 mii_adv;
2057 
2058 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
2059 				   ecmd.link_modes.advertising);
2060 		linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
2061 				   ecmd.link_modes.advertising);
2062 		mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
2063 		mii_adv_to_linkmode_adv_t(fc, mii_adv);
2064 		linkmode_or(ecmd.link_modes.advertising, fc,
2065 			    ecmd.link_modes.advertising);
2066 
2067 		phy_ethtool_ksettings_set(phydev, &ecmd);
2068 	}
2069 
2070 	dev->fc_autoneg = pause->autoneg;
2071 
2072 	ret = 0;
2073 exit:
2074 	return ret;
2075 }
2076 
/* ethtool: byte length of the register dump made by lan78xx_get_regs(). */
static int lan78xx_get_regs_len(struct net_device *netdev)
{
	return sizeof(lan78xx_regs);
}
2081 
2082 static void
2083 lan78xx_get_regs(struct net_device *netdev, struct ethtool_regs *regs,
2084 		 void *buf)
2085 {
2086 	struct lan78xx_net *dev = netdev_priv(netdev);
2087 	unsigned int data_count = 0;
2088 	u32 *data = buf;
2089 	int i, ret;
2090 
2091 	/* Read Device/MAC registers */
2092 	for (i = 0; i < ARRAY_SIZE(lan78xx_regs); i++) {
2093 		ret = lan78xx_read_reg(dev, lan78xx_regs[i], &data[i]);
2094 		if (ret < 0) {
2095 			netdev_warn(dev->net,
2096 				    "failed to read register 0x%08x\n",
2097 				    lan78xx_regs[i]);
2098 			goto clean_data;
2099 		}
2100 
2101 		data_count++;
2102 	}
2103 
2104 	return;
2105 
2106 clean_data:
2107 	memset(data, 0, data_count * sizeof(u32));
2108 }
2109 
/* ethtool entry points; nway_reset is delegated directly to phylib. */
static const struct ethtool_ops lan78xx_ethtool_ops = {
	.get_link	= lan78xx_get_link,
	.nway_reset	= phy_ethtool_nway_reset,
	.get_drvinfo	= lan78xx_get_drvinfo,
	.get_msglevel	= lan78xx_get_msglevel,
	.set_msglevel	= lan78xx_set_msglevel,
	.get_eeprom_len = lan78xx_ethtool_get_eeprom_len,
	.get_eeprom	= lan78xx_ethtool_get_eeprom,
	.set_eeprom	= lan78xx_ethtool_set_eeprom,
	.get_ethtool_stats = lan78xx_get_stats,
	.get_sset_count = lan78xx_get_sset_count,
	.get_strings	= lan78xx_get_strings,
	.get_wol	= lan78xx_get_wol,
	.set_wol	= lan78xx_set_wol,
	.get_ts_info	= ethtool_op_get_ts_info,
	.get_eee	= lan78xx_get_eee,
	.set_eee	= lan78xx_set_eee,
	.get_pauseparam	= lan78xx_get_pause,
	.set_pauseparam	= lan78xx_set_pause,
	.get_link_ksettings = lan78xx_get_link_ksettings,
	.set_link_ksettings = lan78xx_set_link_ksettings,
	.get_regs_len	= lan78xx_get_regs_len,
	.get_regs	= lan78xx_get_regs,
};
2134 
/* Establish the interface MAC address.
 *
 * Source priority: a valid address already programmed in RX_ADDRL/H,
 * then a platform/device-tree supplied address, then EEPROM/OTP, and
 * finally a random locally-administered address. The chosen address is
 * written back to RX_ADDRL/H if it changed, mirrored into perfect
 * filter slot 0 (MAF 0), and set on the net_device.
 * Returns 0 on success or a negative error code.
 */
static int lan78xx_init_mac_address(struct lan78xx_net *dev)
{
	u32 addr_lo, addr_hi;
	u8 addr[6];
	int ret;

	ret = lan78xx_read_reg(dev, RX_ADDRL, &addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, RX_ADDRH, &addr_hi);
	if (ret < 0)
		return ret;

	/* Registers hold the address little-endian: low word first. */
	addr[0] = addr_lo & 0xFF;
	addr[1] = (addr_lo >> 8) & 0xFF;
	addr[2] = (addr_lo >> 16) & 0xFF;
	addr[3] = (addr_lo >> 24) & 0xFF;
	addr[4] = addr_hi & 0xFF;
	addr[5] = (addr_hi >> 8) & 0xFF;

	if (!is_valid_ether_addr(addr)) {
		if (!eth_platform_get_mac_address(&dev->udev->dev, addr)) {
			/* valid address present in Device Tree */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from Device Tree");
		} else if (((lan78xx_read_eeprom(dev, EEPROM_MAC_OFFSET,
						 ETH_ALEN, addr) == 0) ||
			    (lan78xx_read_otp(dev, EEPROM_MAC_OFFSET,
					      ETH_ALEN, addr) == 0)) &&
			   is_valid_ether_addr(addr)) {
			/* eeprom values are valid so use them */
			netif_dbg(dev, ifup, dev->net,
				  "MAC address read from EEPROM");
		} else {
			/* generate random MAC */
			eth_random_addr(addr);
			netif_dbg(dev, ifup, dev->net,
				  "MAC address set to random addr");
		}

		/* Program the freshly chosen address into the MAC. */
		addr_lo = addr[0] | (addr[1] << 8) |
			  (addr[2] << 16) | (addr[3] << 24);
		addr_hi = addr[4] | (addr[5] << 8);

		ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
		if (ret < 0)
			return ret;

		ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
		if (ret < 0)
			return ret;
	}

	/* Perfect filter slot 0 always matches our own address. */
	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
	if (ret < 0)
		return ret;

	ret = lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
	if (ret < 0)
		return ret;

	eth_hw_addr_set(dev->net, addr);

	return 0;
}
2201 
/* MDIO read and write wrappers for phylib */

/* Read PHY register @idx of PHY @phy_id through the MII_ACC/MII_DATA
 * window. Holds a USB autopm reference and the mdiobus mutex for the
 * whole sequence. Returns the 16-bit register value on success or a
 * negative error code.
 */
static int lan78xx_mdiobus_read(struct mii_bus *bus, int phy_id, int idx)
{
	struct lan78xx_net *dev = bus->priv;
	u32 val, addr;
	int ret;

	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->mdiobus_mutex);

	/* confirm MII not busy */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	/* set the address, index & direction (read from PHY) */
	addr = mii_access(phy_id, idx, MII_READ);
	ret = lan78xx_write_reg(dev, MII_ACC, addr);
	if (ret < 0)
		goto done;

	/* Wait for the transaction to finish before reading the data. */
	ret = lan78xx_mdiobus_wait_not_busy(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_read_reg(dev, MII_DATA, &val);
	if (ret < 0)
		goto done;

	ret = (int)(val & 0xFFFF);

done:
	mutex_unlock(&dev->mdiobus_mutex);
	usb_autopm_put_interface(dev->intf);

	return ret;
}
2242 
2243 static int lan78xx_mdiobus_write(struct mii_bus *bus, int phy_id, int idx,
2244 				 u16 regval)
2245 {
2246 	struct lan78xx_net *dev = bus->priv;
2247 	u32 val, addr;
2248 	int ret;
2249 
2250 	ret = usb_autopm_get_interface(dev->intf);
2251 	if (ret < 0)
2252 		return ret;
2253 
2254 	mutex_lock(&dev->mdiobus_mutex);
2255 
2256 	/* confirm MII not busy */
2257 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2258 	if (ret < 0)
2259 		goto done;
2260 
2261 	val = (u32)regval;
2262 	ret = lan78xx_write_reg(dev, MII_DATA, val);
2263 	if (ret < 0)
2264 		goto done;
2265 
2266 	/* set the address, index & direction (write to PHY) */
2267 	addr = mii_access(phy_id, idx, MII_WRITE);
2268 	ret = lan78xx_write_reg(dev, MII_ACC, addr);
2269 	if (ret < 0)
2270 		goto done;
2271 
2272 	ret = lan78xx_mdiobus_wait_not_busy(dev);
2273 	if (ret < 0)
2274 		goto done;
2275 
2276 done:
2277 	mutex_unlock(&dev->mdiobus_mutex);
2278 	usb_autopm_put_interface(dev->intf);
2279 	return ret;
2280 }
2281 
static int lan78xx_mdio_init(struct lan78xx_net *dev)
{
	struct device_node *node;
	int ret;

	dev->mdiobus = mdiobus_alloc();
	if (!dev->mdiobus) {
		netdev_err(dev->net, "can't allocate MDIO bus\n");
		return -ENOMEM;
	}

	/* hook the USB register-access MDIO wrappers into the bus */
	dev->mdiobus->priv = (void *)dev;
	dev->mdiobus->read = lan78xx_mdiobus_read;
	dev->mdiobus->write = lan78xx_mdiobus_write;
	dev->mdiobus->name = "lan78xx-mdiobus";
	dev->mdiobus->parent = &dev->udev->dev;

	/* bus id derived from USB topology so it is unique per port */
	snprintf(dev->mdiobus->id, MII_BUS_ID_SIZE, "usb-%03d:%03d",
		 dev->udev->bus->busnum, dev->udev->devnum);

	/* restrict the address scan to where a PHY can actually live */
	switch (dev->chipid) {
	case ID_REV_CHIP_ID_7800_:
	case ID_REV_CHIP_ID_7850_:
		/* set to internal PHY id */
		dev->mdiobus->phy_mask = ~(1 << 1);
		break;
	case ID_REV_CHIP_ID_7801_:
		/* scan thru PHYAD[2..0] */
		dev->mdiobus->phy_mask = ~(0xFF);
		break;
	}

	/* register against the optional "mdio" devicetree child node;
	 * of_get_child_by_name() takes a reference we must drop.
	 */
	node = of_get_child_by_name(dev->udev->dev.of_node, "mdio");
	ret = of_mdiobus_register(dev->mdiobus, node);
	of_node_put(node);
	if (ret) {
		netdev_err(dev->net, "can't register MDIO bus\n");
		goto exit1;
	}

	netdev_dbg(dev->net, "registered mdiobus bus %s\n", dev->mdiobus->id);
	return 0;
exit1:
	mdiobus_free(dev->mdiobus);
	return ret;
}
2328 
/* Unregister and free the MDIO bus created by lan78xx_mdio_init() */
static void lan78xx_remove_mdio(struct lan78xx_net *dev)
{
	mdiobus_unregister(dev->mdiobus);
	mdiobus_free(dev->mdiobus);
}
2334 
2335 static void lan78xx_link_status_change(struct net_device *net)
2336 {
2337 	struct lan78xx_net *dev = netdev_priv(net);
2338 	struct phy_device *phydev = net->phydev;
2339 	u32 data;
2340 	int ret;
2341 
2342 	ret = lan78xx_read_reg(dev, MAC_CR, &data);
2343 	if (ret < 0)
2344 		return;
2345 
2346 	if (phydev->enable_tx_lpi)
2347 		data |=  MAC_CR_EEE_EN_;
2348 	else
2349 		data &= ~MAC_CR_EEE_EN_;
2350 	lan78xx_write_reg(dev, MAC_CR, data);
2351 
2352 	phy_print_status(phydev);
2353 }
2354 
2355 static int irq_map(struct irq_domain *d, unsigned int irq,
2356 		   irq_hw_number_t hwirq)
2357 {
2358 	struct irq_domain_data *data = d->host_data;
2359 
2360 	irq_set_chip_data(irq, data);
2361 	irq_set_chip_and_handler(irq, data->irqchip, data->irq_handler);
2362 	irq_set_noprobe(irq);
2363 
2364 	return 0;
2365 }
2366 
/* irq_domain .unmap callback: undo irq_map() for this virq */
static void irq_unmap(struct irq_domain *d, unsigned int irq)
{
	irq_set_chip_and_handler(irq, NULL, NULL);
	irq_set_chip_data(irq, NULL);
}
2372 
/* Minimal IRQ domain ops: only map/unmap of the chained LAN78xx sources */
static const struct irq_domain_ops chip_domain_ops = {
	.map	= irq_map,
	.unmap	= irq_unmap,
};
2377 
/* Mask an interrupt source: only the cached enable bitmap is updated here;
 * the INT_EP_CTL register is synced later in irq_bus_sync_unlock(), where
 * sleeping USB register access is allowed.
 */
static void lan78xx_irq_mask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable &= ~BIT(irqd_to_hwirq(irqd));
}
2384 
/* Unmask an interrupt source in the cached enable bitmap; the hardware
 * register is written back from irq_bus_sync_unlock().
 */
static void lan78xx_irq_unmask(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	data->irqenable |= BIT(irqd_to_hwirq(irqd));
}
2391 
/* Serialize irqenable updates; the actual register write is deferred to
 * lan78xx_irq_bus_sync_unlock() where sleeping USB I/O is permitted.
 */
static void lan78xx_irq_bus_lock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);

	mutex_lock(&data->irq_lock);
}
2398 
/* Flush the cached interrupt-enable bitmap to INT_EP_CTL and release the
 * bus lock taken in lan78xx_irq_bus_lock().
 */
static void lan78xx_irq_bus_sync_unlock(struct irq_data *irqd)
{
	struct irq_domain_data *data = irq_data_get_irq_chip_data(irqd);
	struct lan78xx_net *dev =
			container_of(data, struct lan78xx_net, domain_data);
	u32 buf;
	int ret;

	/* call register access here because irq_bus_lock & irq_bus_sync_unlock
	 * are the only two callbacks executed in a non-atomic context.
	 */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		goto irq_bus_sync_unlock;

	/* skip the USB transfer if the hardware already matches the cache */
	if (buf != data->irqenable)
		ret = lan78xx_write_reg(dev, INT_EP_CTL, data->irqenable);

irq_bus_sync_unlock:
	if (ret < 0)
		netdev_err(dev->net, "Failed to sync IRQ enable register: %pe\n",
			   ERR_PTR(ret));

	mutex_unlock(&data->irq_lock);
}
2424 
/* irq_chip whose mask/unmask only edit the cached bitmap; the INT_EP_CTL
 * register itself is synced from the bus_lock/bus_sync_unlock callbacks.
 */
static struct irq_chip lan78xx_irqchip = {
	.name			= "lan78xx-irqs",
	.irq_mask		= lan78xx_irq_mask,
	.irq_unmask		= lan78xx_irq_unmask,
	.irq_bus_lock		= lan78xx_irq_bus_lock,
	.irq_bus_sync_unlock	= lan78xx_irq_bus_sync_unlock,
};
2432 
/* Create an IRQ domain for the device's interrupt-endpoint sources and map
 * the PHY interrupt (INT_EP_PHY) so phylib can use it.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_setup_irq_domain(struct lan78xx_net *dev)
{
	struct irq_domain *irqdomain;
	unsigned int irqmap = 0;
	u32 buf;
	int ret = 0;

	mutex_init(&dev->domain_data.irq_lock);

	/* seed the cached enable bitmap from the current hardware state */
	ret = lan78xx_read_reg(dev, INT_EP_CTL, &buf);
	if (ret < 0)
		return ret;

	dev->domain_data.irqenable = buf;

	dev->domain_data.irqchip = &lan78xx_irqchip;
	dev->domain_data.irq_handler = handle_simple_irq;

	irqdomain = irq_domain_create_simple(of_fwnode_handle(dev->udev->dev.parent->of_node),
					     MAX_INT_EP, 0,
					     &chip_domain_ops,
					     &dev->domain_data);
	if (irqdomain) {
		/* create mapping for PHY interrupt */
		irqmap = irq_create_mapping(irqdomain, INT_EP_PHY);
		if (!irqmap) {
			irq_domain_remove(irqdomain);

			irqdomain = NULL;
			ret = -EINVAL;
		}
	} else {
		ret = -EINVAL;
	}

	/* on failure these record NULL/0, making later teardown a no-op */
	dev->domain_data.irqdomain = irqdomain;
	dev->domain_data.phyirq = irqmap;

	return ret;
}
2473 
2474 static void lan78xx_remove_irq_domain(struct lan78xx_net *dev)
2475 {
2476 	if (dev->domain_data.phyirq > 0) {
2477 		irq_dispose_mapping(dev->domain_data.phyirq);
2478 
2479 		if (dev->domain_data.irqdomain)
2480 			irq_domain_remove(dev->domain_data.irqdomain);
2481 	}
2482 	dev->domain_data.phyirq = 0;
2483 	dev->domain_data.irqdomain = NULL;
2484 }
2485 
2486 /**
2487  * lan78xx_configure_usb - Configure USB link power settings
2488  * @dev: pointer to the LAN78xx device structure
2489  * @speed: negotiated Ethernet link speed (in Mbps)
2490  *
2491  * This function configures U1/U2 link power management for SuperSpeed
2492  * USB devices based on the current Ethernet link speed. It uses the
2493  * USB_CFG1 register to enable or disable U1 and U2 low-power states.
2494  *
2495  * Note: Only LAN7800 and LAN7801 support SuperSpeed (USB 3.x).
2496  *       LAN7850 is a High-Speed-only (USB 2.0) device and is skipped.
2497  *
2498  * Return: 0 on success or a negative error code on failure.
2499  */
2500 static int lan78xx_configure_usb(struct lan78xx_net *dev, int speed)
2501 {
2502 	u32 mask, val;
2503 	int ret;
2504 
2505 	/* Only configure USB settings for SuperSpeed devices */
2506 	if (dev->udev->speed != USB_SPEED_SUPER)
2507 		return 0;
2508 
2509 	/* LAN7850 does not support USB 3.x */
2510 	if (dev->chipid == ID_REV_CHIP_ID_7850_) {
2511 		netdev_warn_once(dev->net, "Unexpected SuperSpeed for LAN7850 (USB 2.0 only)\n");
2512 		return 0;
2513 	}
2514 
2515 	switch (speed) {
2516 	case SPEED_1000:
2517 		/* Disable U2, enable U1 */
2518 		ret = lan78xx_update_reg(dev, USB_CFG1,
2519 					 USB_CFG1_DEV_U2_INIT_EN_, 0);
2520 		if (ret < 0)
2521 			return ret;
2522 
2523 		return lan78xx_update_reg(dev, USB_CFG1,
2524 					  USB_CFG1_DEV_U1_INIT_EN_,
2525 					  USB_CFG1_DEV_U1_INIT_EN_);
2526 
2527 	case SPEED_100:
2528 	case SPEED_10:
2529 		/* Enable both U1 and U2 */
2530 		mask = USB_CFG1_DEV_U1_INIT_EN_ | USB_CFG1_DEV_U2_INIT_EN_;
2531 		val = mask;
2532 		return lan78xx_update_reg(dev, USB_CFG1, mask, val);
2533 
2534 	default:
2535 		netdev_warn(dev->net, "Unsupported link speed: %d\n", speed);
2536 		return -EINVAL;
2537 	}
2538 }
2539 
2540 /**
2541  * lan78xx_configure_flowcontrol - Set MAC and FIFO flow control configuration
2542  * @dev: pointer to the LAN78xx device structure
2543  * @tx_pause: enable transmission of pause frames
2544  * @rx_pause: enable reception of pause frames
2545  *
2546  * This function configures the LAN78xx flow control settings by writing
2547  * to the FLOW and FCT_FLOW registers. The pause time is set to the
2548  * maximum allowed value (65535 quanta). FIFO thresholds are selected
2549  * based on USB speed.
2550  *
2551  * The Pause Time field is measured in units of 512-bit times (quanta):
2552  *   - At 1 Gbps: 1 quanta = 512 ns → max ~33.6 ms pause
2553  *   - At 100 Mbps: 1 quanta = 5.12 µs → max ~335 ms pause
2554  *   - At 10 Mbps: 1 quanta = 51.2 µs → max ~3.3 s pause
2555  *
2556  * Flow control thresholds (FCT_FLOW) are used to trigger pause/resume:
2557  *   - RXUSED is the number of bytes used in the RX FIFO
2558  *   - Flow is turned ON when RXUSED ≥ FLOW_ON threshold
2559  *   - Flow is turned OFF when RXUSED ≤ FLOW_OFF threshold
2560  *   - Both thresholds are encoded in units of 512 bytes (rounded up)
2561  *
2562  * Thresholds differ by USB speed because available USB bandwidth
2563  * affects how fast packets can be drained from the RX FIFO:
2564  *   - USB 3.x (SuperSpeed):
2565  *       FLOW_ON  = 9216 bytes → 18 units
2566  *       FLOW_OFF = 4096 bytes →  8 units
2567  *   - USB 2.0 (High-Speed):
2568  *       FLOW_ON  = 8704 bytes → 17 units
2569  *       FLOW_OFF = 1024 bytes →  2 units
2570  *
2571  * Note: The FCT_FLOW register must be configured before enabling TX pause
2572  *       (i.e., before setting FLOW_CR_TX_FCEN_), as required by the hardware.
2573  *
2574  * Return: 0 on success or a negative error code on failure.
2575  */
2576 static int lan78xx_configure_flowcontrol(struct lan78xx_net *dev,
2577 					 bool tx_pause, bool rx_pause)
2578 {
2579 	/* Use maximum pause time: 65535 quanta (512-bit times) */
2580 	const u32 pause_time_quanta = 65535;
2581 	u32 fct_flow = 0;
2582 	u32 flow = 0;
2583 	int ret;
2584 
2585 	/* Prepare MAC flow control bits */
2586 	if (tx_pause)
2587 		flow |= FLOW_CR_TX_FCEN_ | pause_time_quanta;
2588 
2589 	if (rx_pause)
2590 		flow |= FLOW_CR_RX_FCEN_;
2591 
2592 	/* Select RX FIFO thresholds based on USB speed
2593 	 *
2594 	 * FCT_FLOW layout:
2595 	 *   bits [6:0]   FLOW_ON threshold (RXUSED ≥ ON → assert pause)
2596 	 *   bits [14:8]  FLOW_OFF threshold (RXUSED ≤ OFF → deassert pause)
2597 	 *   thresholds are expressed in units of 512 bytes
2598 	 */
2599 	switch (dev->udev->speed) {
2600 	case USB_SPEED_SUPER:
2601 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_SS, FLOW_OFF_SS);
2602 		break;
2603 	case USB_SPEED_HIGH:
2604 		fct_flow = FLOW_CTRL_THRESHOLD(FLOW_ON_HS, FLOW_OFF_HS);
2605 		break;
2606 	default:
2607 		netdev_warn(dev->net, "Unsupported USB speed: %d\n",
2608 			    dev->udev->speed);
2609 		return -EINVAL;
2610 	}
2611 
2612 	/* Step 1: Write FIFO thresholds before enabling pause frames */
2613 	ret = lan78xx_write_reg(dev, FCT_FLOW, fct_flow);
2614 	if (ret < 0)
2615 		return ret;
2616 
2617 	/* Step 2: Enable MAC pause functionality */
2618 	return lan78xx_write_reg(dev, FLOW, flow);
2619 }
2620 
2621 /**
2622  * lan78xx_register_fixed_phy() - Register a fallback fixed PHY
2623  * @dev: LAN78xx device
2624  *
2625  * Registers a fixed PHY with 1 Gbps full duplex. This is used in special cases
2626  * like EVB-KSZ9897-1, where LAN7801 acts as a USB-to-Ethernet interface to a
2627  * switch without a visible PHY.
2628  *
2629  * Return: pointer to the registered fixed PHY, or ERR_PTR() on error.
2630  */
2631 static struct phy_device *lan78xx_register_fixed_phy(struct lan78xx_net *dev)
2632 {
2633 	struct fixed_phy_status fphy_status = {
2634 		.link = 1,
2635 		.speed = SPEED_1000,
2636 		.duplex = DUPLEX_FULL,
2637 	};
2638 
2639 	netdev_info(dev->net,
2640 		    "No PHY found on LAN7801 – registering fixed PHY (e.g. EVB-KSZ9897-1)\n");
2641 
2642 	return fixed_phy_register(&fphy_status, NULL);
2643 }
2644 
2645 /**
2646  * lan78xx_get_phy() - Probe or register PHY device and set interface mode
2647  * @dev: LAN78xx device structure
2648  *
2649  * This function attempts to find a PHY on the MDIO bus. If no PHY is found
2650  * and the chip is LAN7801, it registers a fixed PHY as fallback. It also
2651  * sets dev->interface based on chip ID and detected PHY type.
2652  *
2653  * Return: a valid PHY device pointer, or ERR_PTR() on failure.
2654  */
2655 static struct phy_device *lan78xx_get_phy(struct lan78xx_net *dev)
2656 {
2657 	struct phy_device *phydev;
2658 
2659 	/* Attempt to locate a PHY on the MDIO bus */
2660 	phydev = phy_find_first(dev->mdiobus);
2661 
2662 	switch (dev->chipid) {
2663 	case ID_REV_CHIP_ID_7801_:
2664 		if (phydev) {
2665 			/* External RGMII PHY detected */
2666 			dev->interface = PHY_INTERFACE_MODE_RGMII_ID;
2667 			phydev->is_internal = false;
2668 
2669 			if (!phydev->drv)
2670 				netdev_warn(dev->net,
2671 					    "PHY driver not found – assuming RGMII delays are on PCB or strapped for the PHY\n");
2672 
2673 			return phydev;
2674 		}
2675 
2676 		dev->interface = PHY_INTERFACE_MODE_RGMII;
2677 		/* No PHY found – fallback to fixed PHY (e.g. KSZ switch board) */
2678 		return lan78xx_register_fixed_phy(dev);
2679 
2680 	case ID_REV_CHIP_ID_7800_:
2681 	case ID_REV_CHIP_ID_7850_:
2682 		if (!phydev)
2683 			return ERR_PTR(-ENODEV);
2684 
2685 		/* These use internal GMII-connected PHY */
2686 		dev->interface = PHY_INTERFACE_MODE_GMII;
2687 		phydev->is_internal = true;
2688 		return phydev;
2689 
2690 	default:
2691 		netdev_err(dev->net, "Unknown CHIP ID: 0x%08x\n", dev->chipid);
2692 		return ERR_PTR(-ENODEV);
2693 	}
2694 }
2695 
2696 /**
2697  * lan78xx_mac_prepare_for_phy() - Preconfigure MAC-side interface settings
2698  * @dev: LAN78xx device
2699  *
2700  * Configure MAC-side registers according to dev->interface, which should be
2701  * set by lan78xx_get_phy().
2702  *
2703  * - For PHY_INTERFACE_MODE_RGMII:
2704  *   Enable MAC-side TXC delay. This mode seems to be used in a special setup
2705  *   without a real PHY, likely on EVB-KSZ9897-1. In that design, LAN7801 is
2706  *   connected to the KSZ9897 switch, and the link timing is expected to be
2707  *   hardwired (e.g. via strapping or board layout). No devicetree support is
2708  *   assumed here.
2709  *
2710  * - For PHY_INTERFACE_MODE_RGMII_ID:
2711  *   Disable MAC-side delay and rely on the PHY driver to provide delay.
2712  *
2713  * - For GMII, no MAC-specific config is needed.
2714  *
2715  * Return: 0 on success or a negative error code.
2716  */
2717 static int lan78xx_mac_prepare_for_phy(struct lan78xx_net *dev)
2718 {
2719 	int ret;
2720 
2721 	switch (dev->interface) {
2722 	case PHY_INTERFACE_MODE_RGMII:
2723 		/* Enable MAC-side TX clock delay */
2724 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID,
2725 					MAC_RGMII_ID_TXC_DELAY_EN_);
2726 		if (ret < 0)
2727 			return ret;
2728 
2729 		ret = lan78xx_write_reg(dev, RGMII_TX_BYP_DLL, 0x3D00);
2730 		if (ret < 0)
2731 			return ret;
2732 
2733 		ret = lan78xx_update_reg(dev, HW_CFG,
2734 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_,
2735 					 HW_CFG_CLK125_EN_ | HW_CFG_REFCLK25_EN_);
2736 		if (ret < 0)
2737 			return ret;
2738 
2739 		break;
2740 
2741 	case PHY_INTERFACE_MODE_RGMII_ID:
2742 		/* Disable MAC-side TXC delay, PHY provides it */
2743 		ret = lan78xx_write_reg(dev, MAC_RGMII_ID, 0);
2744 		if (ret < 0)
2745 			return ret;
2746 
2747 		break;
2748 
2749 	case PHY_INTERFACE_MODE_GMII:
2750 		/* No MAC-specific configuration required */
2751 		break;
2752 
2753 	default:
2754 		netdev_warn(dev->net, "Unsupported interface mode: %d\n",
2755 			    dev->interface);
2756 		break;
2757 	}
2758 
2759 	return 0;
2760 }
2761 
2762 /**
2763  * lan78xx_configure_leds_from_dt() - Configure LED enables based on DT
2764  * @dev: LAN78xx device
2765  * @phydev: PHY device (must be valid)
2766  *
2767  * Reads "microchip,led-modes" property from the PHY's DT node and enables
2768  * the corresponding number of LEDs by writing to HW_CFG.
2769  *
2770  * This helper preserves the original logic, enabling up to 4 LEDs.
2771  * If the property is not present, this function does nothing.
2772  *
2773  * Return: 0 on success or a negative error code.
2774  */
2775 static int lan78xx_configure_leds_from_dt(struct lan78xx_net *dev,
2776 					  struct phy_device *phydev)
2777 {
2778 	struct device_node *np = phydev->mdio.dev.of_node;
2779 	u32 reg;
2780 	int len, ret;
2781 
2782 	if (!np)
2783 		return 0;
2784 
2785 	len = of_property_count_elems_of_size(np, "microchip,led-modes",
2786 					      sizeof(u32));
2787 	if (len < 0)
2788 		return 0;
2789 
2790 	ret = lan78xx_read_reg(dev, HW_CFG, &reg);
2791 	if (ret < 0)
2792 		return ret;
2793 
2794 	reg &= ~(HW_CFG_LED0_EN_ | HW_CFG_LED1_EN_ |
2795 		 HW_CFG_LED2_EN_ | HW_CFG_LED3_EN_);
2796 
2797 	reg |= (len > 0) * HW_CFG_LED0_EN_ |
2798 	       (len > 1) * HW_CFG_LED1_EN_ |
2799 	       (len > 2) * HW_CFG_LED2_EN_ |
2800 	       (len > 3) * HW_CFG_LED3_EN_;
2801 
2802 	return lan78xx_write_reg(dev, HW_CFG, reg);
2803 }
2804 
/* Locate and attach the PHY, configure pause-frame and EEE advertisement,
 * apply optional DT-driven LED setup, and start autonegotiation.
 *
 * Return: 0 on success or a negative error code.
 */
static int lan78xx_phy_init(struct lan78xx_net *dev)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(fc) = { 0, };
	int ret;
	u32 mii_adv;
	struct phy_device *phydev;

	phydev = lan78xx_get_phy(dev);
	if (IS_ERR(phydev))
		return PTR_ERR(phydev);

	ret = lan78xx_mac_prepare_for_phy(dev);
	if (ret < 0)
		goto free_phy;

	/* if phyirq is not set, use polling mode in phylib */
	if (dev->domain_data.phyirq > 0)
		phydev->irq = dev->domain_data.phyirq;
	else
		phydev->irq = PHY_POLL;
	netdev_dbg(dev->net, "phydev->irq = %d\n", phydev->irq);

	/* set to AUTOMDIX */
	phydev->mdix = ETH_TP_MDI_AUTO;

	ret = phy_connect_direct(dev->net, phydev,
				 lan78xx_link_status_change,
				 dev->interface);
	if (ret) {
		netdev_err(dev->net, "can't attach PHY to %s\n",
			   dev->mdiobus->id);
		/* a fixed PHY we registered ourselves must be torn down here */
		if (dev->chipid == ID_REV_CHIP_ID_7801_) {
			if (phy_is_pseudo_fixed_link(phydev)) {
				fixed_phy_unregister(phydev);
				phy_device_free(phydev);
			}
		}
		/* NOTE(review): returns -EIO instead of propagating ret —
		 * confirm callers do not depend on the original error code
		 */
		return -EIO;
	}

	/* MAC doesn't support 1000T Half */
	phy_remove_link_mode(phydev, ETHTOOL_LINK_MODE_1000baseT_Half_BIT);

	/* support both flow controls */
	dev->fc_request_control = (FLOW_CTRL_RX | FLOW_CTRL_TX);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Pause_BIT,
			   phydev->advertising);
	linkmode_clear_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT,
			   phydev->advertising);
	/* translate the requested pause config into advertisement bits */
	mii_adv = (u32)mii_advertise_flowctrl(dev->fc_request_control);
	mii_adv_to_linkmode_adv_t(fc, mii_adv);
	linkmode_or(phydev->advertising, fc, phydev->advertising);

	phy_support_eee(phydev);

	ret = lan78xx_configure_leds_from_dt(dev, phydev);
	if (ret)
		/* NOTE(review): phydev is already connected at this point but
		 * the error path below does not phy_disconnect() — verify the
		 * caller handles teardown
		 */
		goto free_phy;

	genphy_config_aneg(phydev);

	dev->fc_autoneg = phydev->autoneg;

	return 0;

free_phy:
	/* only a pseudo fixed-link PHY (registered by us) is freed here */
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	return ret;
}
2878 
2879 static int lan78xx_set_rx_max_frame_length(struct lan78xx_net *dev, int size)
2880 {
2881 	bool rxenabled;
2882 	u32 buf;
2883 	int ret;
2884 
2885 	ret = lan78xx_read_reg(dev, MAC_RX, &buf);
2886 	if (ret < 0)
2887 		return ret;
2888 
2889 	rxenabled = ((buf & MAC_RX_RXEN_) != 0);
2890 
2891 	if (rxenabled) {
2892 		buf &= ~MAC_RX_RXEN_;
2893 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2894 		if (ret < 0)
2895 			return ret;
2896 	}
2897 
2898 	/* add 4 to size for FCS */
2899 	buf &= ~MAC_RX_MAX_SIZE_MASK_;
2900 	buf |= (((size + 4) << MAC_RX_MAX_SIZE_SHIFT_) & MAC_RX_MAX_SIZE_MASK_);
2901 
2902 	ret = lan78xx_write_reg(dev, MAC_RX, buf);
2903 	if (ret < 0)
2904 		return ret;
2905 
2906 	if (rxenabled) {
2907 		buf |= MAC_RX_RXEN_;
2908 		ret = lan78xx_write_reg(dev, MAC_RX, buf);
2909 		if (ret < 0)
2910 			return ret;
2911 	}
2912 
2913 	return 0;
2914 }
2915 
/* Asynchronously unlink every URB queued on @q that is not already being
 * unlinked. Takes q->lock internally and drops it around each
 * usb_unlink_urb() call, then rescans the queue from the start.
 *
 * Return: number of URBs for which an unlink was successfully initiated.
 */
static int unlink_urbs(struct lan78xx_net *dev, struct sk_buff_head *q)
{
	struct sk_buff *skb;
	unsigned long flags;
	int count = 0;

	spin_lock_irqsave(&q->lock, flags);
	while (!skb_queue_empty(q)) {
		struct skb_data	*entry;
		struct urb *urb;
		int ret;

		/* find the next entry not yet marked unlink_start */
		skb_queue_walk(q, skb) {
			entry = (struct skb_data *)skb->cb;
			if (entry->state != unlink_start)
				goto found;
		}
		break;
found:
		entry->state = unlink_start;
		urb = entry->urb;

		/* Get reference count of the URB to avoid it to be
		 * freed during usb_unlink_urb, which may trigger
		 * use-after-free problem inside usb_unlink_urb since
		 * usb_unlink_urb is always racing with .complete
		 * handler(include defer_bh).
		 */
		usb_get_urb(urb);
		spin_unlock_irqrestore(&q->lock, flags);
		/* during some PM-driven resume scenarios,
		 * these (async) unlinks complete immediately
		 */
		ret = usb_unlink_urb(urb);
		if (ret != -EINPROGRESS && ret != 0)
			netdev_dbg(dev->net, "unlink urb err, %d\n", ret);
		else
			count++;
		usb_put_urb(urb);
		spin_lock_irqsave(&q->lock, flags);
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return count;
}
2960 
2961 static int lan78xx_change_mtu(struct net_device *netdev, int new_mtu)
2962 {
2963 	struct lan78xx_net *dev = netdev_priv(netdev);
2964 	int max_frame_len = RX_MAX_FRAME_LEN(new_mtu);
2965 	int ret;
2966 
2967 	/* no second zero-length packet read wanted after mtu-sized packets */
2968 	if ((max_frame_len % dev->maxpacket) == 0)
2969 		return -EDOM;
2970 
2971 	ret = usb_autopm_get_interface(dev->intf);
2972 	if (ret < 0)
2973 		return ret;
2974 
2975 	ret = lan78xx_set_rx_max_frame_length(dev, max_frame_len);
2976 	if (ret < 0)
2977 		netdev_err(dev->net, "MTU changed to %d from %d failed with %pe\n",
2978 			   new_mtu, netdev->mtu, ERR_PTR(ret));
2979 	else
2980 		WRITE_ONCE(netdev->mtu, new_mtu);
2981 
2982 	usb_autopm_put_interface(dev->intf);
2983 
2984 	return ret;
2985 }
2986 
2987 static int lan78xx_set_mac_addr(struct net_device *netdev, void *p)
2988 {
2989 	struct lan78xx_net *dev = netdev_priv(netdev);
2990 	struct sockaddr *addr = p;
2991 	u32 addr_lo, addr_hi;
2992 	int ret;
2993 
2994 	if (netif_running(netdev))
2995 		return -EBUSY;
2996 
2997 	if (!is_valid_ether_addr(addr->sa_data))
2998 		return -EADDRNOTAVAIL;
2999 
3000 	eth_hw_addr_set(netdev, addr->sa_data);
3001 
3002 	addr_lo = netdev->dev_addr[0] |
3003 		  netdev->dev_addr[1] << 8 |
3004 		  netdev->dev_addr[2] << 16 |
3005 		  netdev->dev_addr[3] << 24;
3006 	addr_hi = netdev->dev_addr[4] |
3007 		  netdev->dev_addr[5] << 8;
3008 
3009 	ret = lan78xx_write_reg(dev, RX_ADDRL, addr_lo);
3010 	if (ret < 0)
3011 		return ret;
3012 
3013 	ret = lan78xx_write_reg(dev, RX_ADDRH, addr_hi);
3014 	if (ret < 0)
3015 		return ret;
3016 
3017 	/* Added to support MAC address changes */
3018 	ret = lan78xx_write_reg(dev, MAF_LO(0), addr_lo);
3019 	if (ret < 0)
3020 		return ret;
3021 
3022 	return lan78xx_write_reg(dev, MAF_HI(0), addr_hi | MAF_HI_VALID_);
3023 }
3024 
/* Enable or disable Rx checksum offload engine */
static int lan78xx_set_features(struct net_device *netdev,
				netdev_features_t features)
{
	struct lan78xx_net *dev = netdev_priv(netdev);
	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
	unsigned long flags;

	/* update the cached RFE_CTL value under the spinlock; the register
	 * write itself happens after unlock because USB access may sleep
	 */
	spin_lock_irqsave(&pdata->rfe_ctl_lock, flags);

	if (features & NETIF_F_RXCSUM) {
		/* IP/TCP/UDP plus ICMP/IGMP checksum offload engines */
		pdata->rfe_ctl |= RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_;
		pdata->rfe_ctl |= RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_;
	} else {
		pdata->rfe_ctl &= ~(RFE_CTL_TCPUDP_COE_ | RFE_CTL_IP_COE_);
		pdata->rfe_ctl &= ~(RFE_CTL_ICMP_COE_ | RFE_CTL_IGMP_COE_);
	}

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		pdata->rfe_ctl |= RFE_CTL_VLAN_STRIP_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_STRIP_;

	if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
		pdata->rfe_ctl |= RFE_CTL_VLAN_FILTER_;
	else
		pdata->rfe_ctl &= ~RFE_CTL_VLAN_FILTER_;

	spin_unlock_irqrestore(&pdata->rfe_ctl_lock, flags);

	return lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
}
3057 
/* Worker for deferred VLAN table updates: copies the software vlan_table
 * into the device's VLAN data-port memory. The add/kill ndo callbacks
 * schedule this instead of writing directly, since register access sleeps.
 */
static void lan78xx_deferred_vlan_write(struct work_struct *param)
{
	struct lan78xx_priv *pdata =
			container_of(param, struct lan78xx_priv, set_vlan);
	struct lan78xx_net *dev = pdata->dev;

	lan78xx_dataport_write(dev, DP_SEL_RSEL_VLAN_DA_, 0,
			       DP_SEL_VHF_VLAN_LEN, pdata->vlan_table);
}
3067 
3068 static int lan78xx_vlan_rx_add_vid(struct net_device *netdev,
3069 				   __be16 proto, u16 vid)
3070 {
3071 	struct lan78xx_net *dev = netdev_priv(netdev);
3072 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3073 	u16 vid_bit_index;
3074 	u16 vid_dword_index;
3075 
3076 	vid_dword_index = (vid >> 5) & 0x7F;
3077 	vid_bit_index = vid & 0x1F;
3078 
3079 	pdata->vlan_table[vid_dword_index] |= (1 << vid_bit_index);
3080 
3081 	/* defer register writes to a sleepable context */
3082 	schedule_work(&pdata->set_vlan);
3083 
3084 	return 0;
3085 }
3086 
3087 static int lan78xx_vlan_rx_kill_vid(struct net_device *netdev,
3088 				    __be16 proto, u16 vid)
3089 {
3090 	struct lan78xx_net *dev = netdev_priv(netdev);
3091 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3092 	u16 vid_bit_index;
3093 	u16 vid_dword_index;
3094 
3095 	vid_dword_index = (vid >> 5) & 0x7F;
3096 	vid_bit_index = vid & 0x1F;
3097 
3098 	pdata->vlan_table[vid_dword_index] &= ~(1 << vid_bit_index);
3099 
3100 	/* defer register writes to a sleepable context */
3101 	schedule_work(&pdata->set_vlan);
3102 
3103 	return 0;
3104 }
3105 
3106 static int lan78xx_init_ltm(struct lan78xx_net *dev)
3107 {
3108 	u32 regs[6] = { 0 };
3109 	int ret;
3110 	u32 buf;
3111 
3112 	ret = lan78xx_read_reg(dev, USB_CFG1, &buf);
3113 	if (ret < 0)
3114 		goto init_ltm_failed;
3115 
3116 	if (buf & USB_CFG1_LTM_ENABLE_) {
3117 		u8 temp[2];
3118 		/* Get values from EEPROM first */
3119 		if (lan78xx_read_eeprom(dev, 0x3F, 2, temp) == 0) {
3120 			if (temp[0] == 24) {
3121 				ret = lan78xx_read_raw_eeprom(dev,
3122 							      temp[1] * 2,
3123 							      24,
3124 							      (u8 *)regs);
3125 				if (ret < 0)
3126 					return ret;
3127 			}
3128 		} else if (lan78xx_read_otp(dev, 0x3F, 2, temp) == 0) {
3129 			if (temp[0] == 24) {
3130 				ret = lan78xx_read_raw_otp(dev,
3131 							   temp[1] * 2,
3132 							   24,
3133 							   (u8 *)regs);
3134 				if (ret < 0)
3135 					return ret;
3136 			}
3137 		}
3138 	}
3139 
3140 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE0, regs[0]);
3141 	if (ret < 0)
3142 		goto init_ltm_failed;
3143 
3144 	ret = lan78xx_write_reg(dev, LTM_BELT_IDLE1, regs[1]);
3145 	if (ret < 0)
3146 		goto init_ltm_failed;
3147 
3148 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT0, regs[2]);
3149 	if (ret < 0)
3150 		goto init_ltm_failed;
3151 
3152 	ret = lan78xx_write_reg(dev, LTM_BELT_ACT1, regs[3]);
3153 	if (ret < 0)
3154 		goto init_ltm_failed;
3155 
3156 	ret = lan78xx_write_reg(dev, LTM_INACTIVE0, regs[4]);
3157 	if (ret < 0)
3158 		goto init_ltm_failed;
3159 
3160 	ret = lan78xx_write_reg(dev, LTM_INACTIVE1, regs[5]);
3161 	if (ret < 0)
3162 		goto init_ltm_failed;
3163 
3164 	return 0;
3165 
3166 init_ltm_failed:
3167 	netdev_err(dev->net, "Failed to init LTM with error %pe\n", ERR_PTR(ret));
3168 	return ret;
3169 }
3170 
3171 static int lan78xx_urb_config_init(struct lan78xx_net *dev)
3172 {
3173 	int result = 0;
3174 
3175 	switch (dev->udev->speed) {
3176 	case USB_SPEED_SUPER:
3177 		dev->rx_urb_size = RX_SS_URB_SIZE;
3178 		dev->tx_urb_size = TX_SS_URB_SIZE;
3179 		dev->n_rx_urbs = RX_SS_URB_NUM;
3180 		dev->n_tx_urbs = TX_SS_URB_NUM;
3181 		dev->bulk_in_delay = SS_BULK_IN_DELAY;
3182 		dev->burst_cap = SS_BURST_CAP_SIZE / SS_USB_PKT_SIZE;
3183 		break;
3184 	case USB_SPEED_HIGH:
3185 		dev->rx_urb_size = RX_HS_URB_SIZE;
3186 		dev->tx_urb_size = TX_HS_URB_SIZE;
3187 		dev->n_rx_urbs = RX_HS_URB_NUM;
3188 		dev->n_tx_urbs = TX_HS_URB_NUM;
3189 		dev->bulk_in_delay = HS_BULK_IN_DELAY;
3190 		dev->burst_cap = HS_BURST_CAP_SIZE / HS_USB_PKT_SIZE;
3191 		break;
3192 	case USB_SPEED_FULL:
3193 		dev->rx_urb_size = RX_FS_URB_SIZE;
3194 		dev->tx_urb_size = TX_FS_URB_SIZE;
3195 		dev->n_rx_urbs = RX_FS_URB_NUM;
3196 		dev->n_tx_urbs = TX_FS_URB_NUM;
3197 		dev->bulk_in_delay = FS_BULK_IN_DELAY;
3198 		dev->burst_cap = FS_BURST_CAP_SIZE / FS_USB_PKT_SIZE;
3199 		break;
3200 	default:
3201 		netdev_warn(dev->net, "USB bus speed not supported\n");
3202 		result = -EIO;
3203 		break;
3204 	}
3205 
3206 	return result;
3207 }
3208 
3209 static int lan78xx_reset(struct lan78xx_net *dev)
3210 {
3211 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3212 	unsigned long timeout;
3213 	int ret;
3214 	u32 buf;
3215 	u8 sig;
3216 
3217 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3218 	if (ret < 0)
3219 		return ret;
3220 
3221 	buf |= HW_CFG_LRST_;
3222 
3223 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3224 	if (ret < 0)
3225 		return ret;
3226 
3227 	timeout = jiffies + HZ;
3228 	do {
3229 		mdelay(1);
3230 		ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3231 		if (ret < 0)
3232 			return ret;
3233 
3234 		if (time_after(jiffies, timeout)) {
3235 			netdev_warn(dev->net,
3236 				    "timeout on completion of LiteReset");
3237 			ret = -ETIMEDOUT;
3238 			return ret;
3239 		}
3240 	} while (buf & HW_CFG_LRST_);
3241 
3242 	ret = lan78xx_init_mac_address(dev);
3243 	if (ret < 0)
3244 		return ret;
3245 
3246 	/* save DEVID for later usage */
3247 	ret = lan78xx_read_reg(dev, ID_REV, &buf);
3248 	if (ret < 0)
3249 		return ret;
3250 
3251 	dev->chipid = (buf & ID_REV_CHIP_ID_MASK_) >> 16;
3252 	dev->chiprev = buf & ID_REV_CHIP_REV_MASK_;
3253 
3254 	/* Respond to the IN token with a NAK */
3255 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3256 	if (ret < 0)
3257 		return ret;
3258 
3259 	buf |= USB_CFG_BIR_;
3260 
3261 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3262 	if (ret < 0)
3263 		return ret;
3264 
3265 	/* Init LTM */
3266 	ret = lan78xx_init_ltm(dev);
3267 	if (ret < 0)
3268 		return ret;
3269 
3270 	ret = lan78xx_write_reg(dev, BURST_CAP, dev->burst_cap);
3271 	if (ret < 0)
3272 		return ret;
3273 
3274 	ret = lan78xx_write_reg(dev, BULK_IN_DLY, dev->bulk_in_delay);
3275 	if (ret < 0)
3276 		return ret;
3277 
3278 	ret = lan78xx_read_reg(dev, HW_CFG, &buf);
3279 	if (ret < 0)
3280 		return ret;
3281 
3282 	buf |= HW_CFG_MEF_;
3283 	buf |= HW_CFG_CLK125_EN_;
3284 	buf |= HW_CFG_REFCLK25_EN_;
3285 
3286 	ret = lan78xx_write_reg(dev, HW_CFG, buf);
3287 	if (ret < 0)
3288 		return ret;
3289 
3290 	ret = lan78xx_read_reg(dev, USB_CFG0, &buf);
3291 	if (ret < 0)
3292 		return ret;
3293 
3294 	buf |= USB_CFG_BCE_;
3295 
3296 	ret = lan78xx_write_reg(dev, USB_CFG0, buf);
3297 	if (ret < 0)
3298 		return ret;
3299 
3300 	/* set FIFO sizes */
3301 	buf = (MAX_RX_FIFO_SIZE - 512) / 512;
3302 
3303 	ret = lan78xx_write_reg(dev, FCT_RX_FIFO_END, buf);
3304 	if (ret < 0)
3305 		return ret;
3306 
3307 	buf = (MAX_TX_FIFO_SIZE - 512) / 512;
3308 
3309 	ret = lan78xx_write_reg(dev, FCT_TX_FIFO_END, buf);
3310 	if (ret < 0)
3311 		return ret;
3312 
3313 	ret = lan78xx_write_reg(dev, INT_STS, INT_STS_CLEAR_ALL_);
3314 	if (ret < 0)
3315 		return ret;
3316 
3317 	ret = lan78xx_write_reg(dev, FLOW, 0);
3318 	if (ret < 0)
3319 		return ret;
3320 
3321 	ret = lan78xx_write_reg(dev, FCT_FLOW, 0);
3322 	if (ret < 0)
3323 		return ret;
3324 
3325 	/* Don't need rfe_ctl_lock during initialisation */
3326 	ret = lan78xx_read_reg(dev, RFE_CTL, &pdata->rfe_ctl);
3327 	if (ret < 0)
3328 		return ret;
3329 
3330 	pdata->rfe_ctl |= RFE_CTL_BCAST_EN_ | RFE_CTL_DA_PERFECT_;
3331 
3332 	ret = lan78xx_write_reg(dev, RFE_CTL, pdata->rfe_ctl);
3333 	if (ret < 0)
3334 		return ret;
3335 
3336 	/* Enable or disable checksum offload engines */
3337 	ret = lan78xx_set_features(dev->net, dev->net->features);
3338 	if (ret < 0)
3339 		return ret;
3340 
3341 	lan78xx_set_multicast(dev->net);
3342 
3343 	/* reset PHY */
3344 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3345 	if (ret < 0)
3346 		return ret;
3347 
3348 	buf |= PMT_CTL_PHY_RST_;
3349 
3350 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
3351 	if (ret < 0)
3352 		return ret;
3353 
3354 	timeout = jiffies + HZ;
3355 	do {
3356 		mdelay(1);
3357 		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
3358 		if (ret < 0)
3359 			return ret;
3360 
3361 		if (time_after(jiffies, timeout)) {
3362 			netdev_warn(dev->net, "timeout waiting for PHY Reset");
3363 			ret = -ETIMEDOUT;
3364 			return ret;
3365 		}
3366 	} while ((buf & PMT_CTL_PHY_RST_) || !(buf & PMT_CTL_READY_));
3367 
3368 	ret = lan78xx_read_reg(dev, MAC_CR, &buf);
3369 	if (ret < 0)
3370 		return ret;
3371 
3372 	/* LAN7801 only has RGMII mode */
3373 	if (dev->chipid == ID_REV_CHIP_ID_7801_) {
3374 		buf &= ~MAC_CR_GMII_EN_;
3375 		/* Enable Auto Duplex and Auto speed */
3376 		buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3377 	}
3378 
3379 	if (dev->chipid == ID_REV_CHIP_ID_7800_ ||
3380 	    dev->chipid == ID_REV_CHIP_ID_7850_) {
3381 		ret = lan78xx_read_raw_eeprom(dev, 0, 1, &sig);
3382 		if (!ret && sig != EEPROM_INDICATOR) {
3383 			/* Implies there is no external eeprom. Set mac speed */
3384 			netdev_info(dev->net, "No External EEPROM. Setting MAC Speed\n");
3385 			buf |= MAC_CR_AUTO_DUPLEX_ | MAC_CR_AUTO_SPEED_;
3386 		}
3387 	}
3388 	ret = lan78xx_write_reg(dev, MAC_CR, buf);
3389 	if (ret < 0)
3390 		return ret;
3391 
3392 	ret = lan78xx_set_rx_max_frame_length(dev,
3393 					      RX_MAX_FRAME_LEN(dev->net->mtu));
3394 
3395 	return ret;
3396 }
3397 
3398 static void lan78xx_init_stats(struct lan78xx_net *dev)
3399 {
3400 	u32 *p;
3401 	int i;
3402 
3403 	/* initialize for stats update
3404 	 * some counters are 20bits and some are 32bits
3405 	 */
3406 	p = (u32 *)&dev->stats.rollover_max;
3407 	for (i = 0; i < (sizeof(dev->stats.rollover_max) / (sizeof(u32))); i++)
3408 		p[i] = 0xFFFFF;
3409 
3410 	dev->stats.rollover_max.rx_unicast_byte_count = 0xFFFFFFFF;
3411 	dev->stats.rollover_max.rx_broadcast_byte_count = 0xFFFFFFFF;
3412 	dev->stats.rollover_max.rx_multicast_byte_count = 0xFFFFFFFF;
3413 	dev->stats.rollover_max.eee_rx_lpi_transitions = 0xFFFFFFFF;
3414 	dev->stats.rollover_max.eee_rx_lpi_time = 0xFFFFFFFF;
3415 	dev->stats.rollover_max.tx_unicast_byte_count = 0xFFFFFFFF;
3416 	dev->stats.rollover_max.tx_broadcast_byte_count = 0xFFFFFFFF;
3417 	dev->stats.rollover_max.tx_multicast_byte_count = 0xFFFFFFFF;
3418 	dev->stats.rollover_max.eee_tx_lpi_transitions = 0xFFFFFFFF;
3419 	dev->stats.rollover_max.eee_tx_lpi_time = 0xFFFFFFFF;
3420 
3421 	set_bit(EVENT_STAT_UPDATE, &dev->flags);
3422 }
3423 
/* lan78xx_open - net_device ndo_open handler.
 *
 * Takes a runtime PM reference, starts the PHY, submits the interrupt
 * URB used for link-change notification, flushes and starts both data
 * paths, then enables the stack Tx queue and NAPI.
 *
 * Return: 0 on success or a negative errno. On failure the PM
 * reference taken at entry is dropped; on success it is held until
 * lan78xx_stop() releases it.
 */
static int lan78xx_open(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	int ret;

	netif_dbg(dev, ifup, dev->net, "open device");

	/* Wake the device and pin it awake while the interface is up */
	ret = usb_autopm_get_interface(dev->intf);
	if (ret < 0)
		return ret;

	mutex_lock(&dev->dev_mutex);

	phy_start(net->phydev);

	netif_dbg(dev, ifup, dev->net, "phy initialised successfully");

	/* for Link Check */
	if (dev->urb_intr) {
		ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
		if (ret < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", ret);
			goto done;
		}
	}

	/* Discard any stale FIFO contents before enabling the paths */
	ret = lan78xx_flush_rx_fifo(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_flush_tx_fifo(dev);
	if (ret < 0)
		goto done;

	ret = lan78xx_start_tx_path(dev);
	if (ret < 0)
		goto done;
	ret = lan78xx_start_rx_path(dev);
	if (ret < 0)
		goto done;

	lan78xx_init_stats(dev);

	set_bit(EVENT_DEV_OPEN, &dev->flags);

	netif_start_queue(net);

	/* Assume no link until the link event handler reports one */
	dev->link_on = false;

	napi_enable(&dev->napi);

	lan78xx_defer_kevent(dev, EVENT_LINK_RESET);
done:
	mutex_unlock(&dev->dev_mutex);

	/* On error, drop the PM reference taken above; on success it is
	 * released by lan78xx_stop().
	 */
	if (ret < 0)
		usb_autopm_put_interface(dev->intf);

	return ret;
}
3484 
/* lan78xx_terminate_urbs - cancel all in-flight Rx/Tx URBs and drain queues.
 *
 * Registers an on-stack wait queue (woken from URB completion via
 * dev->wait), unlinks every URB on the Tx and Rx queues, then sleeps
 * in short intervals until both queues are empty. Finally releases
 * completed Rx buffers and purges the overflow and pending-Tx queues.
 */
static void lan78xx_terminate_urbs(struct lan78xx_net *dev)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(unlink_wakeup);
	DECLARE_WAITQUEUE(wait, current);
	int temp;

	/* ensure there are no more active urbs */
	add_wait_queue(&unlink_wakeup, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dev->wait = &unlink_wakeup;
	temp = unlink_urbs(dev, &dev->txq) + unlink_urbs(dev, &dev->rxq);

	/* maybe wait for deletions to finish. */
	while (!skb_queue_empty(&dev->rxq) ||
	       !skb_queue_empty(&dev->txq)) {
		schedule_timeout(msecs_to_jiffies(UNLINK_TIMEOUT_MS));
		set_current_state(TASK_UNINTERRUPTIBLE);
		netif_dbg(dev, ifdown, dev->net,
			  "waited for %d urb completions", temp);
	}
	set_current_state(TASK_RUNNING);
	dev->wait = NULL;
	remove_wait_queue(&unlink_wakeup, &wait);

	/* empty Rx done, Rx overflow and Tx pend queues
	 */
	while (!skb_queue_empty(&dev->rxq_done)) {
		struct sk_buff *skb = skb_dequeue(&dev->rxq_done);

		lan78xx_release_rx_buf(dev, skb);
	}

	skb_queue_purge(&dev->rxq_overflow);
	skb_queue_purge(&dev->txq_pend);
}
3520 
/* lan78xx_stop - net_device ndo_stop handler.
 *
 * Tears down in the reverse order of lan78xx_open(): stops the stats
 * timer, NAPI and the stack queue, cancels all URBs, stops the data
 * paths and the PHY, neutralises deferred work, then drops the runtime
 * PM reference taken in lan78xx_open().
 *
 * Return: always 0.
 */
static int lan78xx_stop(struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);

	netif_dbg(dev, ifup, dev->net, "stop device");

	mutex_lock(&dev->dev_mutex);

	if (timer_pending(&dev->stat_monitor))
		timer_delete_sync(&dev->stat_monitor);

	clear_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_stop_queue(net);
	napi_disable(&dev->napi);

	/* Cancel all in-flight URBs and drain the SKB queues */
	lan78xx_terminate_urbs(dev);

	netif_info(dev, ifdown, dev->net,
		   "stop stats: rx/tx %lu/%lu, errs %lu/%lu\n",
		   net->stats.rx_packets, net->stats.tx_packets,
		   net->stats.rx_errors, net->stats.tx_errors);

	/* ignore errors that occur stopping the Tx and Rx data paths */
	lan78xx_stop_tx_path(dev);
	lan78xx_stop_rx_path(dev);

	if (net->phydev)
		phy_stop(net->phydev);

	usb_kill_urb(dev->urb_intr);

	/* deferred work (task, timer, softirq) must also stop.
	 * can't flush_scheduled_work() until we drop rtnl (later),
	 * else workers could deadlock; so make workers a NOP.
	 */
	clear_bit(EVENT_TX_HALT, &dev->flags);
	clear_bit(EVENT_RX_HALT, &dev->flags);
	clear_bit(EVENT_LINK_RESET, &dev->flags);
	clear_bit(EVENT_STAT_UPDATE, &dev->flags);

	cancel_delayed_work_sync(&dev->wq);

	/* Matches the usb_autopm_get_interface() in lan78xx_open() */
	usb_autopm_put_interface(dev->intf);

	mutex_unlock(&dev->dev_mutex);

	return 0;
}
3569 
/* defer_bh - move a completed SKB from @list to the Rx-done queue.
 *
 * Records @state in the SKB's control block and schedules NAPI when
 * the done queue transitions from empty to non-empty (later additions
 * need no extra kick since NAPI is already pending).
 *
 * Locking: the irqsave on @list->lock disables interrupts, which stay
 * disabled across the hand-off to rxq_done.lock; the saved flags are
 * only restored when the second lock is released. Do not "fix" the
 * apparently unbalanced lock/unlock pairs.
 *
 * Return: the state the SKB was in before this call.
 */
static enum skb_state defer_bh(struct lan78xx_net *dev, struct sk_buff *skb,
			       struct sk_buff_head *list, enum skb_state state)
{
	unsigned long flags;
	enum skb_state old_state;
	struct skb_data *entry = (struct skb_data *)skb->cb;

	spin_lock_irqsave(&list->lock, flags);
	old_state = entry->state;
	entry->state = state;

	__skb_unlink(skb, list);
	spin_unlock(&list->lock);
	spin_lock(&dev->rxq_done.lock);

	__skb_queue_tail(&dev->rxq_done, skb);
	if (skb_queue_len(&dev->rxq_done) == 1)
		napi_schedule(&dev->napi);

	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	return old_state;
}
3593 
/* tx_complete - completion callback for bulk-out (Tx) URBs.
 *
 * Updates the net_device statistics according to the URB status,
 * handles error cases (halt recovery, shutdown, queue stop), releases
 * the Tx buffer back to the free list, and re-kicks NAPI if pending
 * Tx data is waiting with no URB in flight.
 *
 * Runs in URB completion (atomic) context.
 */
static void tx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *)urb->context;
	struct skb_data *entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;

	if (urb->status == 0) {
		/* entry->num_of_packet/length were accumulated while the
		 * URB buffer was filled in lan78xx_tx_buf_fill()
		 */
		dev->net->stats.tx_packets += entry->num_of_packet;
		dev->net->stats.tx_bytes += entry->length;
	} else {
		dev->net->stats.tx_errors += entry->num_of_packet;

		switch (urb->status) {
		case -EPIPE:
			/* endpoint stalled; clear it from process context */
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:
		case -ESHUTDOWN:
			netif_dbg(dev, tx_err, dev->net,
				  "tx err interface gone %d\n",
				  entry->urb->status);
			break;

		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			/* transient bus errors: stop feeding the device */
			netif_stop_queue(dev->net);
			netif_dbg(dev, tx_err, dev->net,
				  "tx err queue stopped %d\n",
				  entry->urb->status);
			break;
		default:
			netif_dbg(dev, tx_err, dev->net,
				  "unknown tx err %d\n",
				  entry->urb->status);
			break;
		}
	}

	usb_autopm_put_interface_async(dev->intf);

	skb_unlink(skb, &dev->txq);

	lan78xx_release_tx_buf(dev, skb);

	/* Re-schedule NAPI if Tx data pending but no URBs in progress.
	 */
	if (skb_queue_empty(&dev->txq) &&
	    !skb_queue_empty(&dev->txq_pend))
		napi_schedule(&dev->napi);
}
3647 
3648 static void lan78xx_queue_skb(struct sk_buff_head *list,
3649 			      struct sk_buff *newsk, enum skb_state state)
3650 {
3651 	struct skb_data *entry = (struct skb_data *)newsk->cb;
3652 
3653 	__skb_queue_tail(list, newsk);
3654 	entry->state = state;
3655 }
3656 
3657 static unsigned int lan78xx_tx_urb_space(struct lan78xx_net *dev)
3658 {
3659 	return skb_queue_len(&dev->txq_free) * dev->tx_urb_size;
3660 }
3661 
3662 static unsigned int lan78xx_tx_pend_data_len(struct lan78xx_net *dev)
3663 {
3664 	return dev->tx_pend_data_len;
3665 }
3666 
3667 static void lan78xx_tx_pend_skb_add(struct lan78xx_net *dev,
3668 				    struct sk_buff *skb,
3669 				    unsigned int *tx_pend_data_len)
3670 {
3671 	unsigned long flags;
3672 
3673 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3674 
3675 	__skb_queue_tail(&dev->txq_pend, skb);
3676 
3677 	dev->tx_pend_data_len += skb->len;
3678 	*tx_pend_data_len = dev->tx_pend_data_len;
3679 
3680 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3681 }
3682 
3683 static void lan78xx_tx_pend_skb_head_add(struct lan78xx_net *dev,
3684 					 struct sk_buff *skb,
3685 					 unsigned int *tx_pend_data_len)
3686 {
3687 	unsigned long flags;
3688 
3689 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3690 
3691 	__skb_queue_head(&dev->txq_pend, skb);
3692 
3693 	dev->tx_pend_data_len += skb->len;
3694 	*tx_pend_data_len = dev->tx_pend_data_len;
3695 
3696 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3697 }
3698 
3699 static void lan78xx_tx_pend_skb_get(struct lan78xx_net *dev,
3700 				    struct sk_buff **skb,
3701 				    unsigned int *tx_pend_data_len)
3702 {
3703 	unsigned long flags;
3704 
3705 	spin_lock_irqsave(&dev->txq_pend.lock, flags);
3706 
3707 	*skb = __skb_dequeue(&dev->txq_pend);
3708 	if (*skb)
3709 		dev->tx_pend_data_len -= (*skb)->len;
3710 	*tx_pend_data_len = dev->tx_pend_data_len;
3711 
3712 	spin_unlock_irqrestore(&dev->txq_pend.lock, flags);
3713 }
3714 
/* lan78xx_start_xmit - net_device ndo_start_xmit handler.
 *
 * Packets are not transmitted directly: each SKB is appended to the
 * pending queue and NAPI is scheduled to aggregate pending SKBs into
 * bulk-out URBs (see lan78xx_tx_bh()).
 *
 * Return: always NETDEV_TX_OK; the SKB is always consumed.
 */
static netdev_tx_t
lan78xx_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct lan78xx_net *dev = netdev_priv(net);
	unsigned int tx_pend_data_len;

	/* If the device is runtime-suspended, kick the deferred work to
	 * resume it
	 */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags))
		schedule_delayed_work(&dev->wq, 0);

	skb_tx_timestamp(skb);

	lan78xx_tx_pend_skb_add(dev, skb, &tx_pend_data_len);

	/* Set up a Tx URB if none is in progress */

	if (skb_queue_empty(&dev->txq))
		napi_schedule(&dev->napi);

	/* Stop stack Tx queue if we have enough data to fill
	 * all the free Tx URBs.
	 */
	if (tx_pend_data_len > lan78xx_tx_urb_space(dev)) {
		netif_stop_queue(net);

		netif_dbg(dev, hw, dev->net, "tx data len: %u, urb space %u",
			  tx_pend_data_len, lan78xx_tx_urb_space(dev));

		/* Kick off transmission of pending data */

		if (!skb_queue_empty(&dev->txq_free))
			napi_schedule(&dev->napi);
	}

	return NETDEV_TX_OK;
}
3750 
3751 static int lan78xx_bind(struct lan78xx_net *dev, struct usb_interface *intf)
3752 {
3753 	struct lan78xx_priv *pdata = NULL;
3754 	int ret;
3755 	int i;
3756 
3757 	dev->data[0] = (unsigned long)kzalloc(sizeof(*pdata), GFP_KERNEL);
3758 
3759 	pdata = (struct lan78xx_priv *)(dev->data[0]);
3760 	if (!pdata) {
3761 		netdev_warn(dev->net, "Unable to allocate lan78xx_priv");
3762 		return -ENOMEM;
3763 	}
3764 
3765 	pdata->dev = dev;
3766 
3767 	spin_lock_init(&pdata->rfe_ctl_lock);
3768 	mutex_init(&pdata->dataport_mutex);
3769 
3770 	INIT_WORK(&pdata->set_multicast, lan78xx_deferred_multicast_write);
3771 
3772 	for (i = 0; i < DP_SEL_VHF_VLAN_LEN; i++)
3773 		pdata->vlan_table[i] = 0;
3774 
3775 	INIT_WORK(&pdata->set_vlan, lan78xx_deferred_vlan_write);
3776 
3777 	dev->net->features = 0;
3778 
3779 	if (DEFAULT_TX_CSUM_ENABLE)
3780 		dev->net->features |= NETIF_F_HW_CSUM;
3781 
3782 	if (DEFAULT_RX_CSUM_ENABLE)
3783 		dev->net->features |= NETIF_F_RXCSUM;
3784 
3785 	if (DEFAULT_TSO_CSUM_ENABLE)
3786 		dev->net->features |= NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_SG;
3787 
3788 	if (DEFAULT_VLAN_RX_OFFLOAD)
3789 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_RX;
3790 
3791 	if (DEFAULT_VLAN_FILTER_ENABLE)
3792 		dev->net->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
3793 
3794 	dev->net->hw_features = dev->net->features;
3795 
3796 	ret = lan78xx_setup_irq_domain(dev);
3797 	if (ret < 0) {
3798 		netdev_warn(dev->net,
3799 			    "lan78xx_setup_irq_domain() failed : %d", ret);
3800 		goto out1;
3801 	}
3802 
3803 	/* Init all registers */
3804 	ret = lan78xx_reset(dev);
3805 	if (ret) {
3806 		netdev_warn(dev->net, "Registers INIT FAILED....");
3807 		goto out2;
3808 	}
3809 
3810 	ret = lan78xx_mdio_init(dev);
3811 	if (ret) {
3812 		netdev_warn(dev->net, "MDIO INIT FAILED.....");
3813 		goto out2;
3814 	}
3815 
3816 	dev->net->flags |= IFF_MULTICAST;
3817 
3818 	pdata->wol = WAKE_MAGIC;
3819 
3820 	return ret;
3821 
3822 out2:
3823 	lan78xx_remove_irq_domain(dev);
3824 
3825 out1:
3826 	netdev_warn(dev->net, "Bind routine FAILED");
3827 	cancel_work_sync(&pdata->set_multicast);
3828 	cancel_work_sync(&pdata->set_vlan);
3829 	kfree(pdata);
3830 	return ret;
3831 }
3832 
3833 static void lan78xx_unbind(struct lan78xx_net *dev, struct usb_interface *intf)
3834 {
3835 	struct lan78xx_priv *pdata = (struct lan78xx_priv *)(dev->data[0]);
3836 
3837 	lan78xx_remove_irq_domain(dev);
3838 
3839 	lan78xx_remove_mdio(dev);
3840 
3841 	if (pdata) {
3842 		cancel_work_sync(&pdata->set_multicast);
3843 		cancel_work_sync(&pdata->set_vlan);
3844 		netif_dbg(dev, ifdown, dev->net, "free pdata");
3845 		kfree(pdata);
3846 		pdata = NULL;
3847 		dev->data[0] = 0;
3848 	}
3849 }
3850 
3851 static void lan78xx_rx_csum_offload(struct lan78xx_net *dev,
3852 				    struct sk_buff *skb,
3853 				    u32 rx_cmd_a, u32 rx_cmd_b)
3854 {
3855 	/* HW Checksum offload appears to be flawed if used when not stripping
3856 	 * VLAN headers. Drop back to S/W checksums under these conditions.
3857 	 */
3858 	if (!(dev->net->features & NETIF_F_RXCSUM) ||
3859 	    unlikely(rx_cmd_a & RX_CMD_A_ICSM_) ||
3860 	    ((rx_cmd_a & RX_CMD_A_FVTG_) &&
3861 	     !(dev->net->features & NETIF_F_HW_VLAN_CTAG_RX))) {
3862 		skb->ip_summed = CHECKSUM_NONE;
3863 	} else {
3864 		skb->csum = ntohs((u16)(rx_cmd_b >> RX_CMD_B_CSUM_SHIFT_));
3865 		skb->ip_summed = CHECKSUM_COMPLETE;
3866 	}
3867 }
3868 
3869 static void lan78xx_rx_vlan_offload(struct lan78xx_net *dev,
3870 				    struct sk_buff *skb,
3871 				    u32 rx_cmd_a, u32 rx_cmd_b)
3872 {
3873 	if ((dev->net->features & NETIF_F_HW_VLAN_CTAG_RX) &&
3874 	    (rx_cmd_a & RX_CMD_A_FVTG_))
3875 		__vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
3876 				       (rx_cmd_b & 0xffff));
3877 }
3878 
/* lan78xx_skb_return - deliver one received frame to the stack.
 *
 * Updates the device statistics, resolves the protocol, and hands the
 * SKB to NAPI GRO unless the timestamping core defers it.
 */
static void lan78xx_skb_return(struct lan78xx_net *dev, struct sk_buff *skb)
{
	/* Byte count must be taken before eth_type_trans() below pulls
	 * the Ethernet header off the SKB
	 */
	dev->net->stats.rx_packets++;
	dev->net->stats.rx_bytes += skb->len;

	skb->protocol = eth_type_trans(skb, dev->net);

	netif_dbg(dev, rx_status, dev->net, "< rx, len %zu, type 0x%x\n",
		  skb->len + sizeof(struct ethhdr), skb->protocol);
	/* clear stale skb_data state before the stack reuses cb[] */
	memset(skb->cb, 0, sizeof(struct skb_data));

	if (skb_defer_rx_timestamp(skb))
		return;

	napi_gro_receive(&dev->napi, skb);
}
3895 
/* lan78xx_rx - parse an Rx URB buffer and deliver the frames inside.
 *
 * Each frame is preceded by a little-endian header of two 32-bit
 * command words (rx_cmd_a, rx_cmd_b) and one 16-bit word (rx_cmd_c).
 * After each frame's data, padding brings (size + RXW_PADDING) up to a
 * 4-byte multiple before the next header starts.
 *
 * Good frames are copied into fresh NAPI SKBs and passed up the stack;
 * once @budget is exhausted, remaining frames go to rxq_overflow for
 * delivery in the next polling cycle. *@work_done is incremented per
 * frame delivered within budget.
 *
 * Return: 1 when the buffer was parsed to the end, 0 on a short or
 * malformed buffer (the caller accounts an rx error).
 */
static int lan78xx_rx(struct lan78xx_net *dev, struct sk_buff *skb,
		      int budget, int *work_done)
{
	if (skb->len < RX_SKB_MIN_LEN)
		return 0;

	/* Extract frames from the URB buffer and pass each one to
	 * the stack in a new NAPI SKB.
	 */
	while (skb->len > 0) {
		u32 rx_cmd_a, rx_cmd_b, align_count, size;
		u16 rx_cmd_c;
		unsigned char *packet;

		rx_cmd_a = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_a));

		rx_cmd_b = get_unaligned_le32(skb->data);
		skb_pull(skb, sizeof(rx_cmd_b));

		rx_cmd_c = get_unaligned_le16(skb->data);
		skb_pull(skb, sizeof(rx_cmd_c));

		packet = skb->data;

		/* get the packet length */
		size = (rx_cmd_a & RX_CMD_A_LEN_MASK_);
		align_count = (4 - ((size + RXW_PADDING) % 4)) % 4;

		/* frame length must not run past the URB buffer */
		if (unlikely(size > skb->len)) {
			netif_dbg(dev, rx_err, dev->net,
				  "size err rx_cmd_a=0x%08x\n",
				  rx_cmd_a);
			return 0;
		}

		if (unlikely(rx_cmd_a & RX_CMD_A_RED_)) {
			/* hardware flagged a receive error; skip frame */
			netif_dbg(dev, rx_err, dev->net,
				  "Error rx_cmd_a=0x%08x", rx_cmd_a);
		} else {
			u32 frame_len;
			struct sk_buff *skb2;

			if (unlikely(size < ETH_FCS_LEN)) {
				netif_dbg(dev, rx_err, dev->net,
					  "size err rx_cmd_a=0x%08x\n",
					  rx_cmd_a);
				return 0;
			}

			/* strip the trailing frame check sequence */
			frame_len = size - ETH_FCS_LEN;

			skb2 = napi_alloc_skb(&dev->napi, frame_len);
			if (!skb2)
				return 0;

			memcpy(skb2->data, packet, frame_len);

			skb_put(skb2, frame_len);

			lan78xx_rx_csum_offload(dev, skb2, rx_cmd_a, rx_cmd_b);
			lan78xx_rx_vlan_offload(dev, skb2, rx_cmd_a, rx_cmd_b);

			/* Processing of the URB buffer must complete once
			 * it has started. If the NAPI work budget is exhausted
			 * while frames remain they are added to the overflow
			 * queue for delivery in the next NAPI polling cycle.
			 */
			if (*work_done < budget) {
				lan78xx_skb_return(dev, skb2);
				++(*work_done);
			} else {
				skb_queue_tail(&dev->rxq_overflow, skb2);
			}
		}

		skb_pull(skb, size);

		/* skip padding bytes before the next frame starts */
		if (skb->len)
			skb_pull(skb, align_count);
	}

	return 1;
}
3981 
3982 static inline void rx_process(struct lan78xx_net *dev, struct sk_buff *skb,
3983 			      int budget, int *work_done)
3984 {
3985 	if (!lan78xx_rx(dev, skb, budget, work_done)) {
3986 		netif_dbg(dev, rx_err, dev->net, "drop\n");
3987 		dev->net->stats.rx_errors++;
3988 	}
3989 }
3990 
/* rx_complete - completion callback for bulk-in (Rx) URBs.
 *
 * Classifies the URB status into either rx_done (buffer holds frames
 * to parse) or rx_cleanup (discard), then moves the SKB to the Rx-done
 * queue via defer_bh(), which wakes NAPI.
 *
 * Runs in URB completion (atomic) context.
 */
static void rx_complete(struct urb *urb)
{
	struct sk_buff	*skb = (struct sk_buff *)urb->context;
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	struct lan78xx_net *dev = entry->dev;
	int urb_status = urb->status;
	enum skb_state state;

	netif_dbg(dev, rx_status, dev->net,
		  "rx done: status %d", urb->status);

	/* reflect the number of bytes the device actually transferred */
	skb_put(skb, urb->actual_length);
	state = rx_done;

	if (urb != entry->urb)
		netif_warn(dev, rx_err, dev->net, "URB pointer mismatch");

	switch (urb_status) {
	case 0:
		/* success, but reject runt transfers */
		if (skb->len < RX_SKB_MIN_LEN) {
			state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		usb_mark_last_busy(dev->udev);
		break;
	case -EPIPE:
		/* endpoint stalled; schedule halt recovery */
		dev->net->stats.rx_errors++;
		lan78xx_defer_kevent(dev, EVENT_RX_HALT);
		fallthrough;
	case -ECONNRESET:				/* async unlink */
	case -ESHUTDOWN:				/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		state = rx_cleanup;
		break;
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		state = rx_cleanup;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		fallthrough;

	default:
		state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	state = defer_bh(dev, skb, &dev->rxq, state);
}
4050 
/* rx_submit - submit one Rx buffer as a bulk-in URB.
 *
 * The submission is done under rxq.lock so the device-state checks and
 * the queueing of the SKB are atomic with respect to teardown. On any
 * failure the buffer is returned to the free list.
 *
 * Return: 0 on success, -ENOLINK when the device is stopped or
 * unreachable, or the usb_submit_urb() error.
 */
static int rx_submit(struct lan78xx_net *dev, struct sk_buff *skb, gfp_t flags)
{
	struct skb_data	*entry = (struct skb_data *)skb->cb;
	size_t size = dev->rx_urb_size;
	struct urb *urb = entry->urb;
	unsigned long lockflags;
	int ret = 0;

	usb_fill_bulk_urb(urb, dev->udev, dev->pipe_in,
			  skb->data, size, rx_complete, skb);

	spin_lock_irqsave(&dev->rxq.lock, lockflags);

	/* only submit while the device is present, running and awake */
	if (netif_device_present(dev->net) &&
	    netif_running(dev->net) &&
	    !test_bit(EVENT_RX_HALT, &dev->flags) &&
	    !test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		ret = usb_submit_urb(urb, flags);
		switch (ret) {
		case 0:
			lan78xx_queue_skb(&dev->rxq, skb, rx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			lan78xx_defer_kevent(dev, EVENT_RX_HALT);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach(dev->net);
			break;
		case -EHOSTUNREACH:
			ret = -ENOLINK;
			napi_schedule(&dev->napi);
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", ret);
			napi_schedule(&dev->napi);
			break;
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		ret = -ENOLINK;
	}
	spin_unlock_irqrestore(&dev->rxq.lock, lockflags);

	/* return the buffer to the free pool if it was not submitted */
	if (ret)
		lan78xx_release_rx_buf(dev, skb);

	return ret;
}
4102 
4103 static void lan78xx_rx_urb_submit_all(struct lan78xx_net *dev)
4104 {
4105 	struct sk_buff *rx_buf;
4106 
4107 	/* Ensure the maximum number of Rx URBs is submitted
4108 	 */
4109 	while ((rx_buf = lan78xx_get_rx_buf(dev)) != NULL) {
4110 		if (rx_submit(dev, rx_buf, GFP_ATOMIC) != 0)
4111 			break;
4112 	}
4113 }
4114 
4115 static void lan78xx_rx_urb_resubmit(struct lan78xx_net *dev,
4116 				    struct sk_buff *rx_buf)
4117 {
4118 	/* reset SKB data pointers */
4119 
4120 	rx_buf->data = rx_buf->head;
4121 	skb_reset_tail_pointer(rx_buf);
4122 	rx_buf->len = 0;
4123 	rx_buf->data_len = 0;
4124 
4125 	rx_submit(dev, rx_buf, GFP_ATOMIC);
4126 }
4127 
4128 static void lan78xx_fill_tx_cmd_words(struct sk_buff *skb, u8 *buffer)
4129 {
4130 	u32 tx_cmd_a;
4131 	u32 tx_cmd_b;
4132 
4133 	tx_cmd_a = (u32)(skb->len & TX_CMD_A_LEN_MASK_) | TX_CMD_A_FCS_;
4134 
4135 	if (skb->ip_summed == CHECKSUM_PARTIAL)
4136 		tx_cmd_a |= TX_CMD_A_IPE_ | TX_CMD_A_TPE_;
4137 
4138 	tx_cmd_b = 0;
4139 	if (skb_is_gso(skb)) {
4140 		u16 mss = max(skb_shinfo(skb)->gso_size, TX_CMD_B_MSS_MIN_);
4141 
4142 		tx_cmd_b = (mss << TX_CMD_B_MSS_SHIFT_) & TX_CMD_B_MSS_MASK_;
4143 
4144 		tx_cmd_a |= TX_CMD_A_LSO_;
4145 	}
4146 
4147 	if (skb_vlan_tag_present(skb)) {
4148 		tx_cmd_a |= TX_CMD_A_IVTG_;
4149 		tx_cmd_b |= skb_vlan_tag_get(skb) & TX_CMD_B_VTAG_MASK_;
4150 	}
4151 
4152 	put_unaligned_le32(tx_cmd_a, buffer);
4153 	put_unaligned_le32(tx_cmd_b, buffer + 4);
4154 }
4155 
/* lan78xx_tx_buf_fill - pack pending SKBs into one Tx URB buffer.
 *
 * Dequeues SKBs from the Tx pending queue and copies each, preceded
 * by its command words and any alignment padding, into @tx_buf until
 * the buffer is full or the queue is empty. An SKB that does not fit
 * is pushed back onto the head of the pending queue.
 *
 * Return: the skb_data of @tx_buf with num_of_packet and length set
 * for statistics accounting in tx_complete().
 */
static struct skb_data *lan78xx_tx_buf_fill(struct lan78xx_net *dev,
					    struct sk_buff *tx_buf)
{
	struct skb_data *entry = (struct skb_data *)tx_buf->cb;
	int remain = dev->tx_urb_size;
	u8 *tx_data = tx_buf->data;
	u32 urb_len = 0;

	entry->num_of_packet = 0;
	entry->length = 0;

	/* Work through the pending SKBs and copy the data of each SKB into
	 * the URB buffer if there room for all the SKB data.
	 *
	 * There must be at least DST+SRC+TYPE in the SKB (with padding enabled)
	 */
	while (remain >= TX_SKB_MIN_LEN) {
		unsigned int pending_bytes;
		unsigned int align_bytes;
		struct sk_buff *skb;
		unsigned int len;

		lan78xx_tx_pend_skb_get(dev, &skb, &pending_bytes);

		if (!skb)
			break;

		/* pad so the command words start on a TX_ALIGNMENT boundary */
		align_bytes = (TX_ALIGNMENT - (urb_len % TX_ALIGNMENT)) %
			      TX_ALIGNMENT;
		len = align_bytes + TX_CMD_LEN + skb->len;
		if (len > remain) {
			/* does not fit; put it back for the next URB */
			lan78xx_tx_pend_skb_head_add(dev, skb, &pending_bytes);
			break;
		}

		tx_data += align_bytes;

		lan78xx_fill_tx_cmd_words(skb, tx_data);
		tx_data += TX_CMD_LEN;

		len = skb->len;
		if (skb_copy_bits(skb, 0, tx_data, len) < 0) {
			struct net_device_stats *stats = &dev->net->stats;

			/* copy failed: drop the SKB and reuse the space
			 * reserved for its command words
			 */
			stats->tx_dropped++;
			dev_kfree_skb_any(skb);
			tx_data -= TX_CMD_LEN;
			continue;
		}

		tx_data += len;
		entry->length += len;
		entry->num_of_packet += skb_shinfo(skb)->gso_segs ?: 1;

		dev_kfree_skb_any(skb);

		urb_len = (u32)(tx_data - (u8 *)tx_buf->data);

		remain = dev->tx_urb_size - urb_len;
	}

	skb_put(tx_buf, urb_len);

	return entry;
}
4221 
/* lan78xx_tx_bh - NAPI-context Tx bottom half.
 *
 * Wakes the stack Tx queue when the pending data again fits in the
 * free URB space, then repeatedly fills free Tx buffers from the
 * pending queue and submits them as bulk-out URBs until there is no
 * more data, no more buffers, or a submission fails.
 */
static void lan78xx_tx_bh(struct lan78xx_net *dev)
{
	int ret;

	/* Start the stack Tx queue if it was stopped
	 */
	netif_tx_lock(dev->net);
	if (netif_queue_stopped(dev->net)) {
		if (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev))
			netif_wake_queue(dev->net);
	}
	netif_tx_unlock(dev->net);

	/* Go through the Tx pending queue and set up URBs to transfer
	 * the data to the device. Stop if no more pending data or URBs,
	 * or if an error occurs when a URB is submitted.
	 */
	do {
		struct skb_data *entry;
		struct sk_buff *tx_buf;
		unsigned long flags;

		if (skb_queue_empty(&dev->txq_pend))
			break;

		tx_buf = lan78xx_get_tx_buf(dev);
		if (!tx_buf)
			break;

		/* aggregate pending SKBs into this URB buffer */
		entry = lan78xx_tx_buf_fill(dev, tx_buf);

		spin_lock_irqsave(&dev->txq.lock, flags);
		/* PM reference is released in tx_complete() or on error */
		ret = usb_autopm_get_interface_async(dev->intf);
		if (ret < 0) {
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			goto out;
		}

		usb_fill_bulk_urb(entry->urb, dev->udev, dev->pipe_out,
				  tx_buf->data, tx_buf->len, tx_complete,
				  tx_buf);

		if (tx_buf->len % dev->maxpacket == 0) {
			/* send USB_ZERO_PACKET */
			entry->urb->transfer_flags |= URB_ZERO_PACKET;
		}

#ifdef CONFIG_PM
		/* if device is asleep stop outgoing packet processing */
		if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
			usb_anchor_urb(entry->urb, &dev->deferred);
			netif_stop_queue(dev->net);
			spin_unlock_irqrestore(&dev->txq.lock, flags);
			netdev_dbg(dev->net,
				   "Delaying transmission for resumption\n");
			return;
		}
#endif
		ret = usb_submit_urb(entry->urb, GFP_ATOMIC);
		switch (ret) {
		case 0:
			netif_trans_update(dev->net);
			lan78xx_queue_skb(&dev->txq, tx_buf, tx_start);
			break;
		case -EPIPE:
			/* stalled endpoint: recover from process context */
			netif_stop_queue(dev->net);
			lan78xx_defer_kevent(dev, EVENT_TX_HALT);
			usb_autopm_put_interface_async(dev->intf);
			break;
		case -ENODEV:
		case -ENOENT:
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d (disconnected?)", ret);
			netif_device_detach(dev->net);
			break;
		default:
			usb_autopm_put_interface_async(dev->intf);
			netif_dbg(dev, tx_err, dev->net,
				  "tx submit urb err %d\n", ret);
			break;
		}

		spin_unlock_irqrestore(&dev->txq.lock, flags);

		/* note: the out label sits inside this block so the PM-get
		 * failure path above also drops the buffer and accounts
		 * the dropped packets
		 */
		if (ret) {
			netdev_warn(dev->net, "failed to tx urb %d\n", ret);
out:
			dev->net->stats.tx_dropped += entry->num_of_packet;
			lan78xx_release_tx_buf(dev, tx_buf);
		}
	} while (ret == 0);
}
4314 
/* lan78xx_bh - main NAPI bottom half.
 *
 * Delivers frames left over from the previous cycle, processes the
 * completed-Rx queue (parsing and resubmitting each URB buffer),
 * restores any unprocessed completions for the next cycle, then
 * resubmits free Rx URBs and runs the Tx bottom half.
 *
 * Return: the number of frames delivered this cycle (NAPI work done).
 */
static int lan78xx_bh(struct lan78xx_net *dev, int budget)
{
	struct sk_buff_head done;
	struct sk_buff *rx_buf;
	struct skb_data *entry;
	unsigned long flags;
	int work_done = 0;

	/* Pass frames received in the last NAPI cycle before
	 * working on newly completed URBs.
	 */
	while (!skb_queue_empty(&dev->rxq_overflow)) {
		lan78xx_skb_return(dev, skb_dequeue(&dev->rxq_overflow));
		++work_done;
	}

	/* Take a snapshot of the done queue and move items to a
	 * temporary queue. Rx URB completions will continue to add
	 * to the done queue.
	 */
	__skb_queue_head_init(&done);

	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice_init(&dev->rxq_done, &done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	/* Extract receive frames from completed URBs and
	 * pass them to the stack. Re-submit each completed URB.
	 */
	while ((work_done < budget) &&
	       (rx_buf = __skb_dequeue(&done))) {
		entry = (struct skb_data *)(rx_buf->cb);
		switch (entry->state) {
		case rx_done:
			rx_process(dev, rx_buf, budget, &work_done);
			break;
		case rx_cleanup:
			/* errored buffer: just resubmit it below */
			break;
		default:
			netdev_dbg(dev->net, "rx buf state %d\n",
				   entry->state);
			break;
		}

		lan78xx_rx_urb_resubmit(dev, rx_buf);
	}

	/* If budget was consumed before processing all the URBs put them
	 * back on the front of the done queue. They will be first to be
	 * processed in the next NAPI cycle.
	 */
	spin_lock_irqsave(&dev->rxq_done.lock, flags);
	skb_queue_splice(&done, &dev->rxq_done);
	spin_unlock_irqrestore(&dev->rxq_done.lock, flags);

	if (netif_device_present(dev->net) && netif_running(dev->net)) {
		/* reset update timer delta */
		if (timer_pending(&dev->stat_monitor) && (dev->delta != 1)) {
			dev->delta = 1;
			mod_timer(&dev->stat_monitor,
				  jiffies + STAT_UPDATE_TIMER);
		}

		/* Submit all free Rx URBs */

		if (!test_bit(EVENT_RX_HALT, &dev->flags))
			lan78xx_rx_urb_submit_all(dev);

		/* Submit new Tx URBs */

		lan78xx_tx_bh(dev);
	}

	return work_done;
}
4390 
/* NAPI poll handler.
 *
 * Delegates URB completion processing to lan78xx_bh() and, when the
 * budget was not exhausted, completes NAPI and decides whether another
 * poll cycle must be kicked off immediately.
 */
static int lan78xx_poll(struct napi_struct *napi, int budget)
{
	struct lan78xx_net *dev = container_of(napi, struct lan78xx_net, napi);
	int result = budget;
	int work_done;

	/* Don't do any work if the device is suspended */

	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		napi_complete_done(napi, 0);
		return 0;
	}

	/* Process completed URBs and submit new URBs */

	work_done = lan78xx_bh(dev, budget);

	if (work_done < budget) {
		napi_complete_done(napi, work_done);

		/* Start a new polling cycle if data was received or
		 * data is waiting to be transmitted.
		 */
		if (!skb_queue_empty(&dev->rxq_done)) {
			napi_schedule(napi);
		} else if (netif_carrier_ok(dev->net)) {
			/* Tx URBs all completed but pending data remains:
			 * reschedule so lan78xx_bh() can submit it.
			 */
			if (skb_queue_empty(&dev->txq) &&
			    !skb_queue_empty(&dev->txq_pend)) {
				napi_schedule(napi);
			} else {
				/* Wake a stopped Tx queue under the Tx lock
				 * so the wake-up cannot race with the xmit
				 * path stopping the queue again.
				 */
				netif_tx_lock(dev->net);
				if (netif_queue_stopped(dev->net)) {
					netif_wake_queue(dev->net);
					napi_schedule(napi);
				}
				netif_tx_unlock(dev->net);
			}
		}
		result = work_done;
	}

	return result;
}
4434 
4435 static void lan78xx_delayedwork(struct work_struct *work)
4436 {
4437 	int status;
4438 	struct lan78xx_net *dev;
4439 
4440 	dev = container_of(work, struct lan78xx_net, wq.work);
4441 
4442 	if (test_bit(EVENT_DEV_DISCONNECT, &dev->flags))
4443 		return;
4444 
4445 	if (usb_autopm_get_interface(dev->intf) < 0)
4446 		return;
4447 
4448 	if (test_bit(EVENT_TX_HALT, &dev->flags)) {
4449 		unlink_urbs(dev, &dev->txq);
4450 
4451 		status = usb_clear_halt(dev->udev, dev->pipe_out);
4452 		if (status < 0 &&
4453 		    status != -EPIPE &&
4454 		    status != -ESHUTDOWN) {
4455 			if (netif_msg_tx_err(dev))
4456 				netdev_err(dev->net,
4457 					   "can't clear tx halt, status %d\n",
4458 					   status);
4459 		} else {
4460 			clear_bit(EVENT_TX_HALT, &dev->flags);
4461 			if (status != -ESHUTDOWN)
4462 				netif_wake_queue(dev->net);
4463 		}
4464 	}
4465 
4466 	if (test_bit(EVENT_RX_HALT, &dev->flags)) {
4467 		unlink_urbs(dev, &dev->rxq);
4468 		status = usb_clear_halt(dev->udev, dev->pipe_in);
4469 		if (status < 0 &&
4470 		    status != -EPIPE &&
4471 		    status != -ESHUTDOWN) {
4472 			if (netif_msg_rx_err(dev))
4473 				netdev_err(dev->net,
4474 					   "can't clear rx halt, status %d\n",
4475 					   status);
4476 		} else {
4477 			clear_bit(EVENT_RX_HALT, &dev->flags);
4478 			napi_schedule(&dev->napi);
4479 		}
4480 	}
4481 
4482 	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
4483 		int ret = 0;
4484 
4485 		clear_bit(EVENT_LINK_RESET, &dev->flags);
4486 		if (lan78xx_link_reset(dev) < 0) {
4487 			netdev_info(dev->net, "link reset failed (%d)\n",
4488 				    ret);
4489 		}
4490 	}
4491 
4492 	if (test_bit(EVENT_STAT_UPDATE, &dev->flags)) {
4493 		lan78xx_update_stats(dev);
4494 
4495 		clear_bit(EVENT_STAT_UPDATE, &dev->flags);
4496 
4497 		mod_timer(&dev->stat_monitor,
4498 			  jiffies + (STAT_UPDATE_TIMER * dev->delta));
4499 
4500 		dev->delta = min((dev->delta * 2), 50);
4501 	}
4502 
4503 	usb_autopm_put_interface(dev->intf);
4504 }
4505 
4506 static void intr_complete(struct urb *urb)
4507 {
4508 	struct lan78xx_net *dev = urb->context;
4509 	int status = urb->status;
4510 
4511 	switch (status) {
4512 	/* success */
4513 	case 0:
4514 		lan78xx_status(dev, urb);
4515 		break;
4516 
4517 	/* software-driven interface shutdown */
4518 	case -ENOENT:			/* urb killed */
4519 	case -ENODEV:			/* hardware gone */
4520 	case -ESHUTDOWN:		/* hardware gone */
4521 		netif_dbg(dev, ifdown, dev->net,
4522 			  "intr shutdown, code %d\n", status);
4523 		return;
4524 
4525 	/* NOTE:  not throttling like RX/TX, since this endpoint
4526 	 * already polls infrequently
4527 	 */
4528 	default:
4529 		netdev_dbg(dev->net, "intr status %d\n", status);
4530 		break;
4531 	}
4532 
4533 	if (!netif_device_present(dev->net) ||
4534 	    !netif_running(dev->net)) {
4535 		netdev_warn(dev->net, "not submitting new status URB");
4536 		return;
4537 	}
4538 
4539 	memset(urb->transfer_buffer, 0, urb->transfer_buffer_length);
4540 	status = usb_submit_urb(urb, GFP_ATOMIC);
4541 
4542 	switch (status) {
4543 	case  0:
4544 		break;
4545 	case -ENODEV:
4546 	case -ENOENT:
4547 		netif_dbg(dev, timer, dev->net,
4548 			  "intr resubmit %d (disconnect?)", status);
4549 		netif_device_detach(dev->net);
4550 		break;
4551 	default:
4552 		netif_err(dev, timer, dev->net,
4553 			  "intr resubmit --> %d\n", status);
4554 		break;
4555 	}
4556 }
4557 
/* USB disconnect callback.
 *
 * Tears the device down in a deliberate order: detach the driver data,
 * remove NAPI, unregister the netdev, then stop deferred work before
 * releasing the PHY, URBs and buffer pools. Finally drops the device
 * reference taken in probe.
 */
static void lan78xx_disconnect(struct usb_interface *intf)
{
	struct lan78xx_net *dev;
	struct usb_device *udev;
	struct net_device *net;
	struct phy_device *phydev;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	netif_napi_del(&dev->napi);

	udev = interface_to_usbdev(intf);
	net = dev->net;

	unregister_netdev(net);

	/* stop the stat timer and make sure delayed work sees the
	 * disconnect flag before it is cancelled
	 */
	timer_shutdown_sync(&dev->stat_monitor);
	set_bit(EVENT_DEV_DISCONNECT, &dev->flags);
	cancel_delayed_work_sync(&dev->wq);

	/* keep a pointer: phy_disconnect() clears net->phydev */
	phydev = net->phydev;

	phy_disconnect(net->phydev);

	/* a fixed-link PHY was registered by this driver; free it too */
	if (phy_is_pseudo_fixed_link(phydev)) {
		fixed_phy_unregister(phydev);
		phy_device_free(phydev);
	}

	usb_scuttle_anchored_urbs(&dev->deferred);

	lan78xx_unbind(dev, intf);

	lan78xx_free_tx_resources(dev);
	lan78xx_free_rx_resources(dev);

	usb_kill_urb(dev->urb_intr);
	usb_free_urb(dev->urb_intr);

	free_netdev(net);
	usb_put_dev(udev);
}
4603 
/* ndo_tx_timeout: cancel all in-flight Tx URBs and kick NAPI so the
 * transmit path is restarted.
 */
static void lan78xx_tx_timeout(struct net_device *net, unsigned int txqueue)
{
	struct lan78xx_net *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	napi_schedule(&dev->napi);
}
4611 
4612 static netdev_features_t lan78xx_features_check(struct sk_buff *skb,
4613 						struct net_device *netdev,
4614 						netdev_features_t features)
4615 {
4616 	struct lan78xx_net *dev = netdev_priv(netdev);
4617 
4618 	if (skb->len > LAN78XX_TSO_SIZE(dev))
4619 		features &= ~NETIF_F_GSO_MASK;
4620 
4621 	features = vlan_features_check(skb, features);
4622 	features = vxlan_features_check(skb, features);
4623 
4624 	return features;
4625 }
4626 
/* net_device operations for the lan78xx netdev */
static const struct net_device_ops lan78xx_netdev_ops = {
	.ndo_open		= lan78xx_open,
	.ndo_stop		= lan78xx_stop,
	.ndo_start_xmit		= lan78xx_start_xmit,
	.ndo_tx_timeout		= lan78xx_tx_timeout,
	.ndo_change_mtu		= lan78xx_change_mtu,
	.ndo_set_mac_address	= lan78xx_set_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_eth_ioctl		= phy_do_ioctl_running,
	.ndo_set_rx_mode	= lan78xx_set_multicast,
	.ndo_set_features	= lan78xx_set_features,
	.ndo_vlan_rx_add_vid	= lan78xx_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= lan78xx_vlan_rx_kill_vid,
	.ndo_features_check	= lan78xx_features_check,
};
4642 
/* Statistics timer callback: defers the actual register reads to the
 * delayed work handler, which may sleep.
 */
static void lan78xx_stat_monitor(struct timer_list *t)
{
	struct lan78xx_net *dev = from_timer(dev, t, stat_monitor);

	lan78xx_defer_kevent(dev, EVENT_STAT_UPDATE);
}
4649 
/* Probe: allocate the netdev and driver state, validate the three
 * expected endpoints (bulk-in, bulk-out, interrupt-in), bind the
 * hardware, set up the interrupt URB and register the network device.
 * Error labels unwind in the reverse order of acquisition and fall
 * through to the labels below them.
 */
static int lan78xx_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_host_endpoint *ep_blkin, *ep_blkout, *ep_intr;
	struct lan78xx_net *dev;
	struct net_device *netdev;
	struct usb_device *udev;
	int ret;
	unsigned int maxp;
	unsigned int period;
	u8 *buf = NULL;

	udev = interface_to_usbdev(intf);
	udev = usb_get_dev(udev);	/* ref dropped at out1 / disconnect */

	netdev = alloc_etherdev(sizeof(struct lan78xx_net));
	if (!netdev) {
		dev_err(&intf->dev, "Error: OOM\n");
		ret = -ENOMEM;
		goto out1;
	}

	/* netdev_printk() needs this */
	SET_NETDEV_DEV(netdev, &intf->dev);

	dev = netdev_priv(netdev);
	dev->udev = udev;
	dev->intf = intf;
	dev->net = netdev;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV
					| NETIF_MSG_PROBE | NETIF_MSG_LINK);

	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->rxq_done);
	skb_queue_head_init(&dev->txq_pend);
	skb_queue_head_init(&dev->rxq_overflow);
	mutex_init(&dev->mdiobus_mutex);
	mutex_init(&dev->dev_mutex);

	ret = lan78xx_urb_config_init(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_tx_resources(dev);
	if (ret < 0)
		goto out2;

	ret = lan78xx_alloc_rx_resources(dev);
	if (ret < 0)
		goto out3;

	/* MTU range: 68 - 9000 */
	netdev->max_mtu = MAX_SINGLE_PACKET_SIZE;

	netif_set_tso_max_size(netdev, LAN78XX_TSO_SIZE(dev));

	netif_napi_add(netdev, &dev->napi, lan78xx_poll);

	INIT_DELAYED_WORK(&dev->wq, lan78xx_delayedwork);
	init_usb_anchor(&dev->deferred);

	netdev->netdev_ops = &lan78xx_netdev_ops;
	netdev->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	netdev->ethtool_ops = &lan78xx_ethtool_ops;

	dev->delta = 1;
	timer_setup(&dev->stat_monitor, lan78xx_stat_monitor, 0);

	mutex_init(&dev->stats.access_lock);

	/* bulk-in, bulk-out and interrupt endpoints are required */
	if (intf->cur_altsetting->desc.bNumEndpoints < 3) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_in = usb_rcvbulkpipe(udev, BULK_IN_PIPE);
	ep_blkin = usb_pipe_endpoint(udev, dev->pipe_in);
	if (!ep_blkin || !usb_endpoint_is_bulk_in(&ep_blkin->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_out = usb_sndbulkpipe(udev, BULK_OUT_PIPE);
	ep_blkout = usb_pipe_endpoint(udev, dev->pipe_out);
	if (!ep_blkout || !usb_endpoint_is_bulk_out(&ep_blkout->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	ep_intr = &intf->cur_altsetting->endpoint[2];
	if (!usb_endpoint_is_int_in(&ep_intr->desc)) {
		ret = -ENODEV;
		goto out4;
	}

	dev->pipe_intr = usb_rcvintpipe(dev->udev,
					usb_endpoint_num(&ep_intr->desc));

	ret = lan78xx_bind(dev, intf);
	if (ret < 0)
		goto out4;

	period = ep_intr->desc.bInterval;
	maxp = usb_maxpacket(dev->udev, dev->pipe_intr);

	dev->urb_intr = usb_alloc_urb(0, GFP_KERNEL);
	if (!dev->urb_intr) {
		ret = -ENOMEM;
		goto out5;
	}

	buf = kmalloc(maxp, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto free_urbs;
	}

	usb_fill_int_urb(dev->urb_intr, dev->udev,
			 dev->pipe_intr, buf, maxp,
			 intr_complete, dev, period);
	/* buf is now owned by the URB and freed with it */
	dev->urb_intr->transfer_flags |= URB_FREE_BUFFER;

	dev->maxpacket = usb_maxpacket(dev->udev, dev->pipe_out);

	/* Reject broken descriptors. */
	if (dev->maxpacket == 0) {
		ret = -ENODEV;
		goto free_urbs;
	}

	/* driver requires remote-wakeup capability during autosuspend. */
	intf->needs_remote_wakeup = 1;

	ret = lan78xx_phy_init(dev);
	if (ret < 0)
		goto free_urbs;

	ret = register_netdev(netdev);
	if (ret != 0) {
		netif_err(dev, probe, netdev, "couldn't register the device\n");
		goto out8;
	}

	usb_set_intfdata(intf, dev);

	/* NOTE(review): return value is assigned but never checked and
	 * probe returns 0 regardless — confirm failures here are
	 * intentionally ignored.
	 */
	ret = device_set_wakeup_enable(&udev->dev, true);

	 /* Default delay of 2sec has more overhead than advantage.
	  * Set to 10sec as default.
	  */
	pm_runtime_set_autosuspend_delay(&udev->dev,
					 DEFAULT_AUTOSUSPEND_DELAY);

	return 0;

out8:
	phy_disconnect(netdev->phydev);
free_urbs:
	usb_free_urb(dev->urb_intr);
out5:
	lan78xx_unbind(dev, intf);
out4:
	netif_napi_del(&dev->napi);
	lan78xx_free_rx_resources(dev);
out3:
	lan78xx_free_tx_resources(dev);
out2:
	free_netdev(netdev);
out1:
	usb_put_dev(udev);

	return ret;
}
4824 
4825 static u16 lan78xx_wakeframe_crc16(const u8 *buf, int len)
4826 {
4827 	const u16 crc16poly = 0x8005;
4828 	int i;
4829 	u16 bit, crc, msb;
4830 	u8 data;
4831 
4832 	crc = 0xFFFF;
4833 	for (i = 0; i < len; i++) {
4834 		data = *buf++;
4835 		for (bit = 0; bit < 8; bit++) {
4836 			msb = crc >> 15;
4837 			crc <<= 1;
4838 
4839 			if (msb ^ (u16)(data & 1)) {
4840 				crc ^= crc16poly;
4841 				crc |= (u16)0x0001U;
4842 			}
4843 			data >>= 1;
4844 		}
4845 	}
4846 
4847 	return crc;
4848 }
4849 
/* Configure the chip for USB autosuspend (selective suspend).
 *
 * Stops both data paths, clears stale wake status, arms good-frame and
 * PHY wake sources, selects suspend mode 3 and re-enables Rx so a
 * received frame can wake the link. The register write order follows
 * the hardware's suspend entry sequence.
 */
static int lan78xx_set_auto_suspend(struct lan78xx_net *dev)
{
	u32 buf;
	int ret;

	ret = lan78xx_stop_tx_path(dev);
	if (ret < 0)
		return ret;

	ret = lan78xx_stop_rx_path(dev);
	if (ret < 0)
		return ret;

	/* auto suspend (selective suspend) */

	ret = lan78xx_write_reg(dev, WUCSR, 0);
	if (ret < 0)
		return ret;
	ret = lan78xx_write_reg(dev, WUCSR2, 0);
	if (ret < 0)
		return ret;
	/* clear all wake-source status bits */
	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
	if (ret < 0)
		return ret;

	/* set goodframe wakeup */

	ret = lan78xx_read_reg(dev, WUCSR, &buf);
	if (ret < 0)
		return ret;

	buf |= WUCSR_RFE_WAKE_EN_;
	buf |= WUCSR_STORE_WAKE_;

	ret = lan78xx_write_reg(dev, WUCSR, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
	buf |= PMT_CTL_RES_CLR_WKP_STS_;
	buf |= PMT_CTL_PHY_WAKE_EN_;
	buf |= PMT_CTL_WOL_EN_;
	buf &= ~PMT_CTL_SUS_MODE_MASK_;
	buf |= PMT_CTL_SUS_MODE_3_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
	if (ret < 0)
		return ret;

	/* write 1 to clear any latched wake-up status */
	buf |= PMT_CTL_WUPS_MASK_;

	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
	if (ret < 0)
		return ret;

	/* Rx stays enabled so incoming traffic can wake the device */
	ret = lan78xx_start_rx_path(dev);

	return ret;
}
4917 
4918 static int lan78xx_set_suspend(struct lan78xx_net *dev, u32 wol)
4919 {
4920 	const u8 ipv4_multicast[3] = { 0x01, 0x00, 0x5E };
4921 	const u8 ipv6_multicast[3] = { 0x33, 0x33 };
4922 	const u8 arp_type[2] = { 0x08, 0x06 };
4923 	u32 temp_pmt_ctl;
4924 	int mask_index;
4925 	u32 temp_wucsr;
4926 	u32 buf;
4927 	u16 crc;
4928 	int ret;
4929 
4930 	ret = lan78xx_stop_tx_path(dev);
4931 	if (ret < 0)
4932 		return ret;
4933 	ret = lan78xx_stop_rx_path(dev);
4934 	if (ret < 0)
4935 		return ret;
4936 
4937 	ret = lan78xx_write_reg(dev, WUCSR, 0);
4938 	if (ret < 0)
4939 		return ret;
4940 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
4941 	if (ret < 0)
4942 		return ret;
4943 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
4944 	if (ret < 0)
4945 		return ret;
4946 
4947 	temp_wucsr = 0;
4948 
4949 	temp_pmt_ctl = 0;
4950 
4951 	ret = lan78xx_read_reg(dev, PMT_CTL, &temp_pmt_ctl);
4952 	if (ret < 0)
4953 		return ret;
4954 
4955 	temp_pmt_ctl &= ~PMT_CTL_RES_CLR_WKP_EN_;
4956 	temp_pmt_ctl |= PMT_CTL_RES_CLR_WKP_STS_;
4957 
4958 	for (mask_index = 0; mask_index < NUM_OF_WUF_CFG; mask_index++) {
4959 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index), 0);
4960 		if (ret < 0)
4961 			return ret;
4962 	}
4963 
4964 	mask_index = 0;
4965 	if (wol & WAKE_PHY) {
4966 		temp_pmt_ctl |= PMT_CTL_PHY_WAKE_EN_;
4967 
4968 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4969 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4970 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4971 	}
4972 	if (wol & WAKE_MAGIC) {
4973 		temp_wucsr |= WUCSR_MPEN_;
4974 
4975 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4976 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4977 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_3_;
4978 	}
4979 	if (wol & WAKE_BCAST) {
4980 		temp_wucsr |= WUCSR_BCST_EN_;
4981 
4982 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
4983 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
4984 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
4985 	}
4986 	if (wol & WAKE_MCAST) {
4987 		temp_wucsr |= WUCSR_WAKE_EN_;
4988 
4989 		/* set WUF_CFG & WUF_MASK for IPv4 Multicast */
4990 		crc = lan78xx_wakeframe_crc16(ipv4_multicast, 3);
4991 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
4992 					WUF_CFGX_EN_ |
4993 					WUF_CFGX_TYPE_MCAST_ |
4994 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
4995 					(crc & WUF_CFGX_CRC16_MASK_));
4996 		if (ret < 0)
4997 			return ret;
4998 
4999 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 7);
5000 		if (ret < 0)
5001 			return ret;
5002 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5003 		if (ret < 0)
5004 			return ret;
5005 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5006 		if (ret < 0)
5007 			return ret;
5008 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5009 		if (ret < 0)
5010 			return ret;
5011 
5012 		mask_index++;
5013 
5014 		/* for IPv6 Multicast */
5015 		crc = lan78xx_wakeframe_crc16(ipv6_multicast, 2);
5016 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
5017 					WUF_CFGX_EN_ |
5018 					WUF_CFGX_TYPE_MCAST_ |
5019 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
5020 					(crc & WUF_CFGX_CRC16_MASK_));
5021 		if (ret < 0)
5022 			return ret;
5023 
5024 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 3);
5025 		if (ret < 0)
5026 			return ret;
5027 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5028 		if (ret < 0)
5029 			return ret;
5030 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5031 		if (ret < 0)
5032 			return ret;
5033 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5034 		if (ret < 0)
5035 			return ret;
5036 
5037 		mask_index++;
5038 
5039 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5040 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5041 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5042 	}
5043 	if (wol & WAKE_UCAST) {
5044 		temp_wucsr |= WUCSR_PFDA_EN_;
5045 
5046 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5047 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5048 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5049 	}
5050 	if (wol & WAKE_ARP) {
5051 		temp_wucsr |= WUCSR_WAKE_EN_;
5052 
5053 		/* set WUF_CFG & WUF_MASK
5054 		 * for packettype (offset 12,13) = ARP (0x0806)
5055 		 */
5056 		crc = lan78xx_wakeframe_crc16(arp_type, 2);
5057 		ret = lan78xx_write_reg(dev, WUF_CFG(mask_index),
5058 					WUF_CFGX_EN_ |
5059 					WUF_CFGX_TYPE_ALL_ |
5060 					(0 << WUF_CFGX_OFFSET_SHIFT_) |
5061 					(crc & WUF_CFGX_CRC16_MASK_));
5062 		if (ret < 0)
5063 			return ret;
5064 
5065 		ret = lan78xx_write_reg(dev, WUF_MASK0(mask_index), 0x3000);
5066 		if (ret < 0)
5067 			return ret;
5068 		ret = lan78xx_write_reg(dev, WUF_MASK1(mask_index), 0);
5069 		if (ret < 0)
5070 			return ret;
5071 		ret = lan78xx_write_reg(dev, WUF_MASK2(mask_index), 0);
5072 		if (ret < 0)
5073 			return ret;
5074 		ret = lan78xx_write_reg(dev, WUF_MASK3(mask_index), 0);
5075 		if (ret < 0)
5076 			return ret;
5077 
5078 		mask_index++;
5079 
5080 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5081 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5082 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5083 	}
5084 
5085 	ret = lan78xx_write_reg(dev, WUCSR, temp_wucsr);
5086 	if (ret < 0)
5087 		return ret;
5088 
5089 	/* when multiple WOL bits are set */
5090 	if (hweight_long((unsigned long)wol) > 1) {
5091 		temp_pmt_ctl |= PMT_CTL_WOL_EN_;
5092 		temp_pmt_ctl &= ~PMT_CTL_SUS_MODE_MASK_;
5093 		temp_pmt_ctl |= PMT_CTL_SUS_MODE_0_;
5094 	}
5095 	ret = lan78xx_write_reg(dev, PMT_CTL, temp_pmt_ctl);
5096 	if (ret < 0)
5097 		return ret;
5098 
5099 	/* clear WUPS */
5100 	ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
5101 	if (ret < 0)
5102 		return ret;
5103 
5104 	buf |= PMT_CTL_WUPS_MASK_;
5105 
5106 	ret = lan78xx_write_reg(dev, PMT_CTL, buf);
5107 	if (ret < 0)
5108 		return ret;
5109 
5110 	ret = lan78xx_start_rx_path(dev);
5111 
5112 	return ret;
5113 }
5114 
/* USB suspend callback (both autosuspend and system suspend).
 *
 * Refuses autosuspend while Tx is pending, quiesces the data paths,
 * kills outstanding URBs and then arms the appropriate wake sources:
 * good-frame wake for autosuspend, the user's WoL settings for system
 * suspend, or no wake sources at all if the interface is down.
 */
static int lan78xx_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct lan78xx_net *dev = usb_get_intfdata(intf);
	bool dev_open;
	int ret;

	mutex_lock(&dev->dev_mutex);

	netif_dbg(dev, ifdown, dev->net,
		  "suspending: pm event %#x", message.event);

	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);

	if (dev_open) {
		/* the ASLEEP flag must be set under txq.lock so the xmit
		 * path cannot queue new work concurrently
		 */
		spin_lock_irq(&dev->txq.lock);
		/* don't autosuspend while transmitting */
		if ((skb_queue_len(&dev->txq) ||
		     skb_queue_len(&dev->txq_pend)) &&
		    PMSG_IS_AUTO(message)) {
			spin_unlock_irq(&dev->txq.lock);
			ret = -EBUSY;
			goto out;
		} else {
			set_bit(EVENT_DEV_ASLEEP, &dev->flags);
			spin_unlock_irq(&dev->txq.lock);
		}

		/* stop RX */
		ret = lan78xx_stop_rx_path(dev);
		if (ret < 0)
			goto out;

		ret = lan78xx_flush_rx_fifo(dev);
		if (ret < 0)
			goto out;

		/* stop Tx */
		ret = lan78xx_stop_tx_path(dev);
		if (ret < 0)
			goto out;

		/* empty out the Rx and Tx queues */
		netif_device_detach(dev->net);
		lan78xx_terminate_urbs(dev);
		usb_kill_urb(dev->urb_intr);

		/* reattach */
		netif_device_attach(dev->net);

		timer_delete(&dev->stat_monitor);

		if (PMSG_IS_AUTO(message)) {
			ret = lan78xx_set_auto_suspend(dev);
			if (ret < 0)
				goto out;
		} else {
			struct lan78xx_priv *pdata;

			pdata = (struct lan78xx_priv *)(dev->data[0]);
			netif_carrier_off(dev->net);
			/* arm the user-configured Wake-on-LAN sources */
			ret = lan78xx_set_suspend(dev, pdata->wol);
			if (ret < 0)
				goto out;
		}
	} else {
		/* Interface is down; don't allow WOL and PHY
		 * events to wake up the host
		 */
		u32 buf;

		set_bit(EVENT_DEV_ASLEEP, &dev->flags);

		ret = lan78xx_write_reg(dev, WUCSR, 0);
		if (ret < 0)
			goto out;
		ret = lan78xx_write_reg(dev, WUCSR2, 0);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		buf &= ~PMT_CTL_RES_CLR_WKP_EN_;
		buf |= PMT_CTL_RES_CLR_WKP_STS_;
		buf &= ~PMT_CTL_SUS_MODE_MASK_;
		buf |= PMT_CTL_SUS_MODE_3_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;

		ret = lan78xx_read_reg(dev, PMT_CTL, &buf);
		if (ret < 0)
			goto out;

		/* write 1 to clear any latched wake-up status */
		buf |= PMT_CTL_WUPS_MASK_;

		ret = lan78xx_write_reg(dev, PMT_CTL, buf);
		if (ret < 0)
			goto out;
	}

	ret = 0;
out:
	mutex_unlock(&dev->dev_mutex);

	return ret;
}
5224 
5225 static bool lan78xx_submit_deferred_urbs(struct lan78xx_net *dev)
5226 {
5227 	bool pipe_halted = false;
5228 	struct urb *urb;
5229 
5230 	while ((urb = usb_get_from_anchor(&dev->deferred))) {
5231 		struct sk_buff *skb = urb->context;
5232 		int ret;
5233 
5234 		if (!netif_device_present(dev->net) ||
5235 		    !netif_carrier_ok(dev->net) ||
5236 		    pipe_halted) {
5237 			lan78xx_release_tx_buf(dev, skb);
5238 			continue;
5239 		}
5240 
5241 		ret = usb_submit_urb(urb, GFP_ATOMIC);
5242 
5243 		if (ret == 0) {
5244 			netif_trans_update(dev->net);
5245 			lan78xx_queue_skb(&dev->txq, skb, tx_start);
5246 		} else {
5247 			if (ret == -EPIPE) {
5248 				netif_stop_queue(dev->net);
5249 				pipe_halted = true;
5250 			} else if (ret == -ENODEV) {
5251 				netif_device_detach(dev->net);
5252 			}
5253 
5254 			lan78xx_release_tx_buf(dev, skb);
5255 		}
5256 	}
5257 
5258 	return pipe_halted;
5259 }
5260 
5261 static int lan78xx_resume(struct usb_interface *intf)
5262 {
5263 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5264 	bool dev_open;
5265 	int ret;
5266 
5267 	mutex_lock(&dev->dev_mutex);
5268 
5269 	netif_dbg(dev, ifup, dev->net, "resuming device");
5270 
5271 	dev_open = test_bit(EVENT_DEV_OPEN, &dev->flags);
5272 
5273 	if (dev_open) {
5274 		bool pipe_halted = false;
5275 
5276 		ret = lan78xx_flush_tx_fifo(dev);
5277 		if (ret < 0)
5278 			goto out;
5279 
5280 		if (dev->urb_intr) {
5281 			int ret = usb_submit_urb(dev->urb_intr, GFP_KERNEL);
5282 
5283 			if (ret < 0) {
5284 				if (ret == -ENODEV)
5285 					netif_device_detach(dev->net);
5286 				netdev_warn(dev->net, "Failed to submit intr URB");
5287 			}
5288 		}
5289 
5290 		spin_lock_irq(&dev->txq.lock);
5291 
5292 		if (netif_device_present(dev->net)) {
5293 			pipe_halted = lan78xx_submit_deferred_urbs(dev);
5294 
5295 			if (pipe_halted)
5296 				lan78xx_defer_kevent(dev, EVENT_TX_HALT);
5297 		}
5298 
5299 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5300 
5301 		spin_unlock_irq(&dev->txq.lock);
5302 
5303 		if (!pipe_halted &&
5304 		    netif_device_present(dev->net) &&
5305 		    (lan78xx_tx_pend_data_len(dev) < lan78xx_tx_urb_space(dev)))
5306 			netif_start_queue(dev->net);
5307 
5308 		ret = lan78xx_start_tx_path(dev);
5309 		if (ret < 0)
5310 			goto out;
5311 
5312 		napi_schedule(&dev->napi);
5313 
5314 		if (!timer_pending(&dev->stat_monitor)) {
5315 			dev->delta = 1;
5316 			mod_timer(&dev->stat_monitor,
5317 				  jiffies + STAT_UPDATE_TIMER);
5318 		}
5319 
5320 	} else {
5321 		clear_bit(EVENT_DEV_ASLEEP, &dev->flags);
5322 	}
5323 
5324 	ret = lan78xx_write_reg(dev, WUCSR2, 0);
5325 	if (ret < 0)
5326 		goto out;
5327 	ret = lan78xx_write_reg(dev, WUCSR, 0);
5328 	if (ret < 0)
5329 		goto out;
5330 	ret = lan78xx_write_reg(dev, WK_SRC, 0xFFF1FF1FUL);
5331 	if (ret < 0)
5332 		goto out;
5333 
5334 	ret = lan78xx_write_reg(dev, WUCSR2, WUCSR2_NS_RCD_ |
5335 					     WUCSR2_ARP_RCD_ |
5336 					     WUCSR2_IPV6_TCPSYN_RCD_ |
5337 					     WUCSR2_IPV4_TCPSYN_RCD_);
5338 	if (ret < 0)
5339 		goto out;
5340 
5341 	ret = lan78xx_write_reg(dev, WUCSR, WUCSR_EEE_TX_WAKE_ |
5342 					    WUCSR_EEE_RX_WAKE_ |
5343 					    WUCSR_PFDA_FR_ |
5344 					    WUCSR_RFE_WAKE_FR_ |
5345 					    WUCSR_WUFR_ |
5346 					    WUCSR_MPR_ |
5347 					    WUCSR_BCST_FR_);
5348 	if (ret < 0)
5349 		goto out;
5350 
5351 	ret = 0;
5352 out:
5353 	mutex_unlock(&dev->dev_mutex);
5354 
5355 	return ret;
5356 }
5357 
5358 static int lan78xx_reset_resume(struct usb_interface *intf)
5359 {
5360 	struct lan78xx_net *dev = usb_get_intfdata(intf);
5361 	int ret;
5362 
5363 	netif_dbg(dev, ifup, dev->net, "(reset) resuming device");
5364 
5365 	ret = lan78xx_reset(dev);
5366 	if (ret < 0)
5367 		return ret;
5368 
5369 	phy_start(dev->net->phydev);
5370 
5371 	ret = lan78xx_resume(intf);
5372 
5373 	return ret;
5374 }
5375 
/* USB vendor/product IDs of supported devices */
static const struct usb_device_id products[] = {
	{
	/* LAN7800 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7800_USB_PRODUCT_ID),
	},
	{
	/* LAN7850 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7850_USB_PRODUCT_ID),
	},
	{
	/* LAN7801 USB Gigabit Ethernet Device */
	USB_DEVICE(LAN78XX_USB_VENDOR_ID, LAN7801_USB_PRODUCT_ID),
	},
	{
	/* ATM2-AF USB Gigabit Ethernet Device */
	USB_DEVICE(AT29M2AF_USB_VENDOR_ID, AT29M2AF_USB_PRODUCT_ID),
	},
	{},
};
MODULE_DEVICE_TABLE(usb, products);
5396 
/* USB driver registration; supports autosuspend and disables
 * hub-initiated LPM (the device relies on remote wakeup instead).
 */
static struct usb_driver lan78xx_driver = {
	.name			= DRIVER_NAME,
	.id_table		= products,
	.probe			= lan78xx_probe,
	.disconnect		= lan78xx_disconnect,
	.suspend		= lan78xx_suspend,
	.resume			= lan78xx_resume,
	.reset_resume		= lan78xx_reset_resume,
	.supports_autosuspend	= 1,
	.disable_hub_initiated_lpm = 1,
};

module_usb_driver(lan78xx_driver);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
5414