1 /*
2  * QLogic qlge NIC HBA Driver
3  * Copyright (c)  2003-2008 QLogic Corporation
4  * See LICENSE.qlge for copyright and licensing details.
5  * Author:     Linux qlge network device driver by
6  *                      Ron Mercer <ron.mercer@qlogic.com>
7  */
8 #include <linux/kernel.h>
9 #include <linux/init.h>
10 #include <linux/bitops.h>
11 #include <linux/types.h>
12 #include <linux/module.h>
13 #include <linux/list.h>
14 #include <linux/pci.h>
15 #include <linux/dma-mapping.h>
16 #include <linux/pagemap.h>
17 #include <linux/sched.h>
18 #include <linux/slab.h>
19 #include <linux/dmapool.h>
20 #include <linux/mempool.h>
21 #include <linux/spinlock.h>
22 #include <linux/kthread.h>
23 #include <linux/interrupt.h>
24 #include <linux/errno.h>
25 #include <linux/ioport.h>
26 #include <linux/in.h>
27 #include <linux/ip.h>
28 #include <linux/ipv6.h>
29 #include <net/ipv6.h>
30 #include <linux/tcp.h>
31 #include <linux/udp.h>
32 #include <linux/if_arp.h>
33 #include <linux/if_ether.h>
34 #include <linux/netdevice.h>
35 #include <linux/etherdevice.h>
36 #include <linux/ethtool.h>
37 #include <linux/if_vlan.h>
38 #include <linux/skbuff.h>
39 #include <linux/delay.h>
40 #include <linux/mm.h>
41 #include <linux/vmalloc.h>
42 #include <linux/prefetch.h>
43 #include <net/ip6_checksum.h>
44 
45 #include "qlge.h"
46 
47 char qlge_driver_name[] = DRV_NAME;
48 const char qlge_driver_version[] = DRV_VERSION;
49 
50 MODULE_AUTHOR("Ron Mercer <ron.mercer@qlogic.com>");
51 MODULE_DESCRIPTION(DRV_STRING " ");
52 MODULE_LICENSE("GPL");
53 MODULE_VERSION(DRV_VERSION);
54 
55 static const u32 default_msg =
56     NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK |
57 /* NETIF_MSG_TIMER |	*/
58     NETIF_MSG_IFDOWN |
59     NETIF_MSG_IFUP |
60     NETIF_MSG_RX_ERR |
61     NETIF_MSG_TX_ERR |
62 /*  NETIF_MSG_TX_QUEUED | */
63 /*  NETIF_MSG_INTR | NETIF_MSG_TX_DONE | NETIF_MSG_RX_STATUS | */
64 /* NETIF_MSG_PKTDATA | */
65     NETIF_MSG_HW | NETIF_MSG_WOL | 0;
66 
67 static int debug = -1;	/* defaults above */
68 module_param(debug, int, 0664);
69 MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
70 
71 #define MSIX_IRQ 0
72 #define MSI_IRQ 1
73 #define LEG_IRQ 2
74 static int qlge_irq_type = MSIX_IRQ;
75 module_param(qlge_irq_type, int, 0664);
76 MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
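
/* Example (illustrative, not taken from this file; it assumes the module
 * is named after DRV_NAME, i.e. "qlge"): forcing legacy interrupts at
 * load time would be done with "modprobe qlge qlge_irq_type=2", using
 * the mapping above (0 = MSI-X, 1 = MSI, 2 = Legacy).
 */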
77 
78 static int qlge_mpi_coredump;
79 module_param(qlge_mpi_coredump, int, 0);
80 MODULE_PARM_DESC(qlge_mpi_coredump,
81 		"Option to enable MPI firmware dump. "
		"Default is OFF - Do not allocate memory.");
83 
84 static int qlge_force_coredump;
85 module_param(qlge_force_coredump, int, 0);
86 MODULE_PARM_DESC(qlge_force_coredump,
87 		"Option to allow force of firmware core dump. "
88 		"Default is OFF - Do not allow.");
89 
90 static DEFINE_PCI_DEVICE_TABLE(qlge_pci_tbl) = {
91 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
92 	{PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8000)},
93 	/* required last entry */
94 	{0,}
95 };
96 
97 MODULE_DEVICE_TABLE(pci, qlge_pci_tbl);
98 
99 static int ql_wol(struct ql_adapter *qdev);
100 static void qlge_set_multicast_list(struct net_device *ndev);
101 
102 /* This hardware semaphore causes exclusive access to
103  * resources shared between the NIC driver, MPI firmware,
104  * FCOE firmware and the FC driver.
105  */
static int ql_sem_trylock(struct ql_adapter *qdev, u32 sem_mask)
107 {
108 	u32 sem_bits = 0;
109 
110 	switch (sem_mask) {
111 	case SEM_XGMAC0_MASK:
112 		sem_bits = SEM_SET << SEM_XGMAC0_SHIFT;
113 		break;
114 	case SEM_XGMAC1_MASK:
115 		sem_bits = SEM_SET << SEM_XGMAC1_SHIFT;
116 		break;
117 	case SEM_ICB_MASK:
118 		sem_bits = SEM_SET << SEM_ICB_SHIFT;
119 		break;
120 	case SEM_MAC_ADDR_MASK:
121 		sem_bits = SEM_SET << SEM_MAC_ADDR_SHIFT;
122 		break;
123 	case SEM_FLASH_MASK:
124 		sem_bits = SEM_SET << SEM_FLASH_SHIFT;
125 		break;
126 	case SEM_PROBE_MASK:
127 		sem_bits = SEM_SET << SEM_PROBE_SHIFT;
128 		break;
129 	case SEM_RT_IDX_MASK:
130 		sem_bits = SEM_SET << SEM_RT_IDX_SHIFT;
131 		break;
132 	case SEM_PROC_REG_MASK:
133 		sem_bits = SEM_SET << SEM_PROC_REG_SHIFT;
134 		break;
135 	default:
136 		netif_alert(qdev, probe, qdev->ndev, "bad Semaphore mask!.\n");
137 		return -EINVAL;
138 	}
139 
140 	ql_write32(qdev, SEM, sem_bits | sem_mask);
141 	return !(ql_read32(qdev, SEM) & sem_bits);
142 }
143 
int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask)
145 {
146 	unsigned int wait_count = 30;
147 	do {
148 		if (!ql_sem_trylock(qdev, sem_mask))
149 			return 0;
150 		udelay(100);
151 	} while (--wait_count);
152 	return -ETIMEDOUT;
153 }
154 
void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask)
156 {
157 	ql_write32(qdev, SEM, sem_mask);
158 	ql_read32(qdev, SEM);	/* flush */
159 }
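
/* Typical calling pattern (a sketch; ql_write_cfg() and the flash
 * routines below follow it): take the hardware semaphore, access the
 * shared resource, then release it:
 *
 *	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
 *		return -ETIMEDOUT;
 *	... touch the flash registers ...
 *	ql_sem_unlock(qdev, SEM_FLASH_MASK);
 */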
160 
161 /* This function waits for a specific bit to come ready
162  * in a given register.  It is used mostly by the initialize
163  * process, but is also used in kernel thread API such as
164  * netdev->set_multi, netdev->set_mac_address, netdev->vlan_rx_add_vid.
165  */
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 err_bit)
167 {
168 	u32 temp;
169 	int count = UDELAY_COUNT;
170 
171 	while (count) {
172 		temp = ql_read32(qdev, reg);
173 
174 		/* check for errors */
175 		if (temp & err_bit) {
176 			netif_alert(qdev, probe, qdev->ndev,
177 				    "register 0x%.08x access error, value = 0x%.08x!.\n",
178 				    reg, temp);
179 			return -EIO;
180 		} else if (temp & bit)
181 			return 0;
182 		udelay(UDELAY_DELAY);
183 		count--;
184 	}
185 	netif_alert(qdev, probe, qdev->ndev,
186 		    "Timed out waiting for reg %x to come ready.\n", reg);
187 	return -ETIMEDOUT;
188 }
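
/* Note on timing: the loop above polls up to UDELAY_COUNT times with a
 * UDELAY_DELAY microsecond pause between reads (both constants are
 * defined in the driver header), so the worst-case wait before
 * -ETIMEDOUT is roughly UDELAY_COUNT * UDELAY_DELAY microseconds.
 */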
189 
190 /* The CFG register is used to download TX and RX control blocks
191  * to the chip. This function waits for an operation to complete.
192  */
static int ql_wait_cfg(struct ql_adapter *qdev, u32 bit)
194 {
195 	int count = UDELAY_COUNT;
196 	u32 temp;
197 
198 	while (count) {
199 		temp = ql_read32(qdev, CFG);
200 		if (temp & CFG_LE)
201 			return -EIO;
202 		if (!(temp & bit))
203 			return 0;
204 		udelay(UDELAY_DELAY);
205 		count--;
206 	}
207 	return -ETIMEDOUT;
208 }
209 
210 
211 /* Used to issue init control blocks to hw. Maps control block,
212  * sets address, triggers download, waits for completion.
213  */
int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
215 		 u16 q_id)
216 {
217 	u64 map;
218 	int status = 0;
219 	int direction;
220 	u32 mask;
221 	u32 value;
222 
223 	direction =
224 	    (bit & (CFG_LRQ | CFG_LR | CFG_LCQ)) ? PCI_DMA_TODEVICE :
225 	    PCI_DMA_FROMDEVICE;
226 
227 	map = pci_map_single(qdev->pdev, ptr, size, direction);
228 	if (pci_dma_mapping_error(qdev->pdev, map)) {
229 		netif_err(qdev, ifup, qdev->ndev, "Couldn't map DMA area.\n");
230 		return -ENOMEM;
231 	}
232 
233 	status = ql_sem_spinlock(qdev, SEM_ICB_MASK);
234 	if (status)
235 		return status;
236 
237 	status = ql_wait_cfg(qdev, bit);
238 	if (status) {
239 		netif_err(qdev, ifup, qdev->ndev,
240 			  "Timed out waiting for CFG to come ready.\n");
241 		goto exit;
242 	}
243 
244 	ql_write32(qdev, ICB_L, (u32) map);
245 	ql_write32(qdev, ICB_H, (u32) (map >> 32));
246 
247 	mask = CFG_Q_MASK | (bit << 16);
248 	value = bit | (q_id << CFG_Q_SHIFT);
249 	ql_write32(qdev, CFG, (mask | value));
250 
251 	/*
252 	 * Wait for the bit to clear after signaling hw.
253 	 */
254 	status = ql_wait_cfg(qdev, bit);
255 exit:
256 	ql_sem_unlock(qdev, SEM_ICB_MASK);	/* does flush too */
257 	pci_unmap_single(qdev->pdev, map, size, direction);
258 	return status;
259 }
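
/* Example caller (a sketch only; the control block type "struct cqicb"
 * is an assumption here): downloading a completion queue initialization
 * control block for one rx ring would look roughly like
 *
 *	status = ql_write_cfg(qdev, cqicb, sizeof(*cqicb),
 *			      CFG_LCQ, rx_ring->cq_id);
 *
 * where CFG_LCQ selects the "load completion queue" operation and the
 * buffer must be DMA-able, since ql_write_cfg() maps it with
 * pci_map_single() before signaling the chip.
 */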
260 
261 /* Get a specific MAC address from the CAM.  Used for debug and reg dump. */
int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
263 			u32 *value)
264 {
265 	u32 offset = 0;
266 	int status;
267 
268 	switch (type) {
269 	case MAC_ADDR_TYPE_MULTI_MAC:
270 	case MAC_ADDR_TYPE_CAM_MAC:
271 		{
272 			status =
273 			    ql_wait_reg_rdy(qdev,
274 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
275 			if (status)
276 				goto exit;
277 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
278 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
279 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
280 			status =
281 			    ql_wait_reg_rdy(qdev,
282 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
283 			if (status)
284 				goto exit;
285 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
286 			status =
287 			    ql_wait_reg_rdy(qdev,
288 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
289 			if (status)
290 				goto exit;
291 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
292 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
293 				   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
294 			status =
295 			    ql_wait_reg_rdy(qdev,
296 				MAC_ADDR_IDX, MAC_ADDR_MR, 0);
297 			if (status)
298 				goto exit;
299 			*value++ = ql_read32(qdev, MAC_ADDR_DATA);
300 			if (type == MAC_ADDR_TYPE_CAM_MAC) {
301 				status =
302 				    ql_wait_reg_rdy(qdev,
303 					MAC_ADDR_IDX, MAC_ADDR_MW, 0);
304 				if (status)
305 					goto exit;
306 				ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
307 					   (index << MAC_ADDR_IDX_SHIFT) | /* index */
308 					   MAC_ADDR_ADR | MAC_ADDR_RS | type); /* type */
309 				status =
310 				    ql_wait_reg_rdy(qdev, MAC_ADDR_IDX,
311 						    MAC_ADDR_MR, 0);
312 				if (status)
313 					goto exit;
314 				*value++ = ql_read32(qdev, MAC_ADDR_DATA);
315 			}
316 			break;
317 		}
318 	case MAC_ADDR_TYPE_VLAN:
319 	case MAC_ADDR_TYPE_MULTI_FLTR:
320 	default:
321 		netif_crit(qdev, ifup, qdev->ndev,
322 			   "Address type %d not yet supported.\n", type);
323 		status = -EPERM;
324 	}
325 exit:
326 	return status;
327 }
328 
329 /* Set up a MAC, multicast or VLAN address for the
330  * inbound frame matching.
331  */
static int ql_set_mac_addr_reg(struct ql_adapter *qdev, u8 *addr, u32 type,
333 			       u16 index)
334 {
335 	u32 offset = 0;
336 	int status = 0;
337 
338 	switch (type) {
339 	case MAC_ADDR_TYPE_MULTI_MAC:
340 		{
341 			u32 upper = (addr[0] << 8) | addr[1];
342 			u32 lower = (addr[2] << 24) | (addr[3] << 16) |
343 					(addr[4] << 8) | (addr[5]);
344 
345 			status =
346 				ql_wait_reg_rdy(qdev,
347 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
348 			if (status)
349 				goto exit;
350 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
351 				(index << MAC_ADDR_IDX_SHIFT) |
352 				type | MAC_ADDR_E);
353 			ql_write32(qdev, MAC_ADDR_DATA, lower);
354 			status =
355 				ql_wait_reg_rdy(qdev,
356 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
357 			if (status)
358 				goto exit;
359 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) |
360 				(index << MAC_ADDR_IDX_SHIFT) |
361 				type | MAC_ADDR_E);
362 
363 			ql_write32(qdev, MAC_ADDR_DATA, upper);
364 			status =
365 				ql_wait_reg_rdy(qdev,
366 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
367 			if (status)
368 				goto exit;
369 			break;
370 		}
371 	case MAC_ADDR_TYPE_CAM_MAC:
372 		{
373 			u32 cam_output;
374 			u32 upper = (addr[0] << 8) | addr[1];
375 			u32 lower =
376 			    (addr[2] << 24) | (addr[3] << 16) | (addr[4] << 8) |
377 			    (addr[5]);
378 
379 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
380 				     "Adding %s address %pM at index %d in the CAM.\n",
381 				     type == MAC_ADDR_TYPE_MULTI_MAC ?
382 				     "MULTICAST" : "UNICAST",
383 				     addr, index);
384 
385 			status =
386 			    ql_wait_reg_rdy(qdev,
387 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
388 			if (status)
389 				goto exit;
390 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
391 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
392 				   type);	/* type */
393 			ql_write32(qdev, MAC_ADDR_DATA, lower);
394 			status =
395 			    ql_wait_reg_rdy(qdev,
396 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
397 			if (status)
398 				goto exit;
399 			ql_write32(qdev, MAC_ADDR_IDX, (offset++) | /* offset */
400 				   (index << MAC_ADDR_IDX_SHIFT) | /* index */
401 				   type);	/* type */
402 			ql_write32(qdev, MAC_ADDR_DATA, upper);
403 			status =
404 			    ql_wait_reg_rdy(qdev,
405 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
406 			if (status)
407 				goto exit;
408 			ql_write32(qdev, MAC_ADDR_IDX, (offset) |	/* offset */
409 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
410 				   type);	/* type */
411 			/* This field should also include the queue id
412 			   and possibly the function id.  Right now we hardcode
413 			   the route field to NIC core.
414 			 */
415 			cam_output = (CAM_OUT_ROUTE_NIC |
416 				      (qdev->
417 				       func << CAM_OUT_FUNC_SHIFT) |
418 					(0 << CAM_OUT_CQ_ID_SHIFT));
419 			if (qdev->ndev->features & NETIF_F_HW_VLAN_RX)
420 				cam_output |= CAM_OUT_RV;
421 			/* route to NIC core */
422 			ql_write32(qdev, MAC_ADDR_DATA, cam_output);
423 			break;
424 		}
425 	case MAC_ADDR_TYPE_VLAN:
426 		{
427 			u32 enable_bit = *((u32 *) &addr[0]);
428 			/* For VLAN, the addr actually holds a bit that
429 			 * either enables or disables the vlan id we are
430 			 * addressing. It's either MAC_ADDR_E on or off.
431 			 * That's bit-27 we're talking about.
432 			 */
433 			netif_info(qdev, ifup, qdev->ndev,
434 				   "%s VLAN ID %d %s the CAM.\n",
435 				   enable_bit ? "Adding" : "Removing",
436 				   index,
437 				   enable_bit ? "to" : "from");
438 
439 			status =
440 			    ql_wait_reg_rdy(qdev,
441 				MAC_ADDR_IDX, MAC_ADDR_MW, 0);
442 			if (status)
443 				goto exit;
444 			ql_write32(qdev, MAC_ADDR_IDX, offset |	/* offset */
445 				   (index << MAC_ADDR_IDX_SHIFT) |	/* index */
446 				   type |	/* type */
447 				   enable_bit);	/* enable/disable */
448 			break;
449 		}
450 	case MAC_ADDR_TYPE_MULTI_FLTR:
451 	default:
452 		netif_crit(qdev, ifup, qdev->ndev,
453 			   "Address type %d not yet supported.\n", type);
454 		status = -EPERM;
455 	}
456 exit:
457 	return status;
458 }
459 
460 /* Set or clear MAC address in hardware. We sometimes
461  * have to clear it to prevent wrong frame routing
462  * especially in a bonding environment.
463  */
static int ql_set_mac_addr(struct ql_adapter *qdev, int set)
465 {
466 	int status;
467 	char zero_mac_addr[ETH_ALEN];
468 	char *addr;
469 
470 	if (set) {
471 		addr = &qdev->current_mac_addr[0];
472 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
473 			     "Set Mac addr %pM\n", addr);
474 	} else {
475 		memset(zero_mac_addr, 0, ETH_ALEN);
476 		addr = &zero_mac_addr[0];
477 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
478 			     "Clearing MAC address\n");
479 	}
480 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
481 	if (status)
482 		return status;
483 	status = ql_set_mac_addr_reg(qdev, (u8 *) addr,
484 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
485 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
486 	if (status)
487 		netif_err(qdev, ifup, qdev->ndev,
488 			  "Failed to init mac address.\n");
489 	return status;
490 }
491 
void ql_link_on(struct ql_adapter *qdev)
493 {
494 	netif_err(qdev, link, qdev->ndev, "Link is up.\n");
495 	netif_carrier_on(qdev->ndev);
496 	ql_set_mac_addr(qdev, 1);
497 }
498 
void ql_link_off(struct ql_adapter *qdev)
500 {
501 	netif_err(qdev, link, qdev->ndev, "Link is down.\n");
502 	netif_carrier_off(qdev->ndev);
503 	ql_set_mac_addr(qdev, 0);
504 }
505 
506 /* Get a specific frame routing value from the CAM.
507  * Used for debug and reg dump.
508  */
int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value)
510 {
511 	int status = 0;
512 
513 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
514 	if (status)
515 		goto exit;
516 
517 	ql_write32(qdev, RT_IDX,
518 		   RT_IDX_TYPE_NICQ | RT_IDX_RS | (index << RT_IDX_IDX_SHIFT));
519 	status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MR, 0);
520 	if (status)
521 		goto exit;
522 	*value = ql_read32(qdev, RT_DATA);
523 exit:
524 	return status;
525 }
526 
527 /* The NIC function for this chip has 16 routing indexes.  Each one can be used
528  * to route different frame types to various inbound queues.  We send broadcast/
529  * multicast/error frames to the default queue for slow handling,
530  * and CAM hit/RSS frames to the fast handling queues.
531  */
static int ql_set_routing_reg(struct ql_adapter *qdev, u32 index, u32 mask,
533 			      int enable)
534 {
535 	int status = -EINVAL; /* Return error if no mask match. */
536 	u32 value = 0;
537 
538 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
539 		     "%s %s mask %s the routing reg.\n",
540 		     enable ? "Adding" : "Removing",
541 		     index == RT_IDX_ALL_ERR_SLOT ? "MAC ERROR/ALL ERROR" :
542 		     index == RT_IDX_IP_CSUM_ERR_SLOT ? "IP CSUM ERROR" :
543 		     index == RT_IDX_TCP_UDP_CSUM_ERR_SLOT ? "TCP/UDP CSUM ERROR" :
544 		     index == RT_IDX_BCAST_SLOT ? "BROADCAST" :
545 		     index == RT_IDX_MCAST_MATCH_SLOT ? "MULTICAST MATCH" :
546 		     index == RT_IDX_ALLMULTI_SLOT ? "ALL MULTICAST MATCH" :
547 		     index == RT_IDX_UNUSED6_SLOT ? "UNUSED6" :
548 		     index == RT_IDX_UNUSED7_SLOT ? "UNUSED7" :
549 		     index == RT_IDX_RSS_MATCH_SLOT ? "RSS ALL/IPV4 MATCH" :
550 		     index == RT_IDX_RSS_IPV6_SLOT ? "RSS IPV6" :
551 		     index == RT_IDX_RSS_TCP4_SLOT ? "RSS TCP4" :
552 		     index == RT_IDX_RSS_TCP6_SLOT ? "RSS TCP6" :
553 		     index == RT_IDX_CAM_HIT_SLOT ? "CAM HIT" :
554 		     index == RT_IDX_UNUSED013 ? "UNUSED13" :
555 		     index == RT_IDX_UNUSED014 ? "UNUSED14" :
556 		     index == RT_IDX_PROMISCUOUS_SLOT ? "PROMISCUOUS" :
557 		     "(Bad index != RT_IDX)",
558 		     enable ? "to" : "from");
559 
560 	switch (mask) {
561 	case RT_IDX_CAM_HIT:
562 		{
563 			value = RT_IDX_DST_CAM_Q |	/* dest */
564 			    RT_IDX_TYPE_NICQ |	/* type */
565 			    (RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT);/* index */
566 			break;
567 		}
568 	case RT_IDX_VALID:	/* Promiscuous Mode frames. */
569 		{
570 			value = RT_IDX_DST_DFLT_Q |	/* dest */
571 			    RT_IDX_TYPE_NICQ |	/* type */
572 			    (RT_IDX_PROMISCUOUS_SLOT << RT_IDX_IDX_SHIFT);/* index */
573 			break;
574 		}
575 	case RT_IDX_ERR:	/* Pass up MAC,IP,TCP/UDP error frames. */
576 		{
577 			value = RT_IDX_DST_DFLT_Q |	/* dest */
578 			    RT_IDX_TYPE_NICQ |	/* type */
579 			    (RT_IDX_ALL_ERR_SLOT << RT_IDX_IDX_SHIFT);/* index */
580 			break;
581 		}
582 	case RT_IDX_IP_CSUM_ERR: /* Pass up IP CSUM error frames. */
583 		{
584 			value = RT_IDX_DST_DFLT_Q | /* dest */
585 				RT_IDX_TYPE_NICQ | /* type */
586 				(RT_IDX_IP_CSUM_ERR_SLOT <<
587 				RT_IDX_IDX_SHIFT); /* index */
588 			break;
589 		}
590 	case RT_IDX_TU_CSUM_ERR: /* Pass up TCP/UDP CSUM error frames. */
591 		{
592 			value = RT_IDX_DST_DFLT_Q | /* dest */
593 				RT_IDX_TYPE_NICQ | /* type */
594 				(RT_IDX_TCP_UDP_CSUM_ERR_SLOT <<
595 				RT_IDX_IDX_SHIFT); /* index */
596 			break;
597 		}
598 	case RT_IDX_BCAST:	/* Pass up Broadcast frames to default Q. */
599 		{
600 			value = RT_IDX_DST_DFLT_Q |	/* dest */
601 			    RT_IDX_TYPE_NICQ |	/* type */
602 			    (RT_IDX_BCAST_SLOT << RT_IDX_IDX_SHIFT);/* index */
603 			break;
604 		}
605 	case RT_IDX_MCAST:	/* Pass up All Multicast frames. */
606 		{
607 			value = RT_IDX_DST_DFLT_Q |	/* dest */
608 			    RT_IDX_TYPE_NICQ |	/* type */
609 			    (RT_IDX_ALLMULTI_SLOT << RT_IDX_IDX_SHIFT);/* index */
610 			break;
611 		}
612 	case RT_IDX_MCAST_MATCH:	/* Pass up matched Multicast frames. */
613 		{
614 			value = RT_IDX_DST_DFLT_Q |	/* dest */
615 			    RT_IDX_TYPE_NICQ |	/* type */
616 			    (RT_IDX_MCAST_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
617 			break;
618 		}
619 	case RT_IDX_RSS_MATCH:	/* Pass up matched RSS frames. */
620 		{
621 			value = RT_IDX_DST_RSS |	/* dest */
622 			    RT_IDX_TYPE_NICQ |	/* type */
623 			    (RT_IDX_RSS_MATCH_SLOT << RT_IDX_IDX_SHIFT);/* index */
624 			break;
625 		}
626 	case 0:		/* Clear the E-bit on an entry. */
627 		{
628 			value = RT_IDX_DST_DFLT_Q |	/* dest */
629 			    RT_IDX_TYPE_NICQ |	/* type */
630 			    (index << RT_IDX_IDX_SHIFT);/* index */
631 			break;
632 		}
633 	default:
634 		netif_err(qdev, ifup, qdev->ndev,
635 			  "Mask type %d not yet supported.\n", mask);
636 		status = -EPERM;
637 		goto exit;
638 	}
639 
640 	if (value) {
641 		status = ql_wait_reg_rdy(qdev, RT_IDX, RT_IDX_MW, 0);
642 		if (status)
643 			goto exit;
644 		value |= (enable ? RT_IDX_E : 0);
645 		ql_write32(qdev, RT_IDX, value);
646 		ql_write32(qdev, RT_DATA, enable ? mask : 0);
647 	}
648 exit:
649 	return status;
650 }
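
/* Example (a sketch of how the routing slots are typically programmed):
 * sending broadcast frames to the default queue would be done under the
 * routing index semaphore, roughly
 *
 *	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
 *	if (status)
 *		return status;
 *	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT,
 *				    RT_IDX_BCAST, 1);
 *	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
 */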
651 
static void ql_enable_interrupts(struct ql_adapter *qdev)
653 {
654 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16) | INTR_EN_EI);
655 }
656 
static void ql_disable_interrupts(struct ql_adapter *qdev)
658 {
659 	ql_write32(qdev, INTR_EN, (INTR_EN_EI << 16));
660 }
661 
662 /* If we're running with multiple MSI-X vectors then we enable on the fly.
663  * Otherwise, we may have multiple outstanding workers and don't want to
664  * enable until the last one finishes. In this case, the irq_cnt gets
665  * incremented every time we queue a worker and decremented every time
666  * a worker finishes.  Once it hits zero we enable the interrupt.
667  */
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
669 {
670 	u32 var = 0;
671 	unsigned long hw_flags = 0;
672 	struct intr_context *ctx = qdev->intr_context + intr;
673 
674 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr)) {
675 		/* Always enable if we're MSIX multi interrupts and
676 		 * it's not the default (zeroeth) interrupt.
677 		 */
678 		ql_write32(qdev, INTR_EN,
679 			   ctx->intr_en_mask);
680 		var = ql_read32(qdev, STS);
681 		return var;
682 	}
683 
684 	spin_lock_irqsave(&qdev->hw_lock, hw_flags);
685 	if (atomic_dec_and_test(&ctx->irq_cnt)) {
686 		ql_write32(qdev, INTR_EN,
687 			   ctx->intr_en_mask);
688 		var = ql_read32(qdev, STS);
689 	}
690 	spin_unlock_irqrestore(&qdev->hw_lock, hw_flags);
691 	return var;
692 }
693 
static u32 ql_disable_completion_interrupt(struct ql_adapter *qdev, u32 intr)
695 {
696 	u32 var = 0;
697 	struct intr_context *ctx;
698 
699 	/* HW disables for us if we're MSIX multi interrupts and
700 	 * it's not the default (zeroeth) interrupt.
701 	 */
702 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags) && intr))
703 		return 0;
704 
705 	ctx = qdev->intr_context + intr;
706 	spin_lock(&qdev->hw_lock);
707 	if (!atomic_read(&ctx->irq_cnt)) {
708 		ql_write32(qdev, INTR_EN,
709 		ctx->intr_dis_mask);
710 		var = ql_read32(qdev, STS);
711 	}
712 	atomic_inc(&ctx->irq_cnt);
713 	spin_unlock(&qdev->hw_lock);
714 	return var;
715 }
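
/* Sketch of the intended pairing (assuming the usual disable-in-handler,
 * enable-when-done model): a completion handler disables its interrupt,
 * services the queue, then re-enables it, e.g.
 *
 *	ql_disable_completion_interrupt(qdev, intr);
 *	... process completion entries ...
 *	ql_enable_completion_interrupt(qdev, intr);
 *
 * The irq_cnt counter lets nested disable/enable calls balance out for
 * the non-MSI-X (or vector zero) case.
 */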
716 
static void ql_enable_all_completion_interrupts(struct ql_adapter *qdev)
718 {
719 	int i;
720 	for (i = 0; i < qdev->intr_count; i++) {
721 		/* The enable call does a atomic_dec_and_test
722 		 * and enables only if the result is zero.
723 		 * So we precharge it here.
724 		 */
725 		if (unlikely(!test_bit(QL_MSIX_ENABLED, &qdev->flags) ||
726 			i == 0))
727 			atomic_set(&qdev->intr_context[i].irq_cnt, 1);
728 		ql_enable_completion_interrupt(qdev, i);
729 	}
730 
731 }
732 
static int ql_validate_flash(struct ql_adapter *qdev, u32 size, const char *str)
734 {
735 	int status, i;
736 	u16 csum = 0;
737 	__le16 *flash = (__le16 *)&qdev->flash;
738 
739 	status = strncmp((char *)&qdev->flash, str, 4);
740 	if (status) {
741 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash signature.\n");
742 		return	status;
743 	}
744 
745 	for (i = 0; i < size; i++)
746 		csum += le16_to_cpu(*flash++);
747 
748 	if (csum)
749 		netif_err(qdev, ifup, qdev->ndev,
750 			  "Invalid flash checksum, csum = 0x%.04x.\n", csum);
751 
752 	return csum;
753 }
754 
static int ql_read_flash_word(struct ql_adapter *qdev, int offset, __le32 *data)
756 {
757 	int status = 0;
758 	/* wait for reg to come ready */
759 	status = ql_wait_reg_rdy(qdev,
760 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
761 	if (status)
762 		goto exit;
763 	/* set up for reg read */
764 	ql_write32(qdev, FLASH_ADDR, FLASH_ADDR_R | offset);
765 	/* wait for reg to come ready */
766 	status = ql_wait_reg_rdy(qdev,
767 			FLASH_ADDR, FLASH_ADDR_RDY, FLASH_ADDR_ERR);
768 	if (status)
769 		goto exit;
770 	 /* This data is stored on flash as an array of
771 	 * __le32.  Since ql_read32() returns cpu endian
772 	 * we need to swap it back.
773 	 */
774 	*data = cpu_to_le32(ql_read32(qdev, FLASH_DATA));
775 exit:
776 	return status;
777 }
778 
static int ql_get_8000_flash_params(struct ql_adapter *qdev)
780 {
781 	u32 i, size;
782 	int status;
783 	__le32 *p = (__le32 *)&qdev->flash;
784 	u32 offset;
785 	u8 mac_addr[6];
786 
787 	/* Get flash offset for function and adjust
788 	 * for dword access.
789 	 */
790 	if (!qdev->port)
791 		offset = FUNC0_FLASH_OFFSET / sizeof(u32);
792 	else
793 		offset = FUNC1_FLASH_OFFSET / sizeof(u32);
794 
795 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
796 		return -ETIMEDOUT;
797 
798 	size = sizeof(struct flash_params_8000) / sizeof(u32);
799 	for (i = 0; i < size; i++, p++) {
800 		status = ql_read_flash_word(qdev, i+offset, p);
801 		if (status) {
802 			netif_err(qdev, ifup, qdev->ndev,
803 				  "Error reading flash.\n");
804 			goto exit;
805 		}
806 	}
807 
808 	status = ql_validate_flash(qdev,
809 			sizeof(struct flash_params_8000) / sizeof(u16),
810 			"8000");
811 	if (status) {
812 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
813 		status = -EINVAL;
814 		goto exit;
815 	}
816 
817 	/* Extract either manufacturer or BOFM modified
818 	 * MAC address.
819 	 */
820 	if (qdev->flash.flash_params_8000.data_type1 == 2)
821 		memcpy(mac_addr,
822 			qdev->flash.flash_params_8000.mac_addr1,
823 			qdev->ndev->addr_len);
824 	else
825 		memcpy(mac_addr,
826 			qdev->flash.flash_params_8000.mac_addr,
827 			qdev->ndev->addr_len);
828 
829 	if (!is_valid_ether_addr(mac_addr)) {
830 		netif_err(qdev, ifup, qdev->ndev, "Invalid MAC address.\n");
831 		status = -EINVAL;
832 		goto exit;
833 	}
834 
835 	memcpy(qdev->ndev->dev_addr,
836 		mac_addr,
837 		qdev->ndev->addr_len);
838 
839 exit:
840 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
841 	return status;
842 }
843 
static int ql_get_8012_flash_params(struct ql_adapter *qdev)
845 {
846 	int i;
847 	int status;
848 	__le32 *p = (__le32 *)&qdev->flash;
849 	u32 offset = 0;
850 	u32 size = sizeof(struct flash_params_8012) / sizeof(u32);
851 
852 	/* Second function's parameters follow the first
853 	 * function's.
854 	 */
855 	if (qdev->port)
856 		offset = size;
857 
858 	if (ql_sem_spinlock(qdev, SEM_FLASH_MASK))
859 		return -ETIMEDOUT;
860 
861 	for (i = 0; i < size; i++, p++) {
862 		status = ql_read_flash_word(qdev, i+offset, p);
863 		if (status) {
864 			netif_err(qdev, ifup, qdev->ndev,
865 				  "Error reading flash.\n");
866 			goto exit;
867 		}
868 
869 	}
870 
871 	status = ql_validate_flash(qdev,
872 			sizeof(struct flash_params_8012) / sizeof(u16),
873 			"8012");
874 	if (status) {
875 		netif_err(qdev, ifup, qdev->ndev, "Invalid flash.\n");
876 		status = -EINVAL;
877 		goto exit;
878 	}
879 
880 	if (!is_valid_ether_addr(qdev->flash.flash_params_8012.mac_addr)) {
881 		status = -EINVAL;
882 		goto exit;
883 	}
884 
885 	memcpy(qdev->ndev->dev_addr,
886 		qdev->flash.flash_params_8012.mac_addr,
887 		qdev->ndev->addr_len);
888 
889 exit:
890 	ql_sem_unlock(qdev, SEM_FLASH_MASK);
891 	return status;
892 }
893 
894 /* xgmac register are located behind the xgmac_addr and xgmac_data
895  * register pair.  Each read/write requires us to wait for the ready
896  * bit before reading/writing the data.
897  */
static int ql_write_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 data)
899 {
900 	int status;
901 	/* wait for reg to come ready */
902 	status = ql_wait_reg_rdy(qdev,
903 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
904 	if (status)
905 		return status;
906 	/* write the data to the data reg */
907 	ql_write32(qdev, XGMAC_DATA, data);
908 	/* trigger the write */
909 	ql_write32(qdev, XGMAC_ADDR, reg);
910 	return status;
911 }
912 
913 /* xgmac register are located behind the xgmac_addr and xgmac_data
914  * register pair.  Each read/write requires us to wait for the ready
915  * bit before reading/writing the data.
916  */
int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
918 {
919 	int status = 0;
920 	/* wait for reg to come ready */
921 	status = ql_wait_reg_rdy(qdev,
922 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
923 	if (status)
924 		goto exit;
925 	/* set up for reg read */
926 	ql_write32(qdev, XGMAC_ADDR, reg | XGMAC_ADDR_R);
927 	/* wait for reg to come ready */
928 	status = ql_wait_reg_rdy(qdev,
929 			XGMAC_ADDR, XGMAC_ADDR_RDY, XGMAC_ADDR_XME);
930 	if (status)
931 		goto exit;
932 	/* get the data */
933 	*data = ql_read32(qdev, XGMAC_DATA);
934 exit:
935 	return status;
936 }
937 
938 /* This is used for reading the 64-bit statistics regs. */
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data)
940 {
941 	int status = 0;
942 	u32 hi = 0;
943 	u32 lo = 0;
944 
945 	status = ql_read_xgmac_reg(qdev, reg, &lo);
946 	if (status)
947 		goto exit;
948 
949 	status = ql_read_xgmac_reg(qdev, reg + 4, &hi);
950 	if (status)
951 		goto exit;
952 
953 	*data = (u64) lo | ((u64) hi << 32);
954 
955 exit:
956 	return status;
957 }
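
/* Example (a sketch; the register offset below is illustrative, not
 * taken from this file): reading one 64-bit XGMAC statistic while
 * holding the port's XGMAC semaphore would look like
 *
 *	u64 val;
 *	if (!ql_sem_spinlock(qdev, qdev->xg_sem_mask)) {
 *		ql_read_xgmac_reg64(qdev, 0x200, &val);
 *		ql_sem_unlock(qdev, qdev->xg_sem_mask);
 *	}
 */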
958 
static int ql_8000_port_initialize(struct ql_adapter *qdev)
960 {
961 	int status;
962 	/*
963 	 * Get MPI firmware version for driver banner
	 * and ethtool info.
965 	 */
966 	status = ql_mb_about_fw(qdev);
967 	if (status)
968 		goto exit;
969 	status = ql_mb_get_fw_state(qdev);
970 	if (status)
971 		goto exit;
972 	/* Wake up a worker to get/set the TX/RX frame sizes. */
973 	queue_delayed_work(qdev->workqueue, &qdev->mpi_port_cfg_work, 0);
974 exit:
975 	return status;
976 }
977 
978 /* Take the MAC Core out of reset.
979  * Enable statistics counting.
980  * Take the transmitter/receiver out of reset.
981  * This functionality may be done in the MPI firmware at a
982  * later date.
983  */
static int ql_8012_port_initialize(struct ql_adapter *qdev)
985 {
986 	int status = 0;
987 	u32 data;
988 
989 	if (ql_sem_trylock(qdev, qdev->xg_sem_mask)) {
990 		/* Another function has the semaphore, so
991 		 * wait for the port init bit to come ready.
992 		 */
993 		netif_info(qdev, link, qdev->ndev,
994 			   "Another function has the semaphore, so wait for the port init bit to come ready.\n");
995 		status = ql_wait_reg_rdy(qdev, STS, qdev->port_init, 0);
996 		if (status) {
997 			netif_crit(qdev, link, qdev->ndev,
998 				   "Port initialize timed out.\n");
999 		}
1000 		return status;
1001 	}
1002 
1003 	netif_info(qdev, link, qdev->ndev, "Got xgmac semaphore!.\n");
1004 	/* Set the core reset. */
1005 	status = ql_read_xgmac_reg(qdev, GLOBAL_CFG, &data);
1006 	if (status)
1007 		goto end;
1008 	data |= GLOBAL_CFG_RESET;
1009 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1010 	if (status)
1011 		goto end;
1012 
1013 	/* Clear the core reset and turn on jumbo for receiver. */
1014 	data &= ~GLOBAL_CFG_RESET;	/* Clear core reset. */
1015 	data |= GLOBAL_CFG_JUMBO;	/* Turn on jumbo. */
1016 	data |= GLOBAL_CFG_TX_STAT_EN;
1017 	data |= GLOBAL_CFG_RX_STAT_EN;
1018 	status = ql_write_xgmac_reg(qdev, GLOBAL_CFG, data);
1019 	if (status)
1020 		goto end;
1021 
	/* Enable the transmitter and clear its reset. */
1023 	status = ql_read_xgmac_reg(qdev, TX_CFG, &data);
1024 	if (status)
1025 		goto end;
1026 	data &= ~TX_CFG_RESET;	/* Clear the TX MAC reset. */
1027 	data |= TX_CFG_EN;	/* Enable the transmitter. */
1028 	status = ql_write_xgmac_reg(qdev, TX_CFG, data);
1029 	if (status)
1030 		goto end;
1031 
	/* Enable the receiver and clear its reset. */
1033 	status = ql_read_xgmac_reg(qdev, RX_CFG, &data);
1034 	if (status)
1035 		goto end;
1036 	data &= ~RX_CFG_RESET;	/* Clear the RX MAC reset. */
1037 	data |= RX_CFG_EN;	/* Enable the receiver. */
1038 	status = ql_write_xgmac_reg(qdev, RX_CFG, data);
1039 	if (status)
1040 		goto end;
1041 
1042 	/* Turn on jumbo. */
1043 	status =
1044 	    ql_write_xgmac_reg(qdev, MAC_TX_PARAMS, MAC_TX_PARAMS_JUMBO | (0x2580 << 16));
1045 	if (status)
1046 		goto end;
1047 	status =
1048 	    ql_write_xgmac_reg(qdev, MAC_RX_PARAMS, 0x2580);
1049 	if (status)
1050 		goto end;
1051 
1052 	/* Signal to the world that the port is enabled.        */
1053 	ql_write32(qdev, STS, ((qdev->port_init << 16) | qdev->port_init));
1054 end:
1055 	ql_sem_unlock(qdev, qdev->xg_sem_mask);
1056 	return status;
1057 }
1058 
static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1060 {
1061 	return PAGE_SIZE << qdev->lbq_buf_order;
1062 }
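
/* For example, with a 4 KB PAGE_SIZE and lbq_buf_order == 1 this returns
 * an 8 KB "master" block, which ql_get_next_chunk() below carves into
 * several lbq_buf_size sized receive chunks.
 */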
1063 
1064 /* Get the next large buffer. */
static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1066 {
1067 	struct bq_desc *lbq_desc = &rx_ring->lbq[rx_ring->lbq_curr_idx];
1068 	rx_ring->lbq_curr_idx++;
1069 	if (rx_ring->lbq_curr_idx == rx_ring->lbq_len)
1070 		rx_ring->lbq_curr_idx = 0;
1071 	rx_ring->lbq_free_cnt++;
1072 	return lbq_desc;
1073 }
1074 
static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1076 		struct rx_ring *rx_ring)
1077 {
1078 	struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1079 
1080 	pci_dma_sync_single_for_cpu(qdev->pdev,
1081 					dma_unmap_addr(lbq_desc, mapaddr),
1082 				    rx_ring->lbq_buf_size,
1083 					PCI_DMA_FROMDEVICE);
1084 
1085 	/* If it's the last chunk of our master page then
1086 	 * we unmap it.
1087 	 */
1088 	if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1089 					== ql_lbq_block_size(qdev))
1090 		pci_unmap_page(qdev->pdev,
1091 				lbq_desc->p.pg_chunk.map,
1092 				ql_lbq_block_size(qdev),
1093 				PCI_DMA_FROMDEVICE);
1094 	return lbq_desc;
1095 }
1096 
1097 /* Get the next small buffer. */
static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1099 {
1100 	struct bq_desc *sbq_desc = &rx_ring->sbq[rx_ring->sbq_curr_idx];
1101 	rx_ring->sbq_curr_idx++;
1102 	if (rx_ring->sbq_curr_idx == rx_ring->sbq_len)
1103 		rx_ring->sbq_curr_idx = 0;
1104 	rx_ring->sbq_free_cnt++;
1105 	return sbq_desc;
1106 }
1107 
1108 /* Update an rx ring index. */
static void ql_update_cq(struct rx_ring *rx_ring)
1110 {
1111 	rx_ring->cnsmr_idx++;
1112 	rx_ring->curr_entry++;
1113 	if (unlikely(rx_ring->cnsmr_idx == rx_ring->cq_len)) {
1114 		rx_ring->cnsmr_idx = 0;
1115 		rx_ring->curr_entry = rx_ring->cq_base;
1116 	}
1117 }
1118 
static void ql_write_cq_idx(struct rx_ring *rx_ring)
1120 {
1121 	ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1122 }
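
/* The usual service pattern (a sketch): advance the queue with
 * ql_update_cq() once per completion entry, then ring the doorbell once
 * at the end of the pass with ql_write_cq_idx() to publish the new
 * consumer index to the chip, rather than one doorbell write per entry.
 */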
1123 
static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1125 						struct bq_desc *lbq_desc)
1126 {
1127 	if (!rx_ring->pg_chunk.page) {
1128 		u64 map;
1129 		rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1130 						GFP_ATOMIC,
1131 						qdev->lbq_buf_order);
1132 		if (unlikely(!rx_ring->pg_chunk.page)) {
1133 			netif_err(qdev, drv, qdev->ndev,
1134 				  "page allocation failed.\n");
1135 			return -ENOMEM;
1136 		}
1137 		rx_ring->pg_chunk.offset = 0;
1138 		map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1139 					0, ql_lbq_block_size(qdev),
1140 					PCI_DMA_FROMDEVICE);
1141 		if (pci_dma_mapping_error(qdev->pdev, map)) {
1142 			__free_pages(rx_ring->pg_chunk.page,
1143 					qdev->lbq_buf_order);
1144 			netif_err(qdev, drv, qdev->ndev,
1145 				  "PCI mapping failed.\n");
1146 			return -ENOMEM;
1147 		}
1148 		rx_ring->pg_chunk.map = map;
1149 		rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1150 	}
1151 
1152 	/* Copy the current master pg_chunk info
1153 	 * to the current descriptor.
1154 	 */
1155 	lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1156 
1157 	/* Adjust the master page chunk for next
1158 	 * buffer get.
1159 	 */
1160 	rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1161 	if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1162 		rx_ring->pg_chunk.page = NULL;
1163 		lbq_desc->p.pg_chunk.last_flag = 1;
1164 	} else {
1165 		rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1166 		get_page(rx_ring->pg_chunk.page);
1167 		lbq_desc->p.pg_chunk.last_flag = 0;
1168 	}
1169 	return 0;
1170 }
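
/* Worked example of the carving above (8 KB block and 2 KB lbq_buf_size
 * are illustrative values): offsets 0, 2K, 4K and 6K each land in their
 * own descriptor; get_page() bumps the page refcount for every chunk
 * except the last, and the last chunk is the one that triggers the unmap
 * of the whole block in ql_get_curr_lchunk().
 */
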
1171 /* Process (refill) a large buffer queue. */
static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1173 {
1174 	u32 clean_idx = rx_ring->lbq_clean_idx;
1175 	u32 start_idx = clean_idx;
1176 	struct bq_desc *lbq_desc;
1177 	u64 map;
1178 	int i;
1179 
1180 	while (rx_ring->lbq_free_cnt > 32) {
1181 		for (i = 0; i < 16; i++) {
1182 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1183 				     "lbq: try cleaning clean_idx = %d.\n",
1184 				     clean_idx);
1185 			lbq_desc = &rx_ring->lbq[clean_idx];
1186 			if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1187 				netif_err(qdev, ifup, qdev->ndev,
1188 					  "Could not get a page chunk.\n");
1189 				return;
1190 			}
1191 
			map = lbq_desc->p.pg_chunk.map +
				lbq_desc->p.pg_chunk.offset;
			dma_unmap_addr_set(lbq_desc, mapaddr, map);
			dma_unmap_len_set(lbq_desc, maplen,
					rx_ring->lbq_buf_size);
			*lbq_desc->addr = cpu_to_le64(map);
1198 
1199 			pci_dma_sync_single_for_device(qdev->pdev, map,
1200 						rx_ring->lbq_buf_size,
1201 						PCI_DMA_FROMDEVICE);
1202 			clean_idx++;
1203 			if (clean_idx == rx_ring->lbq_len)
1204 				clean_idx = 0;
1205 		}
1206 
1207 		rx_ring->lbq_clean_idx = clean_idx;
1208 		rx_ring->lbq_prod_idx += 16;
1209 		if (rx_ring->lbq_prod_idx == rx_ring->lbq_len)
1210 			rx_ring->lbq_prod_idx = 0;
1211 		rx_ring->lbq_free_cnt -= 16;
1212 	}
1213 
1214 	if (start_idx != clean_idx) {
1215 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1216 			     "lbq: updating prod idx = %d.\n",
1217 			     rx_ring->lbq_prod_idx);
1218 		ql_write_db_reg(rx_ring->lbq_prod_idx,
1219 				rx_ring->lbq_prod_idx_db_reg);
1220 	}
1221 }
1222 
1223 /* Process (refill) a small buffer queue. */
static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1225 {
1226 	u32 clean_idx = rx_ring->sbq_clean_idx;
1227 	u32 start_idx = clean_idx;
1228 	struct bq_desc *sbq_desc;
1229 	u64 map;
1230 	int i;
1231 
1232 	while (rx_ring->sbq_free_cnt > 16) {
1233 		for (i = 0; i < 16; i++) {
1234 			sbq_desc = &rx_ring->sbq[clean_idx];
1235 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1236 				     "sbq: try cleaning clean_idx = %d.\n",
1237 				     clean_idx);
1238 			if (sbq_desc->p.skb == NULL) {
1239 				netif_printk(qdev, rx_status, KERN_DEBUG,
1240 					     qdev->ndev,
1241 					     "sbq: getting new skb for index %d.\n",
1242 					     sbq_desc->index);
1243 				sbq_desc->p.skb =
1244 				    netdev_alloc_skb(qdev->ndev,
1245 						     SMALL_BUFFER_SIZE);
1246 				if (sbq_desc->p.skb == NULL) {
1247 					netif_err(qdev, probe, qdev->ndev,
1248 						  "Couldn't get an skb.\n");
1249 					rx_ring->sbq_clean_idx = clean_idx;
1250 					return;
1251 				}
1252 				skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1253 				map = pci_map_single(qdev->pdev,
1254 						     sbq_desc->p.skb->data,
1255 						     rx_ring->sbq_buf_size,
1256 						     PCI_DMA_FROMDEVICE);
1257 				if (pci_dma_mapping_error(qdev->pdev, map)) {
1258 					netif_err(qdev, ifup, qdev->ndev,
1259 						  "PCI mapping failed.\n");
1260 					rx_ring->sbq_clean_idx = clean_idx;
1261 					dev_kfree_skb_any(sbq_desc->p.skb);
1262 					sbq_desc->p.skb = NULL;
1263 					return;
1264 				}
1265 				dma_unmap_addr_set(sbq_desc, mapaddr, map);
1266 				dma_unmap_len_set(sbq_desc, maplen,
1267 						  rx_ring->sbq_buf_size);
1268 				*sbq_desc->addr = cpu_to_le64(map);
1269 			}
1270 
1271 			clean_idx++;
1272 			if (clean_idx == rx_ring->sbq_len)
1273 				clean_idx = 0;
1274 		}
1275 		rx_ring->sbq_clean_idx = clean_idx;
1276 		rx_ring->sbq_prod_idx += 16;
1277 		if (rx_ring->sbq_prod_idx == rx_ring->sbq_len)
1278 			rx_ring->sbq_prod_idx = 0;
1279 		rx_ring->sbq_free_cnt -= 16;
1280 	}
1281 
1282 	if (start_idx != clean_idx) {
1283 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1284 			     "sbq: updating prod idx = %d.\n",
1285 			     rx_ring->sbq_prod_idx);
1286 		ql_write_db_reg(rx_ring->sbq_prod_idx,
1287 				rx_ring->sbq_prod_idx_db_reg);
1288 	}
1289 }
1290 
static void ql_update_buffer_queues(struct ql_adapter *qdev,
1292 				    struct rx_ring *rx_ring)
1293 {
1294 	ql_update_sbq(qdev, rx_ring);
1295 	ql_update_lbq(qdev, rx_ring);
1296 }
1297 
1298 /* Unmaps tx buffers.  Can be called from send() if a pci mapping
1299  * fails at some stage, or from the interrupt when a tx completes.
1300  */
static void ql_unmap_send(struct ql_adapter *qdev,
1302 			  struct tx_ring_desc *tx_ring_desc, int mapped)
1303 {
1304 	int i;
1305 	for (i = 0; i < mapped; i++) {
1306 		if (i == 0 || (i == 7 && mapped > 7)) {
1307 			/*
1308 			 * Unmap the skb->data area, or the
1309 			 * external sglist (AKA the Outbound
1310 			 * Address List (OAL)).
			 * If it's the zeroeth element, then it's
			 * the skb->data area.  If it's the 7th
			 * element and there are more than 6 frags,
			 * then it's an OAL.
1315 			 */
1316 			if (i == 7) {
1317 				netif_printk(qdev, tx_done, KERN_DEBUG,
1318 					     qdev->ndev,
1319 					     "unmapping OAL area.\n");
1320 			}
1321 			pci_unmap_single(qdev->pdev,
1322 					 dma_unmap_addr(&tx_ring_desc->map[i],
1323 							mapaddr),
1324 					 dma_unmap_len(&tx_ring_desc->map[i],
1325 						       maplen),
1326 					 PCI_DMA_TODEVICE);
1327 		} else {
1328 			netif_printk(qdev, tx_done, KERN_DEBUG, qdev->ndev,
1329 				     "unmapping frag %d.\n", i);
1330 			pci_unmap_page(qdev->pdev,
1331 				       dma_unmap_addr(&tx_ring_desc->map[i],
1332 						      mapaddr),
1333 				       dma_unmap_len(&tx_ring_desc->map[i],
1334 						     maplen), PCI_DMA_TODEVICE);
1335 		}
1336 	}
1337 
1338 }
1339 
1340 /* Map the buffers for this transmit.  This will return
1341  * NETDEV_TX_BUSY or NETDEV_TX_OK based on success.
1342  */
static int ql_map_send(struct ql_adapter *qdev,
1344 		       struct ob_mac_iocb_req *mac_iocb_ptr,
1345 		       struct sk_buff *skb, struct tx_ring_desc *tx_ring_desc)
1346 {
1347 	int len = skb_headlen(skb);
1348 	dma_addr_t map;
1349 	int frag_idx, err, map_idx = 0;
1350 	struct tx_buf_desc *tbd = mac_iocb_ptr->tbd;
1351 	int frag_cnt = skb_shinfo(skb)->nr_frags;
1352 
1353 	if (frag_cnt) {
1354 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
1355 			     "frag_cnt = %d.\n", frag_cnt);
1356 	}
1357 	/*
1358 	 * Map the skb buffer first.
1359 	 */
1360 	map = pci_map_single(qdev->pdev, skb->data, len, PCI_DMA_TODEVICE);
1361 
1362 	err = pci_dma_mapping_error(qdev->pdev, map);
1363 	if (err) {
1364 		netif_err(qdev, tx_queued, qdev->ndev,
1365 			  "PCI mapping failed with error: %d\n", err);
1366 
1367 		return NETDEV_TX_BUSY;
1368 	}
1369 
1370 	tbd->len = cpu_to_le32(len);
1371 	tbd->addr = cpu_to_le64(map);
1372 	dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1373 	dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen, len);
1374 	map_idx++;
1375 
1376 	/*
1377 	 * This loop fills the remainder of the 8 address descriptors
1378 	 * in the IOCB.  If there are more than 7 fragments, then the
1379 	 * eighth address desc will point to an external list (OAL).
1380 	 * When this happens, the remainder of the frags will be stored
1381 	 * in this list.
1382 	 */
1383 	for (frag_idx = 0; frag_idx < frag_cnt; frag_idx++, map_idx++) {
1384 		skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_idx];
1385 		tbd++;
1386 		if (frag_idx == 6 && frag_cnt > 7) {
1387 			/* Let's tack on an sglist.
1388 			 * Our control block will now
1389 			 * look like this:
1390 			 * iocb->seg[0] = skb->data
1391 			 * iocb->seg[1] = frag[0]
1392 			 * iocb->seg[2] = frag[1]
1393 			 * iocb->seg[3] = frag[2]
1394 			 * iocb->seg[4] = frag[3]
1395 			 * iocb->seg[5] = frag[4]
1396 			 * iocb->seg[6] = frag[5]
1397 			 * iocb->seg[7] = ptr to OAL (external sglist)
1398 			 * oal->seg[0] = frag[6]
1399 			 * oal->seg[1] = frag[7]
1400 			 * oal->seg[2] = frag[8]
1401 			 * oal->seg[3] = frag[9]
1402 			 * oal->seg[4] = frag[10]
1403 			 *      etc...
1404 			 */
1405 			/* Tack on the OAL in the eighth segment of IOCB. */
1406 			map = pci_map_single(qdev->pdev, &tx_ring_desc->oal,
1407 					     sizeof(struct oal),
1408 					     PCI_DMA_TODEVICE);
1409 			err = pci_dma_mapping_error(qdev->pdev, map);
1410 			if (err) {
1411 				netif_err(qdev, tx_queued, qdev->ndev,
1412 					  "PCI mapping outbound address list with error: %d\n",
1413 					  err);
1414 				goto map_error;
1415 			}
1416 
1417 			tbd->addr = cpu_to_le64(map);
1418 			/*
1419 			 * The length is the number of fragments
1420 			 * that remain to be mapped times the length
1421 			 * of our sglist (OAL).
1422 			 */
1423 			tbd->len =
1424 			    cpu_to_le32((sizeof(struct tx_buf_desc) *
1425 					 (frag_cnt - frag_idx)) | TX_DESC_C);
1426 			dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr,
1427 					   map);
1428 			dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1429 					  sizeof(struct oal));
1430 			tbd = (struct tx_buf_desc *)&tx_ring_desc->oal;
1431 			map_idx++;
1432 		}
1433 
1434 		map = skb_frag_dma_map(&qdev->pdev->dev, frag, 0, skb_frag_size(frag),
1435 				       DMA_TO_DEVICE);
1436 
1437 		err = dma_mapping_error(&qdev->pdev->dev, map);
1438 		if (err) {
1439 			netif_err(qdev, tx_queued, qdev->ndev,
1440 				  "PCI mapping frags failed with error: %d.\n",
1441 				  err);
1442 			goto map_error;
1443 		}
1444 
1445 		tbd->addr = cpu_to_le64(map);
1446 		tbd->len = cpu_to_le32(skb_frag_size(frag));
1447 		dma_unmap_addr_set(&tx_ring_desc->map[map_idx], mapaddr, map);
1448 		dma_unmap_len_set(&tx_ring_desc->map[map_idx], maplen,
1449 				  skb_frag_size(frag));
1450 
1451 	}
1452 	/* Save the number of segments we've mapped. */
1453 	tx_ring_desc->map_cnt = map_idx;
1454 	/* Terminate the last segment. */
1455 	tbd->len = cpu_to_le32(le32_to_cpu(tbd->len) | TX_DESC_E);
1456 	return NETDEV_TX_OK;
1457 
1458 map_error:
1459 	/*
1460 	 * If the first frag mapping failed, then i will be zero.
1461 	 * This causes the unmap of the skb->data area.  Otherwise
1462 	 * we pass in the number of frags that mapped successfully
	 * so they can be unmapped.
1464 	 */
1465 	ql_unmap_send(qdev, tx_ring_desc, map_idx);
1466 	return NETDEV_TX_BUSY;
1467 }
1468 
1469 /* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_gro_page(struct ql_adapter *qdev,
1471 					struct rx_ring *rx_ring,
1472 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1473 					u32 length,
1474 					u16 vlan_id)
1475 {
1476 	struct sk_buff *skb;
1477 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1478 	struct napi_struct *napi = &rx_ring->napi;
1479 
1480 	napi->dev = qdev->ndev;
1481 
1482 	skb = napi_get_frags(napi);
1483 	if (!skb) {
1484 		netif_err(qdev, drv, qdev->ndev,
1485 			  "Couldn't get an skb, exiting.\n");
1486 		rx_ring->rx_dropped++;
1487 		put_page(lbq_desc->p.pg_chunk.page);
1488 		return;
1489 	}
1490 	prefetch(lbq_desc->p.pg_chunk.va);
1491 	__skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
1492 			     lbq_desc->p.pg_chunk.page,
1493 			     lbq_desc->p.pg_chunk.offset,
1494 			     length);
1495 
1496 	skb->len += length;
1497 	skb->data_len += length;
1498 	skb->truesize += length;
1499 	skb_shinfo(skb)->nr_frags++;
1500 
1501 	rx_ring->rx_packets++;
1502 	rx_ring->rx_bytes += length;
1503 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1504 	skb_record_rx_queue(skb, rx_ring->cq_id);
1505 	if (vlan_id != 0xffff)
1506 		__vlan_hwaccel_put_tag(skb, vlan_id);
1507 	napi_gro_frags(napi);
1508 }
1509 
1510 /* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_page(struct ql_adapter *qdev,
1512 					struct rx_ring *rx_ring,
1513 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1514 					u32 length,
1515 					u16 vlan_id)
1516 {
1517 	struct net_device *ndev = qdev->ndev;
1518 	struct sk_buff *skb = NULL;
1519 	void *addr;
1520 	struct bq_desc *lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1521 	struct napi_struct *napi = &rx_ring->napi;
1522 
1523 	skb = netdev_alloc_skb(ndev, length);
1524 	if (!skb) {
1525 		netif_err(qdev, drv, qdev->ndev,
1526 			  "Couldn't get an skb, need to unwind!.\n");
1527 		rx_ring->rx_dropped++;
1528 		put_page(lbq_desc->p.pg_chunk.page);
1529 		return;
1530 	}
1531 
1532 	addr = lbq_desc->p.pg_chunk.va;
1533 	prefetch(addr);
1534 
1535 
1536 	/* Frame error, so drop the packet. */
1537 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1538 		netif_info(qdev, drv, qdev->ndev,
1539 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1540 		rx_ring->rx_errors++;
1541 		goto err_out;
1542 	}
1543 
1544 	/* The max framesize filter on this chip is set higher than
1545 	 * MTU since FCoE uses 2k frames.
1546 	 */
1547 	if (skb->len > ndev->mtu + ETH_HLEN) {
1548 		netif_err(qdev, drv, qdev->ndev,
1549 			  "Segment too small, dropping.\n");
1550 		rx_ring->rx_dropped++;
1551 		goto err_out;
1552 	}
1553 	memcpy(skb_put(skb, ETH_HLEN), addr, ETH_HLEN);
1554 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1555 		     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1556 		     length);
1557 	skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1558 				lbq_desc->p.pg_chunk.offset+ETH_HLEN,
1559 				length-ETH_HLEN);
1560 	skb->len += length-ETH_HLEN;
1561 	skb->data_len += length-ETH_HLEN;
1562 	skb->truesize += length-ETH_HLEN;
1563 
1564 	rx_ring->rx_packets++;
1565 	rx_ring->rx_bytes += skb->len;
1566 	skb->protocol = eth_type_trans(skb, ndev);
1567 	skb_checksum_none_assert(skb);
1568 
1569 	if ((ndev->features & NETIF_F_RXCSUM) &&
1570 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1571 		/* TCP frame. */
1572 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1573 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1574 				     "TCP checksum done!\n");
1575 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1576 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1577 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1578 			/* Unfragmented ipv4 UDP frame. */
1579 			struct iphdr *iph = (struct iphdr *) skb->data;
1580 			if (!(iph->frag_off &
1581 				cpu_to_be16(IP_MF|IP_OFFSET))) {
1582 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1583 				netif_printk(qdev, rx_status, KERN_DEBUG,
1584 					     qdev->ndev,
1585 					     "TCP checksum done!\n");
1586 			}
1587 		}
1588 	}
1589 
1590 	skb_record_rx_queue(skb, rx_ring->cq_id);
1591 	if (vlan_id != 0xffff)
1592 		__vlan_hwaccel_put_tag(skb, vlan_id);
1593 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1594 		napi_gro_receive(napi, skb);
1595 	else
1596 		netif_receive_skb(skb);
1597 	return;
1598 err_out:
1599 	dev_kfree_skb_any(skb);
1600 	put_page(lbq_desc->p.pg_chunk.page);
1601 }
1602 
1603 /* Process an inbound completion from an rx ring. */
static void ql_process_mac_rx_skb(struct ql_adapter *qdev,
1605 					struct rx_ring *rx_ring,
1606 					struct ib_mac_iocb_rsp *ib_mac_rsp,
1607 					u32 length,
1608 					u16 vlan_id)
1609 {
1610 	struct net_device *ndev = qdev->ndev;
1611 	struct sk_buff *skb = NULL;
1612 	struct sk_buff *new_skb = NULL;
1613 	struct bq_desc *sbq_desc = ql_get_curr_sbuf(rx_ring);
1614 
1615 	skb = sbq_desc->p.skb;
1616 	/* Allocate new_skb and copy */
1617 	new_skb = netdev_alloc_skb(qdev->ndev, length + NET_IP_ALIGN);
1618 	if (new_skb == NULL) {
1619 		netif_err(qdev, probe, qdev->ndev,
1620 			  "No skb available, drop the packet.\n");
1621 		rx_ring->rx_dropped++;
1622 		return;
1623 	}
1624 	skb_reserve(new_skb, NET_IP_ALIGN);
1625 	memcpy(skb_put(new_skb, length), skb->data, length);
1626 	skb = new_skb;
1627 
1628 	/* Frame error, so drop the packet. */
1629 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1630 		netif_info(qdev, drv, qdev->ndev,
1631 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1632 		dev_kfree_skb_any(skb);
1633 		rx_ring->rx_errors++;
1634 		return;
1635 	}
1636 
1637 	/* loopback self test for ethtool */
1638 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1639 		ql_check_lb_frame(qdev, skb);
1640 		dev_kfree_skb_any(skb);
1641 		return;
1642 	}
1643 
1644 	/* The max framesize filter on this chip is set higher than
1645 	 * MTU since FCoE uses 2k frames.
1646 	 */
1647 	if (skb->len > ndev->mtu + ETH_HLEN) {
1648 		dev_kfree_skb_any(skb);
1649 		rx_ring->rx_dropped++;
1650 		return;
1651 	}
1652 
1653 	prefetch(skb->data);
1654 	skb->dev = ndev;
1655 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1656 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1657 			     "%s Multicast.\n",
1658 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1659 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1660 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1661 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1662 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1663 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1664 	}
1665 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P)
1666 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1667 			     "Promiscuous Packet.\n");
1668 
1669 	rx_ring->rx_packets++;
1670 	rx_ring->rx_bytes += skb->len;
1671 	skb->protocol = eth_type_trans(skb, ndev);
1672 	skb_checksum_none_assert(skb);
1673 
1674 	/* If rx checksum is on, and there are no
1675 	 * csum or frame errors.
1676 	 */
1677 	if ((ndev->features & NETIF_F_RXCSUM) &&
1678 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1679 		/* TCP frame. */
1680 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1681 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1682 				     "TCP checksum done!\n");
1683 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1684 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1685 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
1686 			/* Unfragmented ipv4 UDP frame. */
1687 			struct iphdr *iph = (struct iphdr *) skb->data;
1688 			if (!(iph->frag_off &
1689 				ntohs(IP_MF|IP_OFFSET))) {
1690 				skb->ip_summed = CHECKSUM_UNNECESSARY;
1691 				netif_printk(qdev, rx_status, KERN_DEBUG,
1692 					     qdev->ndev,
1693 					     "UDP checksum done!\n");
1694 			}
1695 		}
1696 	}
1697 
1698 	skb_record_rx_queue(skb, rx_ring->cq_id);
1699 	if (vlan_id != 0xffff)
1700 		__vlan_hwaccel_put_tag(skb, vlan_id);
1701 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
1702 		napi_gro_receive(&rx_ring->napi, skb);
1703 	else
1704 		netif_receive_skb(skb);
1705 }
1706 
1707 static void ql_realign_skb(struct sk_buff *skb, int len)
1708 {
1709 	void *temp_addr = skb->data;
1710 
1711 	/* Undo the skb_reserve(skb,32) we did before
1712 	 * giving to hardware, and realign data on
1713 	 * a 2-byte boundary.
1714 	 */
1715 	skb->data -= QLGE_SB_PAD - NET_IP_ALIGN;
1716 	skb->tail -= QLGE_SB_PAD - NET_IP_ALIGN;
1717 	skb_copy_to_linear_data(skb, temp_addr,
1718 		(unsigned int)len);
1719 }
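
/* Illustrative note (comment only, not part of the driver): assuming the
 * usual NET_IP_ALIGN of 2, ql_realign_skb() above moves skb->data back by
 * QLGE_SB_PAD - NET_IP_ALIGN = 32 - 2 = 30 bytes before copying the frame
 * down, so a payload the hardware wrote at offset 32 ends up starting at
 * offset 2.  With the 14-byte Ethernet header at offset 2, the IP header
 * then begins on a 4-byte boundary.
 */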
1720 
1721 /*
1722  * This function builds an skb for the given inbound
1723  * completion.  It will be rewritten for readability in the near
1724  * future, but for now it works well.
1725  */
1726 static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1727 				       struct rx_ring *rx_ring,
1728 				       struct ib_mac_iocb_rsp *ib_mac_rsp)
1729 {
1730 	struct bq_desc *lbq_desc;
1731 	struct bq_desc *sbq_desc;
1732 	struct sk_buff *skb = NULL;
1733 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
1734 	u32 hdr_len = le32_to_cpu(ib_mac_rsp->hdr_len);
1735 
1736 	/*
1737 	 * Handle the header buffer if present.
1738 	 */
1739 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV &&
1740 	    ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1741 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1742 			     "Header of %d bytes in small buffer.\n", hdr_len);
1743 		/*
1744 		 * Headers fit nicely into a small buffer.
1745 		 */
1746 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1747 		pci_unmap_single(qdev->pdev,
1748 				dma_unmap_addr(sbq_desc, mapaddr),
1749 				dma_unmap_len(sbq_desc, maplen),
1750 				PCI_DMA_FROMDEVICE);
1751 		skb = sbq_desc->p.skb;
1752 		ql_realign_skb(skb, hdr_len);
1753 		skb_put(skb, hdr_len);
1754 		sbq_desc->p.skb = NULL;
1755 	}
1756 
1757 	/*
1758 	 * Handle the data buffer(s).
1759 	 */
1760 	if (unlikely(!length)) {	/* Is there data too? */
1761 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1762 			     "No Data buffer in this packet.\n");
1763 		return skb;
1764 	}
1765 
1766 	if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
1767 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1768 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1769 				     "Headers in small, data of %d bytes in small, combine them.\n",
1770 				     length);
1771 			/*
1772 			 * Data is less than small buffer size so it's
1773 			 * stuffed in a small buffer.
1774 			 * For this case we append the data
1775 			 * from the "data" small buffer to the "header" small
1776 			 * buffer.
1777 			 */
1778 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1779 			pci_dma_sync_single_for_cpu(qdev->pdev,
1780 						    dma_unmap_addr
1781 						    (sbq_desc, mapaddr),
1782 						    dma_unmap_len
1783 						    (sbq_desc, maplen),
1784 						    PCI_DMA_FROMDEVICE);
1785 			memcpy(skb_put(skb, length),
1786 			       sbq_desc->p.skb->data, length);
1787 			pci_dma_sync_single_for_device(qdev->pdev,
1788 						       dma_unmap_addr
1789 						       (sbq_desc,
1790 							mapaddr),
1791 						       dma_unmap_len
1792 						       (sbq_desc,
1793 							maplen),
1794 						       PCI_DMA_FROMDEVICE);
1795 		} else {
1796 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1797 				     "%d bytes in a single small buffer.\n",
1798 				     length);
1799 			sbq_desc = ql_get_curr_sbuf(rx_ring);
1800 			skb = sbq_desc->p.skb;
1801 			ql_realign_skb(skb, length);
1802 			skb_put(skb, length);
1803 			pci_unmap_single(qdev->pdev,
1804 					 dma_unmap_addr(sbq_desc,
1805 							mapaddr),
1806 					 dma_unmap_len(sbq_desc,
1807 						       maplen),
1808 					 PCI_DMA_FROMDEVICE);
1809 			sbq_desc->p.skb = NULL;
1810 		}
1811 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
1812 		if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS) {
1813 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1814 				     "Header in small, %d bytes in large. Chain large to small!\n",
1815 				     length);
1816 			/*
1817 			 * The data is in a single large buffer.  We
1818 			 * chain it to the header buffer's skb and let
1819 			 * it rip.
1820 			 */
1821 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1822 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1823 				     "Chaining page at offset = %d, for %d bytes to skb.\n",
1824 				     lbq_desc->p.pg_chunk.offset, length);
1825 			skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1826 						lbq_desc->p.pg_chunk.offset,
1827 						length);
1828 			skb->len += length;
1829 			skb->data_len += length;
1830 			skb->truesize += length;
1831 		} else {
1832 			/*
1833 			 * The headers and data are in a single large buffer. We
1834 			 * copy it to a new skb and let it go. This can happen with
1835 			 * jumbo mtu on a non-TCP/UDP frame.
1836 			 */
1837 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1838 			skb = netdev_alloc_skb(qdev->ndev, length);
1839 			if (skb == NULL) {
1840 				netif_printk(qdev, probe, KERN_DEBUG, qdev->ndev,
1841 					     "No skb available, drop the packet.\n");
1842 				return NULL;
1843 			}
1844 			pci_unmap_page(qdev->pdev,
1845 				       dma_unmap_addr(lbq_desc,
1846 						      mapaddr),
1847 				       dma_unmap_len(lbq_desc, maplen),
1848 				       PCI_DMA_FROMDEVICE);
1849 			skb_reserve(skb, NET_IP_ALIGN);
1850 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1851 				     "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n",
1852 				     length);
1853 			skb_fill_page_desc(skb, 0,
1854 						lbq_desc->p.pg_chunk.page,
1855 						lbq_desc->p.pg_chunk.offset,
1856 						length);
1857 			skb->len += length;
1858 			skb->data_len += length;
1859 			skb->truesize += length;
1860 			length -= length;
1861 			__pskb_pull_tail(skb,
1862 				(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1863 				VLAN_ETH_HLEN : ETH_HLEN);
1864 		}
1865 	} else {
1866 		/*
1867 		 * The data is in a chain of large buffers
1868 		 * pointed to by a small buffer.  We loop
1869 		 * through and chain them to our small header
1870 		 * buffer's skb.
1871 		 * frags:  There are 18 max frags and our small
1872 		 *         buffer will hold 32 of them. The thing is,
1873 		 *         we'll use 3 max for our 9000 byte jumbo
1874 		 *         frames.  If the MTU goes up we could
1875 		 *         eventually be in trouble.
1876 		 */
1877 		int size, i = 0;
1878 		sbq_desc = ql_get_curr_sbuf(rx_ring);
1879 		pci_unmap_single(qdev->pdev,
1880 				 dma_unmap_addr(sbq_desc, mapaddr),
1881 				 dma_unmap_len(sbq_desc, maplen),
1882 				 PCI_DMA_FROMDEVICE);
1883 		if (!(ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HS)) {
1884 			/*
1885 			 * This is a non-TCP/UDP IP frame, so
1886 			 * the headers aren't split into a small
1887 			 * buffer.  We have to use the small buffer
1888 			 * that contains our sg list as our skb to
1889 			 * send upstairs. Copy the sg list here to
1890 			 * a local buffer and use it to find the
1891 			 * pages to chain.
1892 			 */
1893 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1894 				     "%d bytes of headers & data in chain of large.\n",
1895 				     length);
1896 			skb = sbq_desc->p.skb;
1897 			sbq_desc->p.skb = NULL;
1898 			skb_reserve(skb, NET_IP_ALIGN);
1899 		}
1900 		while (length > 0) {
1901 			lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1902 			size = (length < rx_ring->lbq_buf_size) ? length :
1903 				rx_ring->lbq_buf_size;
1904 
1905 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1906 				     "Adding page %d to skb for %d bytes.\n",
1907 				     i, size);
1908 			skb_fill_page_desc(skb, i,
1909 						lbq_desc->p.pg_chunk.page,
1910 						lbq_desc->p.pg_chunk.offset,
1911 						size);
1912 			skb->len += size;
1913 			skb->data_len += size;
1914 			skb->truesize += size;
1915 			length -= size;
1916 			i++;
1917 		}
1918 		__pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1919 				VLAN_ETH_HLEN : ETH_HLEN);
1920 	}
1921 	return skb;
1922 }
1923 
1924 /* Process an inbound completion from an rx ring. */
1925 static void ql_process_mac_split_rx_intr(struct ql_adapter *qdev,
1926 				   struct rx_ring *rx_ring,
1927 				   struct ib_mac_iocb_rsp *ib_mac_rsp,
1928 				   u16 vlan_id)
1929 {
1930 	struct net_device *ndev = qdev->ndev;
1931 	struct sk_buff *skb = NULL;
1932 
1933 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
1934 
1935 	skb = ql_build_rx_skb(qdev, rx_ring, ib_mac_rsp);
1936 	if (unlikely(!skb)) {
1937 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1938 			     "No skb available, drop packet.\n");
1939 		rx_ring->rx_dropped++;
1940 		return;
1941 	}
1942 
1943 	/* Frame error, so drop the packet. */
1944 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_ERR_MASK) {
1945 		netif_info(qdev, drv, qdev->ndev,
1946 			  "Receive error, flags2 = 0x%x\n", ib_mac_rsp->flags2);
1947 		dev_kfree_skb_any(skb);
1948 		rx_ring->rx_errors++;
1949 		return;
1950 	}
1951 
1952 	/* The max framesize filter on this chip is set higher than
1953 	 * MTU since FCoE uses 2k frames.
1954 	 */
1955 	if (skb->len > ndev->mtu + ETH_HLEN) {
1956 		dev_kfree_skb_any(skb);
1957 		rx_ring->rx_dropped++;
1958 		return;
1959 	}
1960 
1961 	/* loopback self test for ethtool */
1962 	if (test_bit(QL_SELFTEST, &qdev->flags)) {
1963 		ql_check_lb_frame(qdev, skb);
1964 		dev_kfree_skb_any(skb);
1965 		return;
1966 	}
1967 
1968 	prefetch(skb->data);
1969 	skb->dev = ndev;
1970 	if (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) {
1971 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev, "%s Multicast.\n",
1972 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1973 			     IB_MAC_IOCB_RSP_M_HASH ? "Hash" :
1974 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1975 			     IB_MAC_IOCB_RSP_M_REG ? "Registered" :
1976 			     (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1977 			     IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1978 		rx_ring->rx_multicast++;
1979 	}
1980 	if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1981 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1982 			     "Promiscuous Packet.\n");
1983 	}
1984 
1985 	skb->protocol = eth_type_trans(skb, ndev);
1986 	skb_checksum_none_assert(skb);
1987 
1988 	/* If rx checksum is on, and there are no
1989 	 * csum or frame errors.
1990 	 */
1991 	if ((ndev->features & NETIF_F_RXCSUM) &&
1992 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK)) {
1993 		/* TCP frame. */
1994 		if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T) {
1995 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
1996 				     "TCP checksum done!\n");
1997 			skb->ip_summed = CHECKSUM_UNNECESSARY;
1998 		} else if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_U) &&
1999 				(ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_V4)) {
2000 		/* Unfragmented ipv4 UDP frame. */
2001 			struct iphdr *iph = (struct iphdr *) skb->data;
2002 			if (!(iph->frag_off &
2003 				ntohs(IP_MF|IP_OFFSET))) {
2004 				skb->ip_summed = CHECKSUM_UNNECESSARY;
2005 				netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2006 					     "UDP checksum done!\n");
2007 			}
2008 		}
2009 	}
2010 
2011 	rx_ring->rx_packets++;
2012 	rx_ring->rx_bytes += skb->len;
2013 	skb_record_rx_queue(skb, rx_ring->cq_id);
2014 	if ((ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) && (vlan_id != 0))
2015 		__vlan_hwaccel_put_tag(skb, vlan_id);
2016 	if (skb->ip_summed == CHECKSUM_UNNECESSARY)
2017 		napi_gro_receive(&rx_ring->napi, skb);
2018 	else
2019 		netif_receive_skb(skb);
2020 }
2021 
2022 /* Process an inbound completion from an rx ring. */
2023 static unsigned long ql_process_mac_rx_intr(struct ql_adapter *qdev,
2024 					struct rx_ring *rx_ring,
2025 					struct ib_mac_iocb_rsp *ib_mac_rsp)
2026 {
2027 	u32 length = le32_to_cpu(ib_mac_rsp->data_len);
2028 	u16 vlan_id = (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
2029 			((le16_to_cpu(ib_mac_rsp->vlan_id) &
2030 			IB_MAC_IOCB_RSP_VLAN_MASK)) : 0xffff;
2031 
2032 	QL_DUMP_IB_MAC_RSP(ib_mac_rsp);
2033 
2034 	if (ib_mac_rsp->flags4 & IB_MAC_IOCB_RSP_HV) {
2035 		/* The data and headers are split into
2036 		 * separate buffers.
2037 		 */
2038 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2039 						vlan_id);
2040 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DS) {
2041 		/* The data fit in a single small buffer.
2042 		 * Allocate a new skb, copy the data and
2043 		 * return the buffer to the free pool.
2044 		 */
2045 		ql_process_mac_rx_skb(qdev, rx_ring, ib_mac_rsp,
2046 						length, vlan_id);
2047 	} else if ((ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) &&
2048 		!(ib_mac_rsp->flags1 & IB_MAC_CSUM_ERR_MASK) &&
2049 		(ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_T)) {
2050 		/* TCP packet in a page chunk that's been checksummed.
2051 		 * Tack it on to our GRO skb and let it go.
2052 		 */
2053 		ql_process_mac_rx_gro_page(qdev, rx_ring, ib_mac_rsp,
2054 						length, vlan_id);
2055 	} else if (ib_mac_rsp->flags3 & IB_MAC_IOCB_RSP_DL) {
2056 		/* Non-TCP packet in a page chunk. Allocate an
2057 		 * skb, tack it on frags, and send it up.
2058 		 */
2059 		ql_process_mac_rx_page(qdev, rx_ring, ib_mac_rsp,
2060 						length, vlan_id);
2061 	} else {
2062 		/* Non-TCP/UDP large frames that span multiple buffers
2063 		 * can be processed correctly by the split frame logic.
2064 		 */
2065 		ql_process_mac_split_rx_intr(qdev, rx_ring, ib_mac_rsp,
2066 						vlan_id);
2067 	}
2068 
2069 	return (unsigned long)length;
2070 }
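
/* Illustrative summary (comment only, not part of the driver) of the
 * dispatch above, taken directly from the flag tests in this function:
 *
 *	flags4 & HV                     -> header/data split:
 *	                                   ql_process_mac_split_rx_intr()
 *	flags3 & DS                     -> whole frame in one small buffer:
 *	                                   ql_process_mac_rx_skb()
 *	flags3 & DL, TCP, csum clean    -> page chunk fed to GRO:
 *	                                   ql_process_mac_rx_gro_page()
 *	flags3 & DL, anything else      -> page chunk on a fresh skb:
 *	                                   ql_process_mac_rx_page()
 *	none of the above               -> fall back to the split-frame path
 */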
2071 
2072 /* Process an outbound completion from an rx ring. */
2073 static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
2074 				   struct ob_mac_iocb_rsp *mac_rsp)
2075 {
2076 	struct tx_ring *tx_ring;
2077 	struct tx_ring_desc *tx_ring_desc;
2078 
2079 	QL_DUMP_OB_MAC_RSP(mac_rsp);
2080 	tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
2081 	tx_ring_desc = &tx_ring->q[mac_rsp->tid];
2082 	ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
2083 	tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
2084 	tx_ring->tx_packets++;
2085 	dev_kfree_skb(tx_ring_desc->skb);
2086 	tx_ring_desc->skb = NULL;
2087 
2088 	if (unlikely(mac_rsp->flags1 & (OB_MAC_IOCB_RSP_E |
2089 					OB_MAC_IOCB_RSP_S |
2090 					OB_MAC_IOCB_RSP_L |
2091 					OB_MAC_IOCB_RSP_P | OB_MAC_IOCB_RSP_B))) {
2092 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_E) {
2093 			netif_warn(qdev, tx_done, qdev->ndev,
2094 				   "Total descriptor length did not match transfer length.\n");
2095 		}
2096 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_S) {
2097 			netif_warn(qdev, tx_done, qdev->ndev,
2098 				   "Frame too short to be valid, not sent.\n");
2099 		}
2100 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_L) {
2101 			netif_warn(qdev, tx_done, qdev->ndev,
2102 				   "Frame too long, but sent anyway.\n");
2103 		}
2104 		if (mac_rsp->flags1 & OB_MAC_IOCB_RSP_B) {
2105 			netif_warn(qdev, tx_done, qdev->ndev,
2106 				   "PCI backplane error. Frame not sent.\n");
2107 		}
2108 	}
2109 	atomic_inc(&tx_ring->tx_count);
2110 }
2111 
2112 /* Fire up a handler to reset the MPI processor. */
2113 void ql_queue_fw_error(struct ql_adapter *qdev)
2114 {
2115 	ql_link_off(qdev);
2116 	queue_delayed_work(qdev->workqueue, &qdev->mpi_reset_work, 0);
2117 }
2118 
2119 void ql_queue_asic_error(struct ql_adapter *qdev)
2120 {
2121 	ql_link_off(qdev);
2122 	ql_disable_interrupts(qdev);
2123 	/* Clear adapter up bit to signal the recovery
2124 	 * process that it shouldn't kill the reset worker
2125 	 * thread
2126 	 */
2127 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
2128 	/* Set the asic recovery bit to indicate to the reset process that
2129 	 * we are in fatal error recovery rather than a normal close
2130 	 */
2131 	set_bit(QL_ASIC_RECOVERY, &qdev->flags);
2132 	queue_delayed_work(qdev->workqueue, &qdev->asic_reset_work, 0);
2133 }
2134 
2135 static void ql_process_chip_ae_intr(struct ql_adapter *qdev,
2136 				    struct ib_ae_iocb_rsp *ib_ae_rsp)
2137 {
2138 	switch (ib_ae_rsp->event) {
2139 	case MGMT_ERR_EVENT:
2140 		netif_err(qdev, rx_err, qdev->ndev,
2141 			  "Management Processor Fatal Error.\n");
2142 		ql_queue_fw_error(qdev);
2143 		return;
2144 
2145 	case CAM_LOOKUP_ERR_EVENT:
2146 		netdev_err(qdev->ndev, "Multiple CAM hits lookup occurred.\n");
2147 		netdev_err(qdev->ndev, "This event shouldn't occur.\n");
2148 		ql_queue_asic_error(qdev);
2149 		return;
2150 
2151 	case SOFT_ECC_ERROR_EVENT:
2152 		netdev_err(qdev->ndev, "Soft ECC error detected.\n");
2153 		ql_queue_asic_error(qdev);
2154 		break;
2155 
2156 	case PCI_ERR_ANON_BUF_RD:
2157 		netdev_err(qdev->ndev, "PCI error occurred when reading "
2158 					"anonymous buffers from rx_ring %d.\n",
2159 					ib_ae_rsp->q_id);
2160 		ql_queue_asic_error(qdev);
2161 		break;
2162 
2163 	default:
2164 		netif_err(qdev, drv, qdev->ndev, "Unexpected event %d.\n",
2165 			  ib_ae_rsp->event);
2166 		ql_queue_asic_error(qdev);
2167 		break;
2168 	}
2169 }
2170 
2171 static int ql_clean_outbound_rx_ring(struct rx_ring *rx_ring)
2172 {
2173 	struct ql_adapter *qdev = rx_ring->qdev;
2174 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2175 	struct ob_mac_iocb_rsp *net_rsp = NULL;
2176 	int count = 0;
2177 
2178 	struct tx_ring *tx_ring;
2179 	/* While there are entries in the completion queue. */
2180 	while (prod != rx_ring->cnsmr_idx) {
2181 
2182 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2183 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2184 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2185 
2186 		net_rsp = (struct ob_mac_iocb_rsp *)rx_ring->curr_entry;
2187 		rmb();
2188 		switch (net_rsp->opcode) {
2189 
2190 		case OPCODE_OB_MAC_TSO_IOCB:
2191 		case OPCODE_OB_MAC_IOCB:
2192 			ql_process_mac_tx_intr(qdev, net_rsp);
2193 			break;
2194 		default:
2195 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2196 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2197 				     net_rsp->opcode);
2198 		}
2199 		count++;
2200 		ql_update_cq(rx_ring);
2201 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2202 	}
2203 	if (!net_rsp)
2204 		return 0;
2205 	ql_write_cq_idx(rx_ring);
2206 	tx_ring = &qdev->tx_ring[net_rsp->txq_idx];
2207 	if (__netif_subqueue_stopped(qdev->ndev, tx_ring->wq_id)) {
2208 		if (atomic_read(&tx_ring->queue_stopped) &&
2209 		    (atomic_read(&tx_ring->tx_count) > (tx_ring->wq_len / 4)))
2210 			/*
2211 			 * The queue got stopped because the tx_ring was full.
2212 			 * Wake it up, because it's now at least 25% empty.
2213 			 */
2214 			netif_wake_subqueue(qdev->ndev, tx_ring->wq_id);
2215 	}
2216 
2217 	return count;
2218 }
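
/* Worked example (comment only, hypothetical numbers): with a 128-entry
 * work queue, tx_ring->wq_len / 4 == 32, so a subqueue that was stopped
 * for lack of descriptors is only woken once at least 32 entries have
 * been reclaimed.  That hysteresis keeps the queue from flapping between
 * stopped and started on every single completion.
 */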
2219 
2220 static int ql_clean_inbound_rx_ring(struct rx_ring *rx_ring, int budget)
2221 {
2222 	struct ql_adapter *qdev = rx_ring->qdev;
2223 	u32 prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2224 	struct ql_net_rsp_iocb *net_rsp;
2225 	int count = 0;
2226 
2227 	/* While there are entries in the completion queue. */
2228 	while (prod != rx_ring->cnsmr_idx) {
2229 
2230 		netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2231 			     "cq_id = %d, prod = %d, cnsmr = %d.\n",
2232 			     rx_ring->cq_id, prod, rx_ring->cnsmr_idx);
2233 
2234 		net_rsp = rx_ring->curr_entry;
2235 		rmb();
2236 		switch (net_rsp->opcode) {
2237 		case OPCODE_IB_MAC_IOCB:
2238 			ql_process_mac_rx_intr(qdev, rx_ring,
2239 					       (struct ib_mac_iocb_rsp *)
2240 					       net_rsp);
2241 			break;
2242 
2243 		case OPCODE_IB_AE_IOCB:
2244 			ql_process_chip_ae_intr(qdev, (struct ib_ae_iocb_rsp *)
2245 						net_rsp);
2246 			break;
2247 		default:
2248 			netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2249 				     "Hit default case, not handled! dropping the packet, opcode = %x.\n",
2250 				     net_rsp->opcode);
2251 			break;
2252 		}
2253 		count++;
2254 		ql_update_cq(rx_ring);
2255 		prod = ql_read_sh_reg(rx_ring->prod_idx_sh_reg);
2256 		if (count == budget)
2257 			break;
2258 	}
2259 	ql_update_buffer_queues(qdev, rx_ring);
2260 	ql_write_cq_idx(rx_ring);
2261 	return count;
2262 }
2263 
2264 static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
2265 {
2266 	struct rx_ring *rx_ring = container_of(napi, struct rx_ring, napi);
2267 	struct ql_adapter *qdev = rx_ring->qdev;
2268 	struct rx_ring *trx_ring;
2269 	int i, work_done = 0;
2270 	struct intr_context *ctx = &qdev->intr_context[rx_ring->cq_id];
2271 
2272 	netif_printk(qdev, rx_status, KERN_DEBUG, qdev->ndev,
2273 		     "Enter, NAPI POLL cq_id = %d.\n", rx_ring->cq_id);
2274 
2275 	/* Service the TX rings first.  They start
2276 	 * right after the RSS rings. */
2277 	for (i = qdev->rss_ring_count; i < qdev->rx_ring_count; i++) {
2278 		trx_ring = &qdev->rx_ring[i];
2279 		/* If this TX completion ring belongs to this vector and
2280 		 * it's not empty then service it.
2281 		 */
2282 		if ((ctx->irq_mask & (1 << trx_ring->cq_id)) &&
2283 			(ql_read_sh_reg(trx_ring->prod_idx_sh_reg) !=
2284 					trx_ring->cnsmr_idx)) {
2285 			netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2286 				     "%s: Servicing TX completion ring %d.\n",
2287 				     __func__, trx_ring->cq_id);
2288 			ql_clean_outbound_rx_ring(trx_ring);
2289 		}
2290 	}
2291 
2292 	/*
2293 	 * Now service the RSS ring if it's active.
2294 	 */
2295 	if (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
2296 					rx_ring->cnsmr_idx) {
2297 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2298 			     "%s: Servicing RX completion ring %d.\n",
2299 			     __func__, rx_ring->cq_id);
2300 		work_done = ql_clean_inbound_rx_ring(rx_ring, budget);
2301 	}
2302 
2303 	if (work_done < budget) {
2304 		napi_complete(napi);
2305 		ql_enable_completion_interrupt(qdev, rx_ring->irq);
2306 	}
2307 	return work_done;
2308 }
2309 
2310 static void qlge_vlan_mode(struct net_device *ndev, netdev_features_t features)
2311 {
2312 	struct ql_adapter *qdev = netdev_priv(ndev);
2313 
2314 	if (features & NETIF_F_HW_VLAN_RX) {
2315 		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2316 			     "Turning on VLAN in NIC_RCV_CFG.\n");
2317 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK |
2318 				 NIC_RCV_CFG_VLAN_MATCH_AND_NON);
2319 	} else {
2320 		netif_printk(qdev, ifup, KERN_DEBUG, ndev,
2321 			     "Turning off VLAN in NIC_RCV_CFG.\n");
2322 		ql_write32(qdev, NIC_RCV_CFG, NIC_RCV_CFG_VLAN_MASK);
2323 	}
2324 }
2325 
2326 static netdev_features_t qlge_fix_features(struct net_device *ndev,
2327 	netdev_features_t features)
2328 {
2329 	/*
2330 	 * Since there is no support for separate rx/tx vlan accel
2331 	 * enable/disable, make sure the tx flag is always in the same state as rx.
2332 	 */
2333 	if (features & NETIF_F_HW_VLAN_RX)
2334 		features |= NETIF_F_HW_VLAN_TX;
2335 	else
2336 		features &= ~NETIF_F_HW_VLAN_TX;
2337 
2338 	return features;
2339 }
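
/* Example (illustrative only): if user space turns off just the receive
 * side, e.g. "ethtool -K ethX rxvlan off", the core runs the requested
 * feature set through qlge_fix_features() first, which clears
 * NETIF_F_HW_VLAN_TX as well, so qlge_set_features() only ever sees both
 * VLAN offload flags in the same state.
 */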
2340 
2341 static int qlge_set_features(struct net_device *ndev,
2342 	netdev_features_t features)
2343 {
2344 	netdev_features_t changed = ndev->features ^ features;
2345 
2346 	if (changed & NETIF_F_HW_VLAN_RX)
2347 		qlge_vlan_mode(ndev, features);
2348 
2349 	return 0;
2350 }
2351 
2352 static int __qlge_vlan_rx_add_vid(struct ql_adapter *qdev, u16 vid)
2353 {
2354 	u32 enable_bit = MAC_ADDR_E;
2355 	int err;
2356 
2357 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2358 				  MAC_ADDR_TYPE_VLAN, vid);
2359 	if (err)
2360 		netif_err(qdev, ifup, qdev->ndev,
2361 			  "Failed to init vlan address.\n");
2362 	return err;
2363 }
2364 
2365 static int qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
2366 {
2367 	struct ql_adapter *qdev = netdev_priv(ndev);
2368 	int status;
2369 	int err;
2370 
2371 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2372 	if (status)
2373 		return status;
2374 
2375 	err = __qlge_vlan_rx_add_vid(qdev, vid);
2376 	set_bit(vid, qdev->active_vlans);
2377 
2378 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2379 
2380 	return err;
2381 }
2382 
2383 static int __qlge_vlan_rx_kill_vid(struct ql_adapter *qdev, u16 vid)
2384 {
2385 	u32 enable_bit = 0;
2386 	int err;
2387 
2388 	err = ql_set_mac_addr_reg(qdev, (u8 *) &enable_bit,
2389 				  MAC_ADDR_TYPE_VLAN, vid);
2390 	if (err)
2391 		netif_err(qdev, ifup, qdev->ndev,
2392 			  "Failed to clear vlan address.\n");
2393 	return err;
2394 }
2395 
2396 static int qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
2397 {
2398 	struct ql_adapter *qdev = netdev_priv(ndev);
2399 	int status;
2400 	int err;
2401 
2402 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2403 	if (status)
2404 		return status;
2405 
2406 	err = __qlge_vlan_rx_kill_vid(qdev, vid);
2407 	clear_bit(vid, qdev->active_vlans);
2408 
2409 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2410 
2411 	return err;
2412 }
2413 
2414 static void qlge_restore_vlan(struct ql_adapter *qdev)
2415 {
2416 	int status;
2417 	u16 vid;
2418 
2419 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
2420 	if (status)
2421 		return;
2422 
2423 	for_each_set_bit(vid, qdev->active_vlans, VLAN_N_VID)
2424 		__qlge_vlan_rx_add_vid(qdev, vid);
2425 
2426 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
2427 }
2428 
2429 /* MSI-X Multiple Vector Interrupt Handler for inbound completions. */
2430 static irqreturn_t qlge_msix_rx_isr(int irq, void *dev_id)
2431 {
2432 	struct rx_ring *rx_ring = dev_id;
2433 	napi_schedule(&rx_ring->napi);
2434 	return IRQ_HANDLED;
2435 }
2436 
2437 /* This handles a fatal error, MPI activity, and the default
2438  * rx_ring in an MSI-X multiple vector environment.
2439  * In an MSI/Legacy environment it also processes the rest of
2440  * the rx_rings.
2441  */
2442 static irqreturn_t qlge_isr(int irq, void *dev_id)
2443 {
2444 	struct rx_ring *rx_ring = dev_id;
2445 	struct ql_adapter *qdev = rx_ring->qdev;
2446 	struct intr_context *intr_context = &qdev->intr_context[0];
2447 	u32 var;
2448 	int work_done = 0;
2449 
2450 	spin_lock(&qdev->hw_lock);
2451 	if (atomic_read(&qdev->intr_context[0].irq_cnt)) {
2452 		netif_printk(qdev, intr, KERN_DEBUG, qdev->ndev,
2453 			     "Shared Interrupt, Not ours!\n");
2454 		spin_unlock(&qdev->hw_lock);
2455 		return IRQ_NONE;
2456 	}
2457 	spin_unlock(&qdev->hw_lock);
2458 
2459 	var = ql_disable_completion_interrupt(qdev, intr_context->intr);
2460 
2461 	/*
2462 	 * Check for fatal error.
2463 	 */
2464 	if (var & STS_FE) {
2465 		ql_queue_asic_error(qdev);
2466 		netdev_err(qdev->ndev, "Got fatal error, STS = %x.\n", var);
2467 		var = ql_read32(qdev, ERR_STS);
2468 		netdev_err(qdev->ndev, "Resetting chip. "
2469 					"Error Status Register = 0x%x\n", var);
2470 		return IRQ_HANDLED;
2471 	}
2472 
2473 	/*
2474 	 * Check MPI processor activity.
2475 	 */
2476 	if ((var & STS_PI) &&
2477 		(ql_read32(qdev, INTR_MASK) & INTR_MASK_PI)) {
2478 		/*
2479 		 * We've got an async event or mailbox completion.
2480 		 * Handle it and clear the source of the interrupt.
2481 		 */
2482 		netif_err(qdev, intr, qdev->ndev,
2483 			  "Got MPI processor interrupt.\n");
2484 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2485 		ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16));
2486 		queue_delayed_work_on(smp_processor_id(),
2487 				qdev->workqueue, &qdev->mpi_work, 0);
2488 		work_done++;
2489 	}
2490 
2491 	/*
2492 	 * Get the bit-mask that shows the active queues for this
2493 	 * pass.  Compare it to the queues that this irq services
2494 	 * and call napi if there's a match.
2495 	 */
2496 	var = ql_read32(qdev, ISR1);
2497 	if (var & intr_context->irq_mask) {
2498 		netif_info(qdev, intr, qdev->ndev,
2499 			   "Waking handler for rx_ring[0].\n");
2500 		ql_disable_completion_interrupt(qdev, intr_context->intr);
2501 		napi_schedule(&rx_ring->napi);
2502 		work_done++;
2503 	}
2504 	ql_enable_completion_interrupt(qdev, intr_context->intr);
2505 	return work_done ? IRQ_HANDLED : IRQ_NONE;
2506 }
2507 
2508 static int ql_tso(struct sk_buff *skb, struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2509 {
2510 
2511 	if (skb_is_gso(skb)) {
2512 		int err;
2513 		if (skb_header_cloned(skb)) {
2514 			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2515 			if (err)
2516 				return err;
2517 		}
2518 
2519 		mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2520 		mac_iocb_ptr->flags3 |= OB_MAC_TSO_IOCB_IC;
2521 		mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2522 		mac_iocb_ptr->total_hdrs_len =
2523 		    cpu_to_le16(skb_transport_offset(skb) + tcp_hdrlen(skb));
2524 		mac_iocb_ptr->net_trans_offset =
2525 		    cpu_to_le16(skb_network_offset(skb) |
2526 				skb_transport_offset(skb)
2527 				<< OB_MAC_TRANSPORT_HDR_SHIFT);
2528 		mac_iocb_ptr->mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
2529 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_LSO;
2530 		if (likely(skb->protocol == htons(ETH_P_IP))) {
2531 			struct iphdr *iph = ip_hdr(skb);
2532 			iph->check = 0;
2533 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2534 			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
2535 								 iph->daddr, 0,
2536 								 IPPROTO_TCP,
2537 								 0);
2538 		} else if (skb->protocol == htons(ETH_P_IPV6)) {
2539 			mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP6;
2540 			tcp_hdr(skb)->check =
2541 			    ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
2542 					     &ipv6_hdr(skb)->daddr,
2543 					     0, IPPROTO_TCP, 0);
2544 		}
2545 		return 1;
2546 	}
2547 	return 0;
2548 }
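
/* Illustrative note (comment only, hypothetical frame): for an untagged
 * IPv4/TCP skb, skb_network_offset() is 14 and, with a 20-byte IP header,
 * skb_transport_offset() is 34; ql_tso() packs 14 into the low bits of
 * net_trans_offset and 34 above OB_MAC_TRANSPORT_HDR_SHIFT.  The TCP
 * checksum is seeded with ~csum_tcpudp_magic(saddr, daddr, 0, ...), a
 * pseudo-header sum with a zero length, so the hardware can add the real
 * length and payload sum for every LSO segment it emits.
 */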
2549 
2550 static void ql_hw_csum_setup(struct sk_buff *skb,
2551 			     struct ob_mac_tso_iocb_req *mac_iocb_ptr)
2552 {
2553 	int len;
2554 	struct iphdr *iph = ip_hdr(skb);
2555 	__sum16 *check;
2556 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_TSO_IOCB;
2557 	mac_iocb_ptr->frame_len = cpu_to_le32((u32) skb->len);
2558 	mac_iocb_ptr->net_trans_offset =
2559 		cpu_to_le16(skb_network_offset(skb) |
2560 		skb_transport_offset(skb) << OB_MAC_TRANSPORT_HDR_SHIFT);
2561 
2562 	mac_iocb_ptr->flags1 |= OB_MAC_TSO_IOCB_IP4;
2563 	len = (ntohs(iph->tot_len) - (iph->ihl << 2));
2564 	if (likely(iph->protocol == IPPROTO_TCP)) {
2565 		check = &(tcp_hdr(skb)->check);
2566 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_TC;
2567 		mac_iocb_ptr->total_hdrs_len =
2568 		    cpu_to_le16(skb_transport_offset(skb) +
2569 				(tcp_hdr(skb)->doff << 2));
2570 	} else {
2571 		check = &(udp_hdr(skb)->check);
2572 		mac_iocb_ptr->flags2 |= OB_MAC_TSO_IOCB_UC;
2573 		mac_iocb_ptr->total_hdrs_len =
2574 		    cpu_to_le16(skb_transport_offset(skb) +
2575 				sizeof(struct udphdr));
2576 	}
2577 	*check = ~csum_tcpudp_magic(iph->saddr,
2578 				    iph->daddr, len, iph->protocol, 0);
2579 }
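
/* Illustrative note (comment only): for plain checksum offload the L4
 * length is folded into the seed here.  len comes from the IP header (for
 * example, tot_len 1500 with a 20-byte header gives len == 1480) and the
 * checksum field is preloaded with ~csum_tcpudp_magic(saddr, daddr, len,
 * proto, 0), so the hardware only has to add the TCP/UDP header and
 * payload sum.
 */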
2580 
2581 static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2582 {
2583 	struct tx_ring_desc *tx_ring_desc;
2584 	struct ob_mac_iocb_req *mac_iocb_ptr;
2585 	struct ql_adapter *qdev = netdev_priv(ndev);
2586 	int tso;
2587 	struct tx_ring *tx_ring;
2588 	u32 tx_ring_idx = (u32) skb->queue_mapping;
2589 
2590 	tx_ring = &qdev->tx_ring[tx_ring_idx];
2591 
2592 	if (skb_padto(skb, ETH_ZLEN))
2593 		return NETDEV_TX_OK;
2594 
2595 	if (unlikely(atomic_read(&tx_ring->tx_count) < 2)) {
2596 		netif_info(qdev, tx_queued, qdev->ndev,
2597 			   "%s: shutting down tx queue %d due to lack of resources.\n",
2598 			   __func__, tx_ring_idx);
2599 		netif_stop_subqueue(ndev, tx_ring->wq_id);
2600 		atomic_inc(&tx_ring->queue_stopped);
2601 		tx_ring->tx_errors++;
2602 		return NETDEV_TX_BUSY;
2603 	}
2604 	tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
2605 	mac_iocb_ptr = tx_ring_desc->queue_entry;
2606 	memset((void *)mac_iocb_ptr, 0, sizeof(*mac_iocb_ptr));
2607 
2608 	mac_iocb_ptr->opcode = OPCODE_OB_MAC_IOCB;
2609 	mac_iocb_ptr->tid = tx_ring_desc->index;
2610 	/* We use the upper 32-bits to store the tx queue for this IO.
2611 	 * When we get the completion we can use it to establish the context.
2612 	 */
2613 	mac_iocb_ptr->txq_idx = tx_ring_idx;
2614 	tx_ring_desc->skb = skb;
2615 
2616 	mac_iocb_ptr->frame_len = cpu_to_le16((u16) skb->len);
2617 
2618 	if (vlan_tx_tag_present(skb)) {
2619 		netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2620 			     "Adding a vlan tag %d.\n", vlan_tx_tag_get(skb));
2621 		mac_iocb_ptr->flags3 |= OB_MAC_IOCB_V;
2622 		mac_iocb_ptr->vlan_tci = cpu_to_le16(vlan_tx_tag_get(skb));
2623 	}
2624 	tso = ql_tso(skb, (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2625 	if (tso < 0) {
2626 		dev_kfree_skb_any(skb);
2627 		return NETDEV_TX_OK;
2628 	} else if (unlikely(!tso) && (skb->ip_summed == CHECKSUM_PARTIAL)) {
2629 		ql_hw_csum_setup(skb,
2630 				 (struct ob_mac_tso_iocb_req *)mac_iocb_ptr);
2631 	}
2632 	if (ql_map_send(qdev, mac_iocb_ptr, skb, tx_ring_desc) !=
2633 			NETDEV_TX_OK) {
2634 		netif_err(qdev, tx_queued, qdev->ndev,
2635 			  "Could not map the segments.\n");
2636 		tx_ring->tx_errors++;
2637 		return NETDEV_TX_BUSY;
2638 	}
2639 	QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
2640 	tx_ring->prod_idx++;
2641 	if (tx_ring->prod_idx == tx_ring->wq_len)
2642 		tx_ring->prod_idx = 0;
2643 	wmb();
2644 
2645 	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
2646 	netif_printk(qdev, tx_queued, KERN_DEBUG, qdev->ndev,
2647 		     "tx queued, slot %d, len %d\n",
2648 		     tx_ring->prod_idx, skb->len);
2649 
2650 	atomic_dec(&tx_ring->tx_count);
2651 	return NETDEV_TX_OK;
2652 }
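
/* Illustrative note (comment only): the wmb() before ql_write_db_reg()
 * above orders the stores so the filled-in IOCB is visible in memory
 * before the producer-index doorbell tells the chip to fetch it; without
 * the barrier the device could race ahead and DMA a stale descriptor.
 */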
2653 
2654 
2655 static void ql_free_shadow_space(struct ql_adapter *qdev)
2656 {
2657 	if (qdev->rx_ring_shadow_reg_area) {
2658 		pci_free_consistent(qdev->pdev,
2659 				    PAGE_SIZE,
2660 				    qdev->rx_ring_shadow_reg_area,
2661 				    qdev->rx_ring_shadow_reg_dma);
2662 		qdev->rx_ring_shadow_reg_area = NULL;
2663 	}
2664 	if (qdev->tx_ring_shadow_reg_area) {
2665 		pci_free_consistent(qdev->pdev,
2666 				    PAGE_SIZE,
2667 				    qdev->tx_ring_shadow_reg_area,
2668 				    qdev->tx_ring_shadow_reg_dma);
2669 		qdev->tx_ring_shadow_reg_area = NULL;
2670 	}
2671 }
2672 
2673 static int ql_alloc_shadow_space(struct ql_adapter *qdev)
2674 {
2675 	qdev->rx_ring_shadow_reg_area =
2676 	    pci_alloc_consistent(qdev->pdev,
2677 				 PAGE_SIZE, &qdev->rx_ring_shadow_reg_dma);
2678 	if (qdev->rx_ring_shadow_reg_area == NULL) {
2679 		netif_err(qdev, ifup, qdev->ndev,
2680 			  "Allocation of RX shadow space failed.\n");
2681 		return -ENOMEM;
2682 	}
2683 	memset(qdev->rx_ring_shadow_reg_area, 0, PAGE_SIZE);
2684 	qdev->tx_ring_shadow_reg_area =
2685 	    pci_alloc_consistent(qdev->pdev, PAGE_SIZE,
2686 				 &qdev->tx_ring_shadow_reg_dma);
2687 	if (qdev->tx_ring_shadow_reg_area == NULL) {
2688 		netif_err(qdev, ifup, qdev->ndev,
2689 			  "Allocation of TX shadow space failed.\n");
2690 		goto err_wqp_sh_area;
2691 	}
2692 	memset(qdev->tx_ring_shadow_reg_area, 0, PAGE_SIZE);
2693 	return 0;
2694 
2695 err_wqp_sh_area:
2696 	pci_free_consistent(qdev->pdev,
2697 			    PAGE_SIZE,
2698 			    qdev->rx_ring_shadow_reg_area,
2699 			    qdev->rx_ring_shadow_reg_dma);
2700 	return -ENOMEM;
2701 }
2702 
2703 static void ql_init_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
2704 {
2705 	struct tx_ring_desc *tx_ring_desc;
2706 	int i;
2707 	struct ob_mac_iocb_req *mac_iocb_ptr;
2708 
2709 	mac_iocb_ptr = tx_ring->wq_base;
2710 	tx_ring_desc = tx_ring->q;
2711 	for (i = 0; i < tx_ring->wq_len; i++) {
2712 		tx_ring_desc->index = i;
2713 		tx_ring_desc->skb = NULL;
2714 		tx_ring_desc->queue_entry = mac_iocb_ptr;
2715 		mac_iocb_ptr++;
2716 		tx_ring_desc++;
2717 	}
2718 	atomic_set(&tx_ring->tx_count, tx_ring->wq_len);
2719 	atomic_set(&tx_ring->queue_stopped, 0);
2720 }
2721 
2722 static void ql_free_tx_resources(struct ql_adapter *qdev,
2723 				 struct tx_ring *tx_ring)
2724 {
2725 	if (tx_ring->wq_base) {
2726 		pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2727 				    tx_ring->wq_base, tx_ring->wq_base_dma);
2728 		tx_ring->wq_base = NULL;
2729 	}
2730 	kfree(tx_ring->q);
2731 	tx_ring->q = NULL;
2732 }
2733 
2734 static int ql_alloc_tx_resources(struct ql_adapter *qdev,
2735 				 struct tx_ring *tx_ring)
2736 {
2737 	tx_ring->wq_base =
2738 	    pci_alloc_consistent(qdev->pdev, tx_ring->wq_size,
2739 				 &tx_ring->wq_base_dma);
2740 
2741 	if ((tx_ring->wq_base == NULL) ||
2742 	    tx_ring->wq_base_dma & WQ_ADDR_ALIGN) {
2743 		netif_err(qdev, ifup, qdev->ndev, "tx_ring alloc failed.\n");
2744 		return -ENOMEM;
2745 	}
2746 	tx_ring->q =
2747 	    kmalloc(tx_ring->wq_len * sizeof(struct tx_ring_desc), GFP_KERNEL);
2748 	if (tx_ring->q == NULL)
2749 		goto err;
2750 
2751 	return 0;
2752 err:
2753 	pci_free_consistent(qdev->pdev, tx_ring->wq_size,
2754 			    tx_ring->wq_base, tx_ring->wq_base_dma);
2755 	return -ENOMEM;
2756 }
2757 
2758 static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2759 {
2760 	struct bq_desc *lbq_desc;
2761 
2762 	uint32_t  curr_idx, clean_idx;
2763 
2764 	curr_idx = rx_ring->lbq_curr_idx;
2765 	clean_idx = rx_ring->lbq_clean_idx;
2766 	while (curr_idx != clean_idx) {
2767 		lbq_desc = &rx_ring->lbq[curr_idx];
2768 
2769 		if (lbq_desc->p.pg_chunk.last_flag) {
2770 			pci_unmap_page(qdev->pdev,
2771 				lbq_desc->p.pg_chunk.map,
2772 				ql_lbq_block_size(qdev),
2773 				       PCI_DMA_FROMDEVICE);
2774 			lbq_desc->p.pg_chunk.last_flag = 0;
2775 		}
2776 
2777 		put_page(lbq_desc->p.pg_chunk.page);
2778 		lbq_desc->p.pg_chunk.page = NULL;
2779 
2780 		if (++curr_idx == rx_ring->lbq_len)
2781 			curr_idx = 0;
2782 
2783 	}
2784 }
2785 
2786 static void ql_free_sbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2787 {
2788 	int i;
2789 	struct bq_desc *sbq_desc;
2790 
2791 	for (i = 0; i < rx_ring->sbq_len; i++) {
2792 		sbq_desc = &rx_ring->sbq[i];
2793 		if (sbq_desc == NULL) {
2794 			netif_err(qdev, ifup, qdev->ndev,
2795 				  "sbq_desc %d is NULL.\n", i);
2796 			return;
2797 		}
2798 		if (sbq_desc->p.skb) {
2799 			pci_unmap_single(qdev->pdev,
2800 					 dma_unmap_addr(sbq_desc, mapaddr),
2801 					 dma_unmap_len(sbq_desc, maplen),
2802 					 PCI_DMA_FROMDEVICE);
2803 			dev_kfree_skb(sbq_desc->p.skb);
2804 			sbq_desc->p.skb = NULL;
2805 		}
2806 	}
2807 }
2808 
2809 /* Free all large and small rx buffers associated
2810  * with the completion queues for this device.
2811  */
2812 static void ql_free_rx_buffers(struct ql_adapter *qdev)
2813 {
2814 	int i;
2815 	struct rx_ring *rx_ring;
2816 
2817 	for (i = 0; i < qdev->rx_ring_count; i++) {
2818 		rx_ring = &qdev->rx_ring[i];
2819 		if (rx_ring->lbq)
2820 			ql_free_lbq_buffers(qdev, rx_ring);
2821 		if (rx_ring->sbq)
2822 			ql_free_sbq_buffers(qdev, rx_ring);
2823 	}
2824 }
2825 
2826 static void ql_alloc_rx_buffers(struct ql_adapter *qdev)
2827 {
2828 	struct rx_ring *rx_ring;
2829 	int i;
2830 
2831 	for (i = 0; i < qdev->rx_ring_count; i++) {
2832 		rx_ring = &qdev->rx_ring[i];
2833 		if (rx_ring->type != TX_Q)
2834 			ql_update_buffer_queues(qdev, rx_ring);
2835 	}
2836 }
2837 
2838 static void ql_init_lbq_ring(struct ql_adapter *qdev,
2839 				struct rx_ring *rx_ring)
2840 {
2841 	int i;
2842 	struct bq_desc *lbq_desc;
2843 	__le64 *bq = rx_ring->lbq_base;
2844 
2845 	memset(rx_ring->lbq, 0, rx_ring->lbq_len * sizeof(struct bq_desc));
2846 	for (i = 0; i < rx_ring->lbq_len; i++) {
2847 		lbq_desc = &rx_ring->lbq[i];
2848 		memset(lbq_desc, 0, sizeof(*lbq_desc));
2849 		lbq_desc->index = i;
2850 		lbq_desc->addr = bq;
2851 		bq++;
2852 	}
2853 }
2854 
2855 static void ql_init_sbq_ring(struct ql_adapter *qdev,
2856 				struct rx_ring *rx_ring)
2857 {
2858 	int i;
2859 	struct bq_desc *sbq_desc;
2860 	__le64 *bq = rx_ring->sbq_base;
2861 
2862 	memset(rx_ring->sbq, 0, rx_ring->sbq_len * sizeof(struct bq_desc));
2863 	for (i = 0; i < rx_ring->sbq_len; i++) {
2864 		sbq_desc = &rx_ring->sbq[i];
2865 		memset(sbq_desc, 0, sizeof(*sbq_desc));
2866 		sbq_desc->index = i;
2867 		sbq_desc->addr = bq;
2868 		bq++;
2869 	}
2870 }
2871 
2872 static void ql_free_rx_resources(struct ql_adapter *qdev,
2873 				 struct rx_ring *rx_ring)
2874 {
2875 	/* Free the small buffer queue. */
2876 	if (rx_ring->sbq_base) {
2877 		pci_free_consistent(qdev->pdev,
2878 				    rx_ring->sbq_size,
2879 				    rx_ring->sbq_base, rx_ring->sbq_base_dma);
2880 		rx_ring->sbq_base = NULL;
2881 	}
2882 
2883 	/* Free the small buffer queue control blocks. */
2884 	kfree(rx_ring->sbq);
2885 	rx_ring->sbq = NULL;
2886 
2887 	/* Free the large buffer queue. */
2888 	if (rx_ring->lbq_base) {
2889 		pci_free_consistent(qdev->pdev,
2890 				    rx_ring->lbq_size,
2891 				    rx_ring->lbq_base, rx_ring->lbq_base_dma);
2892 		rx_ring->lbq_base = NULL;
2893 	}
2894 
2895 	/* Free the large buffer queue control blocks. */
2896 	kfree(rx_ring->lbq);
2897 	rx_ring->lbq = NULL;
2898 
2899 	/* Free the rx queue. */
2900 	if (rx_ring->cq_base) {
2901 		pci_free_consistent(qdev->pdev,
2902 				    rx_ring->cq_size,
2903 				    rx_ring->cq_base, rx_ring->cq_base_dma);
2904 		rx_ring->cq_base = NULL;
2905 	}
2906 }
2907 
2908 /* Allocate queues and buffers for this completion queue based
2909  * on the values in the parameter structure. */
2910 static int ql_alloc_rx_resources(struct ql_adapter *qdev,
2911 				 struct rx_ring *rx_ring)
2912 {
2913 
2914 	/*
2915 	 * Allocate the completion queue for this rx_ring.
2916 	 */
2917 	rx_ring->cq_base =
2918 	    pci_alloc_consistent(qdev->pdev, rx_ring->cq_size,
2919 				 &rx_ring->cq_base_dma);
2920 
2921 	if (rx_ring->cq_base == NULL) {
2922 		netif_err(qdev, ifup, qdev->ndev, "rx_ring alloc failed.\n");
2923 		return -ENOMEM;
2924 	}
2925 
2926 	if (rx_ring->sbq_len) {
2927 		/*
2928 		 * Allocate small buffer queue.
2929 		 */
2930 		rx_ring->sbq_base =
2931 		    pci_alloc_consistent(qdev->pdev, rx_ring->sbq_size,
2932 					 &rx_ring->sbq_base_dma);
2933 
2934 		if (rx_ring->sbq_base == NULL) {
2935 			netif_err(qdev, ifup, qdev->ndev,
2936 				  "Small buffer queue allocation failed.\n");
2937 			goto err_mem;
2938 		}
2939 
2940 		/*
2941 		 * Allocate small buffer queue control blocks.
2942 		 */
2943 		rx_ring->sbq =
2944 		    kmalloc(rx_ring->sbq_len * sizeof(struct bq_desc),
2945 			    GFP_KERNEL);
2946 		if (rx_ring->sbq == NULL) {
2947 			netif_err(qdev, ifup, qdev->ndev,
2948 				  "Small buffer queue control block allocation failed.\n");
2949 			goto err_mem;
2950 		}
2951 
2952 		ql_init_sbq_ring(qdev, rx_ring);
2953 	}
2954 
2955 	if (rx_ring->lbq_len) {
2956 		/*
2957 		 * Allocate large buffer queue.
2958 		 */
2959 		rx_ring->lbq_base =
2960 		    pci_alloc_consistent(qdev->pdev, rx_ring->lbq_size,
2961 					 &rx_ring->lbq_base_dma);
2962 
2963 		if (rx_ring->lbq_base == NULL) {
2964 			netif_err(qdev, ifup, qdev->ndev,
2965 				  "Large buffer queue allocation failed.\n");
2966 			goto err_mem;
2967 		}
2968 		/*
2969 		 * Allocate large buffer queue control blocks.
2970 		 */
2971 		rx_ring->lbq =
2972 		    kmalloc(rx_ring->lbq_len * sizeof(struct bq_desc),
2973 			    GFP_KERNEL);
2974 		if (rx_ring->lbq == NULL) {
2975 			netif_err(qdev, ifup, qdev->ndev,
2976 				  "Large buffer queue control block allocation failed.\n");
2977 			goto err_mem;
2978 		}
2979 
2980 		ql_init_lbq_ring(qdev, rx_ring);
2981 	}
2982 
2983 	return 0;
2984 
2985 err_mem:
2986 	ql_free_rx_resources(qdev, rx_ring);
2987 	return -ENOMEM;
2988 }
2989 
2990 static void ql_tx_ring_clean(struct ql_adapter *qdev)
2991 {
2992 	struct tx_ring *tx_ring;
2993 	struct tx_ring_desc *tx_ring_desc;
2994 	int i, j;
2995 
2996 	/*
2997 	 * Loop through all queues and free
2998 	 * any resources.
2999 	 */
3000 	for (j = 0; j < qdev->tx_ring_count; j++) {
3001 		tx_ring = &qdev->tx_ring[j];
3002 		for (i = 0; i < tx_ring->wq_len; i++) {
3003 			tx_ring_desc = &tx_ring->q[i];
3004 			if (tx_ring_desc && tx_ring_desc->skb) {
3005 				netif_err(qdev, ifdown, qdev->ndev,
3006 					  "Freeing lost SKB %p, from queue %d, index %d.\n",
3007 					  tx_ring_desc->skb, j,
3008 					  tx_ring_desc->index);
3009 				ql_unmap_send(qdev, tx_ring_desc,
3010 					      tx_ring_desc->map_cnt);
3011 				dev_kfree_skb(tx_ring_desc->skb);
3012 				tx_ring_desc->skb = NULL;
3013 			}
3014 		}
3015 	}
3016 }
3017 
3018 static void ql_free_mem_resources(struct ql_adapter *qdev)
3019 {
3020 	int i;
3021 
3022 	for (i = 0; i < qdev->tx_ring_count; i++)
3023 		ql_free_tx_resources(qdev, &qdev->tx_ring[i]);
3024 	for (i = 0; i < qdev->rx_ring_count; i++)
3025 		ql_free_rx_resources(qdev, &qdev->rx_ring[i]);
3026 	ql_free_shadow_space(qdev);
3027 }
3028 
3029 static int ql_alloc_mem_resources(struct ql_adapter *qdev)
3030 {
3031 	int i;
3032 
3033 	/* Allocate space for our shadow registers and such. */
3034 	if (ql_alloc_shadow_space(qdev))
3035 		return -ENOMEM;
3036 
3037 	for (i = 0; i < qdev->rx_ring_count; i++) {
3038 		if (ql_alloc_rx_resources(qdev, &qdev->rx_ring[i]) != 0) {
3039 			netif_err(qdev, ifup, qdev->ndev,
3040 				  "RX resource allocation failed.\n");
3041 			goto err_mem;
3042 		}
3043 	}
3044 	/* Allocate tx queue resources */
3045 	for (i = 0; i < qdev->tx_ring_count; i++) {
3046 		if (ql_alloc_tx_resources(qdev, &qdev->tx_ring[i]) != 0) {
3047 			netif_err(qdev, ifup, qdev->ndev,
3048 				  "TX resource allocation failed.\n");
3049 			goto err_mem;
3050 		}
3051 	}
3052 	return 0;
3053 
3054 err_mem:
3055 	ql_free_mem_resources(qdev);
3056 	return -ENOMEM;
3057 }
3058 
3059 /* Set up the rx ring control block and pass it to the chip.
3060  * The control block is defined as
3061  * "Completion Queue Initialization Control Block", or cqicb.
3062  */
3063 static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
3064 {
3065 	struct cqicb *cqicb = &rx_ring->cqicb;
3066 	void *shadow_reg = qdev->rx_ring_shadow_reg_area +
3067 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3068 	u64 shadow_reg_dma = qdev->rx_ring_shadow_reg_dma +
3069 		(rx_ring->cq_id * RX_RING_SHADOW_SPACE);
3070 	void __iomem *doorbell_area =
3071 	    qdev->doorbell_area + (DB_PAGE_SIZE * (128 + rx_ring->cq_id));
3072 	int err = 0;
3073 	u16 bq_len;
3074 	u64 tmp;
3075 	__le64 *base_indirect_ptr;
3076 	int page_entries;
3077 
3078 	/* Set up the shadow registers for this ring. */
3079 	rx_ring->prod_idx_sh_reg = shadow_reg;
3080 	rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
3081 	*rx_ring->prod_idx_sh_reg = 0;
3082 	shadow_reg += sizeof(u64);
3083 	shadow_reg_dma += sizeof(u64);
3084 	rx_ring->lbq_base_indirect = shadow_reg;
3085 	rx_ring->lbq_base_indirect_dma = shadow_reg_dma;
3086 	shadow_reg += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3087 	shadow_reg_dma += (sizeof(u64) * MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3088 	rx_ring->sbq_base_indirect = shadow_reg;
3089 	rx_ring->sbq_base_indirect_dma = shadow_reg_dma;
3090 
3091 	/* PCI doorbell mem area + 0x00 for consumer index register */
3092 	rx_ring->cnsmr_idx_db_reg = (u32 __iomem *) doorbell_area;
3093 	rx_ring->cnsmr_idx = 0;
3094 	rx_ring->curr_entry = rx_ring->cq_base;
3095 
3096 	/* PCI doorbell mem area + 0x04 for valid register */
3097 	rx_ring->valid_db_reg = doorbell_area + 0x04;
3098 
3099 	/* PCI doorbell mem area + 0x18 for large buffer consumer */
3100 	rx_ring->lbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x18);
3101 
3102 	/* PCI doorbell mem area + 0x1c */
3103 	rx_ring->sbq_prod_idx_db_reg = (u32 __iomem *) (doorbell_area + 0x1c);
3104 
3105 	memset((void *)cqicb, 0, sizeof(struct cqicb));
3106 	cqicb->msix_vect = rx_ring->irq;
3107 
3108 	bq_len = (rx_ring->cq_len == 65536) ? 0 : (u16) rx_ring->cq_len;
3109 	cqicb->len = cpu_to_le16(bq_len | LEN_V | LEN_CPP_CONT);
3110 
3111 	cqicb->addr = cpu_to_le64(rx_ring->cq_base_dma);
3112 
3113 	cqicb->prod_idx_addr = cpu_to_le64(rx_ring->prod_idx_sh_reg_dma);
3114 
3115 	/*
3116 	 * Set up the control block load flags.
3117 	 */
3118 	cqicb->flags = FLAGS_LC |	/* Load queue base address */
3119 	    FLAGS_LV |		/* Load MSI-X vector */
3120 	    FLAGS_LI;		/* Load irq delay values */
3121 	if (rx_ring->lbq_len) {
3122 		cqicb->flags |= FLAGS_LL;	/* Load lbq values */
3123 		tmp = (u64)rx_ring->lbq_base_dma;
3124 		base_indirect_ptr = rx_ring->lbq_base_indirect;
3125 		page_entries = 0;
3126 		do {
3127 			*base_indirect_ptr = cpu_to_le64(tmp);
3128 			tmp += DB_PAGE_SIZE;
3129 			base_indirect_ptr++;
3130 			page_entries++;
3131 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->lbq_len));
3132 		cqicb->lbq_addr =
3133 		    cpu_to_le64(rx_ring->lbq_base_indirect_dma);
3134 		bq_len = (rx_ring->lbq_buf_size == 65536) ? 0 :
3135 			(u16) rx_ring->lbq_buf_size;
3136 		cqicb->lbq_buf_size = cpu_to_le16(bq_len);
3137 		bq_len = (rx_ring->lbq_len == 65536) ? 0 :
3138 			(u16) rx_ring->lbq_len;
3139 		cqicb->lbq_len = cpu_to_le16(bq_len);
3140 		rx_ring->lbq_prod_idx = 0;
3141 		rx_ring->lbq_curr_idx = 0;
3142 		rx_ring->lbq_clean_idx = 0;
3143 		rx_ring->lbq_free_cnt = rx_ring->lbq_len;
3144 	}
3145 	if (rx_ring->sbq_len) {
3146 		cqicb->flags |= FLAGS_LS;	/* Load sbq values */
3147 		tmp = (u64)rx_ring->sbq_base_dma;
3148 		base_indirect_ptr = rx_ring->sbq_base_indirect;
3149 		page_entries = 0;
3150 		do {
3151 			*base_indirect_ptr = cpu_to_le64(tmp);
3152 			tmp += DB_PAGE_SIZE;
3153 			base_indirect_ptr++;
3154 			page_entries++;
3155 		} while (page_entries < MAX_DB_PAGES_PER_BQ(rx_ring->sbq_len));
3156 		cqicb->sbq_addr =
3157 		    cpu_to_le64(rx_ring->sbq_base_indirect_dma);
3158 		cqicb->sbq_buf_size =
3159 		    cpu_to_le16((u16)(rx_ring->sbq_buf_size));
3160 		bq_len = (rx_ring->sbq_len == 65536) ? 0 :
3161 			(u16) rx_ring->sbq_len;
3162 		cqicb->sbq_len = cpu_to_le16(bq_len);
3163 		rx_ring->sbq_prod_idx = 0;
3164 		rx_ring->sbq_curr_idx = 0;
3165 		rx_ring->sbq_clean_idx = 0;
3166 		rx_ring->sbq_free_cnt = rx_ring->sbq_len;
3167 	}
3168 	switch (rx_ring->type) {
3169 	case TX_Q:
3170 		cqicb->irq_delay = cpu_to_le16(qdev->tx_coalesce_usecs);
3171 		cqicb->pkt_delay = cpu_to_le16(qdev->tx_max_coalesced_frames);
3172 		break;
3173 	case RX_Q:
3174 		/* Inbound completion handling rx_rings run in
3175 		 * separate NAPI contexts.
3176 		 */
3177 		netif_napi_add(qdev->ndev, &rx_ring->napi, ql_napi_poll_msix,
3178 			       64);
3179 		cqicb->irq_delay = cpu_to_le16(qdev->rx_coalesce_usecs);
3180 		cqicb->pkt_delay = cpu_to_le16(qdev->rx_max_coalesced_frames);
3181 		break;
3182 	default:
3183 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3184 			     "Invalid rx_ring->type = %d.\n", rx_ring->type);
3185 	}
3186 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3187 		     "Initializing rx work queue.\n");
3188 	err = ql_write_cfg(qdev, cqicb, sizeof(struct cqicb),
3189 			   CFG_LCQ, rx_ring->cq_id);
3190 	if (err) {
3191 		netif_err(qdev, ifup, qdev->ndev, "Failed to load CQICB.\n");
3192 		return err;
3193 	}
3194 	return err;
3195 }
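
/* Illustrative layout (comment only) of the per-ring shadow slice used
 * above, as implied by the pointer arithmetic in ql_start_rx_ring():
 *
 *	offset 0 : 8-byte completion-queue producer index
 *	next     : MAX_DB_PAGES_PER_BQ(lbq_len) * 8 bytes of large-buffer
 *	           queue page pointers
 *	next     : small-buffer queue page pointers
 *
 * The whole slice must fit in the RX_RING_SHADOW_SPACE carved per cq_id
 * out of the single page allocated in ql_alloc_shadow_space().
 */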
3196 
3197 static int ql_start_tx_ring(struct ql_adapter *qdev, struct tx_ring *tx_ring)
3198 {
3199 	struct wqicb *wqicb = (struct wqicb *)tx_ring;
3200 	void __iomem *doorbell_area =
3201 	    qdev->doorbell_area + (DB_PAGE_SIZE * tx_ring->wq_id);
3202 	void *shadow_reg = qdev->tx_ring_shadow_reg_area +
3203 	    (tx_ring->wq_id * sizeof(u64));
3204 	u64 shadow_reg_dma = qdev->tx_ring_shadow_reg_dma +
3205 	    (tx_ring->wq_id * sizeof(u64));
3206 	int err = 0;
3207 
3208 	/*
3209 	 * Assign doorbell registers for this tx_ring.
3210 	 */
3211 	/* TX PCI doorbell mem area for tx producer index */
3212 	tx_ring->prod_idx_db_reg = (u32 __iomem *) doorbell_area;
3213 	tx_ring->prod_idx = 0;
3214 	/* TX PCI doorbell mem area + 0x04 */
3215 	tx_ring->valid_db_reg = doorbell_area + 0x04;
3216 
3217 	/*
3218 	 * Assign shadow registers for this tx_ring.
3219 	 */
3220 	tx_ring->cnsmr_idx_sh_reg = shadow_reg;
3221 	tx_ring->cnsmr_idx_sh_reg_dma = shadow_reg_dma;
3222 
3223 	wqicb->len = cpu_to_le16(tx_ring->wq_len | Q_LEN_V | Q_LEN_CPP_CONT);
3224 	wqicb->flags = cpu_to_le16(Q_FLAGS_LC |
3225 				   Q_FLAGS_LB | Q_FLAGS_LI | Q_FLAGS_LO);
3226 	wqicb->cq_id_rss = cpu_to_le16(tx_ring->cq_id);
3227 	wqicb->rid = 0;
3228 	wqicb->addr = cpu_to_le64(tx_ring->wq_base_dma);
3229 
3230 	wqicb->cnsmr_idx_addr = cpu_to_le64(tx_ring->cnsmr_idx_sh_reg_dma);
3231 
3232 	ql_init_tx_ring(qdev, tx_ring);
3233 
3234 	err = ql_write_cfg(qdev, wqicb, sizeof(*wqicb), CFG_LRQ,
3235 			   (u16) tx_ring->wq_id);
3236 	if (err) {
3237 		netif_err(qdev, ifup, qdev->ndev, "Failed to load tx_ring.\n");
3238 		return err;
3239 	}
3240 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3241 		     "Successfully loaded WQICB.\n");
3242 	return err;
3243 }
3244 
3245 static void ql_disable_msix(struct ql_adapter *qdev)
3246 {
3247 	if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3248 		pci_disable_msix(qdev->pdev);
3249 		clear_bit(QL_MSIX_ENABLED, &qdev->flags);
3250 		kfree(qdev->msi_x_entry);
3251 		qdev->msi_x_entry = NULL;
3252 	} else if (test_bit(QL_MSI_ENABLED, &qdev->flags)) {
3253 		pci_disable_msi(qdev->pdev);
3254 		clear_bit(QL_MSI_ENABLED, &qdev->flags);
3255 	}
3256 }
3257 
3258 /* We start by trying to get the number of vectors
3259  * stored in qdev->intr_count. If we don't get that
3260  * many then we reduce the count and try again.
3261  */
3262 static void ql_enable_msix(struct ql_adapter *qdev)
3263 {
3264 	int i, err;
3265 
3266 	/* Get the MSIX vectors. */
3267 	if (qlge_irq_type == MSIX_IRQ) {
3268 		/* Try to alloc space for the msix struct,
3269 		 * if it fails then go to MSI/legacy.
3270 		 */
3271 		qdev->msi_x_entry = kcalloc(qdev->intr_count,
3272 					    sizeof(struct msix_entry),
3273 					    GFP_KERNEL);
3274 		if (!qdev->msi_x_entry) {
3275 			qlge_irq_type = MSI_IRQ;
3276 			goto msi;
3277 		}
3278 
3279 		for (i = 0; i < qdev->intr_count; i++)
3280 			qdev->msi_x_entry[i].entry = i;
3281 
3282 		/* Loop to get our vectors.  We start with
3283 		 * what we want and settle for what we get.
3284 		 */
3285 		do {
3286 			err = pci_enable_msix(qdev->pdev,
3287 				qdev->msi_x_entry, qdev->intr_count);
3288 			if (err > 0)
3289 				qdev->intr_count = err;
3290 		} while (err > 0);
3291 
3292 		if (err < 0) {
3293 			kfree(qdev->msi_x_entry);
3294 			qdev->msi_x_entry = NULL;
3295 			netif_warn(qdev, ifup, qdev->ndev,
3296 				   "MSI-X Enable failed, trying MSI.\n");
3297 			qdev->intr_count = 1;
3298 			qlge_irq_type = MSI_IRQ;
3299 		} else if (err == 0) {
3300 			set_bit(QL_MSIX_ENABLED, &qdev->flags);
3301 			netif_info(qdev, ifup, qdev->ndev,
3302 				   "MSI-X Enabled, got %d vectors.\n",
3303 				   qdev->intr_count);
3304 			return;
3305 		}
3306 	}
3307 msi:
3308 	qdev->intr_count = 1;
3309 	if (qlge_irq_type == MSI_IRQ) {
3310 		if (!pci_enable_msi(qdev->pdev)) {
3311 			set_bit(QL_MSI_ENABLED, &qdev->flags);
3312 			netif_info(qdev, ifup, qdev->ndev,
3313 				   "Running with MSI interrupts.\n");
3314 			return;
3315 		}
3316 	}
3317 	qlge_irq_type = LEG_IRQ;
3318 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3319 		     "Running with legacy interrupts.\n");
3320 }
3321 
/* Each vector services 1 RSS ring and 1 or more
3323  * TX completion rings.  This function loops through
3324  * the TX completion rings and assigns the vector that
3325  * will service it.  An example would be if there are
3326  * 2 vectors (so 2 RSS rings) and 8 TX completion rings.
3327  * This would mean that vector 0 would service RSS ring 0
3328  * and TX completion rings 0,1,2 and 3.  Vector 1 would
3329  * service RSS ring 1 and TX completion rings 4,5,6 and 7.
3330  */
static void ql_set_tx_vect(struct ql_adapter *qdev)
3332 {
3333 	int i, j, vect;
3334 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3335 
3336 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3337 		/* Assign irq vectors to TX rx_rings.*/
3338 		for (vect = 0, j = 0, i = qdev->rss_ring_count;
3339 					 i < qdev->rx_ring_count; i++) {
3340 			if (j == tx_rings_per_vector) {
3341 				vect++;
3342 				j = 0;
3343 			}
3344 			qdev->rx_ring[i].irq = vect;
3345 			j++;
3346 		}
3347 	} else {
3348 		/* For single vector all rings have an irq
3349 		 * of zero.
3350 		 */
3351 		for (i = 0; i < qdev->rx_ring_count; i++)
3352 			qdev->rx_ring[i].irq = 0;
3353 	}
3354 }
3355 
3356 /* Set the interrupt mask for this vector.  Each vector
3357  * will service 1 RSS ring and 1 or more TX completion
3358  * rings.  This function sets up a bit mask per vector
3359  * that indicates which rings it services.
3360  */
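/* Worked example for the 2-vector/8-TX-completion-ring layout described
 * above (see ql_set_tx_vect()): the RSS completion queues have cq_id 0-1
 * and the TX completion queues have cq_id 2-9, so vector 0 ends up with
 * irq_mask = BIT(0)|BIT(2)|BIT(3)|BIT(4)|BIT(5) and vector 1 with
 * irq_mask = BIT(1)|BIT(6)|BIT(7)|BIT(8)|BIT(9).
 */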
static void ql_set_irq_mask(struct ql_adapter *qdev, struct intr_context *ctx)
3362 {
3363 	int j, vect = ctx->intr;
3364 	u32 tx_rings_per_vector = qdev->tx_ring_count / qdev->intr_count;
3365 
3366 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
3367 		/* Add the RSS ring serviced by this vector
3368 		 * to the mask.
3369 		 */
3370 		ctx->irq_mask = (1 << qdev->rx_ring[vect].cq_id);
3371 		/* Add the TX ring(s) serviced by this vector
3372 		 * to the mask. */
3373 		for (j = 0; j < tx_rings_per_vector; j++) {
3374 			ctx->irq_mask |=
3375 			(1 << qdev->rx_ring[qdev->rss_ring_count +
3376 			(vect * tx_rings_per_vector) + j].cq_id);
3377 		}
3378 	} else {
3379 		/* For single vector we just shift each queue's
3380 		 * ID into the mask.
3381 		 */
3382 		for (j = 0; j < qdev->rx_ring_count; j++)
3383 			ctx->irq_mask |= (1 << qdev->rx_ring[j].cq_id);
3384 	}
3385 }
3386 
3387 /*
3388  * Here we build the intr_context structures based on
3389  * our rx_ring count and intr vector count.
3390  * The intr_context structure is used to hook each vector
3391  * to possibly different handlers.
3392  */
static void ql_resolve_queues_to_irqs(struct ql_adapter *qdev)
3394 {
3395 	int i = 0;
3396 	struct intr_context *intr_context = &qdev->intr_context[0];
3397 
3398 	if (likely(test_bit(QL_MSIX_ENABLED, &qdev->flags))) {
		/* Each rx_ring has its
3400 		 * own intr_context since we have separate
3401 		 * vectors for each queue.
3402 		 */
3403 		for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3404 			qdev->rx_ring[i].irq = i;
3405 			intr_context->intr = i;
3406 			intr_context->qdev = qdev;
3407 			/* Set up this vector's bit-mask that indicates
3408 			 * which queues it services.
3409 			 */
3410 			ql_set_irq_mask(qdev, intr_context);
3411 			/*
			 * We set up each vector's enable/disable/read bits so
3413 			 * there's no bit/mask calculations in the critical path.
3414 			 */
3415 			intr_context->intr_en_mask =
3416 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3417 			    INTR_EN_TYPE_ENABLE | INTR_EN_IHD_MASK | INTR_EN_IHD
3418 			    | i;
3419 			intr_context->intr_dis_mask =
3420 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3421 			    INTR_EN_TYPE_DISABLE | INTR_EN_IHD_MASK |
3422 			    INTR_EN_IHD | i;
3423 			intr_context->intr_read_mask =
3424 			    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3425 			    INTR_EN_TYPE_READ | INTR_EN_IHD_MASK | INTR_EN_IHD |
3426 			    i;
3427 			if (i == 0) {
3428 				/* The first vector/queue handles
3429 				 * broadcast/multicast, fatal errors,
3430 				 * and firmware events.  This in addition
3431 				 * to normal inbound NAPI processing.
3432 				 */
3433 				intr_context->handler = qlge_isr;
3434 				sprintf(intr_context->name, "%s-rx-%d",
3435 					qdev->ndev->name, i);
3436 			} else {
3437 				/*
3438 				 * Inbound queues handle unicast frames only.
3439 				 */
3440 				intr_context->handler = qlge_msix_rx_isr;
3441 				sprintf(intr_context->name, "%s-rx-%d",
3442 					qdev->ndev->name, i);
3443 			}
3444 		}
3445 	} else {
3446 		/*
3447 		 * All rx_rings use the same intr_context since
3448 		 * there is only one vector.
3449 		 */
3450 		intr_context->intr = 0;
3451 		intr_context->qdev = qdev;
3452 		/*
		 * We set up each vector's enable/disable/read bits so
3454 		 * there's no bit/mask calculations in the critical path.
3455 		 */
3456 		intr_context->intr_en_mask =
3457 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_ENABLE;
3458 		intr_context->intr_dis_mask =
3459 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK |
3460 		    INTR_EN_TYPE_DISABLE;
3461 		intr_context->intr_read_mask =
3462 		    INTR_EN_TYPE_MASK | INTR_EN_INTR_MASK | INTR_EN_TYPE_READ;
3463 		/*
3464 		 * Single interrupt means one handler for all rings.
3465 		 */
3466 		intr_context->handler = qlge_isr;
3467 		sprintf(intr_context->name, "%s-single_irq", qdev->ndev->name);
3468 		/* Set up this vector's bit-mask that indicates
3469 		 * which queues it services. In this case there is
3470 		 * a single vector so it will service all RSS and
3471 		 * TX completion rings.
3472 		 */
3473 		ql_set_irq_mask(qdev, intr_context);
3474 	}
3475 	/* Tell the TX completion rings which MSIx vector
3476 	 * they will be using.
3477 	 */
3478 	ql_set_tx_vect(qdev);
3479 }
3480 
static void ql_free_irq(struct ql_adapter *qdev)
3482 {
3483 	int i;
3484 	struct intr_context *intr_context = &qdev->intr_context[0];
3485 
3486 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3487 		if (intr_context->hooked) {
3488 			if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3489 				free_irq(qdev->msi_x_entry[i].vector,
3490 					 &qdev->rx_ring[i]);
3491 				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3492 					     "freeing msix interrupt %d.\n", i);
3493 			} else {
3494 				free_irq(qdev->pdev->irq, &qdev->rx_ring[0]);
3495 				netif_printk(qdev, ifdown, KERN_DEBUG, qdev->ndev,
3496 					     "freeing msi interrupt %d.\n", i);
3497 			}
3498 		}
3499 	}
3500 	ql_disable_msix(qdev);
3501 }
3502 
static int ql_request_irq(struct ql_adapter *qdev)
3504 {
3505 	int i;
3506 	int status = 0;
3507 	struct pci_dev *pdev = qdev->pdev;
3508 	struct intr_context *intr_context = &qdev->intr_context[0];
3509 
3510 	ql_resolve_queues_to_irqs(qdev);
3511 
3512 	for (i = 0; i < qdev->intr_count; i++, intr_context++) {
3513 		atomic_set(&intr_context->irq_cnt, 0);
3514 		if (test_bit(QL_MSIX_ENABLED, &qdev->flags)) {
3515 			status = request_irq(qdev->msi_x_entry[i].vector,
3516 					     intr_context->handler,
3517 					     0,
3518 					     intr_context->name,
3519 					     &qdev->rx_ring[i]);
3520 			if (status) {
3521 				netif_err(qdev, ifup, qdev->ndev,
3522 					  "Failed request for MSIX interrupt %d.\n",
3523 					  i);
3524 				goto err_irq;
3525 			} else {
3526 				netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3527 					     "Hooked intr %d, queue type %s, with name %s.\n",
3528 					     i,
3529 					     qdev->rx_ring[i].type == DEFAULT_Q ?
3530 					     "DEFAULT_Q" :
3531 					     qdev->rx_ring[i].type == TX_Q ?
3532 					     "TX_Q" :
3533 					     qdev->rx_ring[i].type == RX_Q ?
3534 					     "RX_Q" : "",
3535 					     intr_context->name);
3536 			}
3537 		} else {
3538 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3539 				     "trying msi or legacy interrupts.\n");
3540 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3541 				     "%s: irq = %d.\n", __func__, pdev->irq);
3542 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3543 				     "%s: context->name = %s.\n", __func__,
3544 				     intr_context->name);
3545 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3546 				     "%s: dev_id = 0x%p.\n", __func__,
3547 				     &qdev->rx_ring[0]);
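			/* Legacy INTx can be shared with other devices, so
			 * IRQF_SHARED is required there; MSI is exclusive to
			 * this function and needs no flags.
			 */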
3548 			status =
3549 			    request_irq(pdev->irq, qlge_isr,
3550 					test_bit(QL_MSI_ENABLED,
3551 						 &qdev->
3552 						 flags) ? 0 : IRQF_SHARED,
3553 					intr_context->name, &qdev->rx_ring[0]);
3554 			if (status)
3555 				goto err_irq;
3556 
3557 			netif_err(qdev, ifup, qdev->ndev,
3558 				  "Hooked intr %d, queue type %s, with name %s.\n",
3559 				  i,
3560 				  qdev->rx_ring[0].type == DEFAULT_Q ?
3561 				  "DEFAULT_Q" :
3562 				  qdev->rx_ring[0].type == TX_Q ? "TX_Q" :
3563 				  qdev->rx_ring[0].type == RX_Q ? "RX_Q" : "",
3564 				  intr_context->name);
3565 		}
3566 		intr_context->hooked = 1;
3567 	}
3568 	return status;
3569 err_irq:
	netif_err(qdev, ifup, qdev->ndev, "Failed to get the interrupts!!!\n");
3571 	ql_free_irq(qdev);
3572 	return status;
3573 }
3574 
static int ql_start_rss(struct ql_adapter *qdev)
3576 {
3577 	static const u8 init_hash_seed[] = {
3578 		0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2,
3579 		0x41, 0x67, 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0,
3580 		0xd0, 0xca, 0x2b, 0xcb, 0xae, 0x7b, 0x30, 0xb4,
3581 		0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30, 0xf2, 0x0c,
3582 		0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa
3583 	};
3584 	struct ricb *ricb = &qdev->ricb;
3585 	int status = 0;
3586 	int i;
3587 	u8 *hash_id = (u8 *) ricb->hash_cq_id;
3588 
3589 	memset((void *)ricb, 0, sizeof(*ricb));
3590 
3591 	ricb->base_cq = RSS_L4K;
3592 	ricb->flags =
3593 		(RSS_L6K | RSS_LI | RSS_LB | RSS_LM | RSS_RT4 | RSS_RT6);
3594 	ricb->mask = cpu_to_le16((u16)(0x3ff));
3595 
3596 	/*
3597 	 * Fill out the Indirection Table.
3598 	 */
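	/* Example: with 4 RSS rings the 1024 entries cycle 0,1,2,3,0,1,...
	 * Note the bitwise AND only spreads flows evenly when
	 * rss_ring_count is a power of two.
	 */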
3599 	for (i = 0; i < 1024; i++)
3600 		hash_id[i] = (i & (qdev->rss_ring_count - 1));
3601 
3602 	memcpy((void *)&ricb->ipv6_hash_key[0], init_hash_seed, 40);
3603 	memcpy((void *)&ricb->ipv4_hash_key[0], init_hash_seed, 16);
3604 
3605 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev, "Initializing RSS.\n");
3606 
3607 	status = ql_write_cfg(qdev, ricb, sizeof(*ricb), CFG_LR, 0);
3608 	if (status) {
3609 		netif_err(qdev, ifup, qdev->ndev, "Failed to load RICB.\n");
3610 		return status;
3611 	}
3612 	netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3613 		     "Successfully loaded RICB.\n");
3614 	return status;
3615 }
3616 
static int ql_clear_routing_entries(struct ql_adapter *qdev)
3618 {
3619 	int i, status = 0;
3620 
3621 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3622 	if (status)
3623 		return status;
3624 	/* Clear all the entries in the routing table. */
3625 	for (i = 0; i < 16; i++) {
3626 		status = ql_set_routing_reg(qdev, i, 0, 0);
3627 		if (status) {
3628 			netif_err(qdev, ifup, qdev->ndev,
3629 				  "Failed to init routing register for CAM packets.\n");
3630 			break;
3631 		}
3632 	}
3633 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3634 	return status;
3635 }
3636 
3637 /* Initialize the frame-to-queue routing. */
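/* The routing slots programmed below steer, in order: IP checksum error
 * frames, TCP/UDP checksum error frames, broadcasts, RSS hash matches
 * (only when more than one inbound queue exists) and CAM (unicast MAC)
 * hits.
 */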
static int ql_route_initialize(struct ql_adapter *qdev)
3639 {
3640 	int status = 0;
3641 
3642 	/* Clear all the entries in the routing table. */
3643 	status = ql_clear_routing_entries(qdev);
3644 	if (status)
3645 		return status;
3646 
3647 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
3648 	if (status)
3649 		return status;
3650 
3651 	status = ql_set_routing_reg(qdev, RT_IDX_IP_CSUM_ERR_SLOT,
3652 						RT_IDX_IP_CSUM_ERR, 1);
3653 	if (status) {
3654 		netif_err(qdev, ifup, qdev->ndev,
3655 			"Failed to init routing register "
3656 			"for IP CSUM error packets.\n");
3657 		goto exit;
3658 	}
3659 	status = ql_set_routing_reg(qdev, RT_IDX_TCP_UDP_CSUM_ERR_SLOT,
3660 						RT_IDX_TU_CSUM_ERR, 1);
3661 	if (status) {
3662 		netif_err(qdev, ifup, qdev->ndev,
3663 			"Failed to init routing register "
3664 			"for TCP/UDP CSUM error packets.\n");
3665 		goto exit;
3666 	}
3667 	status = ql_set_routing_reg(qdev, RT_IDX_BCAST_SLOT, RT_IDX_BCAST, 1);
3668 	if (status) {
3669 		netif_err(qdev, ifup, qdev->ndev,
3670 			  "Failed to init routing register for broadcast packets.\n");
3671 		goto exit;
3672 	}
3673 	/* If we have more than one inbound queue, then turn on RSS in the
3674 	 * routing block.
3675 	 */
3676 	if (qdev->rss_ring_count > 1) {
3677 		status = ql_set_routing_reg(qdev, RT_IDX_RSS_MATCH_SLOT,
3678 					RT_IDX_RSS_MATCH, 1);
3679 		if (status) {
3680 			netif_err(qdev, ifup, qdev->ndev,
3681 				  "Failed to init routing register for MATCH RSS packets.\n");
3682 			goto exit;
3683 		}
3684 	}
3685 
3686 	status = ql_set_routing_reg(qdev, RT_IDX_CAM_HIT_SLOT,
3687 				    RT_IDX_CAM_HIT, 1);
3688 	if (status)
3689 		netif_err(qdev, ifup, qdev->ndev,
3690 			  "Failed to init routing register for CAM packets.\n");
3691 exit:
3692 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
3693 	return status;
3694 }
3695 
int ql_cam_route_initialize(struct ql_adapter *qdev)
3697 {
3698 	int status, set;
3699 
	/* Check if the link is up and use that to
	 * determine whether we are setting or clearing
	 * the MAC address in the CAM.
	 */
3704 	set = ql_read32(qdev, STS);
3705 	set &= qdev->port_link_up;
3706 	status = ql_set_mac_addr(qdev, set);
3707 	if (status) {
3708 		netif_err(qdev, ifup, qdev->ndev, "Failed to init mac address.\n");
3709 		return status;
3710 	}
3711 
3712 	status = ql_route_initialize(qdev);
3713 	if (status)
3714 		netif_err(qdev, ifup, qdev->ndev, "Failed to init routing table.\n");
3715 
3716 	return status;
3717 }
3718 
static int ql_adapter_initialize(struct ql_adapter *qdev)
3720 {
3721 	u32 value, mask;
3722 	int i;
3723 	int status = 0;
3724 
3725 	/*
3726 	 * Set up the System register to halt on errors.
3727 	 */
3728 	value = SYS_EFE | SYS_FAE;
3729 	mask = value << 16;
3730 	ql_write32(qdev, SYS, mask | value);
3731 
3732 	/* Set the default queue, and VLAN behavior. */
3733 	value = NIC_RCV_CFG_DFQ | NIC_RCV_CFG_RV;
3734 	mask = NIC_RCV_CFG_DFQ_MASK | (NIC_RCV_CFG_RV << 16);
3735 	ql_write32(qdev, NIC_RCV_CFG, (mask | value));
3736 
3737 	/* Set the MPI interrupt to enabled. */
3738 	ql_write32(qdev, INTR_MASK, (INTR_MASK_PI << 16) | INTR_MASK_PI);
3739 
3740 	/* Enable the function, set pagesize, enable error checking. */
3741 	value = FSC_FE | FSC_EPC_INBOUND | FSC_EPC_OUTBOUND |
3742 	    FSC_EC | FSC_VM_PAGE_4K;
3743 	value |= SPLT_SETTING;
3744 
3745 	/* Set/clear header splitting. */
3746 	mask = FSC_VM_PAGESIZE_MASK |
3747 	    FSC_DBL_MASK | FSC_DBRST_MASK | (value << 16);
3748 	ql_write32(qdev, FSC, mask | value);
3749 
3750 	ql_write32(qdev, SPLT_HDR, SPLT_LEN);
3751 
	/* Set RX packet routing to use the port/PCI function on which the
	 * packet arrived, in addition to the usual frame routing.
	 * This is helpful with bonding, where both interfaces can have
	 * the same MAC address.
	 */
3757 	ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3758 	/* Reroute all packets to our Interface.
3759 	 * They may have been routed to MPI firmware
3760 	 * due to WOL.
3761 	 */
3762 	value = ql_read32(qdev, MGMT_RCV_CFG);
3763 	value &= ~MGMT_RCV_CFG_RM;
3764 	mask = 0xffff0000;
3765 
3766 	/* Sticky reg needs clearing due to WOL. */
3767 	ql_write32(qdev, MGMT_RCV_CFG, mask);
3768 	ql_write32(qdev, MGMT_RCV_CFG, mask | value);
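	/* As with the other config registers above, the upper 16 bits act as
	 * a per-bit write enable; writing the mask alone first forces the
	 * maskable bits to zero before the wanted value is written back.
	 */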
3769 
3770 	/* Default WOL is enable on Mezz cards */
3771 	if (qdev->pdev->subsystem_device == 0x0068 ||
3772 			qdev->pdev->subsystem_device == 0x0180)
3773 		qdev->wol = WAKE_MAGIC;
3774 
3775 	/* Start up the rx queues. */
3776 	for (i = 0; i < qdev->rx_ring_count; i++) {
3777 		status = ql_start_rx_ring(qdev, &qdev->rx_ring[i]);
3778 		if (status) {
3779 			netif_err(qdev, ifup, qdev->ndev,
3780 				  "Failed to start rx ring[%d].\n", i);
3781 			return status;
3782 		}
3783 	}
3784 
3785 	/* If there is more than one inbound completion queue
3786 	 * then download a RICB to configure RSS.
3787 	 */
3788 	if (qdev->rss_ring_count > 1) {
3789 		status = ql_start_rss(qdev);
3790 		if (status) {
3791 			netif_err(qdev, ifup, qdev->ndev, "Failed to start RSS.\n");
3792 			return status;
3793 		}
3794 	}
3795 
3796 	/* Start up the tx queues. */
3797 	for (i = 0; i < qdev->tx_ring_count; i++) {
3798 		status = ql_start_tx_ring(qdev, &qdev->tx_ring[i]);
3799 		if (status) {
3800 			netif_err(qdev, ifup, qdev->ndev,
3801 				  "Failed to start tx ring[%d].\n", i);
3802 			return status;
3803 		}
3804 	}
3805 
3806 	/* Initialize the port and set the max framesize. */
3807 	status = qdev->nic_ops->port_initialize(qdev);
3808 	if (status)
3809 		netif_err(qdev, ifup, qdev->ndev, "Failed to start port.\n");
3810 
3811 	/* Set up the MAC address and frame routing filter. */
3812 	status = ql_cam_route_initialize(qdev);
3813 	if (status) {
3814 		netif_err(qdev, ifup, qdev->ndev,
3815 			  "Failed to init CAM/Routing tables.\n");
3816 		return status;
3817 	}
3818 
3819 	/* Start NAPI for the RSS queues. */
3820 	for (i = 0; i < qdev->rss_ring_count; i++) {
3821 		netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
3822 			     "Enabling NAPI for rx_ring[%d].\n", i);
3823 		napi_enable(&qdev->rx_ring[i].napi);
3824 	}
3825 
3826 	return status;
3827 }
3828 
3829 /* Issue soft reset to chip. */
static int ql_adapter_reset(struct ql_adapter *qdev)
3831 {
3832 	u32 value;
3833 	int status = 0;
3834 	unsigned long end_jiffies;
3835 
3836 	/* Clear all the entries in the routing table. */
3837 	status = ql_clear_routing_entries(qdev);
3838 	if (status) {
3839 		netif_err(qdev, ifup, qdev->ndev, "Failed to clear routing bits.\n");
3840 		return status;
3841 	}
3842 
3843 	end_jiffies = jiffies +
3844 		max((unsigned long)1, usecs_to_jiffies(30));
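	/* usecs_to_jiffies(30) rounds up to at least one tick; the max()
	 * simply guarantees a non-zero poll window for the reset below.
	 */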
3845 
	/* If the recovery bit is set, skip the mailbox command and
	 * clear the bit; otherwise we are in the normal reset process.
	 */
3849 	if (!test_bit(QL_ASIC_RECOVERY, &qdev->flags)) {
3850 		/* Stop management traffic. */
3851 		ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_STOP);
3852 
3853 		/* Wait for the NIC and MGMNT FIFOs to empty. */
3854 		ql_wait_fifo_empty(qdev);
3855 	} else
3856 		clear_bit(QL_ASIC_RECOVERY, &qdev->flags);
3857 
3858 	ql_write32(qdev, RST_FO, (RST_FO_FR << 16) | RST_FO_FR);
3859 
3860 	do {
3861 		value = ql_read32(qdev, RST_FO);
3862 		if ((value & RST_FO_FR) == 0)
3863 			break;
3864 		cpu_relax();
3865 	} while (time_before(jiffies, end_jiffies));
3866 
3867 	if (value & RST_FO_FR) {
3868 		netif_err(qdev, ifdown, qdev->ndev,
3869 			  "ETIMEDOUT!!! errored out of resetting the chip!\n");
3870 		status = -ETIMEDOUT;
3871 	}
3872 
3873 	/* Resume management traffic. */
3874 	ql_mb_set_mgmnt_traffic_ctl(qdev, MB_SET_MPI_TFK_RESUME);
3875 	return status;
3876 }
3877 
static void ql_display_dev_info(struct net_device *ndev)
3879 {
3880 	struct ql_adapter *qdev = netdev_priv(ndev);
3881 
3882 	netif_info(qdev, probe, qdev->ndev,
3883 		   "Function #%d, Port %d, NIC Roll %d, NIC Rev = %d, "
3884 		   "XG Roll = %d, XG Rev = %d.\n",
3885 		   qdev->func,
3886 		   qdev->port,
3887 		   qdev->chip_rev_id & 0x0000000f,
3888 		   qdev->chip_rev_id >> 4 & 0x0000000f,
3889 		   qdev->chip_rev_id >> 8 & 0x0000000f,
3890 		   qdev->chip_rev_id >> 12 & 0x0000000f);
3891 	netif_info(qdev, probe, qdev->ndev,
3892 		   "MAC address %pM\n", ndev->dev_addr);
3893 }
3894 
static int ql_wol(struct ql_adapter *qdev)
3896 {
3897 	int status = 0;
3898 	u32 wol = MB_WOL_DISABLE;
3899 
3900 	/* The CAM is still intact after a reset, but if we
3901 	 * are doing WOL, then we may need to program the
3902 	 * routing regs. We would also need to issue the mailbox
3903 	 * commands to instruct the MPI what to do per the ethtool
3904 	 * settings.
3905 	 */
3906 
3907 	if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3908 			WAKE_MCAST | WAKE_BCAST)) {
3909 		netif_err(qdev, ifdown, qdev->ndev,
			  "Unsupported WOL parameter. qdev->wol = 0x%x.\n",
3911 			  qdev->wol);
3912 		return -EINVAL;
3913 	}
3914 
3915 	if (qdev->wol & WAKE_MAGIC) {
3916 		status = ql_mb_wol_set_magic(qdev, 1);
3917 		if (status) {
3918 			netif_err(qdev, ifdown, qdev->ndev,
3919 				  "Failed to set magic packet on %s.\n",
3920 				  qdev->ndev->name);
3921 			return status;
3922 		} else
3923 			netif_info(qdev, drv, qdev->ndev,
3924 				   "Enabled magic packet successfully on %s.\n",
3925 				   qdev->ndev->name);
3926 
3927 		wol |= MB_WOL_MAGIC_PKT;
3928 	}
3929 
3930 	if (qdev->wol) {
3931 		wol |= MB_WOL_MODE_ON;
3932 		status = ql_mb_wol_mode(qdev, wol);
3933 		netif_err(qdev, drv, qdev->ndev,
3934 			  "WOL %s (wol code 0x%x) on %s\n",
3935 			  (status == 0) ? "Successfully set" : "Failed",
3936 			  wol, qdev->ndev->name);
3937 	}
3938 
3939 	return status;
3940 }
3941 
static void ql_cancel_all_work_sync(struct ql_adapter *qdev)
3943 {
3944 
3945 	/* Don't kill the reset worker thread if we
3946 	 * are in the process of recovery.
3947 	 */
3948 	if (test_bit(QL_ADAPTER_UP, &qdev->flags))
3949 		cancel_delayed_work_sync(&qdev->asic_reset_work);
3950 	cancel_delayed_work_sync(&qdev->mpi_reset_work);
3951 	cancel_delayed_work_sync(&qdev->mpi_work);
3952 	cancel_delayed_work_sync(&qdev->mpi_idc_work);
3953 	cancel_delayed_work_sync(&qdev->mpi_core_to_log);
3954 	cancel_delayed_work_sync(&qdev->mpi_port_cfg_work);
3955 }
3956 
static int ql_adapter_down(struct ql_adapter *qdev)
3958 {
3959 	int i, status = 0;
3960 
3961 	ql_link_off(qdev);
3962 
3963 	ql_cancel_all_work_sync(qdev);
3964 
3965 	for (i = 0; i < qdev->rss_ring_count; i++)
3966 		napi_disable(&qdev->rx_ring[i].napi);
3967 
3968 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
3969 
3970 	ql_disable_interrupts(qdev);
3971 
3972 	ql_tx_ring_clean(qdev);
3973 
3974 	/* Call netif_napi_del() from common point.
3975 	 */
3976 	for (i = 0; i < qdev->rss_ring_count; i++)
3977 		netif_napi_del(&qdev->rx_ring[i].napi);
3978 
3979 	status = ql_adapter_reset(qdev);
3980 	if (status)
3981 		netif_err(qdev, ifdown, qdev->ndev, "reset(func #%d) FAILED!\n",
3982 			  qdev->func);
3983 	ql_free_rx_buffers(qdev);
3984 
3985 	return status;
3986 }
3987 
static int ql_adapter_up(struct ql_adapter *qdev)
3989 {
3990 	int err = 0;
3991 
3992 	err = ql_adapter_initialize(qdev);
3993 	if (err) {
3994 		netif_info(qdev, ifup, qdev->ndev, "Unable to initialize adapter.\n");
3995 		goto err_init;
3996 	}
3997 	set_bit(QL_ADAPTER_UP, &qdev->flags);
3998 	ql_alloc_rx_buffers(qdev);
	/* If the port is initialized and the
	 * link is up, then turn on the carrier.
	 */
4002 	if ((ql_read32(qdev, STS) & qdev->port_init) &&
4003 			(ql_read32(qdev, STS) & qdev->port_link_up))
4004 		ql_link_on(qdev);
4005 	/* Restore rx mode. */
4006 	clear_bit(QL_ALLMULTI, &qdev->flags);
4007 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4008 	qlge_set_multicast_list(qdev->ndev);
4009 
4010 	/* Restore vlan setting. */
4011 	qlge_restore_vlan(qdev);
4012 
4013 	ql_enable_interrupts(qdev);
4014 	ql_enable_all_completion_interrupts(qdev);
4015 	netif_tx_start_all_queues(qdev->ndev);
4016 
4017 	return 0;
4018 err_init:
4019 	ql_adapter_reset(qdev);
4020 	return err;
4021 }
4022 
static void ql_release_adapter_resources(struct ql_adapter *qdev)
4024 {
4025 	ql_free_mem_resources(qdev);
4026 	ql_free_irq(qdev);
4027 }
4028 
static int ql_get_adapter_resources(struct ql_adapter *qdev)
4030 {
4031 	int status = 0;
4032 
4033 	if (ql_alloc_mem_resources(qdev)) {
		netif_err(qdev, ifup, qdev->ndev, "Unable to allocate memory.\n");
4035 		return -ENOMEM;
4036 	}
4037 	status = ql_request_irq(qdev);
4038 	return status;
4039 }
4040 
static int qlge_close(struct net_device *ndev)
4042 {
4043 	struct ql_adapter *qdev = netdev_priv(ndev);
4044 
	/* If we hit the pci_channel_io_perm_failure
	 * condition, then we have already
	 * brought the adapter down.
	 */
4049 	if (test_bit(QL_EEH_FATAL, &qdev->flags)) {
4050 		netif_err(qdev, drv, qdev->ndev, "EEH fatal did unload.\n");
4051 		clear_bit(QL_EEH_FATAL, &qdev->flags);
4052 		return 0;
4053 	}
4054 
4055 	/*
4056 	 * Wait for device to recover from a reset.
4057 	 * (Rarely happens, but possible.)
4058 	 */
4059 	while (!test_bit(QL_ADAPTER_UP, &qdev->flags))
4060 		msleep(1);
4061 	ql_adapter_down(qdev);
4062 	ql_release_adapter_resources(qdev);
4063 	return 0;
4064 }
4065 
static int ql_configure_rings(struct ql_adapter *qdev)
4067 {
4068 	int i;
4069 	struct rx_ring *rx_ring;
4070 	struct tx_ring *tx_ring;
4071 	int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
4072 	unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4073 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4074 
4075 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4076 
4077 	/* In a perfect world we have one RSS ring for each CPU
	 * and each has its own vector.  To do that we ask for
4079 	 * cpu_cnt vectors.  ql_enable_msix() will adjust the
4080 	 * vector count to what we actually get.  We then
4081 	 * allocate an RSS ring for each.
4082 	 * Essentially, we are doing min(cpu_count, msix_vector_count).
4083 	 */
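	/* Worked example, assuming an 8-CPU host that is granted all 8
	 * MSI-X vectors: rss_ring_count = 8, tx_ring_count = 8 and
	 * rx_ring_count = 16 completion queues, where cq_id 0-7 are the
	 * inbound (RSS) queues and cq_id 8-15 service the 8 TX rings.
	 */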
4084 	qdev->intr_count = cpu_cnt;
4085 	ql_enable_msix(qdev);
4086 	/* Adjust the RSS ring count to the actual vector count. */
4087 	qdev->rss_ring_count = qdev->intr_count;
4088 	qdev->tx_ring_count = cpu_cnt;
4089 	qdev->rx_ring_count = qdev->tx_ring_count + qdev->rss_ring_count;
4090 
4091 	for (i = 0; i < qdev->tx_ring_count; i++) {
4092 		tx_ring = &qdev->tx_ring[i];
4093 		memset((void *)tx_ring, 0, sizeof(*tx_ring));
4094 		tx_ring->qdev = qdev;
4095 		tx_ring->wq_id = i;
4096 		tx_ring->wq_len = qdev->tx_ring_size;
4097 		tx_ring->wq_size =
4098 		    tx_ring->wq_len * sizeof(struct ob_mac_iocb_req);
4099 
		/*
		 * The completion queue IDs for the tx rings start
		 * immediately after the rss rings.
		 */
4104 		tx_ring->cq_id = qdev->rss_ring_count + i;
4105 	}
4106 
4107 	for (i = 0; i < qdev->rx_ring_count; i++) {
4108 		rx_ring = &qdev->rx_ring[i];
4109 		memset((void *)rx_ring, 0, sizeof(*rx_ring));
4110 		rx_ring->qdev = qdev;
4111 		rx_ring->cq_id = i;
4112 		rx_ring->cpu = i % cpu_cnt;	/* CPU to run handler on. */
4113 		if (i < qdev->rss_ring_count) {
4114 			/*
4115 			 * Inbound (RSS) queues.
4116 			 */
4117 			rx_ring->cq_len = qdev->rx_ring_size;
4118 			rx_ring->cq_size =
4119 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4120 			rx_ring->lbq_len = NUM_LARGE_BUFFERS;
4121 			rx_ring->lbq_size =
4122 			    rx_ring->lbq_len * sizeof(__le64);
4123 			rx_ring->lbq_buf_size = (u16)lbq_buf_len;
4124 			netif_printk(qdev, ifup, KERN_DEBUG, qdev->ndev,
4125 				     "lbq_buf_size %d, order = %d\n",
4126 				     rx_ring->lbq_buf_size,
4127 				     qdev->lbq_buf_order);
4128 			rx_ring->sbq_len = NUM_SMALL_BUFFERS;
4129 			rx_ring->sbq_size =
4130 			    rx_ring->sbq_len * sizeof(__le64);
4131 			rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
4132 			rx_ring->type = RX_Q;
4133 		} else {
4134 			/*
4135 			 * Outbound queue handles outbound completions only.
4136 			 */
4137 			/* outbound cq is same size as tx_ring it services. */
4138 			rx_ring->cq_len = qdev->tx_ring_size;
4139 			rx_ring->cq_size =
4140 			    rx_ring->cq_len * sizeof(struct ql_net_rsp_iocb);
4141 			rx_ring->lbq_len = 0;
4142 			rx_ring->lbq_size = 0;
4143 			rx_ring->lbq_buf_size = 0;
4144 			rx_ring->sbq_len = 0;
4145 			rx_ring->sbq_size = 0;
4146 			rx_ring->sbq_buf_size = 0;
4147 			rx_ring->type = TX_Q;
4148 		}
4149 	}
4150 	return 0;
4151 }
4152 
static int qlge_open(struct net_device *ndev)
4154 {
4155 	int err = 0;
4156 	struct ql_adapter *qdev = netdev_priv(ndev);
4157 
4158 	err = ql_adapter_reset(qdev);
4159 	if (err)
4160 		return err;
4161 
4162 	err = ql_configure_rings(qdev);
4163 	if (err)
4164 		return err;
4165 
4166 	err = ql_get_adapter_resources(qdev);
4167 	if (err)
4168 		goto error_up;
4169 
4170 	err = ql_adapter_up(qdev);
4171 	if (err)
4172 		goto error_up;
4173 
4174 	return err;
4175 
4176 error_up:
4177 	ql_release_adapter_resources(qdev);
4178 	return err;
4179 }
4180 
static int ql_change_rx_buffers(struct ql_adapter *qdev)
4182 {
4183 	struct rx_ring *rx_ring;
4184 	int i, status;
4185 	u32 lbq_buf_len;
4186 
4187 	/* Wait for an outstanding reset to complete. */
4188 	if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4189 		int i = 3;
4190 		while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
4191 			netif_err(qdev, ifup, qdev->ndev,
4192 				  "Waiting for adapter UP...\n");
4193 			ssleep(1);
4194 		}
4195 
		/* i only goes negative if the adapter never came back up. */
		if (i < 0) {
4197 			netif_err(qdev, ifup, qdev->ndev,
4198 				  "Timed out waiting for adapter UP\n");
4199 			return -ETIMEDOUT;
4200 		}
4201 	}
4202 
4203 	status = ql_adapter_down(qdev);
4204 	if (status)
4205 		goto error;
4206 
4207 	/* Get the new rx buffer size. */
4208 	lbq_buf_len = (qdev->ndev->mtu > 1500) ?
4209 		LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
4210 	qdev->lbq_buf_order = get_order(lbq_buf_len);
4211 
4212 	for (i = 0; i < qdev->rss_ring_count; i++) {
4213 		rx_ring = &qdev->rx_ring[i];
4214 		/* Set the new size. */
4215 		rx_ring->lbq_buf_size = lbq_buf_len;
4216 	}
4217 
4218 	status = ql_adapter_up(qdev);
4219 	if (status)
4220 		goto error;
4221 
4222 	return status;
4223 error:
4224 	netif_alert(qdev, ifup, qdev->ndev,
4225 		    "Driver up/down cycle failed, closing device.\n");
4226 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4227 	dev_close(qdev->ndev);
4228 	return status;
4229 }
4230 
static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
4232 {
4233 	struct ql_adapter *qdev = netdev_priv(ndev);
4234 	int status;
4235 
4236 	if (ndev->mtu == 1500 && new_mtu == 9000) {
4237 		netif_err(qdev, ifup, qdev->ndev, "Changing to jumbo MTU.\n");
4238 	} else if (ndev->mtu == 9000 && new_mtu == 1500) {
4239 		netif_err(qdev, ifup, qdev->ndev, "Changing to normal MTU.\n");
4240 	} else
4241 		return -EINVAL;
4242 
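	/* Kick the port config worker a few seconds out, presumably so the
	 * MPI firmware re-reads the port settings (max frame size) for the
	 * new MTU.
	 */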
4243 	queue_delayed_work(qdev->workqueue,
4244 			&qdev->mpi_port_cfg_work, 3*HZ);
4245 
4246 	ndev->mtu = new_mtu;
4247 
4248 	if (!netif_running(qdev->ndev)) {
4249 		return 0;
4250 	}
4251 
4252 	status = ql_change_rx_buffers(qdev);
4253 	if (status) {
4254 		netif_err(qdev, ifup, qdev->ndev,
4255 			  "Changing MTU failed.\n");
4256 	}
4257 
4258 	return status;
4259 }
4260 
static struct net_device_stats *qlge_get_stats(struct net_device *ndev)
4263 {
4264 	struct ql_adapter *qdev = netdev_priv(ndev);
4265 	struct rx_ring *rx_ring = &qdev->rx_ring[0];
4266 	struct tx_ring *tx_ring = &qdev->tx_ring[0];
4267 	unsigned long pkts, mcast, dropped, errors, bytes;
4268 	int i;
4269 
4270 	/* Get RX stats. */
4271 	pkts = mcast = dropped = errors = bytes = 0;
4272 	for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
4273 			pkts += rx_ring->rx_packets;
4274 			bytes += rx_ring->rx_bytes;
4275 			dropped += rx_ring->rx_dropped;
4276 			errors += rx_ring->rx_errors;
4277 			mcast += rx_ring->rx_multicast;
4278 	}
4279 	ndev->stats.rx_packets = pkts;
4280 	ndev->stats.rx_bytes = bytes;
4281 	ndev->stats.rx_dropped = dropped;
4282 	ndev->stats.rx_errors = errors;
4283 	ndev->stats.multicast = mcast;
4284 
4285 	/* Get TX stats. */
4286 	pkts = errors = bytes = 0;
4287 	for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
4288 			pkts += tx_ring->tx_packets;
4289 			bytes += tx_ring->tx_bytes;
4290 			errors += tx_ring->tx_errors;
4291 	}
4292 	ndev->stats.tx_packets = pkts;
4293 	ndev->stats.tx_bytes = bytes;
4294 	ndev->stats.tx_errors = errors;
4295 	return &ndev->stats;
4296 }
4297 
static void qlge_set_multicast_list(struct net_device *ndev)
4299 {
4300 	struct ql_adapter *qdev = netdev_priv(ndev);
4301 	struct netdev_hw_addr *ha;
4302 	int i, status;
4303 
4304 	status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
4305 	if (status)
4306 		return;
4307 	/*
4308 	 * Set or clear promiscuous mode if a
4309 	 * transition is taking place.
4310 	 */
4311 	if (ndev->flags & IFF_PROMISC) {
4312 		if (!test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4313 			if (ql_set_routing_reg
4314 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 1)) {
4315 				netif_err(qdev, hw, qdev->ndev,
4316 					  "Failed to set promiscuous mode.\n");
4317 			} else {
4318 				set_bit(QL_PROMISCUOUS, &qdev->flags);
4319 			}
4320 		}
4321 	} else {
4322 		if (test_bit(QL_PROMISCUOUS, &qdev->flags)) {
4323 			if (ql_set_routing_reg
4324 			    (qdev, RT_IDX_PROMISCUOUS_SLOT, RT_IDX_VALID, 0)) {
4325 				netif_err(qdev, hw, qdev->ndev,
4326 					  "Failed to clear promiscuous mode.\n");
4327 			} else {
4328 				clear_bit(QL_PROMISCUOUS, &qdev->flags);
4329 			}
4330 		}
4331 	}
4332 
4333 	/*
4334 	 * Set or clear all multicast mode if a
4335 	 * transition is taking place.
4336 	 */
4337 	if ((ndev->flags & IFF_ALLMULTI) ||
4338 	    (netdev_mc_count(ndev) > MAX_MULTICAST_ENTRIES)) {
4339 		if (!test_bit(QL_ALLMULTI, &qdev->flags)) {
4340 			if (ql_set_routing_reg
4341 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 1)) {
4342 				netif_err(qdev, hw, qdev->ndev,
4343 					  "Failed to set all-multi mode.\n");
4344 			} else {
4345 				set_bit(QL_ALLMULTI, &qdev->flags);
4346 			}
4347 		}
4348 	} else {
4349 		if (test_bit(QL_ALLMULTI, &qdev->flags)) {
4350 			if (ql_set_routing_reg
4351 			    (qdev, RT_IDX_ALLMULTI_SLOT, RT_IDX_MCAST, 0)) {
4352 				netif_err(qdev, hw, qdev->ndev,
4353 					  "Failed to clear all-multi mode.\n");
4354 			} else {
4355 				clear_bit(QL_ALLMULTI, &qdev->flags);
4356 			}
4357 		}
4358 	}
4359 
4360 	if (!netdev_mc_empty(ndev)) {
4361 		status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4362 		if (status)
4363 			goto exit;
4364 		i = 0;
4365 		netdev_for_each_mc_addr(ha, ndev) {
4366 			if (ql_set_mac_addr_reg(qdev, (u8 *) ha->addr,
4367 						MAC_ADDR_TYPE_MULTI_MAC, i)) {
4368 				netif_err(qdev, hw, qdev->ndev,
					  "Failed to load multicast address.\n");
4370 				ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4371 				goto exit;
4372 			}
4373 			i++;
4374 		}
4375 		ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4376 		if (ql_set_routing_reg
4377 		    (qdev, RT_IDX_MCAST_MATCH_SLOT, RT_IDX_MCAST_MATCH, 1)) {
4378 			netif_err(qdev, hw, qdev->ndev,
4379 				  "Failed to set multicast match mode.\n");
4380 		} else {
4381 			set_bit(QL_ALLMULTI, &qdev->flags);
4382 		}
4383 	}
4384 exit:
4385 	ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
4386 }
4387 
static int qlge_set_mac_address(struct net_device *ndev, void *p)
4389 {
4390 	struct ql_adapter *qdev = netdev_priv(ndev);
4391 	struct sockaddr *addr = p;
4392 	int status;
4393 
4394 	if (!is_valid_ether_addr(addr->sa_data))
4395 		return -EADDRNOTAVAIL;
4396 	memcpy(ndev->dev_addr, addr->sa_data, ndev->addr_len);
4397 	/* Update local copy of current mac address. */
4398 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4399 
4400 	status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
4401 	if (status)
4402 		return status;
4403 	status = ql_set_mac_addr_reg(qdev, (u8 *) ndev->dev_addr,
4404 			MAC_ADDR_TYPE_CAM_MAC, qdev->func * MAX_CQ);
4405 	if (status)
4406 		netif_err(qdev, hw, qdev->ndev, "Failed to load MAC address.\n");
4407 	ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
4408 	return status;
4409 }
4410 
static void qlge_tx_timeout(struct net_device *ndev)
4412 {
4413 	struct ql_adapter *qdev = netdev_priv(ndev);
4414 	ql_queue_asic_error(qdev);
4415 }
4416 
static void ql_asic_reset_work(struct work_struct *work)
4418 {
4419 	struct ql_adapter *qdev =
4420 	    container_of(work, struct ql_adapter, asic_reset_work.work);
4421 	int status;
4422 	rtnl_lock();
4423 	status = ql_adapter_down(qdev);
4424 	if (status)
4425 		goto error;
4426 
4427 	status = ql_adapter_up(qdev);
4428 	if (status)
4429 		goto error;
4430 
4431 	/* Restore rx mode. */
4432 	clear_bit(QL_ALLMULTI, &qdev->flags);
4433 	clear_bit(QL_PROMISCUOUS, &qdev->flags);
4434 	qlge_set_multicast_list(qdev->ndev);
4435 
4436 	rtnl_unlock();
4437 	return;
4438 error:
4439 	netif_alert(qdev, ifup, qdev->ndev,
4440 		    "Driver up/down cycle failed, closing device\n");
4441 
4442 	set_bit(QL_ADAPTER_UP, &qdev->flags);
4443 	dev_close(qdev->ndev);
4444 	rtnl_unlock();
4445 }
4446 
4447 static const struct nic_operations qla8012_nic_ops = {
4448 	.get_flash		= ql_get_8012_flash_params,
4449 	.port_initialize	= ql_8012_port_initialize,
4450 };
4451 
4452 static const struct nic_operations qla8000_nic_ops = {
4453 	.get_flash		= ql_get_8000_flash_params,
4454 	.port_initialize	= ql_8000_port_initialize,
4455 };
4456 
4457 /* Find the pcie function number for the other NIC
4458  * on this chip.  Since both NIC functions share a
4459  * common firmware we have the lowest enabled function
4460  * do any common work.  Examples would be resetting
4461  * after a fatal firmware error, or doing a firmware
4462  * coredump.
4463  */
static int ql_get_alt_pcie_func(struct ql_adapter *qdev)
4465 {
4466 	int status = 0;
4467 	u32 temp;
4468 	u32 nic_func1, nic_func2;
4469 
4470 	status = ql_read_mpi_reg(qdev, MPI_TEST_FUNC_PORT_CFG,
4471 			&temp);
4472 	if (status)
4473 		return status;
4474 
4475 	nic_func1 = ((temp >> MPI_TEST_NIC1_FUNC_SHIFT) &
4476 			MPI_TEST_NIC_FUNC_MASK);
4477 	nic_func2 = ((temp >> MPI_TEST_NIC2_FUNC_SHIFT) &
4478 			MPI_TEST_NIC_FUNC_MASK);
4479 
4480 	if (qdev->func == nic_func1)
4481 		qdev->alt_func = nic_func2;
4482 	else if (qdev->func == nic_func2)
4483 		qdev->alt_func = nic_func1;
4484 	else
4485 		status = -EIO;
4486 
4487 	return status;
4488 }
4489 
static int ql_get_board_info(struct ql_adapter *qdev)
4491 {
4492 	int status;
4493 	qdev->func =
4494 	    (ql_read32(qdev, STS) & STS_FUNC_ID_MASK) >> STS_FUNC_ID_SHIFT;
4495 	if (qdev->func > 3)
4496 		return -EIO;
4497 
4498 	status = ql_get_alt_pcie_func(qdev);
4499 	if (status)
4500 		return status;
4501 
4502 	qdev->port = (qdev->func < qdev->alt_func) ? 0 : 1;
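	/* The lower-numbered of the two NIC functions is treated as port 0;
	 * the XGMAC semaphore, status bits and mailbox addresses selected
	 * below follow that choice.
	 */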
4503 	if (qdev->port) {
4504 		qdev->xg_sem_mask = SEM_XGMAC1_MASK;
4505 		qdev->port_link_up = STS_PL1;
4506 		qdev->port_init = STS_PI1;
4507 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBI;
4508 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC2_MBO;
4509 	} else {
4510 		qdev->xg_sem_mask = SEM_XGMAC0_MASK;
4511 		qdev->port_link_up = STS_PL0;
4512 		qdev->port_init = STS_PI0;
4513 		qdev->mailbox_in = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBI;
4514 		qdev->mailbox_out = PROC_ADDR_MPI_RISC | PROC_ADDR_FUNC0_MBO;
4515 	}
4516 	qdev->chip_rev_id = ql_read32(qdev, REV_ID);
4517 	qdev->device_id = qdev->pdev->device;
4518 	if (qdev->device_id == QLGE_DEVICE_ID_8012)
4519 		qdev->nic_ops = &qla8012_nic_ops;
4520 	else if (qdev->device_id == QLGE_DEVICE_ID_8000)
4521 		qdev->nic_ops = &qla8000_nic_ops;
4522 	return status;
4523 }
4524 
static void ql_release_all(struct pci_dev *pdev)
4526 {
4527 	struct net_device *ndev = pci_get_drvdata(pdev);
4528 	struct ql_adapter *qdev = netdev_priv(ndev);
4529 
4530 	if (qdev->workqueue) {
4531 		destroy_workqueue(qdev->workqueue);
4532 		qdev->workqueue = NULL;
4533 	}
4534 
4535 	if (qdev->reg_base)
4536 		iounmap(qdev->reg_base);
4537 	if (qdev->doorbell_area)
4538 		iounmap(qdev->doorbell_area);
4539 	vfree(qdev->mpi_coredump);
4540 	pci_release_regions(pdev);
4541 	pci_set_drvdata(pdev, NULL);
4542 }
4543 
static int __devinit ql_init_device(struct pci_dev *pdev,
				    struct net_device *ndev, int cards_found)
4546 {
4547 	struct ql_adapter *qdev = netdev_priv(ndev);
4548 	int err = 0;
4549 
4550 	memset((void *)qdev, 0, sizeof(*qdev));
4551 	err = pci_enable_device(pdev);
4552 	if (err) {
4553 		dev_err(&pdev->dev, "PCI device enable failed.\n");
4554 		return err;
4555 	}
4556 
4557 	qdev->ndev = ndev;
4558 	qdev->pdev = pdev;
4559 	pci_set_drvdata(pdev, ndev);
4560 
4561 	/* Set PCIe read request size */
4562 	err = pcie_set_readrq(pdev, 4096);
4563 	if (err) {
4564 		dev_err(&pdev->dev, "Set readrq failed.\n");
4565 		goto err_out1;
4566 	}
4567 
4568 	err = pci_request_regions(pdev, DRV_NAME);
4569 	if (err) {
4570 		dev_err(&pdev->dev, "PCI region request failed.\n");
4571 		return err;
4572 	}
4573 
4574 	pci_set_master(pdev);
4575 	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
4576 		set_bit(QL_DMA64, &qdev->flags);
4577 		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
4578 	} else {
4579 		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
4580 		if (!err)
4581 		       err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
4582 	}
4583 
4584 	if (err) {
4585 		dev_err(&pdev->dev, "No usable DMA configuration.\n");
4586 		goto err_out2;
4587 	}
4588 
4589 	/* Set PCIe reset type for EEH to fundamental. */
4590 	pdev->needs_freset = 1;
4591 	pci_save_state(pdev);
4592 	qdev->reg_base =
4593 	    ioremap_nocache(pci_resource_start(pdev, 1),
4594 			    pci_resource_len(pdev, 1));
4595 	if (!qdev->reg_base) {
4596 		dev_err(&pdev->dev, "Register mapping failed.\n");
4597 		err = -ENOMEM;
4598 		goto err_out2;
4599 	}
4600 
4601 	qdev->doorbell_area_size = pci_resource_len(pdev, 3);
4602 	qdev->doorbell_area =
4603 	    ioremap_nocache(pci_resource_start(pdev, 3),
4604 			    pci_resource_len(pdev, 3));
4605 	if (!qdev->doorbell_area) {
4606 		dev_err(&pdev->dev, "Doorbell register mapping failed.\n");
4607 		err = -ENOMEM;
4608 		goto err_out2;
4609 	}
4610 
4611 	err = ql_get_board_info(qdev);
4612 	if (err) {
4613 		dev_err(&pdev->dev, "Register access failed.\n");
4614 		err = -EIO;
4615 		goto err_out2;
4616 	}
4617 	qdev->msg_enable = netif_msg_init(debug, default_msg);
4618 	spin_lock_init(&qdev->hw_lock);
4619 	spin_lock_init(&qdev->stats_lock);
4620 
4621 	if (qlge_mpi_coredump) {
4622 		qdev->mpi_coredump =
4623 			vmalloc(sizeof(struct ql_mpi_coredump));
4624 		if (qdev->mpi_coredump == NULL) {
4625 			dev_err(&pdev->dev, "Coredump alloc failed.\n");
4626 			err = -ENOMEM;
4627 			goto err_out2;
4628 		}
4629 		if (qlge_force_coredump)
4630 			set_bit(QL_FRC_COREDUMP, &qdev->flags);
4631 	}
4632 	/* make sure the EEPROM is good */
4633 	err = qdev->nic_ops->get_flash(qdev);
4634 	if (err) {
4635 		dev_err(&pdev->dev, "Invalid FLASH.\n");
4636 		goto err_out2;
4637 	}
4638 
4639 	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
4640 	/* Keep local copy of current mac address. */
4641 	memcpy(qdev->current_mac_addr, ndev->dev_addr, ndev->addr_len);
4642 
4643 	/* Set up the default ring sizes. */
4644 	qdev->tx_ring_size = NUM_TX_RING_ENTRIES;
4645 	qdev->rx_ring_size = NUM_RX_RING_ENTRIES;
4646 
4647 	/* Set up the coalescing parameters. */
4648 	qdev->rx_coalesce_usecs = DFLT_COALESCE_WAIT;
4649 	qdev->tx_coalesce_usecs = DFLT_COALESCE_WAIT;
4650 	qdev->rx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4651 	qdev->tx_max_coalesced_frames = DFLT_INTER_FRAME_WAIT;
4652 
4653 	/*
4654 	 * Set up the operating parameters.
4655 	 */
4656 	qdev->workqueue = create_singlethread_workqueue(ndev->name);
4657 	INIT_DELAYED_WORK(&qdev->asic_reset_work, ql_asic_reset_work);
4658 	INIT_DELAYED_WORK(&qdev->mpi_reset_work, ql_mpi_reset_work);
4659 	INIT_DELAYED_WORK(&qdev->mpi_work, ql_mpi_work);
4660 	INIT_DELAYED_WORK(&qdev->mpi_port_cfg_work, ql_mpi_port_cfg_work);
4661 	INIT_DELAYED_WORK(&qdev->mpi_idc_work, ql_mpi_idc_work);
4662 	INIT_DELAYED_WORK(&qdev->mpi_core_to_log, ql_mpi_core_to_log);
4663 	init_completion(&qdev->ide_completion);
4664 	mutex_init(&qdev->mpi_mutex);
4665 
4666 	if (!cards_found) {
4667 		dev_info(&pdev->dev, "%s\n", DRV_STRING);
4668 		dev_info(&pdev->dev, "Driver name: %s, Version: %s.\n",
4669 			 DRV_NAME, DRV_VERSION);
4670 	}
4671 	return 0;
4672 err_out2:
4673 	ql_release_all(pdev);
4674 err_out1:
4675 	pci_disable_device(pdev);
4676 	return err;
4677 }
4678 
4679 static const struct net_device_ops qlge_netdev_ops = {
4680 	.ndo_open		= qlge_open,
4681 	.ndo_stop		= qlge_close,
4682 	.ndo_start_xmit		= qlge_send,
4683 	.ndo_change_mtu		= qlge_change_mtu,
4684 	.ndo_get_stats		= qlge_get_stats,
4685 	.ndo_set_rx_mode	= qlge_set_multicast_list,
4686 	.ndo_set_mac_address	= qlge_set_mac_address,
4687 	.ndo_validate_addr	= eth_validate_addr,
4688 	.ndo_tx_timeout		= qlge_tx_timeout,
4689 	.ndo_fix_features	= qlge_fix_features,
4690 	.ndo_set_features	= qlge_set_features,
4691 	.ndo_vlan_rx_add_vid	= qlge_vlan_rx_add_vid,
4692 	.ndo_vlan_rx_kill_vid	= qlge_vlan_rx_kill_vid,
4693 };
4694 
static void ql_timer(unsigned long data)
4696 {
4697 	struct ql_adapter *qdev = (struct ql_adapter *)data;
4698 	u32 var = 0;
4699 
4700 	var = ql_read32(qdev, STS);
4701 	if (pci_channel_offline(qdev->pdev)) {
4702 		netif_err(qdev, ifup, qdev->ndev, "EEH STS = 0x%.08x.\n", var);
4703 		return;
4704 	}
4705 
4706 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4707 }
4708 
static int __devinit qlge_probe(struct pci_dev *pdev,
				const struct pci_device_id *pci_entry)
4711 {
4712 	struct net_device *ndev = NULL;
4713 	struct ql_adapter *qdev = NULL;
4714 	static int cards_found = 0;
4715 	int err = 0;
4716 
4717 	ndev = alloc_etherdev_mq(sizeof(struct ql_adapter),
4718 			min(MAX_CPUS, (int)num_online_cpus()));
4719 	if (!ndev)
4720 		return -ENOMEM;
4721 
4722 	err = ql_init_device(pdev, ndev, cards_found);
4723 	if (err < 0) {
4724 		free_netdev(ndev);
4725 		return err;
4726 	}
4727 
4728 	qdev = netdev_priv(ndev);
4729 	SET_NETDEV_DEV(ndev, &pdev->dev);
4730 	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM |
4731 		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN |
4732 		NETIF_F_HW_VLAN_TX | NETIF_F_RXCSUM;
4733 	ndev->features = ndev->hw_features |
4734 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
4735 
4736 	if (test_bit(QL_DMA64, &qdev->flags))
4737 		ndev->features |= NETIF_F_HIGHDMA;
4738 
4739 	/*
4740 	 * Set up net_device structure.
4741 	 */
4742 	ndev->tx_queue_len = qdev->tx_ring_size;
4743 	ndev->irq = pdev->irq;
4744 
4745 	ndev->netdev_ops = &qlge_netdev_ops;
4746 	SET_ETHTOOL_OPS(ndev, &qlge_ethtool_ops);
4747 	ndev->watchdog_timeo = 10 * HZ;
4748 
4749 	err = register_netdev(ndev);
4750 	if (err) {
4751 		dev_err(&pdev->dev, "net device registration failed.\n");
4752 		ql_release_all(pdev);
4753 		pci_disable_device(pdev);
4754 		return err;
4755 	}
4756 	/* Start up the timer to trigger EEH if
4757 	 * the bus goes dead
4758 	 */
4759 	init_timer_deferrable(&qdev->timer);
4760 	qdev->timer.data = (unsigned long)qdev;
4761 	qdev->timer.function = ql_timer;
4762 	qdev->timer.expires = jiffies + (5*HZ);
4763 	add_timer(&qdev->timer);
4764 	ql_link_off(qdev);
4765 	ql_display_dev_info(ndev);
4766 	atomic_set(&qdev->lb_count, 0);
4767 	cards_found++;
4768 	return 0;
4769 }
4770 
netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4772 {
4773 	return qlge_send(skb, ndev);
4774 }
4775 
int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4777 {
4778 	return ql_clean_inbound_rx_ring(rx_ring, budget);
4779 }
4780 
static void __devexit qlge_remove(struct pci_dev *pdev)
4782 {
4783 	struct net_device *ndev = pci_get_drvdata(pdev);
4784 	struct ql_adapter *qdev = netdev_priv(ndev);
4785 	del_timer_sync(&qdev->timer);
4786 	ql_cancel_all_work_sync(qdev);
4787 	unregister_netdev(ndev);
4788 	ql_release_all(pdev);
4789 	pci_disable_device(pdev);
4790 	free_netdev(ndev);
4791 }
4792 
4793 /* Clean up resources without touching hardware. */
static void ql_eeh_close(struct net_device *ndev)
4795 {
4796 	int i;
4797 	struct ql_adapter *qdev = netdev_priv(ndev);
4798 
4799 	if (netif_carrier_ok(ndev)) {
4800 		netif_carrier_off(ndev);
4801 		netif_stop_queue(ndev);
4802 	}
4803 
4804 	/* Disabling the timer */
4805 	del_timer_sync(&qdev->timer);
4806 	ql_cancel_all_work_sync(qdev);
4807 
4808 	for (i = 0; i < qdev->rss_ring_count; i++)
4809 		netif_napi_del(&qdev->rx_ring[i].napi);
4810 
4811 	clear_bit(QL_ADAPTER_UP, &qdev->flags);
4812 	ql_tx_ring_clean(qdev);
4813 	ql_free_rx_buffers(qdev);
4814 	ql_release_adapter_resources(qdev);
4815 }
4816 
4817 /*
4818  * This callback is called by the PCI subsystem whenever
4819  * a PCI bus error is detected.
4820  */
static pci_ers_result_t qlge_io_error_detected(struct pci_dev *pdev,
					       enum pci_channel_state state)
4823 {
4824 	struct net_device *ndev = pci_get_drvdata(pdev);
4825 	struct ql_adapter *qdev = netdev_priv(ndev);
4826 
4827 	switch (state) {
4828 	case pci_channel_io_normal:
4829 		return PCI_ERS_RESULT_CAN_RECOVER;
4830 	case pci_channel_io_frozen:
4831 		netif_device_detach(ndev);
4832 		if (netif_running(ndev))
4833 			ql_eeh_close(ndev);
4834 		pci_disable_device(pdev);
4835 		return PCI_ERS_RESULT_NEED_RESET;
4836 	case pci_channel_io_perm_failure:
4837 		dev_err(&pdev->dev,
4838 			"%s: pci_channel_io_perm_failure.\n", __func__);
4839 		ql_eeh_close(ndev);
4840 		set_bit(QL_EEH_FATAL, &qdev->flags);
4841 		return PCI_ERS_RESULT_DISCONNECT;
4842 	}
4843 
4844 	/* Request a slot reset. */
4845 	return PCI_ERS_RESULT_NEED_RESET;
4846 }
4847 
4848 /*
 * This callback is called after the PCI bus has been reset.
4850  * Basically, this tries to restart the card from scratch.
4851  * This is a shortened version of the device probe/discovery code,
 * it resembles the first half of the qlge_probe() routine.
4853  */
static pci_ers_result_t qlge_io_slot_reset(struct pci_dev *pdev)
4855 {
4856 	struct net_device *ndev = pci_get_drvdata(pdev);
4857 	struct ql_adapter *qdev = netdev_priv(ndev);
4858 
4859 	pdev->error_state = pci_channel_io_normal;
4860 
4861 	pci_restore_state(pdev);
4862 	if (pci_enable_device(pdev)) {
4863 		netif_err(qdev, ifup, qdev->ndev,
4864 			  "Cannot re-enable PCI device after reset.\n");
4865 		return PCI_ERS_RESULT_DISCONNECT;
4866 	}
4867 	pci_set_master(pdev);
4868 
4869 	if (ql_adapter_reset(qdev)) {
4870 		netif_err(qdev, drv, qdev->ndev, "reset FAILED!\n");
4871 		set_bit(QL_EEH_FATAL, &qdev->flags);
4872 		return PCI_ERS_RESULT_DISCONNECT;
4873 	}
4874 
4875 	return PCI_ERS_RESULT_RECOVERED;
4876 }
4877 
static void qlge_io_resume(struct pci_dev *pdev)
4879 {
4880 	struct net_device *ndev = pci_get_drvdata(pdev);
4881 	struct ql_adapter *qdev = netdev_priv(ndev);
4882 	int err = 0;
4883 
4884 	if (netif_running(ndev)) {
4885 		err = qlge_open(ndev);
4886 		if (err) {
4887 			netif_err(qdev, ifup, qdev->ndev,
4888 				  "Device initialization failed after reset.\n");
4889 			return;
4890 		}
4891 	} else {
4892 		netif_err(qdev, ifup, qdev->ndev,
4893 			  "Device was not running prior to EEH.\n");
4894 	}
4895 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4896 	netif_device_attach(ndev);
4897 }
4898 
4899 static struct pci_error_handlers qlge_err_handler = {
4900 	.error_detected = qlge_io_error_detected,
4901 	.slot_reset = qlge_io_slot_reset,
4902 	.resume = qlge_io_resume,
4903 };
4904 
static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4906 {
4907 	struct net_device *ndev = pci_get_drvdata(pdev);
4908 	struct ql_adapter *qdev = netdev_priv(ndev);
4909 	int err;
4910 
4911 	netif_device_detach(ndev);
4912 	del_timer_sync(&qdev->timer);
4913 
4914 	if (netif_running(ndev)) {
4915 		err = ql_adapter_down(qdev);
		if (err)
4917 			return err;
4918 	}
4919 
4920 	ql_wol(qdev);
4921 	err = pci_save_state(pdev);
4922 	if (err)
4923 		return err;
4924 
4925 	pci_disable_device(pdev);
4926 
4927 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
4928 
4929 	return 0;
4930 }
4931 
4932 #ifdef CONFIG_PM
static int qlge_resume(struct pci_dev *pdev)
4934 {
4935 	struct net_device *ndev = pci_get_drvdata(pdev);
4936 	struct ql_adapter *qdev = netdev_priv(ndev);
4937 	int err;
4938 
4939 	pci_set_power_state(pdev, PCI_D0);
4940 	pci_restore_state(pdev);
4941 	err = pci_enable_device(pdev);
4942 	if (err) {
4943 		netif_err(qdev, ifup, qdev->ndev, "Cannot enable PCI device from suspend\n");
4944 		return err;
4945 	}
4946 	pci_set_master(pdev);
4947 
4948 	pci_enable_wake(pdev, PCI_D3hot, 0);
4949 	pci_enable_wake(pdev, PCI_D3cold, 0);
4950 
4951 	if (netif_running(ndev)) {
4952 		err = ql_adapter_up(qdev);
4953 		if (err)
4954 			return err;
4955 	}
4956 
4957 	mod_timer(&qdev->timer, jiffies + (5*HZ));
4958 	netif_device_attach(ndev);
4959 
4960 	return 0;
4961 }
4962 #endif /* CONFIG_PM */
4963 
static void qlge_shutdown(struct pci_dev *pdev)
4965 {
4966 	qlge_suspend(pdev, PMSG_SUSPEND);
4967 }
4968 
4969 static struct pci_driver qlge_driver = {
4970 	.name = DRV_NAME,
4971 	.id_table = qlge_pci_tbl,
4972 	.probe = qlge_probe,
4973 	.remove = __devexit_p(qlge_remove),
4974 #ifdef CONFIG_PM
4975 	.suspend = qlge_suspend,
4976 	.resume = qlge_resume,
4977 #endif
4978 	.shutdown = qlge_shutdown,
4979 	.err_handler = &qlge_err_handler
4980 };
4981 
static int __init qlge_init_module(void)
4983 {
4984 	return pci_register_driver(&qlge_driver);
4985 }
4986 
static void __exit qlge_exit(void)
4988 {
4989 	pci_unregister_driver(&qlge_driver);
4990 }
4991 
4992 module_init(qlge_init_module);
4993 module_exit(qlge_exit);
4994