1 /*
2  * This file is part of the Chelsio T4 Ethernet driver for Linux.
3  *
4  * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
5  *
6  * This software is available to you under a choice of one of two
7  * licenses.  You may choose to be licensed under the terms of the GNU
8  * General Public License (GPL) Version 2, available from the file
9  * COPYING in the main directory of this source tree, or the
10  * OpenIB.org BSD license below:
11  *
12  *     Redistribution and use in source and binary forms, with or
13  *     without modification, are permitted provided that the following
14  *     conditions are met:
15  *
16  *      - Redistributions of source code must retain the above
17  *        copyright notice, this list of conditions and the following
18  *        disclaimer.
19  *
20  *      - Redistributions in binary form must reproduce the above
21  *        copyright notice, this list of conditions and the following
22  *        disclaimer in the documentation and/or other materials
23  *        provided with the distribution.
24  *
25  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29  * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30  * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31  * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32  * SOFTWARE.
33  */
34 
35 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
36 
37 #include <linux/bitmap.h>
38 #include <linux/crc32.h>
39 #include <linux/ctype.h>
40 #include <linux/debugfs.h>
41 #include <linux/err.h>
42 #include <linux/etherdevice.h>
43 #include <linux/firmware.h>
44 #include <linux/if.h>
45 #include <linux/if_vlan.h>
46 #include <linux/init.h>
47 #include <linux/log2.h>
48 #include <linux/mdio.h>
49 #include <linux/module.h>
50 #include <linux/moduleparam.h>
51 #include <linux/mutex.h>
52 #include <linux/netdevice.h>
53 #include <linux/pci.h>
54 #include <linux/aer.h>
55 #include <linux/rtnetlink.h>
56 #include <linux/sched.h>
57 #include <linux/seq_file.h>
58 #include <linux/sockios.h>
59 #include <linux/vmalloc.h>
60 #include <linux/workqueue.h>
61 #include <net/neighbour.h>
62 #include <net/netevent.h>
63 #include <asm/uaccess.h>
64 
65 #include "cxgb4.h"
66 #include "t4_regs.h"
67 #include "t4_msg.h"
68 #include "t4fw_api.h"
69 #include "l2t.h"
70 
71 #define DRV_VERSION "1.3.0-ko"
72 #define DRV_DESC "Chelsio T4 Network Driver"
73 
74 /*
75  * Max interrupt hold-off timer value in us.  Queues fall back to this value
76  * under extreme memory pressure so it's largish to give the system time to
77  * recover.
78  */
79 #define MAX_SGE_TIMERVAL 200U
80 
81 #ifdef CONFIG_PCI_IOV
82 /*
83  * Virtual Function provisioning constants.  We need two extra Ingress Queues
84  * with Interrupt capability to serve as the VF's Firmware Event Queue and
85  * Forwarded Interrupt Queue (when using MSI mode) -- neither will have Free
86  * Lists associated with them.  For each Ethernet/Control Egress Queue and
87  * for each Free List, we need an Egress Context.
88  */
89 enum {
90 	VFRES_NPORTS = 1,		/* # of "ports" per VF */
91 	VFRES_NQSETS = 2,		/* # of "Queue Sets" per VF */
92 
93 	VFRES_NVI = VFRES_NPORTS,	/* # of Virtual Interfaces */
94 	VFRES_NETHCTRL = VFRES_NQSETS,	/* # of EQs used for ETH or CTRL Qs */
95 	VFRES_NIQFLINT = VFRES_NQSETS+2,/* # of ingress Qs w/ Free List(s)/intr */
96 	VFRES_NIQ = 0,			/* # of non-fl/int ingress queues */
97 	VFRES_NEQ = VFRES_NQSETS*2,	/* # of egress queues */
98 	VFRES_TC = 0,			/* PCI-E traffic class */
99 	VFRES_NEXACTF = 16,		/* # of exact MPS filters */
100 
101 	VFRES_R_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF|FW_CMD_CAP_PORT,
102 	VFRES_WX_CAPS = FW_CMD_CAP_DMAQ|FW_CMD_CAP_VF,
103 };
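
/*
 * Illustrative arithmetic only (derived from the values above): with
 * VFRES_NQSETS = 2, each VF is provisioned VFRES_NIQFLINT = 4
 * interrupt-capable ingress queues (its 2 queue sets plus the Firmware
 * Event Queue and the Forwarded Interrupt Queue) and VFRES_NEQ = 4 egress
 * contexts (one per Ethernet/Control Egress Queue plus one per Free List).
 */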
104 
105 /*
106  * Provide a Port Access Rights Mask for the specified PF/VF.  This is very
107  * static and likely not to be useful in the long run.  We really need to
108  * implement some form of persistent configuration which the firmware
109  * controls.
110  */
111 static unsigned int pfvfres_pmask(struct adapter *adapter,
112 				  unsigned int pf, unsigned int vf)
113 {
114 	unsigned int portn, portvec;
115 
116 	/*
117 	 * Give PF's access to all of the ports.
118 	 */
119 	if (vf == 0)
120 		return FW_PFVF_CMD_PMASK_MASK;
121 
122 	/*
123 	 * For VFs, we'll assign them access to the ports based purely on the
124 	 * PF.  We assign active ports in order, wrapping around if there are
125 	 * fewer active ports than PFs: e.g. active port[pf % nports].
126 	 * Unfortunately the adapter's port_info structs haven't been
127 	 * initialized yet so we have to compute this.
128 	 */
129 	if (adapter->params.nports == 0)
130 		return 0;
131 
132 	portn = pf % adapter->params.nports;
133 	portvec = adapter->params.portvec;
134 	for (;;) {
135 		/*
136 		 * Isolate the lowest set bit in the port vector.  If we're at
137 		 * the port number that we want, return that as the pmask;
138 		 * otherwise mask that bit out of the port vector and
139 		 * decrement our port number ...
140 		 */
141 		unsigned int pmask = portvec ^ (portvec & (portvec-1));
142 		if (portn == 0)
143 			return pmask;
144 		portn--;
145 		portvec &= ~pmask;
146 	}
147 	/*NOTREACHED*/
148 }
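
/*
 * Worked example (illustrative values, not from this file): with
 * portvec = 0x5 (ports 0 and 2 active, so nports = 2), VFs belonging to
 * PF0 get pmask 0x1, PF1's VFs get pmask 0x4, PF2's VFs wrap back to 0x1,
 * and so on for pf % nports.
 */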
149 #endif
150 
151 enum {
152 	MEMWIN0_APERTURE = 65536,
153 	MEMWIN0_BASE     = 0x30000,
154 	MEMWIN1_APERTURE = 32768,
155 	MEMWIN1_BASE     = 0x28000,
156 	MEMWIN2_APERTURE = 2048,
157 	MEMWIN2_BASE     = 0x1b800,
158 };
159 
160 enum {
161 	MAX_TXQ_ENTRIES      = 16384,
162 	MAX_CTRL_TXQ_ENTRIES = 1024,
163 	MAX_RSPQ_ENTRIES     = 16384,
164 	MAX_RX_BUFFERS       = 16384,
165 	MIN_TXQ_ENTRIES      = 32,
166 	MIN_CTRL_TXQ_ENTRIES = 32,
167 	MIN_RSPQ_ENTRIES     = 128,
168 	MIN_FL_ENTRIES       = 16
169 };
170 
171 #define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
172 			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
173 			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
174 
175 #define CH_DEVICE(devid, data) { PCI_VDEVICE(CHELSIO, devid), (data) }
176 
177 static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
178 	CH_DEVICE(0xa000, 0),  /* PE10K */
179 	CH_DEVICE(0x4001, -1),
180 	CH_DEVICE(0x4002, -1),
181 	CH_DEVICE(0x4003, -1),
182 	CH_DEVICE(0x4004, -1),
183 	CH_DEVICE(0x4005, -1),
184 	CH_DEVICE(0x4006, -1),
185 	CH_DEVICE(0x4007, -1),
186 	CH_DEVICE(0x4008, -1),
187 	CH_DEVICE(0x4009, -1),
188 	CH_DEVICE(0x400a, -1),
189 	CH_DEVICE(0x4401, 4),
190 	CH_DEVICE(0x4402, 4),
191 	CH_DEVICE(0x4403, 4),
192 	CH_DEVICE(0x4404, 4),
193 	CH_DEVICE(0x4405, 4),
194 	CH_DEVICE(0x4406, 4),
195 	CH_DEVICE(0x4407, 4),
196 	CH_DEVICE(0x4408, 4),
197 	CH_DEVICE(0x4409, 4),
198 	CH_DEVICE(0x440a, 4),
199 	CH_DEVICE(0x440d, 4),
200 	CH_DEVICE(0x440e, 4),
201 	{ 0, }
202 };
203 
204 #define FW_FNAME "cxgb4/t4fw.bin"
205 
206 MODULE_DESCRIPTION(DRV_DESC);
207 MODULE_AUTHOR("Chelsio Communications");
208 MODULE_LICENSE("Dual BSD/GPL");
209 MODULE_VERSION(DRV_VERSION);
210 MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
211 MODULE_FIRMWARE(FW_FNAME);
212 
213 static int dflt_msg_enable = DFLT_MSG_ENABLE;
214 
215 module_param(dflt_msg_enable, int, 0644);
216 MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
217 
218 /*
219  * The driver uses the best interrupt scheme available on a platform in the
220  * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
221  * of these schemes the driver may consider as follows:
222  *
223  * msi = 2: choose from among all three options
224  * msi = 1: only consider MSI and INTx interrupts
225  * msi = 0: force INTx interrupts
226  */
227 static int msi = 2;
228 
229 module_param(msi, int, 0644);
230 MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
231 
232 /*
233  * Queue interrupt hold-off timer values.  Queues default to the first of these
234  * upon creation.
235  */
236 static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
237 
238 module_param_array(intr_holdoff, uint, NULL, 0644);
239 MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
240 		 "0..4 in microseconds");
241 
242 static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
243 
244 module_param_array(intr_cnt, uint, NULL, 0644);
245 MODULE_PARM_DESC(intr_cnt,
246 		 "thresholds 1..3 for queue interrupt packet counters");
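
/*
 * Usage example (standard module-parameter syntax, not specific to this
 * file): both arrays can be overridden at load time, e.g.
 *	modprobe cxgb4 intr_holdoff=5,10,20,50,100 intr_cnt=4,8,16
 * or adjusted afterwards through /sys/module/cxgb4/parameters/.
 */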
247 
248 static bool vf_acls;
249 
250 #ifdef CONFIG_PCI_IOV
251 module_param(vf_acls, bool, 0644);
252 MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
253 
254 static unsigned int num_vf[4];
255 
256 module_param_array(num_vf, uint, NULL, 0644);
257 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
258 #endif
259 
260 static struct dentry *cxgb4_debugfs_root;
261 
262 static LIST_HEAD(adapter_list);
263 static DEFINE_MUTEX(uld_mutex);
264 static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
265 static const char *uld_str[] = { "RDMA", "iSCSI" };
266 
267 static void link_report(struct net_device *dev)
268 {
269 	if (!netif_carrier_ok(dev))
270 		netdev_info(dev, "link down\n");
271 	else {
272 		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
273 
274 		const char *s = "10Mbps";
275 		const struct port_info *p = netdev_priv(dev);
276 
277 		switch (p->link_cfg.speed) {
278 		case SPEED_10000:
279 			s = "10Gbps";
280 			break;
281 		case SPEED_1000:
282 			s = "1000Mbps";
283 			break;
284 		case SPEED_100:
285 			s = "100Mbps";
286 			break;
287 		}
288 
289 		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
290 			    fc[p->link_cfg.fc]);
291 	}
292 }
293 
294 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
295 {
296 	struct net_device *dev = adapter->port[port_id];
297 
298 	/* Skip changes from disabled ports. */
299 	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
300 		if (link_stat)
301 			netif_carrier_on(dev);
302 		else
303 			netif_carrier_off(dev);
304 
305 		link_report(dev);
306 	}
307 }
308 
309 void t4_os_portmod_changed(const struct adapter *adap, int port_id)
310 {
311 	static const char *mod_str[] = {
312 		NULL, "LR", "SR", "ER", "passive DA", "active DA", "LRM"
313 	};
314 
315 	const struct net_device *dev = adap->port[port_id];
316 	const struct port_info *pi = netdev_priv(dev);
317 
318 	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
319 		netdev_info(dev, "port module unplugged\n");
320 	else if (pi->mod_type < ARRAY_SIZE(mod_str))
321 		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
322 }
323 
324 /*
325  * Configure the exact and hash address filters to handle a port's multicast
326  * and secondary unicast MAC addresses.
327  */
328 static int set_addr_filters(const struct net_device *dev, bool sleep)
329 {
330 	u64 mhash = 0;
331 	u64 uhash = 0;
332 	bool free = true;
333 	u16 filt_idx[7];
334 	const u8 *addr[7];
335 	int ret, naddr = 0;
336 	const struct netdev_hw_addr *ha;
337 	int uc_cnt = netdev_uc_count(dev);
338 	int mc_cnt = netdev_mc_count(dev);
339 	const struct port_info *pi = netdev_priv(dev);
340 	unsigned int mb = pi->adapter->fn;
341 
342 	/* first do the secondary unicast addresses */
343 	netdev_for_each_uc_addr(ha, dev) {
344 		addr[naddr++] = ha->addr;
345 		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
346 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
347 					naddr, addr, filt_idx, &uhash, sleep);
348 			if (ret < 0)
349 				return ret;
350 
351 			free = false;
352 			naddr = 0;
353 		}
354 	}
355 
356 	/* next set up the multicast addresses */
357 	netdev_for_each_mc_addr(ha, dev) {
358 		addr[naddr++] = ha->addr;
359 		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
360 			ret = t4_alloc_mac_filt(pi->adapter, mb, pi->viid, free,
361 					naddr, addr, filt_idx, &mhash, sleep);
362 			if (ret < 0)
363 				return ret;
364 
365 			free = false;
366 			naddr = 0;
367 		}
368 	}
369 
370 	return t4_set_addr_hash(pi->adapter, mb, pi->viid, uhash != 0,
371 				uhash | mhash, sleep);
372 }
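
/*
 * Behavioural note (summarising the calls above; the overflow-to-hash
 * behaviour is a property of t4_alloc_mac_filt(), which is not shown here):
 * addresses are handed to the hardware in batches of up to
 * ARRAY_SIZE(addr) = 7 exact-match filters per call, and addresses that no
 * longer fit in exact filters are reported back through the uhash/mhash
 * bitmaps, which are then programmed with t4_set_addr_hash().
 */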
373 
374 /*
375  * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
376  * If @mtu is -1 it is left unchanged.
377  */
378 static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
379 {
380 	int ret;
381 	struct port_info *pi = netdev_priv(dev);
382 
383 	ret = set_addr_filters(dev, sleep_ok);
384 	if (ret == 0)
385 		ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, mtu,
386 				    (dev->flags & IFF_PROMISC) ? 1 : 0,
387 				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1, -1,
388 				    sleep_ok);
389 	return ret;
390 }
391 
392 /**
393  *	link_start - enable a port
394  *	@dev: the port to enable
395  *
396  *	Performs the MAC and PHY actions needed to enable a port.
397  */
398 static int link_start(struct net_device *dev)
399 {
400 	int ret;
401 	struct port_info *pi = netdev_priv(dev);
402 	unsigned int mb = pi->adapter->fn;
403 
404 	/*
405 	 * We do not set address filters and promiscuity here; the stack does
406 	 * that step explicitly.
407 	 */
408 	ret = t4_set_rxmode(pi->adapter, mb, pi->viid, dev->mtu, -1, -1, -1,
409 			    !!(dev->features & NETIF_F_HW_VLAN_RX), true);
410 	if (ret == 0) {
411 		ret = t4_change_mac(pi->adapter, mb, pi->viid,
412 				    pi->xact_addr_filt, dev->dev_addr, true,
413 				    true);
414 		if (ret >= 0) {
415 			pi->xact_addr_filt = ret;
416 			ret = 0;
417 		}
418 	}
419 	if (ret == 0)
420 		ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
421 				    &pi->link_cfg);
422 	if (ret == 0)
423 		ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
424 	return ret;
425 }
426 
427 /*
428  * Response queue handler for the FW event queue.
429  */
430 static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
431 			  const struct pkt_gl *gl)
432 {
433 	u8 opcode = ((const struct rss_header *)rsp)->opcode;
434 
435 	rsp++;                                          /* skip RSS header */
436 	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
437 		const struct cpl_sge_egr_update *p = (void *)rsp;
438 		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
439 		struct sge_txq *txq;
440 
441 		txq = q->adap->sge.egr_map[qid - q->adap->sge.egr_start];
442 		txq->restarts++;
443 		if ((u8 *)txq < (u8 *)q->adap->sge.ofldtxq) {
444 			struct sge_eth_txq *eq;
445 
446 			eq = container_of(txq, struct sge_eth_txq, q);
447 			netif_tx_wake_queue(eq->txq);
448 		} else {
449 			struct sge_ofld_txq *oq;
450 
451 			oq = container_of(txq, struct sge_ofld_txq, q);
452 			tasklet_schedule(&oq->qresume_tsk);
453 		}
454 	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
455 		const struct cpl_fw6_msg *p = (void *)rsp;
456 
457 		if (p->type == 0)
458 			t4_handle_fw_rpl(q->adap, p->data);
459 	} else if (opcode == CPL_L2T_WRITE_RPL) {
460 		const struct cpl_l2t_write_rpl *p = (void *)rsp;
461 
462 		do_l2t_write_rpl(q->adap, p);
463 	} else
464 		dev_err(q->adap->pdev_dev,
465 			"unexpected CPL %#x on FW event queue\n", opcode);
466 	return 0;
467 }
468 
469 /**
470  *	uldrx_handler - response queue handler for ULD queues
471  *	@q: the response queue that received the packet
472  *	@rsp: the response queue descriptor holding the offload message
473  *	@gl: the gather list of packet fragments
474  *
475  *	Deliver an ingress offload packet to a ULD.  All processing is done by
476  *	the ULD; we just maintain statistics.
477  */
478 static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
479 			 const struct pkt_gl *gl)
480 {
481 	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
482 
483 	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
484 		rxq->stats.nomem++;
485 		return -1;
486 	}
487 	if (gl == NULL)
488 		rxq->stats.imm++;
489 	else if (gl == CXGB4_MSG_AN)
490 		rxq->stats.an++;
491 	else
492 		rxq->stats.pkts++;
493 	return 0;
494 }
495 
496 static void disable_msi(struct adapter *adapter)
497 {
498 	if (adapter->flags & USING_MSIX) {
499 		pci_disable_msix(adapter->pdev);
500 		adapter->flags &= ~USING_MSIX;
501 	} else if (adapter->flags & USING_MSI) {
502 		pci_disable_msi(adapter->pdev);
503 		adapter->flags &= ~USING_MSI;
504 	}
505 }
506 
507 /*
508  * Interrupt handler for non-data events used with MSI-X.
509  */
510 static irqreturn_t t4_nondata_intr(int irq, void *cookie)
511 {
512 	struct adapter *adap = cookie;
513 
514 	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
515 	if (v & PFSW) {
516 		adap->swintr = 1;
517 		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
518 	}
519 	t4_slow_intr_handler(adap);
520 	return IRQ_HANDLED;
521 }
522 
523 /*
524  * Name the MSI-X interrupts.
525  */
526 static void name_msix_vecs(struct adapter *adap)
527 {
528 	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc);
529 
530 	/* non-data interrupts */
531 	snprintf(adap->msix_info[0].desc, n, "%s", adap->port[0]->name);
532 
533 	/* FW events */
534 	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq",
535 		 adap->port[0]->name);
536 
537 	/* Ethernet queues */
538 	for_each_port(adap, j) {
539 		struct net_device *d = adap->port[j];
540 		const struct port_info *pi = netdev_priv(d);
541 
542 		for (i = 0; i < pi->nqsets; i++, msi_idx++)
543 			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
544 				 d->name, i);
545 	}
546 
547 	/* offload queues */
548 	for_each_ofldrxq(&adap->sge, i)
549 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-ofld%d",
550 			 adap->port[0]->name, i);
551 
552 	for_each_rdmarxq(&adap->sge, i)
553 		snprintf(adap->msix_info[msi_idx++].desc, n, "%s-rdma%d",
554 			 adap->port[0]->name, i);
555 }
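
/*
 * For illustration, if the first port is named "eth2" the vectors end up
 * labelled "eth2" (non-data), "eth2-FWeventq", then "<port>-Rx0",
 * "<port>-Rx1", ... using each port's own name, followed by
 * "eth2-ofld0"/"eth2-rdma0" style names for the offload and RDMA queues.
 */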
556 
557 static int request_msix_queue_irqs(struct adapter *adap)
558 {
559 	struct sge *s = &adap->sge;
560 	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
561 
562 	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
563 			  adap->msix_info[1].desc, &s->fw_evtq);
564 	if (err)
565 		return err;
566 
567 	for_each_ethrxq(s, ethqidx) {
568 		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
569 				  adap->msix_info[msi].desc,
570 				  &s->ethrxq[ethqidx].rspq);
571 		if (err)
572 			goto unwind;
573 		msi++;
574 	}
575 	for_each_ofldrxq(s, ofldqidx) {
576 		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
577 				  adap->msix_info[msi].desc,
578 				  &s->ofldrxq[ofldqidx].rspq);
579 		if (err)
580 			goto unwind;
581 		msi++;
582 	}
583 	for_each_rdmarxq(s, rdmaqidx) {
584 		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
585 				  adap->msix_info[msi].desc,
586 				  &s->rdmarxq[rdmaqidx].rspq);
587 		if (err)
588 			goto unwind;
589 		msi++;
590 	}
591 	return 0;
592 
593 unwind:
594 	while (--rdmaqidx >= 0)
595 		free_irq(adap->msix_info[--msi].vec,
596 			 &s->rdmarxq[rdmaqidx].rspq);
597 	while (--ofldqidx >= 0)
598 		free_irq(adap->msix_info[--msi].vec,
599 			 &s->ofldrxq[ofldqidx].rspq);
600 	while (--ethqidx >= 0)
601 		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
602 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
603 	return err;
604 }
605 
606 static void free_msix_queue_irqs(struct adapter *adap)
607 {
608 	int i, msi = 2;
609 	struct sge *s = &adap->sge;
610 
611 	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
612 	for_each_ethrxq(s, i)
613 		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
614 	for_each_ofldrxq(s, i)
615 		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
616 	for_each_rdmarxq(s, i)
617 		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
618 }
619 
620 /**
621  *	write_rss - write the RSS table for a given port
622  *	@pi: the port
623  *	@queues: array of queue indices for RSS
624  *
625  *	Sets up the portion of the HW RSS table for the port's VI to distribute
626  *	packets to the Rx queues in @queues.
627  */
628 static int write_rss(const struct port_info *pi, const u16 *queues)
629 {
630 	u16 *rss;
631 	int i, err;
632 	const struct sge_eth_rxq *q = &pi->adapter->sge.ethrxq[pi->first_qset];
633 
634 	rss = kmalloc(pi->rss_size * sizeof(u16), GFP_KERNEL);
635 	if (!rss)
636 		return -ENOMEM;
637 
638 	/* map the queue indices to queue ids */
639 	for (i = 0; i < pi->rss_size; i++, queues++)
640 		rss[i] = q[*queues].rspq.abs_id;
641 
642 	err = t4_config_rss_range(pi->adapter, pi->adapter->fn, pi->viid, 0,
643 				  pi->rss_size, rss, pi->rss_size);
644 	kfree(rss);
645 	return err;
646 }
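
/*
 * Example with hypothetical values: for a port with rss_size = 4 and an
 * indirection table of {0, 1, 0, 1}, rss[] receives the absolute queue IDs
 * of ethrxq[first_qset + 0] and ethrxq[first_qset + 1] in that pattern
 * before t4_config_rss_range() writes it to hardware.
 */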
647 
648 /**
649  *	setup_rss - configure RSS
650  *	@adap: the adapter
651  *
652  *	Sets up RSS for each port.
653  */
654 static int setup_rss(struct adapter *adap)
655 {
656 	int i, err;
657 
658 	for_each_port(adap, i) {
659 		const struct port_info *pi = adap2pinfo(adap, i);
660 
661 		err = write_rss(pi, pi->rss);
662 		if (err)
663 			return err;
664 	}
665 	return 0;
666 }
667 
668 /*
669  * Return the channel of the ingress queue with the given qid.
670  */
671 static unsigned int rxq_to_chan(const struct sge *p, unsigned int qid)
672 {
673 	qid -= p->ingr_start;
674 	return netdev2pinfo(p->ingr_map[qid]->netdev)->tx_chan;
675 }
676 
677 /*
678  * Wait until all NAPI handlers are descheduled.
679  */
680 static void quiesce_rx(struct adapter *adap)
681 {
682 	int i;
683 
684 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
685 		struct sge_rspq *q = adap->sge.ingr_map[i];
686 
687 		if (q && q->handler)
688 			napi_disable(&q->napi);
689 	}
690 }
691 
692 /*
693  * Enable NAPI scheduling and interrupt generation for all Rx queues.
694  */
695 static void enable_rx(struct adapter *adap)
696 {
697 	int i;
698 
699 	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
700 		struct sge_rspq *q = adap->sge.ingr_map[i];
701 
702 		if (!q)
703 			continue;
704 		if (q->handler)
705 			napi_enable(&q->napi);
706 		/* 0-increment GTS to start the timer and enable interrupts */
707 		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
708 			     SEINTARM(q->intr_params) |
709 			     INGRESSQID(q->cntxt_id));
710 	}
711 }
712 
713 /**
714  *	setup_sge_queues - configure SGE Tx/Rx/response queues
715  *	@adap: the adapter
716  *
717  *	Determines how many sets of SGE queues to use and initializes them.
718  *	We support multiple queue sets per port if we have MSI-X, otherwise
719  *	just one queue set per port.
720  */
721 static int setup_sge_queues(struct adapter *adap)
722 {
723 	int err, msi_idx, i, j;
724 	struct sge *s = &adap->sge;
725 
726 	bitmap_zero(s->starving_fl, MAX_EGRQ);
727 	bitmap_zero(s->txq_maperr, MAX_EGRQ);
728 
729 	if (adap->flags & USING_MSIX)
730 		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
731 	else {
732 		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
733 				       NULL, NULL);
734 		if (err)
735 			return err;
736 		msi_idx = -((int)s->intrq.abs_id + 1);
737 	}
738 
739 	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
740 			       msi_idx, NULL, fwevtq_handler);
741 	if (err) {
742 freeout:	t4_free_sge_resources(adap);
743 		return err;
744 	}
745 
746 	for_each_port(adap, i) {
747 		struct net_device *dev = adap->port[i];
748 		struct port_info *pi = netdev_priv(dev);
749 		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
750 		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
751 
752 		for (j = 0; j < pi->nqsets; j++, q++) {
753 			if (msi_idx > 0)
754 				msi_idx++;
755 			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
756 					       msi_idx, &q->fl,
757 					       t4_ethrx_handler);
758 			if (err)
759 				goto freeout;
760 			q->rspq.idx = j;
761 			memset(&q->stats, 0, sizeof(q->stats));
762 		}
763 		for (j = 0; j < pi->nqsets; j++, t++) {
764 			err = t4_sge_alloc_eth_txq(adap, t, dev,
765 					netdev_get_tx_queue(dev, j),
766 					s->fw_evtq.cntxt_id);
767 			if (err)
768 				goto freeout;
769 		}
770 	}
771 
772 	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
773 	for_each_ofldrxq(s, i) {
774 		struct sge_ofld_rxq *q = &s->ofldrxq[i];
775 		struct net_device *dev = adap->port[i / j];
776 
777 		if (msi_idx > 0)
778 			msi_idx++;
779 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
780 				       &q->fl, uldrx_handler);
781 		if (err)
782 			goto freeout;
783 		memset(&q->stats, 0, sizeof(q->stats));
784 		s->ofld_rxq[i] = q->rspq.abs_id;
785 		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
786 					    s->fw_evtq.cntxt_id);
787 		if (err)
788 			goto freeout;
789 	}
790 
791 	for_each_rdmarxq(s, i) {
792 		struct sge_ofld_rxq *q = &s->rdmarxq[i];
793 
794 		if (msi_idx > 0)
795 			msi_idx++;
796 		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
797 				       msi_idx, &q->fl, uldrx_handler);
798 		if (err)
799 			goto freeout;
800 		memset(&q->stats, 0, sizeof(q->stats));
801 		s->rdma_rxq[i] = q->rspq.abs_id;
802 	}
803 
804 	for_each_port(adap, i) {
805 		/*
806 		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
807 		 * have RDMA queues, and that's the right value.
808 		 */
809 		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
810 					    s->fw_evtq.cntxt_id,
811 					    s->rdmarxq[i].rspq.cntxt_id);
812 		if (err)
813 			goto freeout;
814 	}
815 
816 	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
817 		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
818 		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
819 	return 0;
820 }
821 
822 /*
823  * Returns 0 if new FW was successfully loaded, a positive errno if a load was
824  * started but failed, and a negative errno if flash load couldn't start.
825  */
826 static int upgrade_fw(struct adapter *adap)
827 {
828 	int ret;
829 	u32 vers;
830 	const struct fw_hdr *hdr;
831 	const struct firmware *fw;
832 	struct device *dev = adap->pdev_dev;
833 
834 	ret = request_firmware(&fw, FW_FNAME, dev);
835 	if (ret < 0) {
836 		dev_err(dev, "unable to load firmware image " FW_FNAME
837 			", error %d\n", ret);
838 		return ret;
839 	}
840 
841 	hdr = (const struct fw_hdr *)fw->data;
842 	vers = ntohl(hdr->fw_ver);
843 	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
844 		ret = -EINVAL;              /* wrong major version, won't do */
845 		goto out;
846 	}
847 
848 	/*
849 	 * If the flash FW is unusable or we found something newer, load it.
850 	 */
851 	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
852 	    vers > adap->params.fw_vers) {
853 		ret = -t4_load_fw(adap, fw->data, fw->size);
854 		if (!ret)
855 			dev_info(dev, "firmware upgraded to version %pI4 from "
856 				 FW_FNAME "\n", &hdr->fw_ver);
857 	}
858 out:	release_firmware(fw);
859 	return ret;
860 }
861 
862 /*
863  * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
864  * The allocated memory is cleared.
865  */
866 void *t4_alloc_mem(size_t size)
867 {
868 	void *p = kzalloc(size, GFP_KERNEL);
869 
870 	if (!p)
871 		p = vzalloc(size);
872 	return p;
873 }
874 
875 /*
876  * Free memory allocated through t4_alloc_mem().
877  */
878 static void t4_free_mem(void *addr)
879 {
880 	if (is_vmalloc_addr(addr))
881 		vfree(addr);
882 	else
883 		kfree(addr);
884 }
885 
886 static inline int is_offload(const struct adapter *adap)
887 {
888 	return adap->params.offload;
889 }
890 
891 /*
892  * Implementation of ethtool operations.
893  */
894 
895 static u32 get_msglevel(struct net_device *dev)
896 {
897 	return netdev2adap(dev)->msg_enable;
898 }
899 
900 static void set_msglevel(struct net_device *dev, u32 val)
901 {
902 	netdev2adap(dev)->msg_enable = val;
903 }
904 
905 static char stats_strings[][ETH_GSTRING_LEN] = {
906 	"TxOctetsOK         ",
907 	"TxFramesOK         ",
908 	"TxBroadcastFrames  ",
909 	"TxMulticastFrames  ",
910 	"TxUnicastFrames    ",
911 	"TxErrorFrames      ",
912 
913 	"TxFrames64         ",
914 	"TxFrames65To127    ",
915 	"TxFrames128To255   ",
916 	"TxFrames256To511   ",
917 	"TxFrames512To1023  ",
918 	"TxFrames1024To1518 ",
919 	"TxFrames1519ToMax  ",
920 
921 	"TxFramesDropped    ",
922 	"TxPauseFrames      ",
923 	"TxPPP0Frames       ",
924 	"TxPPP1Frames       ",
925 	"TxPPP2Frames       ",
926 	"TxPPP3Frames       ",
927 	"TxPPP4Frames       ",
928 	"TxPPP5Frames       ",
929 	"TxPPP6Frames       ",
930 	"TxPPP7Frames       ",
931 
932 	"RxOctetsOK         ",
933 	"RxFramesOK         ",
934 	"RxBroadcastFrames  ",
935 	"RxMulticastFrames  ",
936 	"RxUnicastFrames    ",
937 
938 	"RxFramesTooLong    ",
939 	"RxJabberErrors     ",
940 	"RxFCSErrors        ",
941 	"RxLengthErrors     ",
942 	"RxSymbolErrors     ",
943 	"RxRuntFrames       ",
944 
945 	"RxFrames64         ",
946 	"RxFrames65To127    ",
947 	"RxFrames128To255   ",
948 	"RxFrames256To511   ",
949 	"RxFrames512To1023  ",
950 	"RxFrames1024To1518 ",
951 	"RxFrames1519ToMax  ",
952 
953 	"RxPauseFrames      ",
954 	"RxPPP0Frames       ",
955 	"RxPPP1Frames       ",
956 	"RxPPP2Frames       ",
957 	"RxPPP3Frames       ",
958 	"RxPPP4Frames       ",
959 	"RxPPP5Frames       ",
960 	"RxPPP6Frames       ",
961 	"RxPPP7Frames       ",
962 
963 	"RxBG0FramesDropped ",
964 	"RxBG1FramesDropped ",
965 	"RxBG2FramesDropped ",
966 	"RxBG3FramesDropped ",
967 	"RxBG0FramesTrunc   ",
968 	"RxBG1FramesTrunc   ",
969 	"RxBG2FramesTrunc   ",
970 	"RxBG3FramesTrunc   ",
971 
972 	"TSO                ",
973 	"TxCsumOffload      ",
974 	"RxCsumGood         ",
975 	"VLANextractions    ",
976 	"VLANinsertions     ",
977 	"GROpackets         ",
978 	"GROmerged          ",
979 };
980 
981 static int get_sset_count(struct net_device *dev, int sset)
982 {
983 	switch (sset) {
984 	case ETH_SS_STATS:
985 		return ARRAY_SIZE(stats_strings);
986 	default:
987 		return -EOPNOTSUPP;
988 	}
989 }
990 
991 #define T4_REGMAP_SIZE (160 * 1024)
992 
993 static int get_regs_len(struct net_device *dev)
994 {
995 	return T4_REGMAP_SIZE;
996 }
997 
998 static int get_eeprom_len(struct net_device *dev)
999 {
1000 	return EEPROMSIZE;
1001 }
1002 
1003 static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1004 {
1005 	struct adapter *adapter = netdev2adap(dev);
1006 
1007 	strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
1008 	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
1009 	strlcpy(info->bus_info, pci_name(adapter->pdev),
1010 		sizeof(info->bus_info));
1011 
1012 	if (adapter->params.fw_vers)
1013 		snprintf(info->fw_version, sizeof(info->fw_version),
1014 			"%u.%u.%u.%u, TP %u.%u.%u.%u",
1015 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
1016 			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
1017 			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
1018 			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
1019 			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
1020 			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
1021 			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
1022 			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
1023 }
1024 
1025 static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
1026 {
1027 	if (stringset == ETH_SS_STATS)
1028 		memcpy(data, stats_strings, sizeof(stats_strings));
1029 }
1030 
1031 /*
1032  * port stats maintained per queue of the port.  They should be in the same
1033  * order as in stats_strings above.
1034  */
1035 struct queue_port_stats {
1036 	u64 tso;
1037 	u64 tx_csum;
1038 	u64 rx_csum;
1039 	u64 vlan_ex;
1040 	u64 vlan_ins;
1041 	u64 gro_pkts;
1042 	u64 gro_merged;
1043 };
1044 
1045 static void collect_sge_port_stats(const struct adapter *adap,
1046 		const struct port_info *p, struct queue_port_stats *s)
1047 {
1048 	int i;
1049 	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
1050 	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
1051 
1052 	memset(s, 0, sizeof(*s));
1053 	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
1054 		s->tso += tx->tso;
1055 		s->tx_csum += tx->tx_cso;
1056 		s->rx_csum += rx->stats.rx_cso;
1057 		s->vlan_ex += rx->stats.vlan_ex;
1058 		s->vlan_ins += tx->vlan_ins;
1059 		s->gro_pkts += rx->stats.lro_pkts;
1060 		s->gro_merged += rx->stats.lro_merged;
1061 	}
1062 }
1063 
1064 static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
1065 		      u64 *data)
1066 {
1067 	struct port_info *pi = netdev_priv(dev);
1068 	struct adapter *adapter = pi->adapter;
1069 
1070 	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
1071 
1072 	data += sizeof(struct port_stats) / sizeof(u64);
1073 	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
1074 }
1075 
1076 /*
1077  * Return a version number to identify the type of adapter.  The scheme is:
1078  * - bits 0..9: chip version
1079  * - bits 10..15: chip revision
1080  * - bits 16..23: register dump version
1081  */
1082 static inline unsigned int mk_adap_vers(const struct adapter *ap)
1083 {
1084 	return 4 | (ap->params.rev << 10) | (1 << 16);
1085 }
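
/*
 * Example: a T4 rev 1 adapter yields 4 | (1 << 10) | (1 << 16) = 0x10404,
 * which is the value reported as regs->version in get_regs() below.
 */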
1086 
1087 static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
1088 			   unsigned int end)
1089 {
1090 	u32 *p = buf + start;
1091 
1092 	for ( ; start <= end; start += sizeof(u32))
1093 		*p++ = t4_read_reg(ap, start);
1094 }
1095 
1096 static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
1097 		     void *buf)
1098 {
1099 	static const unsigned int reg_ranges[] = {
1100 		0x1008, 0x1108,
1101 		0x1180, 0x11b4,
1102 		0x11fc, 0x123c,
1103 		0x1300, 0x173c,
1104 		0x1800, 0x18fc,
1105 		0x3000, 0x30d8,
1106 		0x30e0, 0x5924,
1107 		0x5960, 0x59d4,
1108 		0x5a00, 0x5af8,
1109 		0x6000, 0x6098,
1110 		0x6100, 0x6150,
1111 		0x6200, 0x6208,
1112 		0x6240, 0x6248,
1113 		0x6280, 0x6338,
1114 		0x6370, 0x638c,
1115 		0x6400, 0x643c,
1116 		0x6500, 0x6524,
1117 		0x6a00, 0x6a38,
1118 		0x6a60, 0x6a78,
1119 		0x6b00, 0x6b84,
1120 		0x6bf0, 0x6c84,
1121 		0x6cf0, 0x6d84,
1122 		0x6df0, 0x6e84,
1123 		0x6ef0, 0x6f84,
1124 		0x6ff0, 0x7084,
1125 		0x70f0, 0x7184,
1126 		0x71f0, 0x7284,
1127 		0x72f0, 0x7384,
1128 		0x73f0, 0x7450,
1129 		0x7500, 0x7530,
1130 		0x7600, 0x761c,
1131 		0x7680, 0x76cc,
1132 		0x7700, 0x7798,
1133 		0x77c0, 0x77fc,
1134 		0x7900, 0x79fc,
1135 		0x7b00, 0x7c38,
1136 		0x7d00, 0x7efc,
1137 		0x8dc0, 0x8e1c,
1138 		0x8e30, 0x8e78,
1139 		0x8ea0, 0x8f6c,
1140 		0x8fc0, 0x9074,
1141 		0x90fc, 0x90fc,
1142 		0x9400, 0x9458,
1143 		0x9600, 0x96bc,
1144 		0x9800, 0x9808,
1145 		0x9820, 0x983c,
1146 		0x9850, 0x9864,
1147 		0x9c00, 0x9c6c,
1148 		0x9c80, 0x9cec,
1149 		0x9d00, 0x9d6c,
1150 		0x9d80, 0x9dec,
1151 		0x9e00, 0x9e6c,
1152 		0x9e80, 0x9eec,
1153 		0x9f00, 0x9f6c,
1154 		0x9f80, 0x9fec,
1155 		0xd004, 0xd03c,
1156 		0xdfc0, 0xdfe0,
1157 		0xe000, 0xea7c,
1158 		0xf000, 0x11190,
1159 		0x19040, 0x1906c,
1160 		0x19078, 0x19080,
1161 		0x1908c, 0x19124,
1162 		0x19150, 0x191b0,
1163 		0x191d0, 0x191e8,
1164 		0x19238, 0x1924c,
1165 		0x193f8, 0x19474,
1166 		0x19490, 0x194f8,
1167 		0x19800, 0x19f30,
1168 		0x1a000, 0x1a06c,
1169 		0x1a0b0, 0x1a120,
1170 		0x1a128, 0x1a138,
1171 		0x1a190, 0x1a1c4,
1172 		0x1a1fc, 0x1a1fc,
1173 		0x1e040, 0x1e04c,
1174 		0x1e284, 0x1e28c,
1175 		0x1e2c0, 0x1e2c0,
1176 		0x1e2e0, 0x1e2e0,
1177 		0x1e300, 0x1e384,
1178 		0x1e3c0, 0x1e3c8,
1179 		0x1e440, 0x1e44c,
1180 		0x1e684, 0x1e68c,
1181 		0x1e6c0, 0x1e6c0,
1182 		0x1e6e0, 0x1e6e0,
1183 		0x1e700, 0x1e784,
1184 		0x1e7c0, 0x1e7c8,
1185 		0x1e840, 0x1e84c,
1186 		0x1ea84, 0x1ea8c,
1187 		0x1eac0, 0x1eac0,
1188 		0x1eae0, 0x1eae0,
1189 		0x1eb00, 0x1eb84,
1190 		0x1ebc0, 0x1ebc8,
1191 		0x1ec40, 0x1ec4c,
1192 		0x1ee84, 0x1ee8c,
1193 		0x1eec0, 0x1eec0,
1194 		0x1eee0, 0x1eee0,
1195 		0x1ef00, 0x1ef84,
1196 		0x1efc0, 0x1efc8,
1197 		0x1f040, 0x1f04c,
1198 		0x1f284, 0x1f28c,
1199 		0x1f2c0, 0x1f2c0,
1200 		0x1f2e0, 0x1f2e0,
1201 		0x1f300, 0x1f384,
1202 		0x1f3c0, 0x1f3c8,
1203 		0x1f440, 0x1f44c,
1204 		0x1f684, 0x1f68c,
1205 		0x1f6c0, 0x1f6c0,
1206 		0x1f6e0, 0x1f6e0,
1207 		0x1f700, 0x1f784,
1208 		0x1f7c0, 0x1f7c8,
1209 		0x1f840, 0x1f84c,
1210 		0x1fa84, 0x1fa8c,
1211 		0x1fac0, 0x1fac0,
1212 		0x1fae0, 0x1fae0,
1213 		0x1fb00, 0x1fb84,
1214 		0x1fbc0, 0x1fbc8,
1215 		0x1fc40, 0x1fc4c,
1216 		0x1fe84, 0x1fe8c,
1217 		0x1fec0, 0x1fec0,
1218 		0x1fee0, 0x1fee0,
1219 		0x1ff00, 0x1ff84,
1220 		0x1ffc0, 0x1ffc8,
1221 		0x20000, 0x2002c,
1222 		0x20100, 0x2013c,
1223 		0x20190, 0x201c8,
1224 		0x20200, 0x20318,
1225 		0x20400, 0x20528,
1226 		0x20540, 0x20614,
1227 		0x21000, 0x21040,
1228 		0x2104c, 0x21060,
1229 		0x210c0, 0x210ec,
1230 		0x21200, 0x21268,
1231 		0x21270, 0x21284,
1232 		0x212fc, 0x21388,
1233 		0x21400, 0x21404,
1234 		0x21500, 0x21518,
1235 		0x2152c, 0x2153c,
1236 		0x21550, 0x21554,
1237 		0x21600, 0x21600,
1238 		0x21608, 0x21628,
1239 		0x21630, 0x2163c,
1240 		0x21700, 0x2171c,
1241 		0x21780, 0x2178c,
1242 		0x21800, 0x21c38,
1243 		0x21c80, 0x21d7c,
1244 		0x21e00, 0x21e04,
1245 		0x22000, 0x2202c,
1246 		0x22100, 0x2213c,
1247 		0x22190, 0x221c8,
1248 		0x22200, 0x22318,
1249 		0x22400, 0x22528,
1250 		0x22540, 0x22614,
1251 		0x23000, 0x23040,
1252 		0x2304c, 0x23060,
1253 		0x230c0, 0x230ec,
1254 		0x23200, 0x23268,
1255 		0x23270, 0x23284,
1256 		0x232fc, 0x23388,
1257 		0x23400, 0x23404,
1258 		0x23500, 0x23518,
1259 		0x2352c, 0x2353c,
1260 		0x23550, 0x23554,
1261 		0x23600, 0x23600,
1262 		0x23608, 0x23628,
1263 		0x23630, 0x2363c,
1264 		0x23700, 0x2371c,
1265 		0x23780, 0x2378c,
1266 		0x23800, 0x23c38,
1267 		0x23c80, 0x23d7c,
1268 		0x23e00, 0x23e04,
1269 		0x24000, 0x2402c,
1270 		0x24100, 0x2413c,
1271 		0x24190, 0x241c8,
1272 		0x24200, 0x24318,
1273 		0x24400, 0x24528,
1274 		0x24540, 0x24614,
1275 		0x25000, 0x25040,
1276 		0x2504c, 0x25060,
1277 		0x250c0, 0x250ec,
1278 		0x25200, 0x25268,
1279 		0x25270, 0x25284,
1280 		0x252fc, 0x25388,
1281 		0x25400, 0x25404,
1282 		0x25500, 0x25518,
1283 		0x2552c, 0x2553c,
1284 		0x25550, 0x25554,
1285 		0x25600, 0x25600,
1286 		0x25608, 0x25628,
1287 		0x25630, 0x2563c,
1288 		0x25700, 0x2571c,
1289 		0x25780, 0x2578c,
1290 		0x25800, 0x25c38,
1291 		0x25c80, 0x25d7c,
1292 		0x25e00, 0x25e04,
1293 		0x26000, 0x2602c,
1294 		0x26100, 0x2613c,
1295 		0x26190, 0x261c8,
1296 		0x26200, 0x26318,
1297 		0x26400, 0x26528,
1298 		0x26540, 0x26614,
1299 		0x27000, 0x27040,
1300 		0x2704c, 0x27060,
1301 		0x270c0, 0x270ec,
1302 		0x27200, 0x27268,
1303 		0x27270, 0x27284,
1304 		0x272fc, 0x27388,
1305 		0x27400, 0x27404,
1306 		0x27500, 0x27518,
1307 		0x2752c, 0x2753c,
1308 		0x27550, 0x27554,
1309 		0x27600, 0x27600,
1310 		0x27608, 0x27628,
1311 		0x27630, 0x2763c,
1312 		0x27700, 0x2771c,
1313 		0x27780, 0x2778c,
1314 		0x27800, 0x27c38,
1315 		0x27c80, 0x27d7c,
1316 		0x27e00, 0x27e04
1317 	};
1318 
1319 	int i;
1320 	struct adapter *ap = netdev2adap(dev);
1321 
1322 	regs->version = mk_adap_vers(ap);
1323 
1324 	memset(buf, 0, T4_REGMAP_SIZE);
1325 	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
1326 		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
1327 }
1328 
1329 static int restart_autoneg(struct net_device *dev)
1330 {
1331 	struct port_info *p = netdev_priv(dev);
1332 
1333 	if (!netif_running(dev))
1334 		return -EAGAIN;
1335 	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
1336 		return -EINVAL;
1337 	t4_restart_aneg(p->adapter, p->adapter->fn, p->tx_chan);
1338 	return 0;
1339 }
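
/*
 * Typically reached via "ethtool -r <iface>" (the nway_reset ethtool op);
 * it only succeeds while the interface is up and autonegotiation is enabled.
 */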
1340 
1341 static int identify_port(struct net_device *dev,
1342 			 enum ethtool_phys_id_state state)
1343 {
1344 	unsigned int val;
1345 	struct adapter *adap = netdev2adap(dev);
1346 
1347 	if (state == ETHTOOL_ID_ACTIVE)
1348 		val = 0xffff;
1349 	else if (state == ETHTOOL_ID_INACTIVE)
1350 		val = 0;
1351 	else
1352 		return -EINVAL;
1353 
1354 	return t4_identify_port(adap, adap->fn, netdev2pinfo(dev)->viid, val);
1355 }
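
/*
 * Typically exercised with "ethtool -p <iface>" (the set_phys_id ethtool
 * op): ACTIVE passes 0xffff to t4_identify_port() and INACTIVE passes 0.
 */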
1356 
1357 static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
1358 {
1359 	unsigned int v = 0;
1360 
1361 	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XFI ||
1362 	    type == FW_PORT_TYPE_BT_XAUI) {
1363 		v |= SUPPORTED_TP;
1364 		if (caps & FW_PORT_CAP_SPEED_100M)
1365 			v |= SUPPORTED_100baseT_Full;
1366 		if (caps & FW_PORT_CAP_SPEED_1G)
1367 			v |= SUPPORTED_1000baseT_Full;
1368 		if (caps & FW_PORT_CAP_SPEED_10G)
1369 			v |= SUPPORTED_10000baseT_Full;
1370 	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
1371 		v |= SUPPORTED_Backplane;
1372 		if (caps & FW_PORT_CAP_SPEED_1G)
1373 			v |= SUPPORTED_1000baseKX_Full;
1374 		if (caps & FW_PORT_CAP_SPEED_10G)
1375 			v |= SUPPORTED_10000baseKX4_Full;
1376 	} else if (type == FW_PORT_TYPE_KR)
1377 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
1378 	else if (type == FW_PORT_TYPE_BP_AP)
1379 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1380 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full;
1381 	else if (type == FW_PORT_TYPE_BP4_AP)
1382 		v |= SUPPORTED_Backplane | SUPPORTED_10000baseR_FEC |
1383 		     SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
1384 		     SUPPORTED_10000baseKX4_Full;
1385 	else if (type == FW_PORT_TYPE_FIBER_XFI ||
1386 		 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP)
1387 		v |= SUPPORTED_FIBRE;
1388 
1389 	if (caps & FW_PORT_CAP_ANEG)
1390 		v |= SUPPORTED_Autoneg;
1391 	return v;
1392 }
1393 
1394 static unsigned int to_fw_linkcaps(unsigned int caps)
1395 {
1396 	unsigned int v = 0;
1397 
1398 	if (caps & ADVERTISED_100baseT_Full)
1399 		v |= FW_PORT_CAP_SPEED_100M;
1400 	if (caps & ADVERTISED_1000baseT_Full)
1401 		v |= FW_PORT_CAP_SPEED_1G;
1402 	if (caps & ADVERTISED_10000baseT_Full)
1403 		v |= FW_PORT_CAP_SPEED_10G;
1404 	return v;
1405 }
1406 
1407 static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1408 {
1409 	const struct port_info *p = netdev_priv(dev);
1410 
1411 	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
1412 	    p->port_type == FW_PORT_TYPE_BT_XFI ||
1413 	    p->port_type == FW_PORT_TYPE_BT_XAUI)
1414 		cmd->port = PORT_TP;
1415 	else if (p->port_type == FW_PORT_TYPE_FIBER_XFI ||
1416 		 p->port_type == FW_PORT_TYPE_FIBER_XAUI)
1417 		cmd->port = PORT_FIBRE;
1418 	else if (p->port_type == FW_PORT_TYPE_SFP) {
1419 		if (p->mod_type == FW_PORT_MOD_TYPE_TWINAX_PASSIVE ||
1420 		    p->mod_type == FW_PORT_MOD_TYPE_TWINAX_ACTIVE)
1421 			cmd->port = PORT_DA;
1422 		else
1423 			cmd->port = PORT_FIBRE;
1424 	} else
1425 		cmd->port = PORT_OTHER;
1426 
1427 	if (p->mdio_addr >= 0) {
1428 		cmd->phy_address = p->mdio_addr;
1429 		cmd->transceiver = XCVR_EXTERNAL;
1430 		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
1431 			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
1432 	} else {
1433 		cmd->phy_address = 0;  /* not really, but no better option */
1434 		cmd->transceiver = XCVR_INTERNAL;
1435 		cmd->mdio_support = 0;
1436 	}
1437 
1438 	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
1439 	cmd->advertising = from_fw_linkcaps(p->port_type,
1440 					    p->link_cfg.advertising);
1441 	ethtool_cmd_speed_set(cmd,
1442 			      netif_carrier_ok(dev) ? p->link_cfg.speed : 0);
1443 	cmd->duplex = DUPLEX_FULL;
1444 	cmd->autoneg = p->link_cfg.autoneg;
1445 	cmd->maxtxpkt = 0;
1446 	cmd->maxrxpkt = 0;
1447 	return 0;
1448 }
1449 
1450 static unsigned int speed_to_caps(int speed)
1451 {
1452 	if (speed == SPEED_100)
1453 		return FW_PORT_CAP_SPEED_100M;
1454 	if (speed == SPEED_1000)
1455 		return FW_PORT_CAP_SPEED_1G;
1456 	if (speed == SPEED_10000)
1457 		return FW_PORT_CAP_SPEED_10G;
1458 	return 0;
1459 }
1460 
1461 static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1462 {
1463 	unsigned int cap;
1464 	struct port_info *p = netdev_priv(dev);
1465 	struct link_config *lc = &p->link_cfg;
1466 	u32 speed = ethtool_cmd_speed(cmd);
1467 
1468 	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
1469 		return -EINVAL;
1470 
1471 	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
1472 		/*
1473 		 * PHY offers a single speed.  See if that's what's
1474 		 * being requested.
1475 		 */
1476 		if (cmd->autoneg == AUTONEG_DISABLE &&
1477 		    (lc->supported & speed_to_caps(speed)))
1478 			return 0;
1479 		return -EINVAL;
1480 	}
1481 
1482 	if (cmd->autoneg == AUTONEG_DISABLE) {
1483 		cap = speed_to_caps(speed);
1484 
1485 		if (!(lc->supported & cap) || (speed == SPEED_1000) ||
1486 		    (speed == SPEED_10000))
1487 			return -EINVAL;
1488 		lc->requested_speed = cap;
1489 		lc->advertising = 0;
1490 	} else {
1491 		cap = to_fw_linkcaps(cmd->advertising);
1492 		if (!(lc->supported & cap))
1493 			return -EINVAL;
1494 		lc->requested_speed = 0;
1495 		lc->advertising = cap | FW_PORT_CAP_ANEG;
1496 	}
1497 	lc->autoneg = cmd->autoneg;
1498 
1499 	if (netif_running(dev))
1500 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1501 				     lc);
1502 	return 0;
1503 }
1504 
1505 static void get_pauseparam(struct net_device *dev,
1506 			   struct ethtool_pauseparam *epause)
1507 {
1508 	struct port_info *p = netdev_priv(dev);
1509 
1510 	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
1511 	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
1512 	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
1513 }
1514 
1515 static int set_pauseparam(struct net_device *dev,
1516 			  struct ethtool_pauseparam *epause)
1517 {
1518 	struct port_info *p = netdev_priv(dev);
1519 	struct link_config *lc = &p->link_cfg;
1520 
1521 	if (epause->autoneg == AUTONEG_DISABLE)
1522 		lc->requested_fc = 0;
1523 	else if (lc->supported & FW_PORT_CAP_ANEG)
1524 		lc->requested_fc = PAUSE_AUTONEG;
1525 	else
1526 		return -EINVAL;
1527 
1528 	if (epause->rx_pause)
1529 		lc->requested_fc |= PAUSE_RX;
1530 	if (epause->tx_pause)
1531 		lc->requested_fc |= PAUSE_TX;
1532 	if (netif_running(dev))
1533 		return t4_link_start(p->adapter, p->adapter->fn, p->tx_chan,
1534 				     lc);
1535 	return 0;
1536 }
1537 
1538 static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1539 {
1540 	const struct port_info *pi = netdev_priv(dev);
1541 	const struct sge *s = &pi->adapter->sge;
1542 
1543 	e->rx_max_pending = MAX_RX_BUFFERS;
1544 	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
1545 	e->rx_jumbo_max_pending = 0;
1546 	e->tx_max_pending = MAX_TXQ_ENTRIES;
1547 
1548 	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
1549 	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
1550 	e->rx_jumbo_pending = 0;
1551 	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
1552 }
1553 
1554 static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
1555 {
1556 	int i;
1557 	const struct port_info *pi = netdev_priv(dev);
1558 	struct adapter *adapter = pi->adapter;
1559 	struct sge *s = &adapter->sge;
1560 
1561 	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
1562 	    e->tx_pending > MAX_TXQ_ENTRIES ||
1563 	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
1564 	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
1565 	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
1566 		return -EINVAL;
1567 
1568 	if (adapter->flags & FULL_INIT_DONE)
1569 		return -EBUSY;
1570 
1571 	for (i = 0; i < pi->nqsets; ++i) {
1572 		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
1573 		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
1574 		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
1575 	}
1576 	return 0;
1577 }
1578 
1579 static int closest_timer(const struct sge *s, int time)
1580 {
1581 	int i, delta, match = 0, min_delta = INT_MAX;
1582 
1583 	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
1584 		delta = time - s->timer_val[i];
1585 		if (delta < 0)
1586 			delta = -delta;
1587 		if (delta < min_delta) {
1588 			min_delta = delta;
1589 			match = i;
1590 		}
1591 	}
1592 	return match;
1593 }
1594 
1595 static int closest_thres(const struct sge *s, int thres)
1596 {
1597 	int i, delta, match = 0, min_delta = INT_MAX;
1598 
1599 	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
1600 		delta = thres - s->counter_val[i];
1601 		if (delta < 0)
1602 			delta = -delta;
1603 		if (delta < min_delta) {
1604 			min_delta = delta;
1605 			match = i;
1606 		}
1607 	}
1608 	return match;
1609 }
1610 
1611 /*
1612  * Return a queue's interrupt hold-off time in us.  0 means no timer.
1613  */
1614 static unsigned int qtimer_val(const struct adapter *adap,
1615 			       const struct sge_rspq *q)
1616 {
1617 	unsigned int idx = q->intr_params >> 1;
1618 
1619 	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
1620 }
1621 
1622 /**
1623  *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
1624  *	@adap: the adapter
1625  *	@q: the Rx queue
1626  *	@us: the hold-off time in us, or 0 to disable timer
1627  *	@cnt: the hold-off packet count, or 0 to disable counter
1628  *
1629  *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
1630  *	one of the two needs to be enabled for the queue to generate interrupts.
1631  */
1632 static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
1633 			       unsigned int us, unsigned int cnt)
1634 {
1635 	if ((us | cnt) == 0)
1636 		cnt = 1;
1637 
1638 	if (cnt) {
1639 		int err;
1640 		u32 v, new_idx;
1641 
1642 		new_idx = closest_thres(&adap->sge, cnt);
1643 		if (q->desc && q->pktcnt_idx != new_idx) {
1644 			/* the queue has already been created, update it */
1645 			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
1646 			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
1647 			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
1648 			err = t4_set_params(adap, adap->fn, adap->fn, 0, 1, &v,
1649 					    &new_idx);
1650 			if (err)
1651 				return err;
1652 		}
1653 		q->pktcnt_idx = new_idx;
1654 	}
1655 
1656 	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
1657 	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
1658 	return 0;
1659 }
1660 
1661 static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1662 {
1663 	const struct port_info *pi = netdev_priv(dev);
1664 	struct adapter *adap = pi->adapter;
1665 
1666 	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
1667 			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
1668 }
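
/*
 * Usage example (standard ethtool syntax, device name hypothetical):
 *	ethtool -C eth2 rx-usecs 50 rx-frames 8
 * maps to rx_coalesce_usecs/rx_max_coalesced_frames above; both values are
 * rounded to the closest configured SGE timer/counter entry.
 */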
1669 
1670 static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
1671 {
1672 	const struct port_info *pi = netdev_priv(dev);
1673 	const struct adapter *adap = pi->adapter;
1674 	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
1675 
1676 	c->rx_coalesce_usecs = qtimer_val(adap, rq);
1677 	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
1678 		adap->sge.counter_val[rq->pktcnt_idx] : 0;
1679 	return 0;
1680 }
1681 
1682 /**
1683  *	eeprom_ptov - translate a physical EEPROM address to virtual
1684  *	@phys_addr: the physical EEPROM address
1685  *	@fn: the PCI function number
1686  *	@sz: size of function-specific area
1687  *
1688  *	Translate a physical EEPROM address to virtual.  The first 1K is
1689  *	accessed through virtual addresses starting at 31K, the rest is
1690  *	accessed through virtual addresses starting at 0.
1691  *
1692  *	The mapping is as follows:
1693  *	[0..1K) -> [31K..32K)
1694  *	[1K..1K+A) -> [31K-A..31K)
1695  *	[1K+A..ES) -> [0..ES-A-1K)
1696  *
1697  *	where A = @fn * @sz, and ES = EEPROM size.
1698  */
1699 static int eeprom_ptov(unsigned int phys_addr, unsigned int fn, unsigned int sz)
1700 {
1701 	fn *= sz;
1702 	if (phys_addr < 1024)
1703 		return phys_addr + (31 << 10);
1704 	if (phys_addr < 1024 + fn)
1705 		return 31744 - fn + phys_addr - 1024;
1706 	if (phys_addr < EEPROMSIZE)
1707 		return phys_addr - 1024 - fn;
1708 	return -EINVAL;
1709 }
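
/*
 * Worked example, assuming @sz = EEPROMPFSIZE = 1K and @fn = 1 (so A = 1K):
 * phys 0x000 -> 31K, phys 0x3ff -> 32K - 1, phys 0x400 -> 30K and
 * phys 0x800 -> 0, matching the mapping documented above.
 */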
1710 
1711 /*
1712  * The next two routines implement eeprom read/write from physical addresses.
1713  */
1714 static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
1715 {
1716 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1717 
1718 	if (vaddr >= 0)
1719 		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
1720 	return vaddr < 0 ? vaddr : 0;
1721 }
1722 
1723 static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
1724 {
1725 	int vaddr = eeprom_ptov(phys_addr, adap->fn, EEPROMPFSIZE);
1726 
1727 	if (vaddr >= 0)
1728 		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
1729 	return vaddr < 0 ? vaddr : 0;
1730 }
1731 
1732 #define EEPROM_MAGIC 0x38E2F10C
1733 
1734 static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
1735 		      u8 *data)
1736 {
1737 	int i, err = 0;
1738 	struct adapter *adapter = netdev2adap(dev);
1739 
1740 	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
1741 	if (!buf)
1742 		return -ENOMEM;
1743 
1744 	e->magic = EEPROM_MAGIC;
1745 	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
1746 		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
1747 
1748 	if (!err)
1749 		memcpy(data, buf + e->offset, e->len);
1750 	kfree(buf);
1751 	return err;
1752 }
1753 
1754 static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
1755 		      u8 *data)
1756 {
1757 	u8 *buf;
1758 	int err = 0;
1759 	u32 aligned_offset, aligned_len, *p;
1760 	struct adapter *adapter = netdev2adap(dev);
1761 
1762 	if (eeprom->magic != EEPROM_MAGIC)
1763 		return -EINVAL;
1764 
1765 	aligned_offset = eeprom->offset & ~3;
1766 	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
1767 
1768 	if (adapter->fn > 0) {
1769 		u32 start = 1024 + adapter->fn * EEPROMPFSIZE;
1770 
1771 		if (aligned_offset < start ||
1772 		    aligned_offset + aligned_len > start + EEPROMPFSIZE)
1773 			return -EPERM;
1774 	}
1775 
1776 	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
1777 		/*
1778 		 * RMW possibly needed for first or last words.
1779 		 */
1780 		buf = kmalloc(aligned_len, GFP_KERNEL);
1781 		if (!buf)
1782 			return -ENOMEM;
1783 		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
1784 		if (!err && aligned_len > 4)
1785 			err = eeprom_rd_phys(adapter,
1786 					     aligned_offset + aligned_len - 4,
1787 					     (u32 *)&buf[aligned_len - 4]);
1788 		if (err)
1789 			goto out;
1790 		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
1791 	} else
1792 		buf = data;
1793 
1794 	err = t4_seeprom_wp(adapter, false);
1795 	if (err)
1796 		goto out;
1797 
1798 	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
1799 		err = eeprom_wr_phys(adapter, aligned_offset, *p);
1800 		aligned_offset += 4;
1801 	}
1802 
1803 	if (!err)
1804 		err = t4_seeprom_wp(adapter, true);
1805 out:
1806 	if (buf != data)
1807 		kfree(buf);
1808 	return err;
1809 }
1810 
1811 static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
1812 {
1813 	int ret;
1814 	const struct firmware *fw;
1815 	struct adapter *adap = netdev2adap(netdev);
1816 
1817 	ef->data[sizeof(ef->data) - 1] = '\0';
1818 	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
1819 	if (ret < 0)
1820 		return ret;
1821 
1822 	ret = t4_load_fw(adap, fw->data, fw->size);
1823 	release_firmware(fw);
1824 	if (!ret)
1825 		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
1826 	return ret;
1827 }
1828 
1829 #define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
1830 #define BCAST_CRC 0xa0ccc1a6
1831 
1832 static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1833 {
1834 	wol->supported = WAKE_BCAST | WAKE_MAGIC;
1835 	wol->wolopts = netdev2adap(dev)->wol;
1836 	memset(&wol->sopass, 0, sizeof(wol->sopass));
1837 }
1838 
1839 static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1840 {
1841 	int err = 0;
1842 	struct port_info *pi = netdev_priv(dev);
1843 
1844 	if (wol->wolopts & ~WOL_SUPPORTED)
1845 		return -EINVAL;
1846 	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
1847 			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
1848 	if (wol->wolopts & WAKE_BCAST) {
1849 		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
1850 					~0ULL, 0, false);
1851 		if (!err)
1852 			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
1853 						~6ULL, ~0ULL, BCAST_CRC, true);
1854 	} else
1855 		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
1856 	return err;
1857 }
1858 
1859 static int cxgb_set_features(struct net_device *dev, netdev_features_t features)
1860 {
1861 	const struct port_info *pi = netdev_priv(dev);
1862 	netdev_features_t changed = dev->features ^ features;
1863 	int err;
1864 
1865 	if (!(changed & NETIF_F_HW_VLAN_RX))
1866 		return 0;
1867 
1868 	err = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, -1,
1869 			    -1, -1, -1,
1870 			    !!(features & NETIF_F_HW_VLAN_RX), true);
1871 	if (unlikely(err))
1872 		dev->features = features ^ NETIF_F_HW_VLAN_RX;
1873 	return err;
1874 }
1875 
1876 static u32 get_rss_table_size(struct net_device *dev)
1877 {
1878 	const struct port_info *pi = netdev_priv(dev);
1879 
1880 	return pi->rss_size;
1881 }
1882 
1883 static int get_rss_table(struct net_device *dev, u32 *p)
1884 {
1885 	const struct port_info *pi = netdev_priv(dev);
1886 	unsigned int n = pi->rss_size;
1887 
1888 	while (n--)
1889 		p[n] = pi->rss[n];
1890 	return 0;
1891 }
1892 
1893 static int set_rss_table(struct net_device *dev, const u32 *p)
1894 {
1895 	unsigned int i;
1896 	struct port_info *pi = netdev_priv(dev);
1897 
1898 	for (i = 0; i < pi->rss_size; i++)
1899 		pi->rss[i] = p[i];
1900 	if (pi->adapter->flags & FULL_INIT_DONE)
1901 		return write_rss(pi, pi->rss);
1902 	return 0;
1903 }
1904 
1905 static int get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
1906 		     u32 *rules)
1907 {
1908 	const struct port_info *pi = netdev_priv(dev);
1909 
1910 	switch (info->cmd) {
1911 	case ETHTOOL_GRXFH: {
1912 		unsigned int v = pi->rss_mode;
1913 
1914 		info->data = 0;
1915 		switch (info->flow_type) {
1916 		case TCP_V4_FLOW:
1917 			if (v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN)
1918 				info->data = RXH_IP_SRC | RXH_IP_DST |
1919 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1920 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1921 				info->data = RXH_IP_SRC | RXH_IP_DST;
1922 			break;
1923 		case UDP_V4_FLOW:
1924 			if ((v & FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN) &&
1925 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1926 				info->data = RXH_IP_SRC | RXH_IP_DST |
1927 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1928 			else if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1929 				info->data = RXH_IP_SRC | RXH_IP_DST;
1930 			break;
1931 		case SCTP_V4_FLOW:
1932 		case AH_ESP_V4_FLOW:
1933 		case IPV4_FLOW:
1934 			if (v & FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN)
1935 				info->data = RXH_IP_SRC | RXH_IP_DST;
1936 			break;
1937 		case TCP_V6_FLOW:
1938 			if (v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN)
1939 				info->data = RXH_IP_SRC | RXH_IP_DST |
1940 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1941 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1942 				info->data = RXH_IP_SRC | RXH_IP_DST;
1943 			break;
1944 		case UDP_V6_FLOW:
1945 			if ((v & FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN) &&
1946 			    (v & FW_RSS_VI_CONFIG_CMD_UDPEN))
1947 				info->data = RXH_IP_SRC | RXH_IP_DST |
1948 					     RXH_L4_B_0_1 | RXH_L4_B_2_3;
1949 			else if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1950 				info->data = RXH_IP_SRC | RXH_IP_DST;
1951 			break;
1952 		case SCTP_V6_FLOW:
1953 		case AH_ESP_V6_FLOW:
1954 		case IPV6_FLOW:
1955 			if (v & FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN)
1956 				info->data = RXH_IP_SRC | RXH_IP_DST;
1957 			break;
1958 		}
1959 		return 0;
1960 	}
1961 	case ETHTOOL_GRXRINGS:
1962 		info->data = pi->nqsets;
1963 		return 0;
1964 	}
1965 	return -EOPNOTSUPP;
1966 }
1967 
1968 static const struct ethtool_ops cxgb_ethtool_ops = {
1969 	.get_settings      = get_settings,
1970 	.set_settings      = set_settings,
1971 	.get_drvinfo       = get_drvinfo,
1972 	.get_msglevel      = get_msglevel,
1973 	.set_msglevel      = set_msglevel,
1974 	.get_ringparam     = get_sge_param,
1975 	.set_ringparam     = set_sge_param,
1976 	.get_coalesce      = get_coalesce,
1977 	.set_coalesce      = set_coalesce,
1978 	.get_eeprom_len    = get_eeprom_len,
1979 	.get_eeprom        = get_eeprom,
1980 	.set_eeprom        = set_eeprom,
1981 	.get_pauseparam    = get_pauseparam,
1982 	.set_pauseparam    = set_pauseparam,
1983 	.get_link          = ethtool_op_get_link,
1984 	.get_strings       = get_strings,
1985 	.set_phys_id       = identify_port,
1986 	.nway_reset        = restart_autoneg,
1987 	.get_sset_count    = get_sset_count,
1988 	.get_ethtool_stats = get_stats,
1989 	.get_regs_len      = get_regs_len,
1990 	.get_regs          = get_regs,
1991 	.get_wol           = get_wol,
1992 	.set_wol           = set_wol,
1993 	.get_rxnfc         = get_rxnfc,
1994 	.get_rxfh_indir_size = get_rss_table_size,
1995 	.get_rxfh_indir    = get_rss_table,
1996 	.set_rxfh_indir    = set_rss_table,
1997 	.flash_device      = set_flash,
1998 };
1999 
2000 /*
2001  * debugfs support
2002  */
2003 
2004 static int mem_open(struct inode *inode, struct file *file)
2005 {
2006 	file->private_data = inode->i_private;
2007 	return 0;
2008 }
2009 
2010 static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
2011 			loff_t *ppos)
2012 {
2013 	loff_t pos = *ppos;
2014 	loff_t avail = file->f_path.dentry->d_inode->i_size;
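	/*
	 * add_debugfs_mem() stores the adapter pointer with the memory type
	 * (MEM_EDC0, MEM_EDC1 or MEM_MC) encoded in the low two bits.
	 */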
2015 	unsigned int mem = (uintptr_t)file->private_data & 3;
2016 	struct adapter *adap = file->private_data - mem;
2017 
2018 	if (pos < 0)
2019 		return -EINVAL;
2020 	if (pos >= avail)
2021 		return 0;
2022 	if (count > avail - pos)
2023 		count = avail - pos;
2024 
2025 	while (count) {
2026 		size_t len;
2027 		int ret, ofst;
2028 		__be32 data[16];
2029 
2030 		if (mem == MEM_MC)
2031 			ret = t4_mc_read(adap, pos, data, NULL);
2032 		else
2033 			ret = t4_edc_read(adap, mem, pos, data, NULL);
2034 		if (ret)
2035 			return ret;
2036 
2037 		ofst = pos % sizeof(data);
2038 		len = min(count, sizeof(data) - ofst);
2039 		if (copy_to_user(buf, (u8 *)data + ofst, len))
2040 			return -EFAULT;
2041 
2042 		buf += len;
2043 		pos += len;
2044 		count -= len;
2045 	}
2046 	count = pos - *ppos;
2047 	*ppos = pos;
2048 	return count;
2049 }
2050 
2051 static const struct file_operations mem_debugfs_fops = {
2052 	.owner   = THIS_MODULE,
2053 	.open    = mem_open,
2054 	.read    = mem_read,
2055 	.llseek  = default_llseek,
2056 };
2057 
2058 static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
2059 				      unsigned int idx, unsigned int size_mb)
2060 {
2061 	struct dentry *de;
2062 
2063 	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
2064 				 (void *)adap + idx, &mem_debugfs_fops);
2065 	if (de && de->d_inode)
2066 		de->d_inode->i_size = size_mb << 20;
2067 }
2068 
2069 static int __devinit setup_debugfs(struct adapter *adap)
2070 {
2071 	int i;
2072 
2073 	if (IS_ERR_OR_NULL(adap->debugfs_root))
2074 		return -1;
2075 
2076 	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
2077 	if (i & EDRAM0_ENABLE)
2078 		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
2079 	if (i & EDRAM1_ENABLE)
2080 		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
2081 	if (i & EXT_MEM_ENABLE)
2082 		add_debugfs_mem(adap, "mc", MEM_MC,
2083 			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
2084 	if (adap->l2t)
2085 		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
2086 				    &t4_l2t_fops);
2087 	return 0;
2088 }
2089 
2090 /*
2091  * upper-layer driver support
2092  */
2093 
2094 /*
2095  * Allocate an active-open TID and set it to the supplied value.
2096  */
2097 int cxgb4_alloc_atid(struct tid_info *t, void *data)
2098 {
2099 	int atid = -1;
2100 
2101 	spin_lock_bh(&t->atid_lock);
2102 	if (t->afree) {
2103 		union aopen_entry *p = t->afree;
2104 
2105 		atid = p - t->atid_tab;
2106 		t->afree = p->next;
2107 		p->data = data;
2108 		t->atids_in_use++;
2109 	}
2110 	spin_unlock_bh(&t->atid_lock);
2111 	return atid;
2112 }
2113 EXPORT_SYMBOL(cxgb4_alloc_atid);
2114 
2115 /*
2116  * Release an active-open TID.
2117  */
2118 void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
2119 {
2120 	union aopen_entry *p = &t->atid_tab[atid];
2121 
2122 	spin_lock_bh(&t->atid_lock);
2123 	p->next = t->afree;
2124 	t->afree = p;
2125 	t->atids_in_use--;
2126 	spin_unlock_bh(&t->atid_lock);
2127 }
2128 EXPORT_SYMBOL(cxgb4_free_atid);
2129 
2130 /*
2131  * Allocate a server TID and set it to the supplied value.
2132  */
2133 int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
2134 {
2135 	int stid;
2136 
2137 	spin_lock_bh(&t->stid_lock);
2138 	if (family == PF_INET) {
2139 		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
2140 		if (stid < t->nstids)
2141 			__set_bit(stid, t->stid_bmap);
2142 		else
2143 			stid = -1;
2144 	} else {
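		/*
		 * Non-IPv4 (e.g. IPv6) servers take a block of 4 consecutive
		 * stids, i.e. an order-2 region of the stid bitmap.
		 */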
2145 		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
2146 		if (stid < 0)
2147 			stid = -1;
2148 	}
2149 	if (stid >= 0) {
2150 		t->stid_tab[stid].data = data;
2151 		stid += t->stid_base;
2152 		t->stids_in_use++;
2153 	}
2154 	spin_unlock_bh(&t->stid_lock);
2155 	return stid;
2156 }
2157 EXPORT_SYMBOL(cxgb4_alloc_stid);
2158 
2159 /*
2160  * Release a server TID.
2161  */
2162 void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
2163 {
2164 	stid -= t->stid_base;
2165 	spin_lock_bh(&t->stid_lock);
2166 	if (family == PF_INET)
2167 		__clear_bit(stid, t->stid_bmap);
2168 	else
2169 		bitmap_release_region(t->stid_bmap, stid, 2);
2170 	t->stid_tab[stid].data = NULL;
2171 	t->stids_in_use--;
2172 	spin_unlock_bh(&t->stid_lock);
2173 }
2174 EXPORT_SYMBOL(cxgb4_free_stid);
2175 
2176 /*
2177  * Populate a TID_RELEASE WR.  Caller must properly size the skb.
2178  */
2179 static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
2180 			   unsigned int tid)
2181 {
2182 	struct cpl_tid_release *req;
2183 
2184 	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
2185 	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
2186 	INIT_TP_WR(req, tid);
2187 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
2188 }
2189 
2190 /*
2191  * Queue a TID release request and if necessary schedule a work queue to
2192  * process it.
2193  */
2194 static void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
2195 				    unsigned int tid)
2196 {
2197 	void **p = &t->tid_tab[tid];
2198 	struct adapter *adap = container_of(t, struct adapter, tids);
2199 
2200 	spin_lock_bh(&adap->tid_release_lock);
2201 	*p = adap->tid_release_head;
2202 	/* Low 2 bits encode the Tx channel number */
2203 	adap->tid_release_head = (void **)((uintptr_t)p | chan);
2204 	if (!adap->tid_release_task_busy) {
2205 		adap->tid_release_task_busy = true;
2206 		schedule_work(&adap->tid_release_task);
2207 	}
2208 	spin_unlock_bh(&adap->tid_release_lock);
2209 }
2210 
2211 /*
2212  * Process the list of pending TID release requests.
2213  */
2214 static void process_tid_release_list(struct work_struct *work)
2215 {
2216 	struct sk_buff *skb;
2217 	struct adapter *adap;
2218 
2219 	adap = container_of(work, struct adapter, tid_release_task);
2220 
2221 	spin_lock_bh(&adap->tid_release_lock);
2222 	while (adap->tid_release_head) {
2223 		void **p = adap->tid_release_head;
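		/*
		 * The low 2 bits of the head pointer carry the Tx channel;
		 * strip them to recover the original tid_tab slot pointer.
		 */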
2224 		unsigned int chan = (uintptr_t)p & 3;
2225 		p = (void *)p - chan;
2226 
2227 		adap->tid_release_head = *p;
2228 		*p = NULL;
2229 		spin_unlock_bh(&adap->tid_release_lock);
2230 
2231 		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
2232 					 GFP_KERNEL)))
2233 			schedule_timeout_uninterruptible(1);
2234 
2235 		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
2236 		t4_ofld_send(adap, skb);
2237 		spin_lock_bh(&adap->tid_release_lock);
2238 	}
2239 	adap->tid_release_task_busy = false;
2240 	spin_unlock_bh(&adap->tid_release_lock);
2241 }
2242 
2243 /*
2244  * Release a TID and inform HW.  If we are unable to allocate the release
2245  * message we defer to a work queue.
2246  */
2247 void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
2248 {
2249 	void *old;
2250 	struct sk_buff *skb;
2251 	struct adapter *adap = container_of(t, struct adapter, tids);
2252 
2253 	old = t->tid_tab[tid];
2254 	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
2255 	if (likely(skb)) {
2256 		t->tid_tab[tid] = NULL;
2257 		mk_tid_release(skb, chan, tid);
2258 		t4_ofld_send(adap, skb);
2259 	} else
2260 		cxgb4_queue_tid_release(t, chan, tid);
2261 	if (old)
2262 		atomic_dec(&t->tids_in_use);
2263 }
2264 EXPORT_SYMBOL(cxgb4_remove_tid);
2265 
2266 /*
2267  * Allocate and initialize the TID tables.  Returns 0 on success.
2268  */
2269 static int tid_init(struct tid_info *t)
2270 {
2271 	size_t size;
2272 	unsigned int natids = t->natids;
2273 
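	/*
	 * tid_tab, atid_tab, stid_tab and the stid bitmap are all carved out
	 * of the single contiguous allocation sized here.
	 */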
2274 	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
2275 	       t->nstids * sizeof(*t->stid_tab) +
2276 	       BITS_TO_LONGS(t->nstids) * sizeof(long);
2277 	t->tid_tab = t4_alloc_mem(size);
2278 	if (!t->tid_tab)
2279 		return -ENOMEM;
2280 
2281 	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
2282 	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
2283 	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
2284 	spin_lock_init(&t->stid_lock);
2285 	spin_lock_init(&t->atid_lock);
2286 
2287 	t->stids_in_use = 0;
2288 	t->afree = NULL;
2289 	t->atids_in_use = 0;
2290 	atomic_set(&t->tids_in_use, 0);
2291 
2292 	/* Setup the free list for atid_tab and clear the stid bitmap. */
2293 	if (natids) {
2294 		while (--natids)
2295 			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
2296 		t->afree = t->atid_tab;
2297 	}
2298 	bitmap_zero(t->stid_bmap, t->nstids);
2299 	return 0;
2300 }
2301 
2302 /**
2303  *	cxgb4_create_server - create an IP server
2304  *	@dev: the device
2305  *	@stid: the server TID
2306  *	@sip: local IP address to bind server to
2307  *	@sport: the server's TCP port
2308  *	@queue: queue to direct messages from this server to
2309  *
2310  *	Create an IP server for the given port and address.
2311  *	Returns <0 on error and one of the %NET_XMIT_* values on success.
2312  */
2313 int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
2314 			__be32 sip, __be16 sport, unsigned int queue)
2315 {
2316 	unsigned int chan;
2317 	struct sk_buff *skb;
2318 	struct adapter *adap;
2319 	struct cpl_pass_open_req *req;
2320 
2321 	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
2322 	if (!skb)
2323 		return -ENOMEM;
2324 
2325 	adap = netdev2adap(dev);
2326 	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
2327 	INIT_TP_WR(req, 0);
2328 	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
2329 	req->local_port = sport;
2330 	req->peer_port = htons(0);
2331 	req->local_ip = sip;
2332 	req->peer_ip = htonl(0);
2333 	chan = rxq_to_chan(&adap->sge, queue);
2334 	req->opt0 = cpu_to_be64(TX_CHAN(chan));
2335 	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
2336 				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
2337 	return t4_mgmt_tx(adap, skb);
2338 }
2339 EXPORT_SYMBOL(cxgb4_create_server);
2340 
2341 /**
2342  *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
2343  *	@mtus: the HW MTU table
2344  *	@mtu: the target MTU
2345  *	@idx: index of selected entry in the MTU table
2346  *
2347  *	Returns the index and the value in the HW MTU table that is closest to
2348  *	but does not exceed @mtu, unless @mtu is smaller than any value in the
2349  *	table, in which case that smallest available value is selected.
2350  */
2351 unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
2352 			    unsigned int *idx)
2353 {
2354 	unsigned int i = 0;
2355 
2356 	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
2357 		++i;
2358 	if (idx)
2359 		*idx = i;
2360 	return mtus[i];
2361 }
2362 EXPORT_SYMBOL(cxgb4_best_mtu);
2363 
2364 /**
2365  *	cxgb4_port_chan - get the HW channel of a port
2366  *	@dev: the net device for the port
2367  *
2368  *	Return the HW Tx channel of the given port.
2369  */
2370 unsigned int cxgb4_port_chan(const struct net_device *dev)
2371 {
2372 	return netdev2pinfo(dev)->tx_chan;
2373 }
2374 EXPORT_SYMBOL(cxgb4_port_chan);
2375 
2376 /**
2377  *	cxgb4_port_viid - get the VI id of a port
2378  *	@dev: the net device for the port
2379  *
2380  *	Return the VI id of the given port.
2381  */
2382 unsigned int cxgb4_port_viid(const struct net_device *dev)
2383 {
2384 	return netdev2pinfo(dev)->viid;
2385 }
2386 EXPORT_SYMBOL(cxgb4_port_viid);
2387 
2388 /**
2389  *	cxgb4_port_idx - get the index of a port
2390  *	@dev: the net device for the port
2391  *
2392  *	Return the index of the given port.
2393  */
2394 unsigned int cxgb4_port_idx(const struct net_device *dev)
2395 {
2396 	return netdev2pinfo(dev)->port_id;
2397 }
2398 EXPORT_SYMBOL(cxgb4_port_idx);
2399 
2400 void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
2401 			 struct tp_tcp_stats *v6)
2402 {
2403 	struct adapter *adap = pci_get_drvdata(pdev);
2404 
2405 	spin_lock(&adap->stats_lock);
2406 	t4_tp_get_tcp_stats(adap, v4, v6);
2407 	spin_unlock(&adap->stats_lock);
2408 }
2409 EXPORT_SYMBOL(cxgb4_get_tcp_stats);
2410 
2411 void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
2412 		      const unsigned int *pgsz_order)
2413 {
2414 	struct adapter *adap = netdev2adap(dev);
2415 
2416 	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
2417 	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
2418 		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
2419 		     HPZ3(pgsz_order[3]));
2420 }
2421 EXPORT_SYMBOL(cxgb4_iscsi_init);
2422 
2423 static struct pci_driver cxgb4_driver;
2424 
2425 static void check_neigh_update(struct neighbour *neigh)
2426 {
2427 	const struct device *parent;
2428 	const struct net_device *netdev = neigh->dev;
2429 
2430 	if (netdev->priv_flags & IFF_802_1Q_VLAN)
2431 		netdev = vlan_dev_real_dev(netdev);
2432 	parent = netdev->dev.parent;
2433 	if (parent && parent->driver == &cxgb4_driver.driver)
2434 		t4_l2t_update(dev_get_drvdata(parent), neigh);
2435 }
2436 
2437 static int netevent_cb(struct notifier_block *nb, unsigned long event,
2438 		       void *data)
2439 {
2440 	switch (event) {
2441 	case NETEVENT_NEIGH_UPDATE:
2442 		check_neigh_update(data);
2443 		break;
2444 	case NETEVENT_REDIRECT:
2445 	default:
2446 		break;
2447 	}
2448 	return 0;
2449 }
2450 
2451 static bool netevent_registered;
2452 static struct notifier_block cxgb4_netevent_nb = {
2453 	.notifier_call = netevent_cb
2454 };
2455 
2456 static void uld_attach(struct adapter *adap, unsigned int uld)
2457 {
2458 	void *handle;
2459 	struct cxgb4_lld_info lli;
2460 
2461 	lli.pdev = adap->pdev;
2462 	lli.l2t = adap->l2t;
2463 	lli.tids = &adap->tids;
2464 	lli.ports = adap->port;
2465 	lli.vr = &adap->vres;
2466 	lli.mtus = adap->params.mtus;
2467 	if (uld == CXGB4_ULD_RDMA) {
2468 		lli.rxq_ids = adap->sge.rdma_rxq;
2469 		lli.nrxq = adap->sge.rdmaqs;
2470 	} else if (uld == CXGB4_ULD_ISCSI) {
2471 		lli.rxq_ids = adap->sge.ofld_rxq;
2472 		lli.nrxq = adap->sge.ofldqsets;
2473 	}
2474 	lli.ntxq = adap->sge.ofldqsets;
2475 	lli.nchan = adap->params.nports;
2476 	lli.nports = adap->params.nports;
2477 	lli.wr_cred = adap->params.ofldq_wr_cred;
2478 	lli.adapter_type = adap->params.rev;
2479 	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
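	/*
	 * The SGE queues-per-page registers pack one 4-bit field per PF, so
	 * shift by (fn * 4) before extracting this function's value.
	 */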
2480 	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
2481 			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
2482 			(adap->fn * 4));
2483 	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
2484 			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF) >>
2485 			(adap->fn * 4));
2486 	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
2487 	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
2488 	lli.fw_vers = adap->params.fw_vers;
2489 
2490 	handle = ulds[uld].add(&lli);
2491 	if (IS_ERR(handle)) {
2492 		dev_warn(adap->pdev_dev,
2493 			 "could not attach to the %s driver, error %ld\n",
2494 			 uld_str[uld], PTR_ERR(handle));
2495 		return;
2496 	}
2497 
2498 	adap->uld_handle[uld] = handle;
2499 
2500 	if (!netevent_registered) {
2501 		register_netevent_notifier(&cxgb4_netevent_nb);
2502 		netevent_registered = true;
2503 	}
2504 
2505 	if (adap->flags & FULL_INIT_DONE)
2506 		ulds[uld].state_change(handle, CXGB4_STATE_UP);
2507 }
2508 
2509 static void attach_ulds(struct adapter *adap)
2510 {
2511 	unsigned int i;
2512 
2513 	mutex_lock(&uld_mutex);
2514 	list_add_tail(&adap->list_node, &adapter_list);
2515 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2516 		if (ulds[i].add)
2517 			uld_attach(adap, i);
2518 	mutex_unlock(&uld_mutex);
2519 }
2520 
2521 static void detach_ulds(struct adapter *adap)
2522 {
2523 	unsigned int i;
2524 
2525 	mutex_lock(&uld_mutex);
2526 	list_del(&adap->list_node);
2527 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2528 		if (adap->uld_handle[i]) {
2529 			ulds[i].state_change(adap->uld_handle[i],
2530 					     CXGB4_STATE_DETACH);
2531 			adap->uld_handle[i] = NULL;
2532 		}
2533 	if (netevent_registered && list_empty(&adapter_list)) {
2534 		unregister_netevent_notifier(&cxgb4_netevent_nb);
2535 		netevent_registered = false;
2536 	}
2537 	mutex_unlock(&uld_mutex);
2538 }
2539 
2540 static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
2541 {
2542 	unsigned int i;
2543 
2544 	mutex_lock(&uld_mutex);
2545 	for (i = 0; i < CXGB4_ULD_MAX; i++)
2546 		if (adap->uld_handle[i])
2547 			ulds[i].state_change(adap->uld_handle[i], new_state);
2548 	mutex_unlock(&uld_mutex);
2549 }
2550 
2551 /**
2552  *	cxgb4_register_uld - register an upper-layer driver
2553  *	@type: the ULD type
2554  *	@p: the ULD methods
2555  *
2556  *	Registers an upper-layer driver with this driver and notifies the ULD
2557  *	about any presently available devices that support its type.  Returns
2558  *	%-EBUSY if a ULD of the same type is already registered.
2559  */
2560 int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
2561 {
2562 	int ret = 0;
2563 	struct adapter *adap;
2564 
2565 	if (type >= CXGB4_ULD_MAX)
2566 		return -EINVAL;
2567 	mutex_lock(&uld_mutex);
2568 	if (ulds[type].add) {
2569 		ret = -EBUSY;
2570 		goto out;
2571 	}
2572 	ulds[type] = *p;
2573 	list_for_each_entry(adap, &adapter_list, list_node)
2574 		uld_attach(adap, type);
2575 out:	mutex_unlock(&uld_mutex);
2576 	return ret;
2577 }
2578 EXPORT_SYMBOL(cxgb4_register_uld);
2579 
2580 /**
2581  *	cxgb4_unregister_uld - unregister an upper-layer driver
2582  *	@type: the ULD type
2583  *
2584  *	Unregisters an existing upper-layer driver.
2585  */
2586 int cxgb4_unregister_uld(enum cxgb4_uld type)
2587 {
2588 	struct adapter *adap;
2589 
2590 	if (type >= CXGB4_ULD_MAX)
2591 		return -EINVAL;
2592 	mutex_lock(&uld_mutex);
2593 	list_for_each_entry(adap, &adapter_list, list_node)
2594 		adap->uld_handle[type] = NULL;
2595 	ulds[type].add = NULL;
2596 	mutex_unlock(&uld_mutex);
2597 	return 0;
2598 }
2599 EXPORT_SYMBOL(cxgb4_unregister_uld);
2600 
2601 /**
2602  *	cxgb_up - enable the adapter
2603  *	@adap: adapter being enabled
2604  *
2605  *	Called when the first port is enabled, this function performs the
2606  *	actions necessary to make an adapter operational, such as completing
2607  *	the initialization of HW modules, and enabling interrupts.
2608  *
2609  *	Must be called with the rtnl lock held.
2610  */
2611 static int cxgb_up(struct adapter *adap)
2612 {
2613 	int err;
2614 
2615 	err = setup_sge_queues(adap);
2616 	if (err)
2617 		goto out;
2618 	err = setup_rss(adap);
2619 	if (err)
2620 		goto freeq;
2621 
2622 	if (adap->flags & USING_MSIX) {
2623 		name_msix_vecs(adap);
2624 		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
2625 				  adap->msix_info[0].desc, adap);
2626 		if (err)
2627 			goto irq_err;
2628 
2629 		err = request_msix_queue_irqs(adap);
2630 		if (err) {
2631 			free_irq(adap->msix_info[0].vec, adap);
2632 			goto irq_err;
2633 		}
2634 	} else {
2635 		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
2636 				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
2637 				  adap->port[0]->name, adap);
2638 		if (err)
2639 			goto irq_err;
2640 	}
2641 	enable_rx(adap);
2642 	t4_sge_start(adap);
2643 	t4_intr_enable(adap);
2644 	adap->flags |= FULL_INIT_DONE;
2645 	notify_ulds(adap, CXGB4_STATE_UP);
2646  out:
2647 	return err;
2648  irq_err:
2649 	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
2650  freeq:
2651 	t4_free_sge_resources(adap);
2652 	goto out;
2653 }
2654 
2655 static void cxgb_down(struct adapter *adapter)
2656 {
2657 	t4_intr_disable(adapter);
2658 	cancel_work_sync(&adapter->tid_release_task);
2659 	adapter->tid_release_task_busy = false;
2660 	adapter->tid_release_head = NULL;
2661 
2662 	if (adapter->flags & USING_MSIX) {
2663 		free_msix_queue_irqs(adapter);
2664 		free_irq(adapter->msix_info[0].vec, adapter);
2665 	} else
2666 		free_irq(adapter->pdev->irq, adapter);
2667 	quiesce_rx(adapter);
2668 	t4_sge_stop(adapter);
2669 	t4_free_sge_resources(adapter);
2670 	adapter->flags &= ~FULL_INIT_DONE;
2671 }
2672 
2673 /*
2674  * net_device operations
2675  */
2676 static int cxgb_open(struct net_device *dev)
2677 {
2678 	int err;
2679 	struct port_info *pi = netdev_priv(dev);
2680 	struct adapter *adapter = pi->adapter;
2681 
2682 	netif_carrier_off(dev);
2683 
2684 	if (!(adapter->flags & FULL_INIT_DONE)) {
2685 		err = cxgb_up(adapter);
2686 		if (err < 0)
2687 			return err;
2688 	}
2689 
2690 	err = link_start(dev);
2691 	if (!err)
2692 		netif_tx_start_all_queues(dev);
2693 	return err;
2694 }
2695 
2696 static int cxgb_close(struct net_device *dev)
2697 {
2698 	struct port_info *pi = netdev_priv(dev);
2699 	struct adapter *adapter = pi->adapter;
2700 
2701 	netif_tx_stop_all_queues(dev);
2702 	netif_carrier_off(dev);
2703 	return t4_enable_vi(adapter, adapter->fn, pi->viid, false, false);
2704 }
2705 
2706 static struct rtnl_link_stats64 *cxgb_get_stats(struct net_device *dev,
2707 						struct rtnl_link_stats64 *ns)
2708 {
2709 	struct port_stats stats;
2710 	struct port_info *p = netdev_priv(dev);
2711 	struct adapter *adapter = p->adapter;
2712 
2713 	spin_lock(&adapter->stats_lock);
2714 	t4_get_port_stats(adapter, p->tx_chan, &stats);
2715 	spin_unlock(&adapter->stats_lock);
2716 
2717 	ns->tx_bytes   = stats.tx_octets;
2718 	ns->tx_packets = stats.tx_frames;
2719 	ns->rx_bytes   = stats.rx_octets;
2720 	ns->rx_packets = stats.rx_frames;
2721 	ns->multicast  = stats.rx_mcast_frames;
2722 
2723 	/* detailed rx_errors */
2724 	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
2725 			       stats.rx_runt;
2726 	ns->rx_over_errors   = 0;
2727 	ns->rx_crc_errors    = stats.rx_fcs_err;
2728 	ns->rx_frame_errors  = stats.rx_symbol_err;
2729 	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
2730 			       stats.rx_ovflow2 + stats.rx_ovflow3 +
2731 			       stats.rx_trunc0 + stats.rx_trunc1 +
2732 			       stats.rx_trunc2 + stats.rx_trunc3;
2733 	ns->rx_missed_errors = 0;
2734 
2735 	/* detailed tx_errors */
2736 	ns->tx_aborted_errors   = 0;
2737 	ns->tx_carrier_errors   = 0;
2738 	ns->tx_fifo_errors      = 0;
2739 	ns->tx_heartbeat_errors = 0;
2740 	ns->tx_window_errors    = 0;
2741 
2742 	ns->tx_errors = stats.tx_error_frames;
2743 	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
2744 		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
2745 	return ns;
2746 }
2747 
2748 static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
2749 {
2750 	unsigned int mbox;
2751 	int ret = 0, prtad, devad;
2752 	struct port_info *pi = netdev_priv(dev);
2753 	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
2754 
2755 	switch (cmd) {
2756 	case SIOCGMIIPHY:
2757 		if (pi->mdio_addr < 0)
2758 			return -EOPNOTSUPP;
2759 		data->phy_id = pi->mdio_addr;
2760 		break;
2761 	case SIOCGMIIREG:
2762 	case SIOCSMIIREG:
2763 		if (mdio_phy_id_is_c45(data->phy_id)) {
2764 			prtad = mdio_phy_id_prtad(data->phy_id);
2765 			devad = mdio_phy_id_devad(data->phy_id);
2766 		} else if (data->phy_id < 32) {
2767 			prtad = data->phy_id;
2768 			devad = 0;
2769 			data->reg_num &= 0x1f;
2770 		} else
2771 			return -EINVAL;
2772 
2773 		mbox = pi->adapter->fn;
2774 		if (cmd == SIOCGMIIREG)
2775 			ret = t4_mdio_rd(pi->adapter, mbox, prtad, devad,
2776 					 data->reg_num, &data->val_out);
2777 		else
2778 			ret = t4_mdio_wr(pi->adapter, mbox, prtad, devad,
2779 					 data->reg_num, data->val_in);
2780 		break;
2781 	default:
2782 		return -EOPNOTSUPP;
2783 	}
2784 	return ret;
2785 }
2786 
2787 static void cxgb_set_rxmode(struct net_device *dev)
2788 {
2789 	/* unfortunately we can't return errors to the stack */
2790 	set_rxmode(dev, -1, false);
2791 }
2792 
2793 static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
2794 {
2795 	int ret;
2796 	struct port_info *pi = netdev_priv(dev);
2797 
2798 	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
2799 		return -EINVAL;
2800 	ret = t4_set_rxmode(pi->adapter, pi->adapter->fn, pi->viid, new_mtu, -1,
2801 			    -1, -1, -1, true);
2802 	if (!ret)
2803 		dev->mtu = new_mtu;
2804 	return ret;
2805 }
2806 
2807 static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2808 {
2809 	int ret;
2810 	struct sockaddr *addr = p;
2811 	struct port_info *pi = netdev_priv(dev);
2812 
2813 	if (!is_valid_ether_addr(addr->sa_data))
2814 		return -EINVAL;
2815 
2816 	ret = t4_change_mac(pi->adapter, pi->adapter->fn, pi->viid,
2817 			    pi->xact_addr_filt, addr->sa_data, true, true);
2818 	if (ret < 0)
2819 		return ret;
2820 
2821 	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2822 	pi->xact_addr_filt = ret;
2823 	return 0;
2824 }
2825 
2826 #ifdef CONFIG_NET_POLL_CONTROLLER
2827 static void cxgb_netpoll(struct net_device *dev)
2828 {
2829 	struct port_info *pi = netdev_priv(dev);
2830 	struct adapter *adap = pi->adapter;
2831 
2832 	if (adap->flags & USING_MSIX) {
2833 		int i;
2834 		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
2835 
2836 		for (i = pi->nqsets; i; i--, rx++)
2837 			t4_sge_intr_msix(0, &rx->rspq);
2838 	} else
2839 		t4_intr_handler(adap)(0, adap);
2840 }
2841 #endif
2842 
2843 static const struct net_device_ops cxgb4_netdev_ops = {
2844 	.ndo_open             = cxgb_open,
2845 	.ndo_stop             = cxgb_close,
2846 	.ndo_start_xmit       = t4_eth_xmit,
2847 	.ndo_get_stats64      = cxgb_get_stats,
2848 	.ndo_set_rx_mode      = cxgb_set_rxmode,
2849 	.ndo_set_mac_address  = cxgb_set_mac_addr,
2850 	.ndo_set_features     = cxgb_set_features,
2851 	.ndo_validate_addr    = eth_validate_addr,
2852 	.ndo_do_ioctl         = cxgb_ioctl,
2853 	.ndo_change_mtu       = cxgb_change_mtu,
2854 #ifdef CONFIG_NET_POLL_CONTROLLER
2855 	.ndo_poll_controller  = cxgb_netpoll,
2856 #endif
2857 };
2858 
2859 void t4_fatal_err(struct adapter *adap)
2860 {
2861 	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
2862 	t4_intr_disable(adap);
2863 	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
2864 }
2865 
2866 static void setup_memwin(struct adapter *adap)
2867 {
2868 	u32 bar0;
2869 
2870 	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
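	/*
	 * Each memory window register takes a base address, a BAR index and
	 * the window size encoded as log2 of the aperture in KB, hence the
	 * "ilog2(...) - 10" terms below.
	 */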
2871 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
2872 		     (bar0 + MEMWIN0_BASE) | BIR(0) |
2873 		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
2874 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
2875 		     (bar0 + MEMWIN1_BASE) | BIR(0) |
2876 		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
2877 	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
2878 		     (bar0 + MEMWIN2_BASE) | BIR(0) |
2879 		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
2880 	if (adap->vres.ocq.size) {
2881 		unsigned int start, sz_kb;
2882 
2883 		start = pci_resource_start(adap->pdev, 2) +
2884 			OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
2885 		sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
2886 		t4_write_reg(adap,
2887 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
2888 			     start | BIR(1) | WINDOW(ilog2(sz_kb)));
2889 		t4_write_reg(adap,
2890 			     PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3),
2891 			     adap->vres.ocq.start);
2892 		t4_read_reg(adap,
2893 			    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, 3));
2894 	}
2895 }
2896 
2897 static int adap_init1(struct adapter *adap, struct fw_caps_config_cmd *c)
2898 {
2899 	u32 v;
2900 	int ret;
2901 
2902 	/* get device capabilities */
2903 	memset(c, 0, sizeof(*c));
2904 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2905 			       FW_CMD_REQUEST | FW_CMD_READ);
2906 	c->retval_len16 = htonl(FW_LEN16(*c));
2907 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), c);
2908 	if (ret < 0)
2909 		return ret;
2910 
2911 	/* select capabilities we'll be using */
2912 	if (c->niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
2913 		if (!vf_acls)
2914 			c->niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
2915 		else
2916 			c->niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
2917 	} else if (vf_acls) {
2918 		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
2919 		return ret;
2920 	}
2921 	c->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
2922 			       FW_CMD_REQUEST | FW_CMD_WRITE);
2923 	ret = t4_wr_mbox(adap, adap->fn, c, sizeof(*c), NULL);
2924 	if (ret < 0)
2925 		return ret;
2926 
2927 	ret = t4_config_glbl_rss(adap, adap->fn,
2928 				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
2929 				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
2930 				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
2931 	if (ret < 0)
2932 		return ret;
2933 
2934 	ret = t4_cfg_pfvf(adap, adap->fn, adap->fn, 0, MAX_EGRQ, 64, MAX_INGQ,
2935 			  0, 0, 4, 0xf, 0xf, 16, FW_CMD_CAP_PF, FW_CMD_CAP_PF);
2936 	if (ret < 0)
2937 		return ret;
2938 
2939 	t4_sge_init(adap);
2940 
2941 	/* tweak some settings */
2942 	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
2943 	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
2944 	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
2945 	v = t4_read_reg(adap, TP_PIO_DATA);
2946 	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
2947 
2948 	/* get basic stuff going */
2949 	return t4_early_init(adap, adap->fn);
2950 }
2951 
2952 /*
2953  * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
2954  */
2955 #define MAX_ATIDS 8192U
2956 
2957 /*
2958  * Phase 0 of initialization: contact FW, obtain config, perform basic init.
2959  */
2960 static int adap_init0(struct adapter *adap)
2961 {
2962 	int ret;
2963 	u32 v, port_vec;
2964 	enum dev_state state;
2965 	u32 params[7], val[7];
2966 	struct fw_caps_config_cmd c;
2967 
2968 	ret = t4_check_fw_version(adap);
2969 	if (ret == -EINVAL || ret > 0) {
2970 		if (upgrade_fw(adap) >= 0)             /* recache FW version */
2971 			ret = t4_check_fw_version(adap);
2972 	}
2973 	if (ret < 0)
2974 		return ret;
2975 
2976 	/* contact FW, request master */
2977 	ret = t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, &state);
2978 	if (ret < 0) {
2979 		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
2980 			ret);
2981 		return ret;
2982 	}
2983 
2984 	/* reset device */
2985 	ret = t4_fw_reset(adap, adap->fn, PIORSTMODE | PIORST);
2986 	if (ret < 0)
2987 		goto bye;
2988 
2989 	for (v = 0; v < SGE_NTIMERS - 1; v++)
2990 		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
2991 	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
2992 	adap->sge.counter_val[0] = 1;
2993 	for (v = 1; v < SGE_NCOUNTERS; v++)
2994 		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
2995 					       THRESHOLD_3_MASK);
2996 #define FW_PARAM_DEV(param) \
2997 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
2998 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
2999 
3000 	params[0] = FW_PARAM_DEV(CCLK);
3001 	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 1, params, val);
3002 	if (ret < 0)
3003 		goto bye;
3004 	adap->params.vpd.cclk = val[0];
3005 
3006 	ret = adap_init1(adap, &c);
3007 	if (ret < 0)
3008 		goto bye;
3009 
3010 #define FW_PARAM_PFVF(param) \
3011 	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
3012 	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param) | \
3013 	 FW_PARAMS_PARAM_Y(adap->fn))
3014 
3015 	params[0] = FW_PARAM_DEV(PORTVEC);
3016 	params[1] = FW_PARAM_PFVF(L2T_START);
3017 	params[2] = FW_PARAM_PFVF(L2T_END);
3018 	params[3] = FW_PARAM_PFVF(FILTER_START);
3019 	params[4] = FW_PARAM_PFVF(FILTER_END);
3020 	params[5] = FW_PARAM_PFVF(IQFLINT_START);
3021 	params[6] = FW_PARAM_PFVF(EQ_START);
3022 	ret = t4_query_params(adap, adap->fn, adap->fn, 0, 7, params, val);
3023 	if (ret < 0)
3024 		goto bye;
3025 	port_vec = val[0];
3026 	adap->tids.ftid_base = val[3];
3027 	adap->tids.nftids = val[4] - val[3] + 1;
3028 	adap->sge.ingr_start = val[5];
3029 	adap->sge.egr_start = val[6];
3030 
3031 	if (c.ofldcaps) {
3032 		/* query offload-related parameters */
3033 		params[0] = FW_PARAM_DEV(NTID);
3034 		params[1] = FW_PARAM_PFVF(SERVER_START);
3035 		params[2] = FW_PARAM_PFVF(SERVER_END);
3036 		params[3] = FW_PARAM_PFVF(TDDP_START);
3037 		params[4] = FW_PARAM_PFVF(TDDP_END);
3038 		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
3039 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3040 				      val);
3041 		if (ret < 0)
3042 			goto bye;
3043 		adap->tids.ntids = val[0];
3044 		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
3045 		adap->tids.stid_base = val[1];
3046 		adap->tids.nstids = val[2] - val[1] + 1;
3047 		adap->vres.ddp.start = val[3];
3048 		adap->vres.ddp.size = val[4] - val[3] + 1;
3049 		adap->params.ofldq_wr_cred = val[5];
3050 		adap->params.offload = 1;
3051 	}
3052 	if (c.rdmacaps) {
3053 		params[0] = FW_PARAM_PFVF(STAG_START);
3054 		params[1] = FW_PARAM_PFVF(STAG_END);
3055 		params[2] = FW_PARAM_PFVF(RQ_START);
3056 		params[3] = FW_PARAM_PFVF(RQ_END);
3057 		params[4] = FW_PARAM_PFVF(PBL_START);
3058 		params[5] = FW_PARAM_PFVF(PBL_END);
3059 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3060 				      val);
3061 		if (ret < 0)
3062 			goto bye;
3063 		adap->vres.stag.start = val[0];
3064 		adap->vres.stag.size = val[1] - val[0] + 1;
3065 		adap->vres.rq.start = val[2];
3066 		adap->vres.rq.size = val[3] - val[2] + 1;
3067 		adap->vres.pbl.start = val[4];
3068 		adap->vres.pbl.size = val[5] - val[4] + 1;
3069 
3070 		params[0] = FW_PARAM_PFVF(SQRQ_START);
3071 		params[1] = FW_PARAM_PFVF(SQRQ_END);
3072 		params[2] = FW_PARAM_PFVF(CQ_START);
3073 		params[3] = FW_PARAM_PFVF(CQ_END);
3074 		params[4] = FW_PARAM_PFVF(OCQ_START);
3075 		params[5] = FW_PARAM_PFVF(OCQ_END);
3076 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 6, params,
3077 				      val);
3078 		if (ret < 0)
3079 			goto bye;
3080 		adap->vres.qp.start = val[0];
3081 		adap->vres.qp.size = val[1] - val[0] + 1;
3082 		adap->vres.cq.start = val[2];
3083 		adap->vres.cq.size = val[3] - val[2] + 1;
3084 		adap->vres.ocq.start = val[4];
3085 		adap->vres.ocq.size = val[5] - val[4] + 1;
3086 	}
3087 	if (c.iscsicaps) {
3088 		params[0] = FW_PARAM_PFVF(ISCSI_START);
3089 		params[1] = FW_PARAM_PFVF(ISCSI_END);
3090 		ret = t4_query_params(adap, adap->fn, adap->fn, 0, 2, params,
3091 				      val);
3092 		if (ret < 0)
3093 			goto bye;
3094 		adap->vres.iscsi.start = val[0];
3095 		adap->vres.iscsi.size = val[1] - val[0] + 1;
3096 	}
3097 #undef FW_PARAM_PFVF
3098 #undef FW_PARAM_DEV
3099 
3100 	adap->params.nports = hweight32(port_vec);
3101 	adap->params.portvec = port_vec;
3102 	adap->flags |= FW_OK;
3103 
3104 	/* These are finalized by FW initialization, load their values now */
3105 	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
3106 	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
3107 	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
3108 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3109 		     adap->params.b_wnd);
3110 
3111 #ifdef CONFIG_PCI_IOV
3112 	/*
3113 	 * Provision resource limits for Virtual Functions.  We currently
3114 	 * grant them all the same static resource limits except for the Port
3115 	 * Access Rights Mask which we're assigning based on the PF.  All of
3116 	 * the static provisioning stuff for both the PF and VF really needs
3117 	 * to be managed in a persistent manner for each device that the
3118 	 * firmware controls.
3119 	 */
3120 	{
3121 		int pf, vf;
3122 
3123 		for (pf = 0; pf < ARRAY_SIZE(num_vf); pf++) {
3124 			if (num_vf[pf] <= 0)
3125 				continue;
3126 
3127 			/* VF numbering starts at 1! */
3128 			for (vf = 1; vf <= num_vf[pf]; vf++) {
3129 				ret = t4_cfg_pfvf(adap, adap->fn, pf, vf,
3130 						  VFRES_NEQ, VFRES_NETHCTRL,
3131 						  VFRES_NIQFLINT, VFRES_NIQ,
3132 						  VFRES_TC, VFRES_NVI,
3133 						  FW_PFVF_CMD_CMASK_MASK,
3134 						  pfvfres_pmask(adap, pf, vf),
3135 						  VFRES_NEXACTF,
3136 						  VFRES_R_CAPS, VFRES_WX_CAPS);
3137 				if (ret < 0)
3138 					dev_warn(adap->pdev_dev, "failed to "
3139 						 "provision pf/vf=%d/%d; "
3140 						 "err=%d\n", pf, vf, ret);
3141 			}
3142 		}
3143 	}
3144 #endif
3145 
3146 	setup_memwin(adap);
3147 	return 0;
3148 
3149 	/*
3150 	 * If a command timed out or failed with EIO, the firmware is not
3151 	 * operating within its spec or something catastrophic happened to
3152 	 * the HW/FW; stop issuing commands.
3153 	 */
3154 bye:	if (ret != -ETIMEDOUT && ret != -EIO)
3155 		t4_fw_bye(adap, adap->fn);
3156 	return ret;
3157 }
3158 
3159 /* EEH callbacks */
3160 
3161 static pci_ers_result_t eeh_err_detected(struct pci_dev *pdev,
3162 					 pci_channel_state_t state)
3163 {
3164 	int i;
3165 	struct adapter *adap = pci_get_drvdata(pdev);
3166 
3167 	if (!adap)
3168 		goto out;
3169 
3170 	rtnl_lock();
3171 	adap->flags &= ~FW_OK;
3172 	notify_ulds(adap, CXGB4_STATE_START_RECOVERY);
3173 	for_each_port(adap, i) {
3174 		struct net_device *dev = adap->port[i];
3175 
3176 		netif_device_detach(dev);
3177 		netif_carrier_off(dev);
3178 	}
3179 	if (adap->flags & FULL_INIT_DONE)
3180 		cxgb_down(adap);
3181 	rtnl_unlock();
3182 	pci_disable_device(pdev);
3183 out:	return state == pci_channel_io_perm_failure ?
3184 		PCI_ERS_RESULT_DISCONNECT : PCI_ERS_RESULT_NEED_RESET;
3185 }
3186 
3187 static pci_ers_result_t eeh_slot_reset(struct pci_dev *pdev)
3188 {
3189 	int i, ret;
3190 	struct fw_caps_config_cmd c;
3191 	struct adapter *adap = pci_get_drvdata(pdev);
3192 
3193 	if (!adap) {
3194 		pci_restore_state(pdev);
3195 		pci_save_state(pdev);
3196 		return PCI_ERS_RESULT_RECOVERED;
3197 	}
3198 
3199 	if (pci_enable_device(pdev)) {
3200 		dev_err(&pdev->dev, "cannot reenable PCI device after reset\n");
3201 		return PCI_ERS_RESULT_DISCONNECT;
3202 	}
3203 
3204 	pci_set_master(pdev);
3205 	pci_restore_state(pdev);
3206 	pci_save_state(pdev);
3207 	pci_cleanup_aer_uncorrect_error_status(pdev);
3208 
3209 	if (t4_wait_dev_ready(adap) < 0)
3210 		return PCI_ERS_RESULT_DISCONNECT;
3211 	if (t4_fw_hello(adap, adap->fn, adap->fn, MASTER_MUST, NULL))
3212 		return PCI_ERS_RESULT_DISCONNECT;
3213 	adap->flags |= FW_OK;
3214 	if (adap_init1(adap, &c))
3215 		return PCI_ERS_RESULT_DISCONNECT;
3216 
3217 	for_each_port(adap, i) {
3218 		struct port_info *p = adap2pinfo(adap, i);
3219 
3220 		ret = t4_alloc_vi(adap, adap->fn, p->tx_chan, adap->fn, 0, 1,
3221 				  NULL, NULL);
3222 		if (ret < 0)
3223 			return PCI_ERS_RESULT_DISCONNECT;
3224 		p->viid = ret;
3225 		p->xact_addr_filt = -1;
3226 	}
3227 
3228 	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
3229 		     adap->params.b_wnd);
3230 	setup_memwin(adap);
3231 	if (cxgb_up(adap))
3232 		return PCI_ERS_RESULT_DISCONNECT;
3233 	return PCI_ERS_RESULT_RECOVERED;
3234 }
3235 
3236 static void eeh_resume(struct pci_dev *pdev)
3237 {
3238 	int i;
3239 	struct adapter *adap = pci_get_drvdata(pdev);
3240 
3241 	if (!adap)
3242 		return;
3243 
3244 	rtnl_lock();
3245 	for_each_port(adap, i) {
3246 		struct net_device *dev = adap->port[i];
3247 
3248 		if (netif_running(dev)) {
3249 			link_start(dev);
3250 			cxgb_set_rxmode(dev);
3251 		}
3252 		netif_device_attach(dev);
3253 	}
3254 	rtnl_unlock();
3255 }
3256 
3257 static struct pci_error_handlers cxgb4_eeh = {
3258 	.error_detected = eeh_err_detected,
3259 	.slot_reset     = eeh_slot_reset,
3260 	.resume         = eeh_resume,
3261 };
3262 
3263 static inline bool is_10g_port(const struct link_config *lc)
3264 {
3265 	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
3266 }
3267 
3268 static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
3269 			     unsigned int size, unsigned int iqe_size)
3270 {
3271 	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
3272 			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
3273 	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
3274 	q->iqe_len = iqe_size;
3275 	q->size = size;
3276 }
3277 
3278 /*
3279  * Perform default configuration of DMA queues depending on the number and type
3280  * of ports we found and the number of available CPUs.  Most settings can be
3281  * modified by the admin prior to actual use.
3282  */
3283 static void __devinit cfg_queues(struct adapter *adap)
3284 {
3285 	struct sge *s = &adap->sge;
3286 	int i, q10g = 0, n10g = 0, qidx = 0;
3287 
3288 	for_each_port(adap, i)
3289 		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
3290 
3291 	/*
3292 	 * We default to 1 queue per non-10G port and, for each 10G port, up
3293 	 * to as many queues as there are online CPUs.
3294 	 */
3295 	if (n10g)
3296 		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
3297 	if (q10g > num_online_cpus())
3298 		q10g = num_online_cpus();
3299 
3300 	for_each_port(adap, i) {
3301 		struct port_info *pi = adap2pinfo(adap, i);
3302 
3303 		pi->first_qset = qidx;
3304 		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
3305 		qidx += pi->nqsets;
3306 	}
3307 
3308 	s->ethqsets = qidx;
3309 	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
3310 
3311 	if (is_offload(adap)) {
3312 		/*
3313 		 * For offload we use 1 queue/channel if all ports are up to 1G,
3314 		 * otherwise we divide all available queues amongst the channels
3315 		 * capped by the number of available cores.
3316 		 */
3317 		if (n10g) {
3318 			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
3319 				  num_online_cpus());
3320 			s->ofldqsets = roundup(i, adap->params.nports);
3321 		} else
3322 			s->ofldqsets = adap->params.nports;
3323 		/* For RDMA one Rx queue per channel suffices */
3324 		s->rdmaqs = adap->params.nports;
3325 	}
3326 
3327 	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
3328 		struct sge_eth_rxq *r = &s->ethrxq[i];
3329 
3330 		init_rspq(&r->rspq, 0, 0, 1024, 64);
3331 		r->fl.size = 72;
3332 	}
3333 
3334 	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
3335 		s->ethtxq[i].q.size = 1024;
3336 
3337 	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
3338 		s->ctrlq[i].q.size = 512;
3339 
3340 	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
3341 		s->ofldtxq[i].q.size = 1024;
3342 
3343 	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
3344 		struct sge_ofld_rxq *r = &s->ofldrxq[i];
3345 
3346 		init_rspq(&r->rspq, 0, 0, 1024, 64);
3347 		r->rspq.uld = CXGB4_ULD_ISCSI;
3348 		r->fl.size = 72;
3349 	}
3350 
3351 	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
3352 		struct sge_ofld_rxq *r = &s->rdmarxq[i];
3353 
3354 		init_rspq(&r->rspq, 0, 0, 511, 64);
3355 		r->rspq.uld = CXGB4_ULD_RDMA;
3356 		r->fl.size = 72;
3357 	}
3358 
3359 	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
3360 	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
3361 }
3362 
3363 /*
3364  * Reduce the number of Ethernet queues across all ports to at most n.
3365  * n provides at least one queue per port.
3366  */
3367 static void __devinit reduce_ethqs(struct adapter *adap, int n)
3368 {
3369 	int i;
3370 	struct port_info *pi;
3371 
3372 	while (n < adap->sge.ethqsets)
3373 		for_each_port(adap, i) {
3374 			pi = adap2pinfo(adap, i);
3375 			if (pi->nqsets > 1) {
3376 				pi->nqsets--;
3377 				adap->sge.ethqsets--;
3378 				if (adap->sge.ethqsets <= n)
3379 					break;
3380 			}
3381 		}
3382 
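	/* Recompute each port's first queue set index after the reduction. */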
3383 	n = 0;
3384 	for_each_port(adap, i) {
3385 		pi = adap2pinfo(adap, i);
3386 		pi->first_qset = n;
3387 		n += pi->nqsets;
3388 	}
3389 }
3390 
3391 /* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
3392 #define EXTRA_VECS 2
3393 
3394 static int __devinit enable_msix(struct adapter *adap)
3395 {
3396 	int ofld_need = 0;
3397 	int i, err, want, need;
3398 	struct sge *s = &adap->sge;
3399 	unsigned int nchan = adap->params.nports;
3400 	struct msix_entry entries[MAX_INGQ + 1];
3401 
3402 	for (i = 0; i < ARRAY_SIZE(entries); ++i)
3403 		entries[i].entry = i;
3404 
3405 	want = s->max_ethqsets + EXTRA_VECS;
3406 	if (is_offload(adap)) {
3407 		want += s->rdmaqs + s->ofldqsets;
3408 		/* need nchan for each possible ULD */
3409 		ofld_need = 2 * nchan;
3410 	}
3411 	need = adap->params.nports + EXTRA_VECS + ofld_need;
3412 
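	/*
	 * With the legacy pci_enable_msix() API a positive return value is
	 * the number of vectors actually available; retry with that smaller
	 * count as long as it still covers our minimum requirement.
	 */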
3413 	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
3414 		want = err;
3415 
3416 	if (!err) {
3417 		/*
3418 		 * Distribute available vectors to the various queue groups.
3419 		 * Every group gets its minimum requirement and NIC gets top
3420 		 * priority for leftovers.
3421 		 */
3422 		i = want - EXTRA_VECS - ofld_need;
3423 		if (i < s->max_ethqsets) {
3424 			s->max_ethqsets = i;
3425 			if (i < s->ethqsets)
3426 				reduce_ethqs(adap, i);
3427 		}
3428 		if (is_offload(adap)) {
3429 			i = want - EXTRA_VECS - s->max_ethqsets;
3430 			i -= ofld_need - nchan;
3431 			s->ofldqsets = (i / nchan) * nchan;  /* round down */
3432 		}
3433 		for (i = 0; i < want; ++i)
3434 			adap->msix_info[i].vec = entries[i].vector;
3435 	} else if (err > 0)
3436 		dev_info(adap->pdev_dev,
3437 			 "only %d MSI-X vectors left, not using MSI-X\n", err);
3438 	return err;
3439 }
3440 
3441 #undef EXTRA_VECS
3442 
3443 static int __devinit init_rss(struct adapter *adap)
3444 {
3445 	unsigned int i, j;
3446 
3447 	for_each_port(adap, i) {
3448 		struct port_info *pi = adap2pinfo(adap, i);
3449 
3450 		pi->rss = kcalloc(pi->rss_size, sizeof(u16), GFP_KERNEL);
3451 		if (!pi->rss)
3452 			return -ENOMEM;
3453 		for (j = 0; j < pi->rss_size; j++)
3454 			pi->rss[j] = ethtool_rxfh_indir_default(j, pi->nqsets);
3455 	}
3456 	return 0;
3457 }
3458 
3459 static void __devinit print_port_info(const struct net_device *dev)
3460 {
3461 	static const char *base[] = {
3462 		"R XFI", "R XAUI", "T SGMII", "T XFI", "T XAUI", "KX4", "CX4",
3463 		"KX", "KR", "R SFP+", "KR/KX", "KR/KX/KX4"
3464 	};
3465 
3466 	char buf[80];
3467 	char *bufp = buf;
3468 	const char *spd = "";
3469 	const struct port_info *pi = netdev_priv(dev);
3470 	const struct adapter *adap = pi->adapter;
3471 
3472 	if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_2_5GB)
3473 		spd = " 2.5 GT/s";
3474 	else if (adap->params.pci.speed == PCI_EXP_LNKSTA_CLS_5_0GB)
3475 		spd = " 5 GT/s";
3476 
3477 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
3478 		bufp += sprintf(bufp, "100/");
3479 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
3480 		bufp += sprintf(bufp, "1000/");
3481 	if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
3482 		bufp += sprintf(bufp, "10G/");
3483 	if (bufp != buf)
3484 		--bufp;
3485 	sprintf(bufp, "BASE-%s", base[pi->port_type]);
3486 
3487 	netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s%s\n",
3488 		    adap->params.vpd.id, adap->params.rev, buf,
3489 		    is_offload(adap) ? "R" : "", adap->params.pci.width, spd,
3490 		    (adap->flags & USING_MSIX) ? " MSI-X" :
3491 		    (adap->flags & USING_MSI) ? " MSI" : "");
3492 	netdev_info(dev, "S/N: %s, E/C: %s\n",
3493 		    adap->params.vpd.sn, adap->params.vpd.ec);
3494 }
3495 
3496 static void __devinit enable_pcie_relaxed_ordering(struct pci_dev *dev)
3497 {
3498 	u16 v;
3499 	int pos;
3500 
3501 	pos = pci_pcie_cap(dev);
3502 	if (pos > 0) {
3503 		pci_read_config_word(dev, pos + PCI_EXP_DEVCTL, &v);
3504 		v |= PCI_EXP_DEVCTL_RELAX_EN;
3505 		pci_write_config_word(dev, pos + PCI_EXP_DEVCTL, v);
3506 	}
3507 }
3508 
3509 /*
3510  * Free the following resources:
3511  * - memory used for tables
3512  * - MSI/MSI-X
3513  * - net devices
3514  * - resources FW is holding for us
3515  */
free_some_resources(struct adapter * adapter)3516 static void free_some_resources(struct adapter *adapter)
3517 {
3518 	unsigned int i;
3519 
3520 	t4_free_mem(adapter->l2t);
3521 	t4_free_mem(adapter->tids.tid_tab);
3522 	disable_msi(adapter);
3523 
3524 	for_each_port(adapter, i)
3525 		if (adapter->port[i]) {
3526 			kfree(adap2pinfo(adapter, i)->rss);
3527 			free_netdev(adapter->port[i]);
3528 		}
3529 	if (adapter->flags & FW_OK)
3530 		t4_fw_bye(adapter, adapter->fn);
3531 }
3532 
3533 #define TSO_FLAGS (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_TSO_ECN)
3534 #define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | TSO_FLAGS | \
3535 		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
3536 
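/*
 * PCI probe routine: claim the device, set up DMA and error reporting,
 * allocate the adapter and its per-port net devices, initialize the
 * hardware via firmware, pick an interrupt mode, and register the ports.
 * PCI functions we don't manage only get SR-IOV provisioning.
 */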
static int __devinit init_one(struct pci_dev *pdev,
			      const struct pci_device_id *ent)
{
	int func, i, err;
	struct port_info *pi;
	bool highdma = false;
	struct adapter *adapter = NULL;

	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);

	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		/* Just info, some other driver may have claimed the device. */
		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
		return err;
	}

	/* We control everything through one PF */
	func = PCI_FUNC(pdev->devfn);
	if (func != ent->driver_data) {
		pci_save_state(pdev);        /* to restore SR-IOV later */
		goto sriov;
	}

	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "cannot enable PCI device\n");
		goto out_release_regions;
	}

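	/* Prefer a 64-bit DMA mask; fall back to 32-bit if that fails. */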
	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
		highdma = true;
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
				"coherent allocations\n");
			goto out_disable_device;
		}
	} else {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "no usable DMA configuration\n");
			goto out_disable_device;
		}
	}

	pci_enable_pcie_error_reporting(pdev);
	enable_pcie_relaxed_ordering(pdev);
	pci_set_master(pdev);
	pci_save_state(pdev);

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter) {
		err = -ENOMEM;
		goto out_disable_device;
	}

	adapter->regs = pci_ioremap_bar(pdev, 0);
	if (!adapter->regs) {
		dev_err(&pdev->dev, "cannot map device registers\n");
		err = -ENOMEM;
		goto out_free_adapter;
	}

	adapter->pdev = pdev;
	adapter->pdev_dev = &pdev->dev;
	adapter->fn = func;
	adapter->msg_enable = dflt_msg_enable;
	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));

	spin_lock_init(&adapter->stats_lock);
	spin_lock_init(&adapter->tid_release_lock);

	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);

	err = t4_prep_adapter(adapter);
	if (err)
		goto out_unmap_bar;
	err = adap_init0(adapter);
	if (err)
		goto out_unmap_bar;

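	/* Allocate and minimally initialize a net_device for each port. */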
	for_each_port(adapter, i) {
		struct net_device *netdev;

		netdev = alloc_etherdev_mq(sizeof(struct port_info),
					   MAX_ETH_QSETS);
		if (!netdev) {
			err = -ENOMEM;
			goto out_free_dev;
		}

		SET_NETDEV_DEV(netdev, &pdev->dev);

		adapter->port[i] = netdev;
		pi = netdev_priv(netdev);
		pi->adapter = adapter;
		pi->xact_addr_filt = -1;
		pi->port_id = i;
		netdev->irq = pdev->irq;

		netdev->hw_features = NETIF_F_SG | TSO_FLAGS |
			NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			NETIF_F_RXCSUM | NETIF_F_RXHASH |
			NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
		if (highdma)
			netdev->hw_features |= NETIF_F_HIGHDMA;
		netdev->features |= netdev->hw_features;
		netdev->vlan_features = netdev->features & VLAN_FEAT;

		netdev->priv_flags |= IFF_UNICAST_FLT;

		netdev->netdev_ops = &cxgb4_netdev_ops;
		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
	}

	pci_set_drvdata(pdev, adapter);

	if (adapter->flags & FW_OK) {
		err = t4_port_init(adapter, func, func, 0);
		if (err)
			goto out_free_dev;
	}

	/*
	 * Configure queues and allocate tables now, they can be needed as
	 * soon as the first register_netdev completes.
	 */
	cfg_queues(adapter);

	adapter->l2t = t4_init_l2t();
	if (!adapter->l2t) {
		/* We tolerate a lack of L2T, giving up some functionality */
		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
		adapter->params.offload = 0;
	}

	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
		dev_warn(&pdev->dev, "could not allocate TID table, "
			 "continuing\n");
		adapter->params.offload = 0;
	}

	/* See what interrupts we'll be using */
	if (msi > 1 && enable_msix(adapter) == 0)
		adapter->flags |= USING_MSIX;
	else if (msi > 0 && pci_enable_msi(pdev) == 0)
		adapter->flags |= USING_MSI;

	err = init_rss(adapter);
	if (err)
		goto out_free_dev;

	/*
	 * The card is now ready to go.  If any errors occur during device
	 * registration we do not fail the whole card but rather proceed only
	 * with the ports we manage to register successfully.  However we must
	 * register at least one net device.
	 */
	for_each_port(adapter, i) {
		pi = adap2pinfo(adapter, i);
		netif_set_real_num_tx_queues(adapter->port[i], pi->nqsets);
		netif_set_real_num_rx_queues(adapter->port[i], pi->nqsets);

		err = register_netdev(adapter->port[i]);
		if (err)
			break;
		adapter->chan_map[pi->tx_chan] = i;
		print_port_info(adapter->port[i]);
	}
	if (i == 0) {
		dev_err(&pdev->dev, "could not register any net devices\n");
		goto out_free_dev;
	}
	if (err) {
		dev_warn(&pdev->dev, "only %d net devices registered\n", i);
		err = 0;
	}

	if (cxgb4_debugfs_root) {
		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
							   cxgb4_debugfs_root);
		setup_debugfs(adapter);
	}

	/* PCIe EEH recovery on powerpc platforms needs fundamental reset */
	pdev->needs_freset = 1;

	if (is_offload(adapter))
		attach_ulds(adapter);

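	/*
	 * PCI functions we don't manage jump straight here; the managed PF
	 * falls through.  Either way, provision any requested VFs.
	 */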
sriov:
#ifdef CONFIG_PCI_IOV
	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
			dev_info(&pdev->dev,
				 "instantiated %u virtual functions\n",
				 num_vf[func]);
#endif
	return 0;

 out_free_dev:
	free_some_resources(adapter);
 out_unmap_bar:
	iounmap(adapter->regs);
 out_free_adapter:
	kfree(adapter);
 out_disable_device:
	pci_disable_pcie_error_reporting(pdev);
	pci_disable_device(pdev);
 out_release_regions:
	pci_release_regions(pdev);
	pci_set_drvdata(pdev, NULL);
	return err;
}

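/*
 * PCI remove routine: tear down SR-IOV, detach upper-layer drivers,
 * unregister the net devices, and release everything init_one() acquired.
 */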
static void __devexit remove_one(struct pci_dev *pdev)
{
	struct adapter *adapter = pci_get_drvdata(pdev);

	pci_disable_sriov(pdev);

	if (adapter) {
		int i;

		if (is_offload(adapter))
			detach_ulds(adapter);

		for_each_port(adapter, i)
			if (adapter->port[i]->reg_state == NETREG_REGISTERED)
				unregister_netdev(adapter->port[i]);

		if (adapter->debugfs_root)
			debugfs_remove_recursive(adapter->debugfs_root);

		if (adapter->flags & FULL_INIT_DONE)
			cxgb_down(adapter);

		free_some_resources(adapter);
		iounmap(adapter->regs);
		kfree(adapter);
		pci_disable_pcie_error_reporting(pdev);
		pci_disable_device(pdev);
		pci_release_regions(pdev);
		pci_set_drvdata(pdev, NULL);
	} else
		pci_release_regions(pdev);
}

static struct pci_driver cxgb4_driver = {
	.name     = KBUILD_MODNAME,
	.id_table = cxgb4_pci_tbl,
	.probe    = init_one,
	.remove   = __devexit_p(remove_one),
	.err_handler = &cxgb4_eeh,
};

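/*
 * Module entry point: create the driver's debugfs root (optional) and
 * register the PCI driver.
 */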
static int __init cxgb4_init_module(void)
{
	int ret;

	/* Debugfs support is optional, just warn if this fails */
	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
	if (!cxgb4_debugfs_root)
		pr_warning("could not create debugfs entry, continuing\n");

	ret = pci_register_driver(&cxgb4_driver);
	if (ret < 0)
		debugfs_remove(cxgb4_debugfs_root);
	return ret;
}

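/*
 * Module exit point: unregister the PCI driver and remove the debugfs root.
 */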
static void __exit cxgb4_cleanup_module(void)
{
	pci_unregister_driver(&cxgb4_driver);
	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
}

module_init(cxgb4_init_module);
module_exit(cxgb4_cleanup_module);