1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17 
18 #include <linux/prefetch.h>
19 #include <linux/module.h>
20 #include "be.h"
21 #include "be_cmds.h"
22 #include <asm/div64.h>
23 
24 MODULE_VERSION(DRV_VER);
26 MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
27 MODULE_AUTHOR("ServerEngines Corporation");
28 MODULE_LICENSE("GPL");
29 
30 static unsigned int num_vfs;
31 module_param(num_vfs, uint, S_IRUGO);
32 MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");
33 
34 static ushort rx_frag_size = 2048;
35 module_param(rx_frag_size, ushort, S_IRUGO);
36 MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
37 
38 static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
39 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
40 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
41 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
42 	{ PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
43 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3)},
44 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4)},
45 	{ PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID5)},
46 	{ 0 }
47 };
48 MODULE_DEVICE_TABLE(pci, be_dev_ids);
49 /* UE Status Low CSR */
50 static const char * const ue_status_low_desc[] = {
51 	"CEV",
52 	"CTX",
53 	"DBUF",
54 	"ERX",
55 	"Host",
56 	"MPU",
57 	"NDMA",
58 	"PTC ",
59 	"RDMA ",
60 	"RXF ",
61 	"RXIPS ",
62 	"RXULP0 ",
63 	"RXULP1 ",
64 	"RXULP2 ",
65 	"TIM ",
66 	"TPOST ",
67 	"TPRE ",
68 	"TXIPS ",
69 	"TXULP0 ",
70 	"TXULP1 ",
71 	"UC ",
72 	"WDMA ",
73 	"TXULP2 ",
74 	"HOST1 ",
75 	"P0_OB_LINK ",
76 	"P1_OB_LINK ",
77 	"HOST_GPIO ",
78 	"MBOX ",
79 	"AXGMAC0",
80 	"AXGMAC1",
81 	"JTAG",
82 	"MPU_INTPEND"
83 };
84 /* UE Status High CSR */
85 static const char * const ue_status_hi_desc[] = {
86 	"LPCMEMHOST",
87 	"MGMT_MAC",
88 	"PCS0ONLINE",
89 	"MPU_IRAM",
90 	"PCS1ONLINE",
91 	"PCTL0",
92 	"PCTL1",
93 	"PMEM",
94 	"RR",
95 	"TXPB",
96 	"RXPP",
97 	"XAUI",
98 	"TXP",
99 	"ARM",
100 	"IPC",
101 	"HOST2",
102 	"HOST3",
103 	"HOST4",
104 	"HOST5",
105 	"HOST6",
106 	"HOST7",
107 	"HOST8",
108 	"HOST9",
109 	"NETC",
110 	"Unknown",
111 	"Unknown",
112 	"Unknown",
113 	"Unknown",
114 	"Unknown",
115 	"Unknown",
116 	"Unknown",
117 	"Unknown"
118 };
119 
120 /* Is BE in a multi-channel mode */
121 static inline bool be_is_mc(struct be_adapter *adapter) {
122 	return (adapter->function_mode & FLEX10_MODE ||
123 		adapter->function_mode & VNIC_MODE ||
124 		adapter->function_mode & UMC_ENABLED);
125 }
126 
127 static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
128 {
129 	struct be_dma_mem *mem = &q->dma_mem;
130 	if (mem->va)
131 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
132 				  mem->dma);
133 }
134 
135 static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
136 		u16 len, u16 entry_size)
137 {
138 	struct be_dma_mem *mem = &q->dma_mem;
139 
140 	memset(q, 0, sizeof(*q));
141 	q->len = len;
142 	q->entry_size = entry_size;
143 	mem->size = len * entry_size;
144 	mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
145 				     GFP_KERNEL);
146 	if (!mem->va)
147 		return -1;
148 	memset(mem->va, 0, mem->size);
149 	return 0;
150 }
151 
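/* Enable or disable interrupt posting to the host by toggling the HOSTINTR
 * bit in the membar interrupt-control register (PCI config space)
 */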
152 static void be_intr_set(struct be_adapter *adapter, bool enable)
153 {
154 	u32 reg, enabled;
155 
156 	if (adapter->eeh_err)
157 		return;
158 
159 	pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
160 				&reg);
161 	enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
162 
163 	if (!enabled && enable)
164 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
165 	else if (enabled && !enable)
166 		reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
167 	else
168 		return;
169 
170 	pci_write_config_dword(adapter->pdev,
171 			PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
172 }
173 
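/* Ring the RX queue doorbell to tell the hw how many rx buffers were posted */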
174 static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
175 {
176 	u32 val = 0;
177 	val |= qid & DB_RQ_RING_ID_MASK;
178 	val |= posted << DB_RQ_NUM_POSTED_SHIFT;
179 
180 	wmb();
181 	iowrite32(val, adapter->db + DB_RQ_OFFSET);
182 }
183 
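/* Ring the TX (ULP) doorbell to tell the hw how many tx wrbs were posted */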
184 static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
185 {
186 	u32 val = 0;
187 	val |= qid & DB_TXULP_RING_ID_MASK;
188 	val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;
189 
190 	wmb();
191 	iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
192 }
193 
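/* Ack num_popped entries on an event queue; optionally re-arm it and
 * clear the interrupt
 */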
194 static void be_eq_notify(struct be_adapter *adapter, u16 qid,
195 		bool arm, bool clear_int, u16 num_popped)
196 {
197 	u32 val = 0;
198 	val |= qid & DB_EQ_RING_ID_MASK;
199 	val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
200 			DB_EQ_RING_ID_EXT_MASK_SHIFT);
201 
202 	if (adapter->eeh_err)
203 		return;
204 
205 	if (arm)
206 		val |= 1 << DB_EQ_REARM_SHIFT;
207 	if (clear_int)
208 		val |= 1 << DB_EQ_CLR_SHIFT;
209 	val |= 1 << DB_EQ_EVNT_SHIFT;
210 	val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
211 	iowrite32(val, adapter->db + DB_EQ_OFFSET);
212 }
213 
214 void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
215 {
216 	u32 val = 0;
217 	val |= qid & DB_CQ_RING_ID_MASK;
218 	val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
219 			DB_CQ_RING_ID_EXT_MASK_SHIFT);
220 
221 	if (adapter->eeh_err)
222 		return;
223 
224 	if (arm)
225 		val |= 1 << DB_CQ_REARM_SHIFT;
226 	val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
227 	iowrite32(val, adapter->db + DB_CQ_OFFSET);
228 }
229 
230 static int be_mac_addr_set(struct net_device *netdev, void *p)
231 {
232 	struct be_adapter *adapter = netdev_priv(netdev);
233 	struct sockaddr *addr = p;
234 	int status = 0;
235 	u8 current_mac[ETH_ALEN];
236 	u32 pmac_id = adapter->pmac_id;
237 
238 	if (!is_valid_ether_addr(addr->sa_data))
239 		return -EADDRNOTAVAIL;
240 
241 	status = be_cmd_mac_addr_query(adapter, current_mac,
242 				MAC_ADDRESS_TYPE_NETWORK, false,
243 				adapter->if_handle, 0);
244 	if (status)
245 		goto err;
246 
247 	if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
248 		status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
249 				adapter->if_handle, &adapter->pmac_id, 0);
250 		if (status)
251 			goto err;
252 
253 		be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
254 	}
255 	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
256 	return 0;
257 err:
258 	dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
259 	return status;
260 }
261 
262 static void populate_be2_stats(struct be_adapter *adapter)
263 {
264 	struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
265 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
266 	struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
267 	struct be_port_rxf_stats_v0 *port_stats =
268 					&rxf_stats->port[adapter->port_num];
269 	struct be_drv_stats *drvs = &adapter->drv_stats;
270 
271 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
272 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
273 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
274 	drvs->rx_control_frames = port_stats->rx_control_frames;
275 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
276 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
277 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
278 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
279 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
280 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
281 	drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
282 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
283 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
284 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
285 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
286 	drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
287 	drvs->rx_dropped_header_too_small =
288 		port_stats->rx_dropped_header_too_small;
289 	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
290 	drvs->rx_alignment_symbol_errors =
291 		port_stats->rx_alignment_symbol_errors;
292 
293 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
294 	drvs->tx_controlframes = port_stats->tx_controlframes;
295 
296 	if (adapter->port_num)
297 		drvs->jabber_events = rxf_stats->port1_jabber_events;
298 	else
299 		drvs->jabber_events = rxf_stats->port0_jabber_events;
300 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
301 	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
302 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
303 	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
304 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
305 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
306 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
307 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
308 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
309 }
310 
311 static void populate_be3_stats(struct be_adapter *adapter)
312 {
313 	struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
314 	struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
315 	struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
316 	struct be_port_rxf_stats_v1 *port_stats =
317 					&rxf_stats->port[adapter->port_num];
318 	struct be_drv_stats *drvs = &adapter->drv_stats;
319 
320 	be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
321 	drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
322 	drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
323 	drvs->rx_pause_frames = port_stats->rx_pause_frames;
324 	drvs->rx_crc_errors = port_stats->rx_crc_errors;
325 	drvs->rx_control_frames = port_stats->rx_control_frames;
326 	drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
327 	drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
328 	drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
329 	drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
330 	drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
331 	drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
332 	drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
333 	drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
334 	drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
335 	drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
336 	drvs->rx_dropped_header_too_small =
337 		port_stats->rx_dropped_header_too_small;
338 	drvs->rx_input_fifo_overflow_drop =
339 		port_stats->rx_input_fifo_overflow_drop;
340 	drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
341 	drvs->rx_alignment_symbol_errors =
342 		port_stats->rx_alignment_symbol_errors;
343 	drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
344 	drvs->tx_pauseframes = port_stats->tx_pauseframes;
345 	drvs->tx_controlframes = port_stats->tx_controlframes;
346 	drvs->jabber_events = port_stats->jabber_events;
347 	drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
348 	drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
349 	drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
350 	drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
351 	drvs->forwarded_packets = rxf_stats->forwarded_packets;
352 	drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
353 	drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
354 	drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
355 	adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
356 }
357 
358 static void populate_lancer_stats(struct be_adapter *adapter)
359 {
360 
361 	struct be_drv_stats *drvs = &adapter->drv_stats;
362 	struct lancer_pport_stats *pport_stats =
363 					pport_stats_from_cmd(adapter);
364 
365 	be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
366 	drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
367 	drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
368 	drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
369 	drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
370 	drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
371 	drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
372 	drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
373 	drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
374 	drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
375 	drvs->rx_dropped_tcp_length =
376 				pport_stats->rx_dropped_invalid_tcp_length;
377 	drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
378 	drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
379 	drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
380 	drvs->rx_dropped_header_too_small =
381 				pport_stats->rx_dropped_header_too_small;
382 	drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
383 	drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
384 	drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
385 	drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
386 	drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
387 	drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
388 	drvs->jabber_events = pport_stats->rx_jabbers;
389 	drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
390 	drvs->forwarded_packets = pport_stats->num_forwards_lo;
391 	drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
392 	drvs->rx_drops_too_many_frags =
393 				pport_stats->rx_drops_too_many_frags_lo;
394 }
395 
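/* Fold a 16-bit hw counter value (which may wrap past 65535) into a
 * 32-bit sw accumulator
 */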
396 static void accumulate_16bit_val(u32 *acc, u16 val)
397 {
398 #define lo(x)			(x & 0xFFFF)
399 #define hi(x)			(x & 0xFFFF0000)
400 	bool wrapped = val < lo(*acc);
401 	u32 newacc = hi(*acc) + val;
402 
403 	if (wrapped)
404 		newacc += 65536;
405 	ACCESS_ONCE(*acc) = newacc;
406 }
407 
408 void be_parse_stats(struct be_adapter *adapter)
409 {
410 	struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
411 	struct be_rx_obj *rxo;
412 	int i;
413 
414 	if (adapter->generation == BE_GEN3) {
415 		if (lancer_chip(adapter))
416 			populate_lancer_stats(adapter);
417 		else
418 			populate_be3_stats(adapter);
419 	} else {
420 		populate_be2_stats(adapter);
421 	}
422 
423 	/* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
424 	for_all_rx_queues(adapter, rxo, i) {
425 		/* below erx HW counter can actually wrap around after
426 		 * 65535. Driver accumulates a 32-bit value
427 		 */
428 		accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
429 				(u16)erx->rx_drops_no_fragments[rxo->q.id]);
430 	}
431 }
432 
433 static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
434 					struct rtnl_link_stats64 *stats)
435 {
436 	struct be_adapter *adapter = netdev_priv(netdev);
437 	struct be_drv_stats *drvs = &adapter->drv_stats;
438 	struct be_rx_obj *rxo;
439 	struct be_tx_obj *txo;
440 	u64 pkts, bytes;
441 	unsigned int start;
442 	int i;
443 
444 	for_all_rx_queues(adapter, rxo, i) {
445 		const struct be_rx_stats *rx_stats = rx_stats(rxo);
446 		do {
447 			start = u64_stats_fetch_begin_bh(&rx_stats->sync);
448 			pkts = rx_stats(rxo)->rx_pkts;
449 			bytes = rx_stats(rxo)->rx_bytes;
450 		} while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
451 		stats->rx_packets += pkts;
452 		stats->rx_bytes += bytes;
453 		stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
454 		stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
455 					rx_stats(rxo)->rx_drops_no_frags;
456 	}
457 
458 	for_all_tx_queues(adapter, txo, i) {
459 		const struct be_tx_stats *tx_stats = tx_stats(txo);
460 		do {
461 			start = u64_stats_fetch_begin_bh(&tx_stats->sync);
462 			pkts = tx_stats(txo)->tx_pkts;
463 			bytes = tx_stats(txo)->tx_bytes;
464 		} while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
465 		stats->tx_packets += pkts;
466 		stats->tx_bytes += bytes;
467 	}
468 
469 	/* bad pkts received */
470 	stats->rx_errors = drvs->rx_crc_errors +
471 		drvs->rx_alignment_symbol_errors +
472 		drvs->rx_in_range_errors +
473 		drvs->rx_out_range_errors +
474 		drvs->rx_frame_too_long +
475 		drvs->rx_dropped_too_small +
476 		drvs->rx_dropped_too_short +
477 		drvs->rx_dropped_header_too_small +
478 		drvs->rx_dropped_tcp_length +
479 		drvs->rx_dropped_runt;
480 
481 	/* detailed rx errors */
482 	stats->rx_length_errors = drvs->rx_in_range_errors +
483 		drvs->rx_out_range_errors +
484 		drvs->rx_frame_too_long;
485 
486 	stats->rx_crc_errors = drvs->rx_crc_errors;
487 
488 	/* frame alignment errors */
489 	stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;
490 
491 	/* receiver fifo overrun */
492 	/* drops_no_pbuf is not per i/f, it's per BE card */
493 	stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
494 				drvs->rx_input_fifo_overflow_drop +
495 				drvs->rx_drops_no_pbuf;
496 	return stats;
497 }
498 
499 void be_link_status_update(struct be_adapter *adapter, u8 link_status)
500 {
501 	struct net_device *netdev = adapter->netdev;
502 
503 	if (!(adapter->flags & BE_FLAGS_LINK_STATUS_INIT)) {
504 		netif_carrier_off(netdev);
505 		adapter->flags |= BE_FLAGS_LINK_STATUS_INIT;
506 	}
507 
508 	if ((link_status & LINK_STATUS_MASK) == LINK_UP)
509 		netif_carrier_on(netdev);
510 	else
511 		netif_carrier_off(netdev);
512 }
513 
514 static void be_tx_stats_update(struct be_tx_obj *txo,
515 			u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
516 {
517 	struct be_tx_stats *stats = tx_stats(txo);
518 
519 	u64_stats_update_begin(&stats->sync);
520 	stats->tx_reqs++;
521 	stats->tx_wrbs += wrb_cnt;
522 	stats->tx_bytes += copied;
523 	stats->tx_pkts += (gso_segs ? gso_segs : 1);
524 	if (stopped)
525 		stats->tx_stops++;
526 	u64_stats_update_end(&stats->sync);
527 }
528 
529 /* Determine number of WRB entries needed to xmit data in an skb */
530 static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
531 								bool *dummy)
532 {
533 	int cnt = (skb->len > skb->data_len);
534 
535 	cnt += skb_shinfo(skb)->nr_frags;
536 
537 	/* to account for hdr wrb */
538 	cnt++;
539 	if (lancer_chip(adapter) || !(cnt & 1)) {
540 		*dummy = false;
541 	} else {
542 		/* add a dummy to make it an even num */
543 		cnt++;
544 		*dummy = true;
545 	}
546 	BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
547 	return cnt;
548 }
549 
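/* Fill a wrb with the dma address and length of one tx fragment */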
550 static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
551 {
552 	wrb->frag_pa_hi = upper_32_bits(addr);
553 	wrb->frag_pa_lo = addr & 0xFFFFFFFF;
554 	wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
555 }
556 
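/* If the vlan priority set by the stack is not enabled in the adapter's
 * priority bmap, replace it with the recommended priority
 */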
557 static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
558 					struct sk_buff *skb)
559 {
560 	u8 vlan_prio;
561 	u16 vlan_tag;
562 
563 	vlan_tag = vlan_tx_tag_get(skb);
564 	vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
565 	/* If vlan priority provided by OS is NOT in available bmap */
566 	if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
567 		vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
568 				adapter->recommended_prio;
569 
570 	return vlan_tag;
571 }
572 
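/* Fill the header wrb: LSO/checksum-offload flags, vlan tag, wrb count
 * and frame length
 */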
573 static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
574 		struct sk_buff *skb, u32 wrb_cnt, u32 len)
575 {
576 	u16 vlan_tag;
577 
578 	memset(hdr, 0, sizeof(*hdr));
579 
580 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);
581 
582 	if (skb_is_gso(skb)) {
583 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
584 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
585 			hdr, skb_shinfo(skb)->gso_size);
586 		if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
587 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
588 		if (lancer_chip(adapter) && adapter->sli_family  ==
589 							LANCER_A0_SLI_FAMILY) {
590 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
591 			if (is_tcp_pkt(skb))
592 				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
593 								tcpcs, hdr, 1);
594 			else if (is_udp_pkt(skb))
595 				AMAP_SET_BITS(struct amap_eth_hdr_wrb,
596 								udpcs, hdr, 1);
597 		}
598 	} else if (skb->ip_summed == CHECKSUM_PARTIAL) {
599 		if (is_tcp_pkt(skb))
600 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
601 		else if (is_udp_pkt(skb))
602 			AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
603 	}
604 
605 	if (vlan_tx_tag_present(skb)) {
606 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
607 		vlan_tag = be_get_tx_vlan_tag(adapter, skb);
608 		AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
609 	}
610 
611 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
612 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
613 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
614 	AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
615 }
616 
617 static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
618 		bool unmap_single)
619 {
620 	dma_addr_t dma;
621 
622 	be_dws_le_to_cpu(wrb, sizeof(*wrb));
623 
624 	dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
625 	if (wrb->frag_len) {
626 		if (unmap_single)
627 			dma_unmap_single(dev, dma, wrb->frag_len,
628 					 DMA_TO_DEVICE);
629 		else
630 			dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
631 	}
632 }
633 
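/* Map the skb head and frags and fill the tx wrbs; returns the number of
 * bytes mapped, or 0 on a dma mapping error (partial mappings are undone)
 */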
634 static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
635 		struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
636 {
637 	dma_addr_t busaddr;
638 	int i, copied = 0;
639 	struct device *dev = &adapter->pdev->dev;
640 	struct sk_buff *first_skb = skb;
641 	struct be_eth_wrb *wrb;
642 	struct be_eth_hdr_wrb *hdr;
643 	bool map_single = false;
644 	u16 map_head;
645 
646 	hdr = queue_head_node(txq);
647 	queue_head_inc(txq);
648 	map_head = txq->head;
649 
650 	if (skb->len > skb->data_len) {
651 		int len = skb_headlen(skb);
652 		busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
653 		if (dma_mapping_error(dev, busaddr))
654 			goto dma_err;
655 		map_single = true;
656 		wrb = queue_head_node(txq);
657 		wrb_fill(wrb, busaddr, len);
658 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
659 		queue_head_inc(txq);
660 		copied += len;
661 	}
662 
663 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
664 		const struct skb_frag_struct *frag =
665 			&skb_shinfo(skb)->frags[i];
666 		busaddr = skb_frag_dma_map(dev, frag, 0,
667 					   skb_frag_size(frag), DMA_TO_DEVICE);
668 		if (dma_mapping_error(dev, busaddr))
669 			goto dma_err;
670 		wrb = queue_head_node(txq);
671 		wrb_fill(wrb, busaddr, skb_frag_size(frag));
672 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
673 		queue_head_inc(txq);
674 		copied += skb_frag_size(frag);
675 	}
676 
677 	if (dummy_wrb) {
678 		wrb = queue_head_node(txq);
679 		wrb_fill(wrb, 0, 0);
680 		be_dws_cpu_to_le(wrb, sizeof(*wrb));
681 		queue_head_inc(txq);
682 	}
683 
684 	wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
685 	be_dws_cpu_to_le(hdr, sizeof(*hdr));
686 
687 	return copied;
688 dma_err:
689 	txq->head = map_head;
690 	while (copied) {
691 		wrb = queue_head_node(txq);
692 		unmap_tx_frag(dev, wrb, map_single);
693 		map_single = false;
694 		copied -= wrb->frag_len;
695 		queue_head_inc(txq);
696 	}
697 	return 0;
698 }
699 
700 static netdev_tx_t be_xmit(struct sk_buff *skb,
701 			struct net_device *netdev)
702 {
703 	struct be_adapter *adapter = netdev_priv(netdev);
704 	struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
705 	struct be_queue_info *txq = &txo->q;
706 	u32 wrb_cnt = 0, copied = 0;
707 	u32 start = txq->head;
708 	bool dummy_wrb, stopped = false;
709 
710 	/* For vlan tagged pkts, BE
711 	 * 1) calculates checksum even when CSO is not requested
712 	 * 2) calculates checksum wrongly for padded pkt less than
713 	 * 60 bytes long.
714 	 * As a workaround disable TX vlan offloading in such cases.
715 	 */
716 	if (unlikely(vlan_tx_tag_present(skb) &&
717 		     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
718 		skb = skb_share_check(skb, GFP_ATOMIC);
719 		if (unlikely(!skb))
720 			goto tx_drop;
721 
722 		skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
723 		if (unlikely(!skb))
724 			goto tx_drop;
725 
726 		skb->vlan_tci = 0;
727 	}
728 
729 	wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);
730 
731 	copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
732 	if (copied) {
733 		/* record the sent skb in the sent_skb table */
734 		BUG_ON(txo->sent_skb_list[start]);
735 		txo->sent_skb_list[start] = skb;
736 
737 		/* Ensure txq has space for the next skb; Else stop the queue
738 	 * *BEFORE* ringing the tx doorbell, so that we serialize the
739 		 * tx compls of the current transmit which'll wake up the queue
740 		 */
741 		atomic_add(wrb_cnt, &txq->used);
742 		if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
743 								txq->len) {
744 			netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
745 			stopped = true;
746 		}
747 
748 		be_txq_notify(adapter, txq->id, wrb_cnt);
749 
750 		be_tx_stats_update(txo, wrb_cnt, copied,
751 				skb_shinfo(skb)->gso_segs, stopped);
752 	} else {
753 		txq->head = start;
754 		dev_kfree_skb_any(skb);
755 	}
756 tx_drop:
757 	return NETDEV_TX_OK;
758 }
759 
760 static int be_change_mtu(struct net_device *netdev, int new_mtu)
761 {
762 	struct be_adapter *adapter = netdev_priv(netdev);
763 	if (new_mtu < BE_MIN_MTU ||
764 			new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
765 					(ETH_HLEN + ETH_FCS_LEN))) {
766 		dev_info(&adapter->pdev->dev,
767 			"MTU must be between %d and %d bytes\n",
768 			BE_MIN_MTU,
769 			(BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
770 		return -EINVAL;
771 	}
772 	dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
773 			netdev->mtu, new_mtu);
774 	netdev->mtu = new_mtu;
775 	return 0;
776 }
777 
778 /*
779  * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
780  * If the user configures more, place BE in vlan promiscuous mode.
781  */
782 static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
783 {
784 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf_num];
785 	u16 vtag[BE_NUM_VLANS_SUPPORTED];
786 	u16 ntags = 0, i;
787 	int status = 0;
788 
789 	if (vf) {
790 		vtag[0] = cpu_to_le16(vf_cfg->vlan_tag);
791 		status = be_cmd_vlan_config(adapter, vf_cfg->if_handle, vtag,
792 					    1, 1, 0);
793 	}
794 
795 	/* No need to further configure vids if in promiscuous mode */
796 	if (adapter->promiscuous)
797 		return 0;
798 
799 	if (adapter->vlans_added <= adapter->max_vlans)  {
800 		/* Construct VLAN Table to give to HW */
801 		for (i = 0; i < VLAN_N_VID; i++) {
802 			if (adapter->vlan_tag[i]) {
803 				vtag[ntags] = cpu_to_le16(i);
804 				ntags++;
805 			}
806 		}
807 		status = be_cmd_vlan_config(adapter, adapter->if_handle,
808 					vtag, ntags, 1, 0);
809 	} else {
810 		status = be_cmd_vlan_config(adapter, adapter->if_handle,
811 					NULL, 0, 1, 1);
812 	}
813 
814 	return status;
815 }
816 
817 static int be_vlan_add_vid(struct net_device *netdev, u16 vid)
818 {
819 	struct be_adapter *adapter = netdev_priv(netdev);
820 	int status = 0;
821 
822 	if (!be_physfn(adapter)) {
823 		status = -EINVAL;
824 		goto ret;
825 	}
826 
827 	adapter->vlan_tag[vid] = 1;
828 	if (adapter->vlans_added <= (adapter->max_vlans + 1))
829 		status = be_vid_config(adapter, false, 0);
830 
831 	if (!status)
832 		adapter->vlans_added++;
833 	else
834 		adapter->vlan_tag[vid] = 0;
835 ret:
836 	return status;
837 }
838 
839 static int be_vlan_rem_vid(struct net_device *netdev, u16 vid)
840 {
841 	struct be_adapter *adapter = netdev_priv(netdev);
842 	int status = 0;
843 
844 	if (!be_physfn(adapter)) {
845 		status = -EINVAL;
846 		goto ret;
847 	}
848 
849 	adapter->vlan_tag[vid] = 0;
850 	if (adapter->vlans_added <= adapter->max_vlans)
851 		status = be_vid_config(adapter, false, 0);
852 
853 	if (!status)
854 		adapter->vlans_added--;
855 	else
856 		adapter->vlan_tag[vid] = 1;
857 ret:
858 	return status;
859 }
860 
861 static void be_set_rx_mode(struct net_device *netdev)
862 {
863 	struct be_adapter *adapter = netdev_priv(netdev);
864 
865 	if (netdev->flags & IFF_PROMISC) {
866 		be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
867 		adapter->promiscuous = true;
868 		goto done;
869 	}
870 
871 	/* BE was previously in promiscuous mode; disable it */
872 	if (adapter->promiscuous) {
873 		adapter->promiscuous = false;
874 		be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);
875 
876 		if (adapter->vlans_added)
877 			be_vid_config(adapter, false, 0);
878 	}
879 
880 	/* Enable multicast promisc if num configured exceeds what we support */
881 	if (netdev->flags & IFF_ALLMULTI ||
882 			netdev_mc_count(netdev) > BE_MAX_MC) {
883 		be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
884 		goto done;
885 	}
886 
887 	be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
888 done:
889 	return;
890 }
891 
892 static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
893 {
894 	struct be_adapter *adapter = netdev_priv(netdev);
895 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
896 	int status;
897 
898 	if (!sriov_enabled(adapter))
899 		return -EPERM;
900 
901 	if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
902 		return -EINVAL;
903 
904 	if (lancer_chip(adapter)) {
905 		status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
906 	} else {
907 		status = be_cmd_pmac_del(adapter, vf_cfg->if_handle,
908 					 vf_cfg->pmac_id, vf + 1);
909 
910 		status = be_cmd_pmac_add(adapter, mac, vf_cfg->if_handle,
911 					 &vf_cfg->pmac_id, vf + 1);
912 	}
913 
914 	if (status)
915 		dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
916 				mac, vf);
917 	else
918 		memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
919 
920 	return status;
921 }
922 
923 static int be_get_vf_config(struct net_device *netdev, int vf,
924 			struct ifla_vf_info *vi)
925 {
926 	struct be_adapter *adapter = netdev_priv(netdev);
927 	struct be_vf_cfg *vf_cfg = &adapter->vf_cfg[vf];
928 
929 	if (!sriov_enabled(adapter))
930 		return -EPERM;
931 
932 	if (vf >= adapter->num_vfs)
933 		return -EINVAL;
934 
935 	vi->vf = vf;
936 	vi->tx_rate = vf_cfg->tx_rate;
937 	vi->vlan = vf_cfg->vlan_tag;
938 	vi->qos = 0;
939 	memcpy(&vi->mac, vf_cfg->mac_addr, ETH_ALEN);
940 
941 	return 0;
942 }
943 
944 static int be_set_vf_vlan(struct net_device *netdev,
945 			int vf, u16 vlan, u8 qos)
946 {
947 	struct be_adapter *adapter = netdev_priv(netdev);
948 	int status = 0;
949 
950 	if (!sriov_enabled(adapter))
951 		return -EPERM;
952 
953 	if (vf >= adapter->num_vfs || vlan > 4095)
954 		return -EINVAL;
955 
956 	if (vlan) {
957 		adapter->vf_cfg[vf].vlan_tag = vlan;
958 		adapter->vlans_added++;
959 	} else {
960 		adapter->vf_cfg[vf].vlan_tag = 0;
961 		adapter->vlans_added--;
962 	}
963 
964 	status = be_vid_config(adapter, true, vf);
965 
966 	if (status)
967 		dev_info(&adapter->pdev->dev,
968 				"VLAN %d config on VF %d failed\n", vlan, vf);
969 	return status;
970 }
971 
972 static int be_set_vf_tx_rate(struct net_device *netdev,
973 			int vf, int rate)
974 {
975 	struct be_adapter *adapter = netdev_priv(netdev);
976 	int status = 0;
977 
978 	if (!sriov_enabled(adapter))
979 		return -EPERM;
980 
981 	if (vf >= adapter->num_vfs)
982 		return -EINVAL;
983 
984 	if (rate < 100 || rate > 10000) {
985 		dev_err(&adapter->pdev->dev,
986 			"tx rate must be between 100 and 10000 Mbps\n");
987 		return -EINVAL;
988 	}
989 
990 	status = be_cmd_set_qos(adapter, rate / 10, vf + 1);
991 
992 	if (status)
993 		dev_err(&adapter->pdev->dev,
994 				"tx rate %d on VF %d failed\n", rate, vf);
995 	else
996 		adapter->vf_cfg[vf].tx_rate = rate;
997 	return status;
998 }
999 
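/* Adaptive interrupt coalescing: once a second, derive a new EQ delay
 * from the measured rx packet rate
 */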
1000 static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
1001 {
1002 	struct be_eq_obj *rx_eq = &rxo->rx_eq;
1003 	struct be_rx_stats *stats = rx_stats(rxo);
1004 	ulong now = jiffies;
1005 	ulong delta = now - stats->rx_jiffies;
1006 	u64 pkts;
1007 	unsigned int start, eqd;
1008 
1009 	if (!rx_eq->enable_aic)
1010 		return;
1011 
1012 	/* Wrapped around */
1013 	if (time_before(now, stats->rx_jiffies)) {
1014 		stats->rx_jiffies = now;
1015 		return;
1016 	}
1017 
1018 	/* Update once a second */
1019 	if (delta < HZ)
1020 		return;
1021 
1022 	do {
1023 		start = u64_stats_fetch_begin_bh(&stats->sync);
1024 		pkts = stats->rx_pkts;
1025 	} while (u64_stats_fetch_retry_bh(&stats->sync, start));
1026 
1027 	stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
1028 	stats->rx_pkts_prev = pkts;
1029 	stats->rx_jiffies = now;
1030 	eqd = stats->rx_pps / 110000;
1031 	eqd = eqd << 3;
1032 	if (eqd > rx_eq->max_eqd)
1033 		eqd = rx_eq->max_eqd;
1034 	if (eqd < rx_eq->min_eqd)
1035 		eqd = rx_eq->min_eqd;
1036 	if (eqd < 10)
1037 		eqd = 0;
1038 	if (eqd != rx_eq->cur_eqd) {
1039 		be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
1040 		rx_eq->cur_eqd = eqd;
1041 	}
1042 }
1043 
1044 static void be_rx_stats_update(struct be_rx_obj *rxo,
1045 		struct be_rx_compl_info *rxcp)
1046 {
1047 	struct be_rx_stats *stats = rx_stats(rxo);
1048 
1049 	u64_stats_update_begin(&stats->sync);
1050 	stats->rx_compl++;
1051 	stats->rx_bytes += rxcp->pkt_size;
1052 	stats->rx_pkts++;
1053 	if (rxcp->pkt_type == BE_MULTICAST_PACKET)
1054 		stats->rx_mcast_pkts++;
1055 	if (rxcp->err)
1056 		stats->rx_compl_err++;
1057 	u64_stats_update_end(&stats->sync);
1058 }
1059 
1060 static inline bool csum_passed(struct be_rx_compl_info *rxcp)
1061 {
1062 	/* L4 checksum is not reliable for non TCP/UDP packets.
1063 	 * Also ignore ipcksm for ipv6 pkts */
1064 	return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
1065 				(rxcp->ip_csum || rxcp->ipv6);
1066 }
1067 
1068 static struct be_rx_page_info *
1069 get_rx_page_info(struct be_adapter *adapter,
1070 		struct be_rx_obj *rxo,
1071 		u16 frag_idx)
1072 {
1073 	struct be_rx_page_info *rx_page_info;
1074 	struct be_queue_info *rxq = &rxo->q;
1075 
1076 	rx_page_info = &rxo->page_info_tbl[frag_idx];
1077 	BUG_ON(!rx_page_info->page);
1078 
1079 	if (rx_page_info->last_page_user) {
1080 		dma_unmap_page(&adapter->pdev->dev,
1081 			       dma_unmap_addr(rx_page_info, bus),
1082 			       adapter->big_page_size, DMA_FROM_DEVICE);
1083 		rx_page_info->last_page_user = false;
1084 	}
1085 
1086 	atomic_dec(&rxq->used);
1087 	return rx_page_info;
1088 }
1089 
1090 /* Throw away the data in the Rx completion */
1091 static void be_rx_compl_discard(struct be_adapter *adapter,
1092 		struct be_rx_obj *rxo,
1093 		struct be_rx_compl_info *rxcp)
1094 {
1095 	struct be_queue_info *rxq = &rxo->q;
1096 	struct be_rx_page_info *page_info;
1097 	u16 i, num_rcvd = rxcp->num_rcvd;
1098 
1099 	for (i = 0; i < num_rcvd; i++) {
1100 		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1101 		put_page(page_info->page);
1102 		memset(page_info, 0, sizeof(*page_info));
1103 		index_inc(&rxcp->rxq_idx, rxq->len);
1104 	}
1105 }
1106 
1107 /*
1108  * skb_fill_rx_data forms a complete skb for an ether frame
1109  * indicated by rxcp.
1110  */
1111 static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
1112 			struct sk_buff *skb, struct be_rx_compl_info *rxcp)
1113 {
1114 	struct be_queue_info *rxq = &rxo->q;
1115 	struct be_rx_page_info *page_info;
1116 	u16 i, j;
1117 	u16 hdr_len, curr_frag_len, remaining;
1118 	u8 *start;
1119 
1120 	page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1121 	start = page_address(page_info->page) + page_info->page_offset;
1122 	prefetch(start);
1123 
1124 	/* Copy data in the first descriptor of this completion */
1125 	curr_frag_len = min(rxcp->pkt_size, rx_frag_size);
1126 
1127 	/* Copy the header portion into skb_data */
1128 	hdr_len = min(BE_HDR_LEN, curr_frag_len);
1129 	memcpy(skb->data, start, hdr_len);
1130 	skb->len = curr_frag_len;
1131 	if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
1132 		/* Complete packet has now been moved to data */
1133 		put_page(page_info->page);
1134 		skb->data_len = 0;
1135 		skb->tail += curr_frag_len;
1136 	} else {
1137 		skb_shinfo(skb)->nr_frags = 1;
1138 		skb_frag_set_page(skb, 0, page_info->page);
1139 		skb_shinfo(skb)->frags[0].page_offset =
1140 					page_info->page_offset + hdr_len;
1141 		skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
1142 		skb->data_len = curr_frag_len - hdr_len;
1143 		skb->truesize += rx_frag_size;
1144 		skb->tail += hdr_len;
1145 	}
1146 	page_info->page = NULL;
1147 
1148 	if (rxcp->pkt_size <= rx_frag_size) {
1149 		BUG_ON(rxcp->num_rcvd != 1);
1150 		return;
1151 	}
1152 
1153 	/* More frags present for this completion */
1154 	index_inc(&rxcp->rxq_idx, rxq->len);
1155 	remaining = rxcp->pkt_size - curr_frag_len;
1156 	for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
1157 		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1158 		curr_frag_len = min(remaining, rx_frag_size);
1159 
1160 		/* Coalesce all frags from the same physical page in one slot */
1161 		if (page_info->page_offset == 0) {
1162 			/* Fresh page */
1163 			j++;
1164 			skb_frag_set_page(skb, j, page_info->page);
1165 			skb_shinfo(skb)->frags[j].page_offset =
1166 							page_info->page_offset;
1167 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1168 			skb_shinfo(skb)->nr_frags++;
1169 		} else {
1170 			put_page(page_info->page);
1171 		}
1172 
1173 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1174 		skb->len += curr_frag_len;
1175 		skb->data_len += curr_frag_len;
1176 		skb->truesize += rx_frag_size;
1177 		remaining -= curr_frag_len;
1178 		index_inc(&rxcp->rxq_idx, rxq->len);
1179 		page_info->page = NULL;
1180 	}
1181 	BUG_ON(j > MAX_SKB_FRAGS);
1182 }
1183 
1184 /* Process the RX completion indicated by rxcp when GRO is disabled */
1185 static void be_rx_compl_process(struct be_adapter *adapter,
1186 			struct be_rx_obj *rxo,
1187 			struct be_rx_compl_info *rxcp)
1188 {
1189 	struct net_device *netdev = adapter->netdev;
1190 	struct sk_buff *skb;
1191 
1192 	skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
1193 	if (unlikely(!skb)) {
1194 		rx_stats(rxo)->rx_drops_no_skbs++;
1195 		be_rx_compl_discard(adapter, rxo, rxcp);
1196 		return;
1197 	}
1198 
1199 	skb_fill_rx_data(adapter, rxo, skb, rxcp);
1200 
1201 	if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
1202 		skb->ip_summed = CHECKSUM_UNNECESSARY;
1203 	else
1204 		skb_checksum_none_assert(skb);
1205 
1206 	skb->protocol = eth_type_trans(skb, netdev);
1207 	if (adapter->netdev->features & NETIF_F_RXHASH)
1208 		skb->rxhash = rxcp->rss_hash;
1209 
1210 
1211 	if (rxcp->vlanf)
1212 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1213 
1214 	netif_receive_skb(skb);
1215 }
1216 
1217 /* Process the RX completion indicated by rxcp when GRO is enabled */
1218 static void be_rx_compl_process_gro(struct be_adapter *adapter,
1219 		struct be_rx_obj *rxo,
1220 		struct be_rx_compl_info *rxcp)
1221 {
1222 	struct be_rx_page_info *page_info;
1223 	struct sk_buff *skb = NULL;
1224 	struct be_queue_info *rxq = &rxo->q;
1225 	struct be_eq_obj *eq_obj =  &rxo->rx_eq;
1226 	u16 remaining, curr_frag_len;
1227 	u16 i, j;
1228 
1229 	skb = napi_get_frags(&eq_obj->napi);
1230 	if (!skb) {
1231 		be_rx_compl_discard(adapter, rxo, rxcp);
1232 		return;
1233 	}
1234 
1235 	remaining = rxcp->pkt_size;
1236 	for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
1237 		page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
1238 
1239 		curr_frag_len = min(remaining, rx_frag_size);
1240 
1241 		/* Coalesce all frags from the same physical page in one slot */
1242 		if (i == 0 || page_info->page_offset == 0) {
1243 			/* First frag or Fresh page */
1244 			j++;
1245 			skb_frag_set_page(skb, j, page_info->page);
1246 			skb_shinfo(skb)->frags[j].page_offset =
1247 							page_info->page_offset;
1248 			skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
1249 		} else {
1250 			put_page(page_info->page);
1251 		}
1252 		skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
1253 		skb->truesize += rx_frag_size;
1254 		remaining -= curr_frag_len;
1255 		index_inc(&rxcp->rxq_idx, rxq->len);
1256 		memset(page_info, 0, sizeof(*page_info));
1257 	}
1258 	BUG_ON(j > MAX_SKB_FRAGS);
1259 
1260 	skb_shinfo(skb)->nr_frags = j + 1;
1261 	skb->len = rxcp->pkt_size;
1262 	skb->data_len = rxcp->pkt_size;
1263 	skb->ip_summed = CHECKSUM_UNNECESSARY;
1264 	if (adapter->netdev->features & NETIF_F_RXHASH)
1265 		skb->rxhash = rxcp->rss_hash;
1266 
1267 	if (rxcp->vlanf)
1268 		__vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);
1269 
1270 	napi_gro_frags(&eq_obj->napi);
1271 }
1272 
1273 static void be_parse_rx_compl_v1(struct be_adapter *adapter,
1274 				struct be_eth_rx_compl *compl,
1275 				struct be_rx_compl_info *rxcp)
1276 {
1277 	rxcp->pkt_size =
1278 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
1279 	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
1280 	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
1281 	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
1282 	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
1283 	rxcp->ip_csum =
1284 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
1285 	rxcp->l4_csum =
1286 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
1287 	rxcp->ipv6 =
1288 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
1289 	rxcp->rxq_idx =
1290 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
1291 	rxcp->num_rcvd =
1292 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
1293 	rxcp->pkt_type =
1294 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
1295 	rxcp->rss_hash =
1296 		AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
1297 	if (rxcp->vlanf) {
1298 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
1299 					  compl);
1300 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
1301 					       compl);
1302 	}
1303 	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
1304 }
1305 
1306 static void be_parse_rx_compl_v0(struct be_adapter *adapter,
1307 				struct be_eth_rx_compl *compl,
1308 				struct be_rx_compl_info *rxcp)
1309 {
1310 	rxcp->pkt_size =
1311 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
1312 	rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
1313 	rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
1314 	rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
1315 	rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
1316 	rxcp->ip_csum =
1317 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
1318 	rxcp->l4_csum =
1319 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
1320 	rxcp->ipv6 =
1321 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
1322 	rxcp->rxq_idx =
1323 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
1324 	rxcp->num_rcvd =
1325 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
1326 	rxcp->pkt_type =
1327 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
1328 	rxcp->rss_hash =
1329 		AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
1330 	if (rxcp->vlanf) {
1331 		rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
1332 					  compl);
1333 		rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
1334 					       compl);
1335 	}
1336 	rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
1337 }
1338 
1339 static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
1340 {
1341 	struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
1342 	struct be_rx_compl_info *rxcp = &rxo->rxcp;
1343 	struct be_adapter *adapter = rxo->adapter;
1344 
1345 	/* For checking the valid bit it is Ok to use either definition as the
1346 	 * valid bit is at the same position in both v0 and v1 Rx compl */
1347 	if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
1348 		return NULL;
1349 
1350 	rmb();
1351 	be_dws_le_to_cpu(compl, sizeof(*compl));
1352 
1353 	if (adapter->be3_native)
1354 		be_parse_rx_compl_v1(adapter, compl, rxcp);
1355 	else
1356 		be_parse_rx_compl_v0(adapter, compl, rxcp);
1357 
1358 	if (rxcp->vlanf) {
1359 		/* vlanf could be wrongly set in some cards.
1360 		 * ignore if vtm is not set */
1361 		if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
1362 			rxcp->vlanf = 0;
1363 
1364 		if (!lancer_chip(adapter))
1365 			rxcp->vlan_tag = swab16(rxcp->vlan_tag);
1366 
1367 		if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
1368 		    !adapter->vlan_tag[rxcp->vlan_tag])
1369 			rxcp->vlanf = 0;
1370 	}
1371 
1372 	/* As the compl has been parsed, reset it; we won't touch it again */
1373 	compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;
1374 
1375 	queue_tail_inc(&rxo->cq);
1376 	return rxcp;
1377 }
1378 
1379 static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
1380 {
1381 	u32 order = get_order(size);
1382 
1383 	if (order > 0)
1384 		gfp |= __GFP_COMP;
1385 	return  alloc_pages(gfp, order);
1386 }
1387 
1388 /*
1389  * Allocate a page, split it to fragments of size rx_frag_size and post as
1390  * receive buffers to BE
1391  */
1392 static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
1393 {
1394 	struct be_adapter *adapter = rxo->adapter;
1395 	struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
1396 	struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
1397 	struct be_queue_info *rxq = &rxo->q;
1398 	struct page *pagep = NULL;
1399 	struct be_eth_rx_d *rxd;
1400 	u64 page_dmaaddr = 0, frag_dmaaddr;
1401 	u32 posted, page_offset = 0;
1402 
1403 	page_info = &rxo->page_info_tbl[rxq->head];
1404 	for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
1405 		if (!pagep) {
1406 			pagep = be_alloc_pages(adapter->big_page_size, gfp);
1407 			if (unlikely(!pagep)) {
1408 				rx_stats(rxo)->rx_post_fail++;
1409 				break;
1410 			}
1411 			page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
1412 						    0, adapter->big_page_size,
1413 						    DMA_FROM_DEVICE);
1414 			page_info->page_offset = 0;
1415 		} else {
1416 			get_page(pagep);
1417 			page_info->page_offset = page_offset + rx_frag_size;
1418 		}
1419 		page_offset = page_info->page_offset;
1420 		page_info->page = pagep;
1421 		dma_unmap_addr_set(page_info, bus, page_dmaaddr);
1422 		frag_dmaaddr = page_dmaaddr + page_info->page_offset;
1423 
1424 		rxd = queue_head_node(rxq);
1425 		rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
1426 		rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));
1427 
1428 		/* Any space left in the current big page for another frag? */
1429 		if ((page_offset + rx_frag_size + rx_frag_size) >
1430 					adapter->big_page_size) {
1431 			pagep = NULL;
1432 			page_info->last_page_user = true;
1433 		}
1434 
1435 		prev_page_info = page_info;
1436 		queue_head_inc(rxq);
1437 		page_info = &page_info_tbl[rxq->head];
1438 	}
1439 	if (pagep)
1440 		prev_page_info->last_page_user = true;
1441 
1442 	if (posted) {
1443 		atomic_add(posted, &rxq->used);
1444 		be_rxq_notify(adapter, rxq->id, posted);
1445 	} else if (atomic_read(&rxq->used) == 0) {
1446 		/* Let be_worker replenish when memory is available */
1447 		rxo->rx_post_starved = true;
1448 	}
1449 }
1450 
1451 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1452 {
1453 	struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1454 
1455 	if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1456 		return NULL;
1457 
1458 	rmb();
1459 	be_dws_le_to_cpu(txcp, sizeof(*txcp));
1460 
1461 	txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1462 
1463 	queue_tail_inc(tx_cq);
1464 	return txcp;
1465 }
1466 
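/* Unmap the fragments of a completed tx request and free the skb;
 * returns the number of wrbs (including the header wrb) consumed
 */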
1467 static u16 be_tx_compl_process(struct be_adapter *adapter,
1468 		struct be_tx_obj *txo, u16 last_index)
1469 {
1470 	struct be_queue_info *txq = &txo->q;
1471 	struct be_eth_wrb *wrb;
1472 	struct sk_buff **sent_skbs = txo->sent_skb_list;
1473 	struct sk_buff *sent_skb;
1474 	u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1475 	bool unmap_skb_hdr = true;
1476 
1477 	sent_skb = sent_skbs[txq->tail];
1478 	BUG_ON(!sent_skb);
1479 	sent_skbs[txq->tail] = NULL;
1480 
1481 	/* skip header wrb */
1482 	queue_tail_inc(txq);
1483 
1484 	do {
1485 		cur_index = txq->tail;
1486 		wrb = queue_tail_node(txq);
1487 		unmap_tx_frag(&adapter->pdev->dev, wrb,
1488 			      (unmap_skb_hdr && skb_headlen(sent_skb)));
1489 		unmap_skb_hdr = false;
1490 
1491 		num_wrbs++;
1492 		queue_tail_inc(txq);
1493 	} while (cur_index != last_index);
1494 
1495 	kfree_skb(sent_skb);
1496 	return num_wrbs;
1497 }
1498 
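/* Return the next valid event queue entry, or NULL if none is pending */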
1499 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1500 {
1501 	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1502 
1503 	if (!eqe->evt)
1504 		return NULL;
1505 
1506 	rmb();
1507 	eqe->evt = le32_to_cpu(eqe->evt);
1508 	queue_tail_inc(&eq_obj->q);
1509 	return eqe;
1510 }
1511 
1512 static int event_handle(struct be_adapter *adapter,
1513 			struct be_eq_obj *eq_obj,
1514 			bool rearm)
1515 {
1516 	struct be_eq_entry *eqe;
1517 	u16 num = 0;
1518 
1519 	while ((eqe = event_get(eq_obj)) != NULL) {
1520 		eqe->evt = 0;
1521 		num++;
1522 	}
1523 
1524 	/* Deal with any spurious interrupts that come
1525 	 * without events
1526 	 */
1527 	if (!num)
1528 		rearm = true;
1529 
1530 	be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1531 	if (num)
1532 		napi_schedule(&eq_obj->napi);
1533 
1534 	return num;
1535 }
1536 
1537 /* Just read and notify events without processing them.
1538  * Used at the time of destroying event queues */
1539 static void be_eq_clean(struct be_adapter *adapter,
1540 			struct be_eq_obj *eq_obj)
1541 {
1542 	struct be_eq_entry *eqe;
1543 	u16 num = 0;
1544 
1545 	while ((eqe = event_get(eq_obj)) != NULL) {
1546 		eqe->evt = 0;
1547 		num++;
1548 	}
1549 
1550 	if (num)
1551 		be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1552 }
1553 
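/* Discard pending rx completions and free rx buffers that were posted
 * but never used
 */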
1554 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1555 {
1556 	struct be_rx_page_info *page_info;
1557 	struct be_queue_info *rxq = &rxo->q;
1558 	struct be_queue_info *rx_cq = &rxo->cq;
1559 	struct be_rx_compl_info *rxcp;
1560 	u16 tail;
1561 
1562 	/* First cleanup pending rx completions */
1563 	while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1564 		be_rx_compl_discard(adapter, rxo, rxcp);
1565 		be_cq_notify(adapter, rx_cq->id, false, 1);
1566 	}
1567 
1568 	/* Then free posted rx buffers that were not used */
1569 	tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1570 	for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1571 		page_info = get_rx_page_info(adapter, rxo, tail);
1572 		put_page(page_info->page);
1573 		memset(page_info, 0, sizeof(*page_info));
1574 	}
1575 	BUG_ON(atomic_read(&rxq->used));
1576 	rxq->tail = rxq->head = 0;
1577 }
1578 
1579 static void be_tx_compl_clean(struct be_adapter *adapter,
1580 				struct be_tx_obj *txo)
1581 {
1582 	struct be_queue_info *tx_cq = &txo->cq;
1583 	struct be_queue_info *txq = &txo->q;
1584 	struct be_eth_tx_compl *txcp;
1585 	u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1586 	struct sk_buff **sent_skbs = txo->sent_skb_list;
1587 	struct sk_buff *sent_skb;
1588 	bool dummy_wrb;
1589 
1590 	/* Wait for a max of 200ms for all the tx-completions to arrive. */
1591 	do {
1592 		while ((txcp = be_tx_compl_get(tx_cq))) {
1593 			end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1594 					wrb_index, txcp);
1595 			num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1596 			cmpl++;
1597 		}
1598 		if (cmpl) {
1599 			be_cq_notify(adapter, tx_cq->id, false, cmpl);
1600 			atomic_sub(num_wrbs, &txq->used);
1601 			cmpl = 0;
1602 			num_wrbs = 0;
1603 		}
1604 
1605 		if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1606 			break;
1607 
1608 		mdelay(1);
1609 	} while (true);
1610 
1611 	if (atomic_read(&txq->used))
1612 		dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1613 			atomic_read(&txq->used));
1614 
1615 	/* free posted tx for which compls will never arrive */
1616 	while (atomic_read(&txq->used)) {
1617 		sent_skb = sent_skbs[txq->tail];
1618 		end_idx = txq->tail;
1619 		index_adv(&end_idx,
1620 			wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1621 			txq->len);
1622 		num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1623 		atomic_sub(num_wrbs, &txq->used);
1624 	}
1625 }
1626 
1627 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1628 {
1629 	struct be_queue_info *q;
1630 
1631 	q = &adapter->mcc_obj.q;
1632 	if (q->created)
1633 		be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1634 	be_queue_free(adapter, q);
1635 
1636 	q = &adapter->mcc_obj.cq;
1637 	if (q->created)
1638 		be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1639 	be_queue_free(adapter, q);
1640 }
1641 
1642 /* Must be called only after TX qs are created as MCC shares TX EQ */
1643 static int be_mcc_queues_create(struct be_adapter *adapter)
1644 {
1645 	struct be_queue_info *q, *cq;
1646 
1647 	/* Alloc MCC compl queue */
1648 	cq = &adapter->mcc_obj.cq;
1649 	if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1650 			sizeof(struct be_mcc_compl)))
1651 		goto err;
1652 
1653 	/* Ask BE to create MCC compl queue; share TX's eq */
1654 	if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1655 		goto mcc_cq_free;
1656 
1657 	/* Alloc MCC queue */
1658 	q = &adapter->mcc_obj.q;
1659 	if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1660 		goto mcc_cq_destroy;
1661 
1662 	/* Ask BE to create MCC queue */
1663 	if (be_cmd_mccq_create(adapter, q, cq))
1664 		goto mcc_q_free;
1665 
1666 	return 0;
1667 
1668 mcc_q_free:
1669 	be_queue_free(adapter, q);
1670 mcc_cq_destroy:
1671 	be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1672 mcc_cq_free:
1673 	be_queue_free(adapter, cq);
1674 err:
1675 	return -1;
1676 }
1677 
1678 static void be_tx_queues_destroy(struct be_adapter *adapter)
1679 {
1680 	struct be_queue_info *q;
1681 	struct be_tx_obj *txo;
1682 	u8 i;
1683 
1684 	for_all_tx_queues(adapter, txo, i) {
1685 		q = &txo->q;
1686 		if (q->created)
1687 			be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1688 		be_queue_free(adapter, q);
1689 
1690 		q = &txo->cq;
1691 		if (q->created)
1692 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1693 		be_queue_free(adapter, q);
1694 	}
1695 
1696 	/* Clear any residual events */
1697 	be_eq_clean(adapter, &adapter->tx_eq);
1698 
1699 	q = &adapter->tx_eq.q;
1700 	if (q->created)
1701 		be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1702 	be_queue_free(adapter, q);
1703 }
1704 
1705 static int be_num_txqs_want(struct be_adapter *adapter)
1706 {
1707 	if (sriov_enabled(adapter) || be_is_mc(adapter) ||
1708 		lancer_chip(adapter) || !be_physfn(adapter) ||
1709 		adapter->generation == BE_GEN2)
1710 		return 1;
1711 	else
1712 		return MAX_TX_QS;
1713 }
1714 
1715 /* One TX event queue is shared by all TX compl qs */
1716 static int be_tx_queues_create(struct be_adapter *adapter)
1717 {
1718 	struct be_queue_info *eq, *q, *cq;
1719 	struct be_tx_obj *txo;
1720 	u8 i;
1721 
1722 	adapter->num_tx_qs = be_num_txqs_want(adapter);
1723 	if (adapter->num_tx_qs != MAX_TX_QS) {
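		/* Changing the real Tx queue count on a registered netdev
		 * requires the RTNL lock */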
1724 		rtnl_lock();
1725 		netif_set_real_num_tx_queues(adapter->netdev,
1726 			adapter->num_tx_qs);
1727 		rtnl_unlock();
1728 	}
1729 
1730 	adapter->tx_eq.max_eqd = 0;
1731 	adapter->tx_eq.min_eqd = 0;
1732 	adapter->tx_eq.cur_eqd = 96;
1733 	adapter->tx_eq.enable_aic = false;
1734 
1735 	eq = &adapter->tx_eq.q;
1736 	if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1737 		sizeof(struct be_eq_entry)))
1738 		return -1;
1739 
1740 	if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1741 		goto err;
1742 	adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1743 
1744 	for_all_tx_queues(adapter, txo, i) {
1745 		cq = &txo->cq;
1746 		if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1747 			sizeof(struct be_eth_tx_compl)))
1748 			goto err;
1749 
1750 		if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1751 			goto err;
1752 
1753 		q = &txo->q;
1754 		if (be_queue_alloc(adapter, q, TX_Q_LEN,
1755 			sizeof(struct be_eth_wrb)))
1756 			goto err;
1757 	}
1758 	return 0;
1759 
1760 err:
1761 	be_tx_queues_destroy(adapter);
1762 	return -1;
1763 }
1764 
1765 static void be_rx_queues_destroy(struct be_adapter *adapter)
1766 {
1767 	struct be_queue_info *q;
1768 	struct be_rx_obj *rxo;
1769 	int i;
1770 
1771 	for_all_rx_queues(adapter, rxo, i) {
1772 		be_queue_free(adapter, &rxo->q);
1773 
1774 		q = &rxo->cq;
1775 		if (q->created)
1776 			be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1777 		be_queue_free(adapter, q);
1778 
1779 		q = &rxo->rx_eq.q;
1780 		if (q->created)
1781 			be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1782 		be_queue_free(adapter, q);
1783 	}
1784 }
1785 
1786 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1787 {
1788 	if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1789 	     !sriov_enabled(adapter) && be_physfn(adapter)) {
1790 		return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1791 	} else {
1792 		dev_warn(&adapter->pdev->dev,
1793 			"No support for multiple RX queues\n");
1794 		return 1;
1795 	}
1796 }
1797 
1798 static int be_rx_queues_create(struct be_adapter *adapter)
1799 {
1800 	struct be_queue_info *eq, *q, *cq;
1801 	struct be_rx_obj *rxo;
1802 	int rc, i;
1803 
1804 	adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1805 				msix_enabled(adapter) ?
1806 					adapter->num_msix_vec - 1 : 1);
1807 	if (adapter->num_rx_qs != MAX_RX_QS)
1808 		dev_warn(&adapter->pdev->dev,
1809 			"Can create only %d RX queues", adapter->num_rx_qs);
1810 
1811 	adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1812 	for_all_rx_queues(adapter, rxo, i) {
1813 		rxo->adapter = adapter;
1814 		rxo->rx_eq.max_eqd = BE_MAX_EQD;
1815 		rxo->rx_eq.enable_aic = true;
1816 
1817 		/* EQ */
1818 		eq = &rxo->rx_eq.q;
1819 		rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1820 					sizeof(struct be_eq_entry));
1821 		if (rc)
1822 			goto err;
1823 
1824 		rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1825 		if (rc)
1826 			goto err;
1827 
1828 		rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1829 
1830 		/* CQ */
1831 		cq = &rxo->cq;
1832 		rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1833 				sizeof(struct be_eth_rx_compl));
1834 		if (rc)
1835 			goto err;
1836 
1837 		rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1838 		if (rc)
1839 			goto err;
1840 
1841 		/* Rx Q - will be created in be_open() */
1842 		q = &rxo->q;
1843 		rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1844 				sizeof(struct be_eth_rx_d));
1845 		if (rc)
1846 			goto err;
1847 
1848 	}
1849 
1850 	return 0;
1851 err:
1852 	be_rx_queues_destroy(adapter);
1853 	return -1;
1854 }
1855 
1856 static bool event_peek(struct be_eq_obj *eq_obj)
1857 {
1858 	struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1859 	if (!eqe->evt)
1860 		return false;
1861 	else
1862 		return true;
1863 }
1864 
1865 static irqreturn_t be_intx(int irq, void *dev)
1866 {
1867 	struct be_adapter *adapter = dev;
1868 	struct be_rx_obj *rxo;
1869 	int isr, i, tx = 0 , rx = 0;
1870 
1871 	if (lancer_chip(adapter)) {
1872 		if (event_peek(&adapter->tx_eq))
1873 			tx = event_handle(adapter, &adapter->tx_eq, false);
1874 		for_all_rx_queues(adapter, rxo, i) {
1875 			if (event_peek(&rxo->rx_eq))
1876 				rx |= event_handle(adapter, &rxo->rx_eq, true);
1877 		}
1878 
1879 		if (!(tx || rx))
1880 			return IRQ_NONE;
1881 
1882 	} else {
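		/* The CEV ISR reports which of this function's EQs have events
		 * pending; if none of our EQ bits are set, the interrupt is
		 * not ours (the INTx line may be shared). */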
1883 		isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1884 			(adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1885 		if (!isr)
1886 			return IRQ_NONE;
1887 
1888 		if ((1 << adapter->tx_eq.eq_idx & isr))
1889 			event_handle(adapter, &adapter->tx_eq, false);
1890 
1891 		for_all_rx_queues(adapter, rxo, i) {
1892 			if ((1 << rxo->rx_eq.eq_idx & isr))
1893 				event_handle(adapter, &rxo->rx_eq, true);
1894 		}
1895 	}
1896 
1897 	return IRQ_HANDLED;
1898 }
1899 
1900 static irqreturn_t be_msix_rx(int irq, void *dev)
1901 {
1902 	struct be_rx_obj *rxo = dev;
1903 	struct be_adapter *adapter = rxo->adapter;
1904 
1905 	event_handle(adapter, &rxo->rx_eq, true);
1906 
1907 	return IRQ_HANDLED;
1908 }
1909 
1910 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1911 {
1912 	struct be_adapter *adapter = dev;
1913 
1914 	event_handle(adapter, &adapter->tx_eq, false);
1915 
1916 	return IRQ_HANDLED;
1917 }
1918 
1919 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1920 {
1921 	return (rxcp->tcpf && !rxcp->err) ? true : false;
1922 }
1923 
1924 static int be_poll_rx(struct napi_struct *napi, int budget)
1925 {
1926 	struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1927 	struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1928 	struct be_adapter *adapter = rxo->adapter;
1929 	struct be_queue_info *rx_cq = &rxo->cq;
1930 	struct be_rx_compl_info *rxcp;
1931 	u32 work_done;
1932 
1933 	rx_stats(rxo)->rx_polls++;
1934 	for (work_done = 0; work_done < budget; work_done++) {
1935 		rxcp = be_rx_compl_get(rxo);
1936 		if (!rxcp)
1937 			break;
1938 
1939 		/* Is it a flush compl that has no data */
1940 		if (unlikely(rxcp->num_rcvd == 0))
1941 			goto loop_continue;
1942 
1943 		/* Discard compl with partial DMA Lancer B0 */
1944 		if (unlikely(!rxcp->pkt_size)) {
1945 			be_rx_compl_discard(adapter, rxo, rxcp);
1946 			goto loop_continue;
1947 		}
1948 
1949 		/* On BE drop pkts that arrive due to imperfect filtering in
1950 		 * promiscuous mode on some SKUs
1951 		 */
1952 		if (unlikely(rxcp->port != adapter->port_num &&
1953 				!lancer_chip(adapter))) {
1954 			be_rx_compl_discard(adapter, rxo, rxcp);
1955 			goto loop_continue;
1956 		}
1957 
1958 		if (do_gro(rxcp))
1959 			be_rx_compl_process_gro(adapter, rxo, rxcp);
1960 		else
1961 			be_rx_compl_process(adapter, rxo, rxcp);
1962 loop_continue:
1963 		be_rx_stats_update(rxo, rxcp);
1964 	}
1965 
1966 	be_cq_notify(adapter, rx_cq->id, false, work_done);
1967 
1968 	/* Refill the queue */
1969 	if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1970 		be_post_rx_frags(rxo, GFP_ATOMIC);
1971 
1972 	/* All consumed */
1973 	if (work_done < budget) {
1974 		napi_complete(napi);
1975 		/* Arm CQ */
1976 		be_cq_notify(adapter, rx_cq->id, true, 0);
1977 	}
1978 	return work_done;
1979 }
1980 
1981 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1982  * For TX/MCC we don't honour budget; consume everything
1983  */
1984 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1985 {
1986 	struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1987 	struct be_adapter *adapter =
1988 		container_of(tx_eq, struct be_adapter, tx_eq);
1989 	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
1990 	struct be_tx_obj *txo;
1991 	struct be_eth_tx_compl *txcp;
1992 	int tx_compl, mcc_compl, status = 0;
1993 	u8 i;
1994 	u16 num_wrbs;
1995 
1996 	for_all_tx_queues(adapter, txo, i) {
1997 		tx_compl = 0;
1998 		num_wrbs = 0;
1999 		while ((txcp = be_tx_compl_get(&txo->cq))) {
2000 			num_wrbs += be_tx_compl_process(adapter, txo,
2001 				AMAP_GET_BITS(struct amap_eth_tx_compl,
2002 					wrb_index, txcp));
2003 			tx_compl++;
2004 		}
2005 		if (tx_compl) {
2006 			be_cq_notify(adapter, txo->cq.id, true, tx_compl);
2007 
2008 			atomic_sub(num_wrbs, &txo->q.used);
2009 
2010 			/* As Tx wrbs have been freed up, wake up netdev queue
2011 			 * if it was stopped due to lack of tx wrbs.  */
2012 			if (__netif_subqueue_stopped(adapter->netdev, i) &&
2013 				atomic_read(&txo->q.used) < txo->q.len / 2) {
2014 				netif_wake_subqueue(adapter->netdev, i);
2015 			}
2016 
2017 			u64_stats_update_begin(&tx_stats(txo)->sync_compl);
2018 			tx_stats(txo)->tx_compl += tx_compl;
2019 			u64_stats_update_end(&tx_stats(txo)->sync_compl);
2020 		}
2021 	}
2022 
2023 	mcc_compl = be_process_mcc(adapter, &status);
2024 
2025 	if (mcc_compl) {
2026 		be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2027 	}
2028 
2029 	napi_complete(napi);
2030 
2031 	/* Arm CQ again to regenerate EQEs for Lancer in INTx mode */
2032 	if (lancer_chip(adapter) && !msix_enabled(adapter)) {
2033 		for_all_tx_queues(adapter, txo, i)
2034 			be_cq_notify(adapter, txo->cq.id, true, 0);
2035 
2036 		be_cq_notify(adapter, mcc_obj->cq.id, true, 0);
2037 	}
2038 
2039 	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2040 	adapter->drv_stats.tx_events++;
2041 	return 1;
2042 }
2043 
2044 void be_detect_dump_ue(struct be_adapter *adapter)
2045 {
2046 	u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2047 	u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2048 	u32 i;
2049 
2050 	if (adapter->eeh_err || adapter->ue_detected)
2051 		return;
2052 
2053 	if (lancer_chip(adapter)) {
2054 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2055 		if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2056 			sliport_err1 = ioread32(adapter->db +
2057 					SLIPORT_ERROR1_OFFSET);
2058 			sliport_err2 = ioread32(adapter->db +
2059 					SLIPORT_ERROR2_OFFSET);
2060 		}
2061 	} else {
2062 		pci_read_config_dword(adapter->pdev,
2063 				PCICFG_UE_STATUS_LOW, &ue_lo);
2064 		pci_read_config_dword(adapter->pdev,
2065 				PCICFG_UE_STATUS_HIGH, &ue_hi);
2066 		pci_read_config_dword(adapter->pdev,
2067 				PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2068 		pci_read_config_dword(adapter->pdev,
2069 				PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2070 
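		/* Bits set in the mask registers are expected/ignorable
		 * conditions; only unmasked bits are treated as genuine
		 * unrecoverable errors. */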
2071 		ue_lo = (ue_lo & (~ue_lo_mask));
2072 		ue_hi = (ue_hi & (~ue_hi_mask));
2073 	}
2074 
2075 	if (ue_lo || ue_hi ||
2076 		sliport_status & SLIPORT_STATUS_ERR_MASK) {
2077 		adapter->ue_detected = true;
2078 		adapter->eeh_err = true;
2079 		dev_err(&adapter->pdev->dev,
2080 			"Unrecoverable error in the card\n");
2081 	}
2082 
2083 	if (ue_lo) {
2084 		for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2085 			if (ue_lo & 1)
2086 				dev_err(&adapter->pdev->dev,
2087 				"UE: %s bit set\n", ue_status_low_desc[i]);
2088 		}
2089 	}
2090 	if (ue_hi) {
2091 		for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2092 			if (ue_hi & 1)
2093 				dev_err(&adapter->pdev->dev,
2094 				"UE: %s bit set\n", ue_status_hi_desc[i]);
2095 		}
2096 	}
2097 
2098 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2099 		dev_err(&adapter->pdev->dev,
2100 			"sliport status 0x%x\n", sliport_status);
2101 		dev_err(&adapter->pdev->dev,
2102 			"sliport error1 0x%x\n", sliport_err1);
2103 		dev_err(&adapter->pdev->dev,
2104 			"sliport error2 0x%x\n", sliport_err2);
2105 	}
2106 }
2107 
2108 static void be_msix_disable(struct be_adapter *adapter)
2109 {
2110 	if (msix_enabled(adapter)) {
2111 		pci_disable_msix(adapter->pdev);
2112 		adapter->num_msix_vec = 0;
2113 	}
2114 }
2115 
2116 static void be_msix_enable(struct be_adapter *adapter)
2117 {
2118 #define BE_MIN_MSIX_VECTORS	(1 + 1) /* Rx + Tx */
2119 	int i, status, num_vec;
2120 
2121 	num_vec = be_num_rxqs_want(adapter) + 1;
2122 
2123 	for (i = 0; i < num_vec; i++)
2124 		adapter->msix_entries[i].entry = i;
2125 
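	/* The legacy pci_enable_msix() returns 0 when all requested vectors
	 * are granted, a negative errno on failure, or a positive count of
	 * the vectors actually available; in that case retry with the smaller
	 * count as long as it still meets our minimum. */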
2126 	status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2127 	if (status == 0) {
2128 		goto done;
2129 	} else if (status >= BE_MIN_MSIX_VECTORS) {
2130 		num_vec = status;
2131 		if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2132 				num_vec) == 0)
2133 			goto done;
2134 	}
2135 	return;
2136 done:
2137 	adapter->num_msix_vec = num_vec;
2138 	return;
2139 }
2140 
2141 static int be_sriov_enable(struct be_adapter *adapter)
2142 {
2143 	be_check_sriov_fn_type(adapter);
2144 
2145 #ifdef CONFIG_PCI_IOV
2146 	if (be_physfn(adapter) && num_vfs) {
2147 		int status, pos;
2148 		u16 dev_vfs;
2149 
2150 		pos = pci_find_ext_capability(adapter->pdev,
2151 						PCI_EXT_CAP_ID_SRIOV);
2152 		pci_read_config_word(adapter->pdev,
2153 				     pos + PCI_SRIOV_TOTAL_VF, &dev_vfs);
2154 
2155 		adapter->num_vfs = min_t(u16, num_vfs, dev_vfs);
2156 		if (adapter->num_vfs != num_vfs)
2157 			dev_info(&adapter->pdev->dev,
2158 				 "Device supports %d VFs and not %d\n",
2159 				 adapter->num_vfs, num_vfs);
2160 
2161 		status = pci_enable_sriov(adapter->pdev, adapter->num_vfs);
2162 		if (status)
2163 			adapter->num_vfs = 0;
2164 
2165 		if (adapter->num_vfs) {
2166 			adapter->vf_cfg = kcalloc(num_vfs,
2167 						sizeof(struct be_vf_cfg),
2168 						GFP_KERNEL);
2169 			if (!adapter->vf_cfg)
2170 				return -ENOMEM;
2171 		}
2172 	}
2173 #endif
2174 	return 0;
2175 }
2176 
2177 static void be_sriov_disable(struct be_adapter *adapter)
2178 {
2179 #ifdef CONFIG_PCI_IOV
2180 	if (sriov_enabled(adapter)) {
2181 		pci_disable_sriov(adapter->pdev);
2182 		kfree(adapter->vf_cfg);
2183 		adapter->num_vfs = 0;
2184 	}
2185 #endif
2186 }
2187 
2188 static inline int be_msix_vec_get(struct be_adapter *adapter,
2189 					struct be_eq_obj *eq_obj)
2190 {
2191 	return adapter->msix_entries[eq_obj->eq_idx].vector;
2192 }
2193 
2194 static int be_request_irq(struct be_adapter *adapter,
2195 		struct be_eq_obj *eq_obj,
2196 		void *handler, char *desc, void *context)
2197 {
2198 	struct net_device *netdev = adapter->netdev;
2199 	int vec;
2200 
2201 	sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2202 	vec = be_msix_vec_get(adapter, eq_obj);
2203 	return request_irq(vec, handler, 0, eq_obj->desc, context);
2204 }
2205 
2206 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2207 			void *context)
2208 {
2209 	int vec = be_msix_vec_get(adapter, eq_obj);
2210 	free_irq(vec, context);
2211 }
2212 
2213 static int be_msix_register(struct be_adapter *adapter)
2214 {
2215 	struct be_rx_obj *rxo;
2216 	int status, i;
2217 	char qname[10];
2218 
2219 	status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2220 				adapter);
2221 	if (status)
2222 		goto err;
2223 
2224 	for_all_rx_queues(adapter, rxo, i) {
2225 		sprintf(qname, "rxq%d", i);
2226 		status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2227 				qname, rxo);
2228 		if (status)
2229 			goto err_msix;
2230 	}
2231 
2232 	return 0;
2233 
2234 err_msix:
2235 	be_free_irq(adapter, &adapter->tx_eq, adapter);
2236 
2237 	for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2238 		be_free_irq(adapter, &rxo->rx_eq, rxo);
2239 
2240 err:
2241 	dev_warn(&adapter->pdev->dev,
2242 		"MSIX Request IRQ failed - err %d\n", status);
2243 	be_msix_disable(adapter);
2244 	return status;
2245 }
2246 
2247 static int be_irq_register(struct be_adapter *adapter)
2248 {
2249 	struct net_device *netdev = adapter->netdev;
2250 	int status;
2251 
2252 	if (msix_enabled(adapter)) {
2253 		status = be_msix_register(adapter);
2254 		if (status == 0)
2255 			goto done;
2256 		/* INTx is not supported for VF */
2257 		if (!be_physfn(adapter))
2258 			return status;
2259 	}
2260 
2261 	/* INTx */
2262 	netdev->irq = adapter->pdev->irq;
2263 	status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2264 			adapter);
2265 	if (status) {
2266 		dev_err(&adapter->pdev->dev,
2267 			"INTx request IRQ failed - err %d\n", status);
2268 		return status;
2269 	}
2270 done:
2271 	adapter->isr_registered = true;
2272 	return 0;
2273 }
2274 
2275 static void be_irq_unregister(struct be_adapter *adapter)
2276 {
2277 	struct net_device *netdev = adapter->netdev;
2278 	struct be_rx_obj *rxo;
2279 	int i;
2280 
2281 	if (!adapter->isr_registered)
2282 		return;
2283 
2284 	/* INTx */
2285 	if (!msix_enabled(adapter)) {
2286 		free_irq(netdev->irq, adapter);
2287 		goto done;
2288 	}
2289 
2290 	/* MSIx */
2291 	be_free_irq(adapter, &adapter->tx_eq, adapter);
2292 
2293 	for_all_rx_queues(adapter, rxo, i)
2294 		be_free_irq(adapter, &rxo->rx_eq, rxo);
2295 
2296 done:
2297 	adapter->isr_registered = false;
2298 }
2299 
2300 static void be_rx_queues_clear(struct be_adapter *adapter)
2301 {
2302 	struct be_queue_info *q;
2303 	struct be_rx_obj *rxo;
2304 	int i;
2305 
2306 	for_all_rx_queues(adapter, rxo, i) {
2307 		q = &rxo->q;
2308 		if (q->created) {
2309 			be_cmd_rxq_destroy(adapter, q);
2310 			/* After the rxq is invalidated, wait for a grace time
2311 			 * of 1ms for all dma to end and the flush compl to
2312 			 * arrive
2313 			 */
2314 			mdelay(1);
2315 			be_rx_q_clean(adapter, rxo);
2316 		}
2317 
2318 		/* Clear any residual events */
2319 		q = &rxo->rx_eq.q;
2320 		if (q->created)
2321 			be_eq_clean(adapter, &rxo->rx_eq);
2322 	}
2323 }
2324 
2325 static int be_close(struct net_device *netdev)
2326 {
2327 	struct be_adapter *adapter = netdev_priv(netdev);
2328 	struct be_rx_obj *rxo;
2329 	struct be_tx_obj *txo;
2330 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
2331 	int vec, i;
2332 
2333 	be_async_mcc_disable(adapter);
2334 
2335 	if (!lancer_chip(adapter))
2336 		be_intr_set(adapter, false);
2337 
2338 	for_all_rx_queues(adapter, rxo, i)
2339 		napi_disable(&rxo->rx_eq.napi);
2340 
2341 	napi_disable(&tx_eq->napi);
2342 
2343 	if (lancer_chip(adapter)) {
2344 		be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2345 		for_all_rx_queues(adapter, rxo, i)
2346 			 be_cq_notify(adapter, rxo->cq.id, false, 0);
2347 		for_all_tx_queues(adapter, txo, i)
2348 			 be_cq_notify(adapter, txo->cq.id, false, 0);
2349 	}
2350 
2351 	if (msix_enabled(adapter)) {
2352 		vec = be_msix_vec_get(adapter, tx_eq);
2353 		synchronize_irq(vec);
2354 
2355 		for_all_rx_queues(adapter, rxo, i) {
2356 			vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2357 			synchronize_irq(vec);
2358 		}
2359 	} else {
2360 		synchronize_irq(netdev->irq);
2361 	}
2362 	be_irq_unregister(adapter);
2363 
2364 	/* Wait for all pending tx completions to arrive so that
2365 	 * all tx skbs are freed.
2366 	 */
2367 	for_all_tx_queues(adapter, txo, i)
2368 		be_tx_compl_clean(adapter, txo);
2369 
2370 	be_rx_queues_clear(adapter);
2371 	return 0;
2372 }
2373 
2374 static int be_rx_queues_setup(struct be_adapter *adapter)
2375 {
2376 	struct be_rx_obj *rxo;
2377 	int rc, i, j;
2378 	u8 rsstable[128];
2379 
2380 	for_all_rx_queues(adapter, rxo, i) {
2381 		rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2382 			rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2383 			adapter->if_handle,
2384 			(i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2385 		if (rc)
2386 			return rc;
2387 	}
2388 
2389 	if (be_multi_rxq(adapter)) {
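		/* Fill the 128-entry RSS indirection table round-robin with
		 * the RSS-capable queue ids; queue 0 is the default non-RSS
		 * queue and is left out of the table. */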
2390 		for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2391 			for_all_rss_queues(adapter, rxo, i) {
2392 				if ((j + i) >= 128)
2393 					break;
2394 				rsstable[j + i] = rxo->rss_id;
2395 			}
2396 		}
2397 		rc = be_cmd_rss_config(adapter, rsstable, 128);
2398 
2399 		if (rc)
2400 			return rc;
2401 	}
2402 
2403 	/* First time posting */
2404 	for_all_rx_queues(adapter, rxo, i) {
2405 		be_post_rx_frags(rxo, GFP_KERNEL);
2406 		napi_enable(&rxo->rx_eq.napi);
2407 	}
2408 	return 0;
2409 }
2410 
2411 static int be_open(struct net_device *netdev)
2412 {
2413 	struct be_adapter *adapter = netdev_priv(netdev);
2414 	struct be_eq_obj *tx_eq = &adapter->tx_eq;
2415 	struct be_rx_obj *rxo;
2416 	u8 link_status;
2417 	int status, i;
2418 
2419 	status = be_rx_queues_setup(adapter);
2420 	if (status)
2421 		goto err;
2422 
2423 	napi_enable(&tx_eq->napi);
2424 
2425 	be_irq_register(adapter);
2426 
2427 	if (!lancer_chip(adapter))
2428 		be_intr_set(adapter, true);
2429 
2430 	/* The evt queues are created in unarmed state; arm them */
2431 	for_all_rx_queues(adapter, rxo, i) {
2432 		be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2433 		be_cq_notify(adapter, rxo->cq.id, true, 0);
2434 	}
2435 	be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2436 
2437 	/* Now that interrupts are on we can process async mcc */
2438 	be_async_mcc_enable(adapter);
2439 
2440 	status = be_cmd_link_status_query(adapter, NULL, NULL,
2441 					  &link_status, 0);
2442 	if (!status)
2443 		be_link_status_update(adapter, link_status);
2444 
2445 	return 0;
2446 err:
2447 	be_close(adapter->netdev);
2448 	return -EIO;
2449 }
2450 
2451 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2452 {
2453 	struct be_dma_mem cmd;
2454 	int status = 0;
2455 	u8 mac[ETH_ALEN];
2456 
2457 	memset(mac, 0, ETH_ALEN);
2458 
2459 	cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2460 	cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2461 				    GFP_KERNEL);
2462 	if (cmd.va == NULL)
2463 		return -1;
2464 	memset(cmd.va, 0, cmd.size);
2465 
2466 	if (enable) {
2467 		status = pci_write_config_dword(adapter->pdev,
2468 			PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2469 		if (status) {
2470 			dev_err(&adapter->pdev->dev,
2471 				"Could not enable Wake-on-lan\n");
2472 			dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2473 					  cmd.dma);
2474 			return status;
2475 		}
2476 		status = be_cmd_enable_magic_wol(adapter,
2477 				adapter->netdev->dev_addr, &cmd);
2478 		pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2479 		pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2480 	} else {
2481 		status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2482 		pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2483 		pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2484 	}
2485 
2486 	dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2487 	return status;
2488 }
2489 
2490 /*
2491  * Generate a seed MAC address from the PF MAC Address using jhash.
2492  * MAC addresses for VFs are assigned incrementally starting from the seed.
2493  * These addresses are programmed in the ASIC by the PF and the VF driver
2494  * queries for the MAC address during its probe.
2495  */
2496 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2497 {
2498 	u32 vf;
2499 	int status = 0;
2500 	u8 mac[ETH_ALEN];
2501 	struct be_vf_cfg *vf_cfg;
2502 
2503 	be_vf_eth_addr_generate(adapter, mac);
2504 
2505 	for_all_vfs(adapter, vf_cfg, vf) {
2506 		if (lancer_chip(adapter)) {
2507 			status = be_cmd_set_mac_list(adapter,  mac, 1, vf + 1);
2508 		} else {
2509 			status = be_cmd_pmac_add(adapter, mac,
2510 						 vf_cfg->if_handle,
2511 						 &vf_cfg->pmac_id, vf + 1);
2512 		}
2513 
2514 		if (status)
2515 			dev_err(&adapter->pdev->dev,
2516 			"Mac address assignment failed for VF %d\n", vf);
2517 		else
2518 			memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
2519 
2520 		mac[5] += 1;
2521 	}
2522 	return status;
2523 }
2524 
2525 static void be_vf_clear(struct be_adapter *adapter)
2526 {
2527 	struct be_vf_cfg *vf_cfg;
2528 	u32 vf;
2529 
2530 	for_all_vfs(adapter, vf_cfg, vf) {
2531 		if (lancer_chip(adapter))
2532 			be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2533 		else
2534 			be_cmd_pmac_del(adapter, vf_cfg->if_handle,
2535 					vf_cfg->pmac_id, vf + 1);
2536 
2537 		be_cmd_if_destroy(adapter, vf_cfg->if_handle, vf + 1);
2538 	}
2539 }
2540 
2541 static int be_clear(struct be_adapter *adapter)
2542 {
2543 	if (sriov_enabled(adapter))
2544 		be_vf_clear(adapter);
2545 
2546 	be_cmd_if_destroy(adapter, adapter->if_handle,  0);
2547 
2548 	be_mcc_queues_destroy(adapter);
2549 	be_rx_queues_destroy(adapter);
2550 	be_tx_queues_destroy(adapter);
2551 
2552 	/* tell fw we're done with firing cmds */
2553 	be_cmd_fw_clean(adapter);
2554 	return 0;
2555 }
2556 
2557 static void be_vf_setup_init(struct be_adapter *adapter)
2558 {
2559 	struct be_vf_cfg *vf_cfg;
2560 	int vf;
2561 
2562 	for_all_vfs(adapter, vf_cfg, vf) {
2563 		vf_cfg->if_handle = -1;
2564 		vf_cfg->pmac_id = -1;
2565 	}
2566 }
2567 
2568 static int be_vf_setup(struct be_adapter *adapter)
2569 {
2570 	struct be_vf_cfg *vf_cfg;
2571 	u32 cap_flags, en_flags, vf;
2572 	u16 lnk_speed;
2573 	int status;
2574 
2575 	be_vf_setup_init(adapter);
2576 
2577 	cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2578 				BE_IF_FLAGS_MULTICAST;
2579 	for_all_vfs(adapter, vf_cfg, vf) {
2580 		status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2581 					  &vf_cfg->if_handle, NULL, vf + 1);
2582 		if (status)
2583 			goto err;
2584 	}
2585 
2586 	status = be_vf_eth_addr_config(adapter);
2587 	if (status)
2588 		goto err;
2589 
2590 	for_all_vfs(adapter, vf_cfg, vf) {
2591 		status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2592 						  NULL, vf + 1);
2593 		if (status)
2594 			goto err;
2595 		vf_cfg->tx_rate = lnk_speed * 10;
2596 	}
2597 	return 0;
2598 err:
2599 	return status;
2600 }
2601 
2602 static void be_setup_init(struct be_adapter *adapter)
2603 {
2604 	adapter->vlan_prio_bmap = 0xff;
2605 	adapter->link_speed = -1;
2606 	adapter->if_handle = -1;
2607 	adapter->be3_native = false;
2608 	adapter->promiscuous = false;
2609 	adapter->eq_next_idx = 0;
2610 }
2611 
2612 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2613 {
2614 	u32 pmac_id;
2615 	int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2616 	if (status != 0)
2617 		goto do_none;
2618 	status = be_cmd_mac_addr_query(adapter, mac,
2619 			MAC_ADDRESS_TYPE_NETWORK,
2620 			false, adapter->if_handle, pmac_id);
2621 	if (status != 0)
2622 		goto do_none;
2623 	status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2624 			&adapter->pmac_id, 0);
2625 do_none:
2626 	return status;
2627 }
2628 
2629 static int be_setup(struct be_adapter *adapter)
2630 {
2631 	struct net_device *netdev = adapter->netdev;
2632 	u32 cap_flags, en_flags;
2633 	u32 tx_fc, rx_fc;
2634 	int status, i;
2635 	u8 mac[ETH_ALEN];
2636 	struct be_tx_obj *txo;
2637 
2638 	be_setup_init(adapter);
2639 
2640 	be_cmd_req_native_mode(adapter);
2641 
2642 	status = be_tx_queues_create(adapter);
2643 	if (status != 0)
2644 		goto err;
2645 
2646 	status = be_rx_queues_create(adapter);
2647 	if (status != 0)
2648 		goto err;
2649 
2650 	status = be_mcc_queues_create(adapter);
2651 	if (status != 0)
2652 		goto err;
2653 
2654 	memset(mac, 0, ETH_ALEN);
2655 	status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2656 			true /*permanent */, 0, 0);
2657 	if (status)
2658 		return status;
2659 	memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2660 	memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2661 
2662 	en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2663 			BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2664 	cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2665 			BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2666 
2667 	if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2668 		cap_flags |= BE_IF_FLAGS_RSS;
2669 		en_flags |= BE_IF_FLAGS_RSS;
2670 	}
2671 	status = be_cmd_if_create(adapter, cap_flags, en_flags,
2672 			netdev->dev_addr, &adapter->if_handle,
2673 			&adapter->pmac_id, 0);
2674 	if (status != 0)
2675 		goto err;
2676 
2677 	 for_all_tx_queues(adapter, txo, i) {
2678 		status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2679 		if (status)
2680 			goto err;
2681 	}
2682 
2683 	 /* The VF's permanent mac queried from card is incorrect.
2684 	  * For BEx: Query the mac configured by the PF using if_handle
2685 	  * For Lancer: Get and use mac_list to obtain mac address.
2686 	  */
2687 	if (!be_physfn(adapter)) {
2688 		if (lancer_chip(adapter))
2689 			status = be_configure_mac_from_list(adapter, mac);
2690 		else
2691 			status = be_cmd_mac_addr_query(adapter, mac,
2692 					MAC_ADDRESS_TYPE_NETWORK, false,
2693 					adapter->if_handle, 0);
2694 		if (!status) {
2695 			memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2696 			memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2697 		}
2698 	}
2699 
2700 	be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2701 
2702 	status = be_vid_config(adapter, false, 0);
2703 	if (status)
2704 		goto err;
2705 
2706 	be_set_rx_mode(adapter->netdev);
2707 
2708 	status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2709 	/* For Lancer: It is legal for this cmd to fail on VF */
2710 	if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2711 		goto err;
2712 
2713 	if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2714 		status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2715 					adapter->rx_fc);
2716 		/* For Lancer: It is legal for this cmd to fail on VF */
2717 		if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2718 			goto err;
2719 	}
2720 
2721 	pcie_set_readrq(adapter->pdev, 4096);
2722 
2723 	if (sriov_enabled(adapter)) {
2724 		status = be_vf_setup(adapter);
2725 		if (status)
2726 			goto err;
2727 	}
2728 
2729 	return 0;
2730 err:
2731 	be_clear(adapter);
2732 	return status;
2733 }
2734 
2735 #ifdef CONFIG_NET_POLL_CONTROLLER
2736 static void be_netpoll(struct net_device *netdev)
2737 {
2738 	struct be_adapter *adapter = netdev_priv(netdev);
2739 	struct be_rx_obj *rxo;
2740 	int i;
2741 
2742 	event_handle(adapter, &adapter->tx_eq, false);
2743 	for_all_rx_queues(adapter, rxo, i)
2744 		event_handle(adapter, &rxo->rx_eq, true);
2745 }
2746 #endif
2747 
2748 #define FW_FILE_HDR_SIGN 	"ServerEngines Corp. "
2749 static bool be_flash_redboot(struct be_adapter *adapter,
2750 			const u8 *p, u32 img_start, int image_size,
2751 			int hdr_size)
2752 {
2753 	u32 crc_offset;
2754 	u8 flashed_crc[4];
2755 	int status;
2756 
2757 	crc_offset = hdr_size + img_start + image_size - 4;
2758 
2759 	p += crc_offset;
2760 
2761 	status = be_cmd_get_flash_crc(adapter, flashed_crc,
2762 			(image_size - 4));
2763 	if (status) {
2764 		dev_err(&adapter->pdev->dev,
2765 		"could not get crc from flash, not flashing redboot\n");
2766 		return false;
2767 	}
2768 
2769 	/*update redboot only if crc does not match*/
2770 	if (!memcmp(flashed_crc, p, 4))
2771 		return false;
2772 	else
2773 		return true;
2774 }
2775 
2776 static bool phy_flashing_required(struct be_adapter *adapter)
2777 {
2778 	int status = 0;
2779 	struct be_phy_info phy_info;
2780 
2781 	status = be_cmd_get_phy_info(adapter, &phy_info);
2782 	if (status)
2783 		return false;
2784 	if ((phy_info.phy_type == TN_8022) &&
2785 		(phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2786 		return true;
2787 	}
2788 	return false;
2789 }
2790 
2791 static int be_flash_data(struct be_adapter *adapter,
2792 			const struct firmware *fw,
2793 			struct be_dma_mem *flash_cmd, int num_of_images)
2794 
2795 {
2796 	int status = 0, i, filehdr_size = 0;
2797 	u32 total_bytes = 0, flash_op;
2798 	int num_bytes;
2799 	const u8 *p = fw->data;
2800 	struct be_cmd_write_flashrom *req = flash_cmd->va;
2801 	const struct flash_comp *pflashcomp;
2802 	int num_comp;
2803 
2804 	static const struct flash_comp gen3_flash_types[10] = {
2805 		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2806 			FLASH_IMAGE_MAX_SIZE_g3},
2807 		{ FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2808 			FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2809 		{ FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2810 			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2811 		{ FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2812 			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2813 		{ FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2814 			FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2815 		{ FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2816 			FLASH_IMAGE_MAX_SIZE_g3},
2817 		{ FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2818 			FLASH_IMAGE_MAX_SIZE_g3},
2819 		{ FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2820 			FLASH_IMAGE_MAX_SIZE_g3},
2821 		{ FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2822 			FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2823 		{ FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2824 			FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2825 	};
2826 	static const struct flash_comp gen2_flash_types[8] = {
2827 		{ FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2828 			FLASH_IMAGE_MAX_SIZE_g2},
2829 		{ FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2830 			FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2831 		{ FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2832 			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2833 		{ FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2834 			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2835 		{ FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2836 			FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2837 		{ FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2838 			FLASH_IMAGE_MAX_SIZE_g2},
2839 		{ FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2840 			FLASH_IMAGE_MAX_SIZE_g2},
2841 		{ FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2842 			 FLASH_IMAGE_MAX_SIZE_g2}
2843 	};
2844 
2845 	if (adapter->generation == BE_GEN3) {
2846 		pflashcomp = gen3_flash_types;
2847 		filehdr_size = sizeof(struct flash_file_hdr_g3);
2848 		num_comp = ARRAY_SIZE(gen3_flash_types);
2849 	} else {
2850 		pflashcomp = gen2_flash_types;
2851 		filehdr_size = sizeof(struct flash_file_hdr_g2);
2852 		num_comp = ARRAY_SIZE(gen2_flash_types);
2853 	}
2854 	for (i = 0; i < num_comp; i++) {
2855 		if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2856 				memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2857 			continue;
2858 		if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2859 			if (!phy_flashing_required(adapter))
2860 				continue;
2861 		}
2862 		if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2863 			(!be_flash_redboot(adapter, fw->data,
2864 			pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2865 			(num_of_images * sizeof(struct image_hdr)))))
2866 			continue;
2867 		p = fw->data;
2868 		p += filehdr_size + pflashcomp[i].offset
2869 			+ (num_of_images * sizeof(struct image_hdr));
2870 		if (p + pflashcomp[i].size > fw->data + fw->size)
2871 			return -1;
2872 		total_bytes = pflashcomp[i].size;
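		/* Write the component in 32KB chunks; all but the last chunk
		 * use the SAVE opcode and only the final chunk uses the FLASH
		 * opcode (or the PHY variants for PHY firmware). */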
2873 		while (total_bytes) {
2874 			if (total_bytes > 32*1024)
2875 				num_bytes = 32*1024;
2876 			else
2877 				num_bytes = total_bytes;
2878 			total_bytes -= num_bytes;
2879 			if (!total_bytes) {
2880 				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2881 					flash_op = FLASHROM_OPER_PHY_FLASH;
2882 				else
2883 					flash_op = FLASHROM_OPER_FLASH;
2884 			} else {
2885 				if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
2886 					flash_op = FLASHROM_OPER_PHY_SAVE;
2887 				else
2888 					flash_op = FLASHROM_OPER_SAVE;
2889 			}
2890 			memcpy(req->params.data_buf, p, num_bytes);
2891 			p += num_bytes;
2892 			status = be_cmd_write_flashrom(adapter, flash_cmd,
2893 				pflashcomp[i].optype, flash_op, num_bytes);
2894 			if (status) {
2895 				if ((status == ILLEGAL_IOCTL_REQ) &&
2896 					(pflashcomp[i].optype ==
2897 						IMG_TYPE_PHY_FW))
2898 					break;
2899 				dev_err(&adapter->pdev->dev,
2900 					"cmd to write to flash rom failed.\n");
2901 				return -1;
2902 			}
2903 		}
2904 	}
2905 	return 0;
2906 }
2907 
2908 static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
2909 {
2910 	if (fhdr == NULL)
2911 		return 0;
2912 	if (fhdr->build[0] == '3')
2913 		return BE_GEN3;
2914 	else if (fhdr->build[0] == '2')
2915 		return BE_GEN2;
2916 	else
2917 		return 0;
2918 }
2919 
2920 static int lancer_fw_download(struct be_adapter *adapter,
2921 				const struct firmware *fw)
2922 {
2923 #define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
2924 #define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
2925 	struct be_dma_mem flash_cmd;
2926 	const u8 *data_ptr = NULL;
2927 	u8 *dest_image_ptr = NULL;
2928 	size_t image_size = 0;
2929 	u32 chunk_size = 0;
2930 	u32 data_written = 0;
2931 	u32 offset = 0;
2932 	int status = 0;
2933 	u8 add_status = 0;
2934 
2935 	if (!IS_ALIGNED(fw->size, sizeof(u32))) {
2936 		dev_err(&adapter->pdev->dev,
2937 			"FW Image not properly aligned. "
2938 			"Length must be 4 byte aligned.\n");
2939 		status = -EINVAL;
2940 		goto lancer_fw_exit;
2941 	}
2942 
2943 	flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
2944 				+ LANCER_FW_DOWNLOAD_CHUNK;
2945 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
2946 						&flash_cmd.dma, GFP_KERNEL);
2947 	if (!flash_cmd.va) {
2948 		status = -ENOMEM;
2949 		dev_err(&adapter->pdev->dev,
2950 			"Memory allocation failure while flashing\n");
2951 		goto lancer_fw_exit;
2952 	}
2953 
2954 	dest_image_ptr = flash_cmd.va +
2955 				sizeof(struct lancer_cmd_req_write_object);
2956 	image_size = fw->size;
2957 	data_ptr = fw->data;
2958 
2959 	while (image_size) {
2960 		chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);
2961 
2962 		/* Copy the image chunk content. */
2963 		memcpy(dest_image_ptr, data_ptr, chunk_size);
2964 
2965 		status = lancer_cmd_write_object(adapter, &flash_cmd,
2966 				chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
2967 				&data_written, &add_status);
2968 
2969 		if (status)
2970 			break;
2971 
2972 		offset += data_written;
2973 		data_ptr += data_written;
2974 		image_size -= data_written;
2975 	}
2976 
2977 	if (!status) {
2978 		/* Commit the FW written */
2979 		status = lancer_cmd_write_object(adapter, &flash_cmd,
2980 					0, offset, LANCER_FW_DOWNLOAD_LOCATION,
2981 					&data_written, &add_status);
2982 	}
2983 
2984 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
2985 				flash_cmd.dma);
2986 	if (status) {
2987 		dev_err(&adapter->pdev->dev,
2988 			"Firmware load error. "
2989 			"Status code: 0x%x Additional Status: 0x%x\n",
2990 			status, add_status);
2991 		goto lancer_fw_exit;
2992 	}
2993 
2994 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
2995 lancer_fw_exit:
2996 	return status;
2997 }
2998 
2999 static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
3000 {
3001 	struct flash_file_hdr_g2 *fhdr;
3002 	struct flash_file_hdr_g3 *fhdr3;
3003 	struct image_hdr *img_hdr_ptr = NULL;
3004 	struct be_dma_mem flash_cmd;
3005 	const u8 *p;
3006 	int status = 0, i = 0, num_imgs = 0;
3007 
3008 	p = fw->data;
3009 	fhdr = (struct flash_file_hdr_g2 *) p;
3010 
3011 	flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
3012 	flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
3013 					  &flash_cmd.dma, GFP_KERNEL);
3014 	if (!flash_cmd.va) {
3015 		status = -ENOMEM;
3016 		dev_err(&adapter->pdev->dev,
3017 			"Memory allocation failure while flashing\n");
3018 		goto be_fw_exit;
3019 	}
3020 
3021 	if ((adapter->generation == BE_GEN3) &&
3022 			(get_ufigen_type(fhdr) == BE_GEN3)) {
3023 		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
3024 		num_imgs = le32_to_cpu(fhdr3->num_imgs);
3025 		for (i = 0; i < num_imgs; i++) {
3026 			img_hdr_ptr = (struct image_hdr *) (fw->data +
3027 					(sizeof(struct flash_file_hdr_g3) +
3028 					 i * sizeof(struct image_hdr)));
3029 			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
3030 				status = be_flash_data(adapter, fw, &flash_cmd,
3031 							num_imgs);
3032 		}
3033 	} else if ((adapter->generation == BE_GEN2) &&
3034 			(get_ufigen_type(fhdr) == BE_GEN2)) {
3035 		status = be_flash_data(adapter, fw, &flash_cmd, 0);
3036 	} else {
3037 		dev_err(&adapter->pdev->dev,
3038 			"UFI and Interface are not compatible for flashing\n");
3039 		status = -1;
3040 	}
3041 
3042 	dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
3043 			  flash_cmd.dma);
3044 	if (status) {
3045 		dev_err(&adapter->pdev->dev, "Firmware load error\n");
3046 		goto be_fw_exit;
3047 	}
3048 
3049 	dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
3050 
3051 be_fw_exit:
3052 	return status;
3053 }
3054 
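/* Typically invoked via ethtool's flash-device operation,
 * e.g. "ethtool -f <iface> <firmware.ufi>" */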
3055 int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
3056 {
3057 	const struct firmware *fw;
3058 	int status;
3059 
3060 	if (!netif_running(adapter->netdev)) {
3061 		dev_err(&adapter->pdev->dev,
3062 			"Firmware load not allowed (interface is down)\n");
3063 		return -1;
3064 	}
3065 
3066 	status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
3067 	if (status)
3068 		goto fw_exit;
3069 
3070 	dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);
3071 
3072 	if (lancer_chip(adapter))
3073 		status = lancer_fw_download(adapter, fw);
3074 	else
3075 		status = be_fw_download(adapter, fw);
3076 
3077 fw_exit:
3078 	release_firmware(fw);
3079 	return status;
3080 }
3081 
3082 static const struct net_device_ops be_netdev_ops = {
3083 	.ndo_open		= be_open,
3084 	.ndo_stop		= be_close,
3085 	.ndo_start_xmit		= be_xmit,
3086 	.ndo_set_rx_mode	= be_set_rx_mode,
3087 	.ndo_set_mac_address	= be_mac_addr_set,
3088 	.ndo_change_mtu		= be_change_mtu,
3089 	.ndo_get_stats64	= be_get_stats64,
3090 	.ndo_validate_addr	= eth_validate_addr,
3091 	.ndo_vlan_rx_add_vid	= be_vlan_add_vid,
3092 	.ndo_vlan_rx_kill_vid	= be_vlan_rem_vid,
3093 	.ndo_set_vf_mac		= be_set_vf_mac,
3094 	.ndo_set_vf_vlan	= be_set_vf_vlan,
3095 	.ndo_set_vf_tx_rate	= be_set_vf_tx_rate,
3096 	.ndo_get_vf_config	= be_get_vf_config,
3097 #ifdef CONFIG_NET_POLL_CONTROLLER
3098 	.ndo_poll_controller	= be_netpoll,
3099 #endif
3100 };
3101 
3102 static void be_netdev_init(struct net_device *netdev)
3103 {
3104 	struct be_adapter *adapter = netdev_priv(netdev);
3105 	struct be_rx_obj *rxo;
3106 	int i;
3107 
3108 	netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3109 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
3110 		NETIF_F_HW_VLAN_TX;
3111 	if (be_multi_rxq(adapter))
3112 		netdev->hw_features |= NETIF_F_RXHASH;
3113 
3114 	netdev->features |= netdev->hw_features |
3115 		NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;
3116 
3117 	netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
3118 		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
3119 
3120 	netdev->flags |= IFF_MULTICAST;
3121 
3122 	netif_set_gso_max_size(netdev, 65535);
3123 
3124 	BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);
3125 
3126 	SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);
3127 
3128 	for_all_rx_queues(adapter, rxo, i)
3129 		netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
3130 				BE_NAPI_WEIGHT);
3131 
3132 	netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
3133 		BE_NAPI_WEIGHT);
3134 }
3135 
3136 static void be_unmap_pci_bars(struct be_adapter *adapter)
3137 {
3138 	if (adapter->csr)
3139 		iounmap(adapter->csr);
3140 	if (adapter->db)
3141 		iounmap(adapter->db);
3142 }
3143 
3144 static int be_map_pci_bars(struct be_adapter *adapter)
3145 {
3146 	u8 __iomem *addr;
3147 	int db_reg;
3148 
3149 	if (lancer_chip(adapter)) {
3150 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
3151 			pci_resource_len(adapter->pdev, 0));
3152 		if (addr == NULL)
3153 			return -ENOMEM;
3154 		adapter->db = addr;
3155 		return 0;
3156 	}
3157 
3158 	if (be_physfn(adapter)) {
3159 		addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
3160 				pci_resource_len(adapter->pdev, 2));
3161 		if (addr == NULL)
3162 			return -ENOMEM;
3163 		adapter->csr = addr;
3164 	}
3165 
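	/* The doorbell area is BAR 4 on BE2 and on the BE3 physical function;
	 * BE3 virtual functions expose it in BAR 0. */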
3166 	if (adapter->generation == BE_GEN2) {
3167 		db_reg = 4;
3168 	} else {
3169 		if (be_physfn(adapter))
3170 			db_reg = 4;
3171 		else
3172 			db_reg = 0;
3173 	}
3174 	addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
3175 				pci_resource_len(adapter->pdev, db_reg));
3176 	if (addr == NULL)
3177 		goto pci_map_err;
3178 	adapter->db = addr;
3179 
3180 	return 0;
3181 pci_map_err:
3182 	be_unmap_pci_bars(adapter);
3183 	return -ENOMEM;
3184 }
3185 
3186 
3187 static void be_ctrl_cleanup(struct be_adapter *adapter)
3188 {
3189 	struct be_dma_mem *mem = &adapter->mbox_mem_alloced;
3190 
3191 	be_unmap_pci_bars(adapter);
3192 
3193 	if (mem->va)
3194 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3195 				  mem->dma);
3196 
3197 	mem = &adapter->rx_filter;
3198 	if (mem->va)
3199 		dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
3200 				  mem->dma);
3201 }
3202 
3203 static int be_ctrl_init(struct be_adapter *adapter)
3204 {
3205 	struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
3206 	struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
3207 	struct be_dma_mem *rx_filter = &adapter->rx_filter;
3208 	int status;
3209 
3210 	status = be_map_pci_bars(adapter);
3211 	if (status)
3212 		goto done;
3213 
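	/* The mailbox is accessed at a 16-byte aligned address, so
	 * over-allocate by 16 bytes and align both the CPU and DMA addresses */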
3214 	mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
3215 	mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
3216 						mbox_mem_alloc->size,
3217 						&mbox_mem_alloc->dma,
3218 						GFP_KERNEL);
3219 	if (!mbox_mem_alloc->va) {
3220 		status = -ENOMEM;
3221 		goto unmap_pci_bars;
3222 	}
3223 	mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
3224 	mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
3225 	mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
3226 	memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
3227 
3228 	rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
3229 	rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
3230 					&rx_filter->dma, GFP_KERNEL);
3231 	if (rx_filter->va == NULL) {
3232 		status = -ENOMEM;
3233 		goto free_mbox;
3234 	}
3235 	memset(rx_filter->va, 0, rx_filter->size);
3236 
3237 	mutex_init(&adapter->mbox_lock);
3238 	spin_lock_init(&adapter->mcc_lock);
3239 	spin_lock_init(&adapter->mcc_cq_lock);
3240 
3241 	init_completion(&adapter->flash_compl);
3242 	pci_save_state(adapter->pdev);
3243 	return 0;
3244 
3245 free_mbox:
3246 	dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
3247 			  mbox_mem_alloc->va, mbox_mem_alloc->dma);
3248 
3249 unmap_pci_bars:
3250 	be_unmap_pci_bars(adapter);
3251 
3252 done:
3253 	return status;
3254 }
3255 
3256 static void be_stats_cleanup(struct be_adapter *adapter)
3257 {
3258 	struct be_dma_mem *cmd = &adapter->stats_cmd;
3259 
3260 	if (cmd->va)
3261 		dma_free_coherent(&adapter->pdev->dev, cmd->size,
3262 				  cmd->va, cmd->dma);
3263 }
3264 
3265 static int be_stats_init(struct be_adapter *adapter)
3266 {
3267 	struct be_dma_mem *cmd = &adapter->stats_cmd;
3268 
3269 	if (adapter->generation == BE_GEN2) {
3270 		cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
3271 	} else {
3272 		if (lancer_chip(adapter))
3273 			cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
3274 		else
3275 			cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
3276 	}
3277 	cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
3278 				     GFP_KERNEL);
3279 	if (cmd->va == NULL)
3280 		return -1;
3281 	memset(cmd->va, 0, cmd->size);
3282 	return 0;
3283 }
3284 
3285 static void __devexit be_remove(struct pci_dev *pdev)
3286 {
3287 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3288 
3289 	if (!adapter)
3290 		return;
3291 
3292 	cancel_delayed_work_sync(&adapter->work);
3293 
3294 	unregister_netdev(adapter->netdev);
3295 
3296 	be_clear(adapter);
3297 
3298 	be_stats_cleanup(adapter);
3299 
3300 	be_ctrl_cleanup(adapter);
3301 
3302 	be_sriov_disable(adapter);
3303 
3304 	be_msix_disable(adapter);
3305 
3306 	pci_set_drvdata(pdev, NULL);
3307 	pci_release_regions(pdev);
3308 	pci_disable_device(pdev);
3309 
3310 	free_netdev(adapter->netdev);
3311 }
3312 
3313 static int be_get_config(struct be_adapter *adapter)
3314 {
3315 	int status;
3316 
3317 	status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
3318 			&adapter->function_mode, &adapter->function_caps);
3319 	if (status)
3320 		return status;
3321 
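	/* In FLEX10 (multi-channel) mode only a quarter of the supported
	 * VLAN entries are available to this function */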
3322 	if (adapter->function_mode & FLEX10_MODE)
3323 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
3324 	else
3325 		adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;
3326 
3327 	status = be_cmd_get_cntl_attributes(adapter);
3328 	if (status)
3329 		return status;
3330 
3331 	return 0;
3332 }
3333 
3334 static int be_dev_family_check(struct be_adapter *adapter)
3335 {
3336 	struct pci_dev *pdev = adapter->pdev;
3337 	u32 sli_intf = 0, if_type;
3338 
3339 	switch (pdev->device) {
3340 	case BE_DEVICE_ID1:
3341 	case OC_DEVICE_ID1:
3342 		adapter->generation = BE_GEN2;
3343 		break;
3344 	case BE_DEVICE_ID2:
3345 	case OC_DEVICE_ID2:
3346 	case OC_DEVICE_ID5:
3347 		adapter->generation = BE_GEN3;
3348 		break;
3349 	case OC_DEVICE_ID3:
3350 	case OC_DEVICE_ID4:
3351 		pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
3352 		if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
3353 						SLI_INTF_IF_TYPE_SHIFT;
3354 
3355 		if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
3356 			if_type != 0x02) {
3357 			dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
3358 			return -EINVAL;
3359 		}
3360 		adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
3361 					 SLI_INTF_FAMILY_SHIFT);
3362 		adapter->generation = BE_GEN3;
3363 		break;
3364 	default:
3365 		adapter->generation = 0;
3366 	}
3367 	return 0;
3368 }
3369 
3370 static int lancer_wait_ready(struct be_adapter *adapter)
3371 {
3372 #define SLIPORT_READY_TIMEOUT 30
3373 	u32 sliport_status;
3374 	int status = 0, i;
3375 
3376 	for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
3377 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3378 		if (sliport_status & SLIPORT_STATUS_RDY_MASK)
3379 			break;
3380 
3381 		msleep(1000);
3382 	}
3383 
3384 	if (i == SLIPORT_READY_TIMEOUT)
3385 		status = -1;
3386 
3387 	return status;
3388 }
3389 
3390 static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
3391 {
3392 	int status;
3393 	u32 sliport_status, err, reset_needed;
3394 	status = lancer_wait_ready(adapter);
3395 	if (!status) {
3396 		sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3397 		err = sliport_status & SLIPORT_STATUS_ERR_MASK;
3398 		reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
3399 		if (err && reset_needed) {
3400 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
3401 					adapter->db + SLIPORT_CONTROL_OFFSET);
3402 
3403 			/* check adapter has corrected the error */
3404 			status = lancer_wait_ready(adapter);
3405 			sliport_status = ioread32(adapter->db +
3406 							SLIPORT_STATUS_OFFSET);
3407 			sliport_status &= (SLIPORT_STATUS_ERR_MASK |
3408 						SLIPORT_STATUS_RN_MASK);
3409 			if (status || sliport_status)
3410 				status = -1;
3411 		} else if (err || reset_needed) {
3412 			status = -1;
3413 		}
3414 	}
3415 	return status;
3416 }
3417 
3418 static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
3419 {
3420 	int status;
3421 	u32 sliport_status;
3422 
3423 	if (adapter->eeh_err || adapter->ue_detected)
3424 		return;
3425 
3426 	sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
3427 
3428 	if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
3429 		dev_err(&adapter->pdev->dev,
3430 				"Adapter in error state."
3431 				"Trying to recover.\n");
3432 
3433 		status = lancer_test_and_set_rdy_state(adapter);
3434 		if (status)
3435 			goto err;
3436 
3437 		netif_device_detach(adapter->netdev);
3438 
3439 		if (netif_running(adapter->netdev))
3440 			be_close(adapter->netdev);
3441 
3442 		be_clear(adapter);
3443 
3444 		adapter->fw_timeout = false;
3445 
3446 		status = be_setup(adapter);
3447 		if (status)
3448 			goto err;
3449 
3450 		if (netif_running(adapter->netdev)) {
3451 			status = be_open(adapter->netdev);
3452 			if (status)
3453 				goto err;
3454 		}
3455 
3456 		netif_device_attach(adapter->netdev);
3457 
3458 		dev_err(&adapter->pdev->dev,
3459 				"Adapter error recovery succeeded\n");
3460 	}
3461 	return;
3462 err:
3463 	dev_err(&adapter->pdev->dev,
3464 			"Adapter error recovery failed\n");
3465 }
3466 
3467 static void be_worker(struct work_struct *work)
3468 {
3469 	struct be_adapter *adapter =
3470 		container_of(work, struct be_adapter, work.work);
3471 	struct be_rx_obj *rxo;
3472 	int i;
3473 
3474 	if (lancer_chip(adapter))
3475 		lancer_test_and_recover_fn_err(adapter);
3476 
3477 	be_detect_dump_ue(adapter);
3478 
3479 	/* when interrupts are not yet enabled, just reap any pending
3480 	 * mcc completions */
3481 	if (!netif_running(adapter->netdev)) {
3482 		int mcc_compl, status = 0;
3483 
3484 		mcc_compl = be_process_mcc(adapter, &status);
3485 
3486 		if (mcc_compl) {
3487 			struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
3488 			be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
3489 		}
3490 
3491 		goto reschedule;
3492 	}
3493 
3494 	if (!adapter->stats_cmd_sent) {
3495 		if (lancer_chip(adapter))
3496 			lancer_cmd_get_pport_stats(adapter,
3497 						&adapter->stats_cmd);
3498 		else
3499 			be_cmd_get_stats(adapter, &adapter->stats_cmd);
3500 	}
3501 
3502 	for_all_rx_queues(adapter, rxo, i) {
3503 		be_rx_eqd_update(adapter, rxo);
3504 
3505 		if (rxo->rx_post_starved) {
3506 			rxo->rx_post_starved = false;
3507 			be_post_rx_frags(rxo, GFP_KERNEL);
3508 		}
3509 	}
3510 
3511 reschedule:
3512 	adapter->work_counter++;
3513 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
3514 }
3515 
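/* PCI probe: enable the device, identify the chip generation, set the DMA
 * mask (64-bit with 32-bit fallback), bring up SR-IOV and the control path,
 * sync with firmware (POST/fw_init/reset), then create the queues and
 * register the netdev.  Failures unwind in reverse order via the labels
 * below.
 */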
3516 static int __devinit be_probe(struct pci_dev *pdev,
3517 			const struct pci_device_id *pdev_id)
3518 {
3519 	int status = 0;
3520 	struct be_adapter *adapter;
3521 	struct net_device *netdev;
3522 
3523 	status = pci_enable_device(pdev);
3524 	if (status)
3525 		goto do_none;
3526 
3527 	status = pci_request_regions(pdev, DRV_NAME);
3528 	if (status)
3529 		goto disable_dev;
3530 	pci_set_master(pdev);
3531 
3532 	netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
3533 	if (netdev == NULL) {
3534 		status = -ENOMEM;
3535 		goto rel_reg;
3536 	}
3537 	adapter = netdev_priv(netdev);
3538 	adapter->pdev = pdev;
3539 	pci_set_drvdata(pdev, adapter);
3540 
3541 	status = be_dev_family_check(adapter);
3542 	if (status)
3543 		goto free_netdev;
3544 
3545 	adapter->netdev = netdev;
3546 	SET_NETDEV_DEV(netdev, &pdev->dev);
3547 
3548 	status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
3549 	if (!status) {
3550 		netdev->features |= NETIF_F_HIGHDMA;
3551 	} else {
3552 		status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
3553 		if (status) {
3554 			dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
3555 			goto free_netdev;
3556 		}
3557 	}
3558 
3559 	status = be_sriov_enable(adapter);
3560 	if (status)
3561 		goto free_netdev;
3562 
3563 	status = be_ctrl_init(adapter);
3564 	if (status)
3565 		goto disable_sriov;
3566 
3567 	if (lancer_chip(adapter)) {
3568 		status = lancer_wait_ready(adapter);
3569 		if (!status) {
3570 			iowrite32(SLI_PORT_CONTROL_IP_MASK,
3571 					adapter->db + SLIPORT_CONTROL_OFFSET);
3572 			status = lancer_test_and_set_rdy_state(adapter);
3573 		}
3574 		if (status) {
3575 			dev_err(&pdev->dev, "Adapter in non recoverable error\n");
3576 			goto ctrl_clean;
3577 		}
3578 	}
3579 
3580 	/* sync up with fw's ready state */
3581 	if (be_physfn(adapter)) {
3582 		status = be_cmd_POST(adapter);
3583 		if (status)
3584 			goto ctrl_clean;
3585 	}
3586 
3587 	/* tell fw we're ready to fire cmds */
3588 	status = be_cmd_fw_init(adapter);
3589 	if (status)
3590 		goto ctrl_clean;
3591 
3592 	status = be_cmd_reset_function(adapter);
3593 	if (status)
3594 		goto ctrl_clean;
3595 
3596 	status = be_stats_init(adapter);
3597 	if (status)
3598 		goto ctrl_clean;
3599 
3600 	status = be_get_config(adapter);
3601 	if (status)
3602 		goto stats_clean;
3603 
3604 	/* The INTR bit may be set in the card when probed by a kdump kernel
3605 	 * after a crash.
3606 	 */
3607 	if (!lancer_chip(adapter))
3608 		be_intr_set(adapter, false);
3609 
3610 	be_msix_enable(adapter);
3611 
3612 	INIT_DELAYED_WORK(&adapter->work, be_worker);
3613 	adapter->rx_fc = adapter->tx_fc = true;
3614 
3615 	status = be_setup(adapter);
3616 	if (status)
3617 		goto msix_disable;
3618 
3619 	be_netdev_init(netdev);
3620 	status = register_netdev(netdev);
3621 	if (status != 0)
3622 		goto unsetup;
3623 
3624 	dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);
3625 
3626 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3627 	return 0;
3628 
3629 unsetup:
3630 	be_clear(adapter);
3631 msix_disable:
3632 	be_msix_disable(adapter);
3633 stats_clean:
3634 	be_stats_cleanup(adapter);
3635 ctrl_clean:
3636 	be_ctrl_cleanup(adapter);
3637 disable_sriov:
3638 	be_sriov_disable(adapter);
3639 free_netdev:
3640 	free_netdev(netdev);
3641 	pci_set_drvdata(pdev, NULL);
3642 rel_reg:
3643 	pci_release_regions(pdev);
3644 disable_dev:
3645 	pci_disable_device(pdev);
3646 do_none:
3647 	dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
3648 	return status;
3649 }
3650 
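/* Legacy PM suspend: stop the worker, arm WoL if requested, detach and close
 * the netdev, tear down queues and MSI-X, then power the device down.
 */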
3651 static int be_suspend(struct pci_dev *pdev, pm_message_t state)
3652 {
3653 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3654 	struct net_device *netdev =  adapter->netdev;
3655 
3656 	cancel_delayed_work_sync(&adapter->work);
3657 	if (adapter->wol)
3658 		be_setup_wol(adapter, true);
3659 
3660 	netif_device_detach(netdev);
3661 	if (netif_running(netdev)) {
3662 		rtnl_lock();
3663 		be_close(netdev);
3664 		rtnl_unlock();
3665 	}
3666 	be_clear(adapter);
3667 
3668 	be_msix_disable(adapter);
3669 	pci_save_state(pdev);
3670 	pci_disable_device(pdev);
3671 	pci_set_power_state(pdev, pci_choose_state(pdev, state));
3672 	return 0;
3673 }
3674 
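/* Legacy PM resume: re-enable the device, restore PCI state, re-init
 * firmware, rebuild the queues, reopen the interface and restart the worker;
 * WoL is disarmed once the device is back up.
 */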
3675 static int be_resume(struct pci_dev *pdev)
3676 {
3677 	int status = 0;
3678 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3679 	struct net_device *netdev =  adapter->netdev;
3680 
3681 	netif_device_detach(netdev);
3682 
3683 	status = pci_enable_device(pdev);
3684 	if (status)
3685 		return status;
3686 
3687 	pci_set_power_state(pdev, 0);
3688 	pci_restore_state(pdev);
3689 
3690 	be_msix_enable(adapter);
3691 	/* tell fw we're ready to fire cmds */
3692 	status = be_cmd_fw_init(adapter);
3693 	if (status)
3694 		return status;
3695 
3696 	be_setup(adapter);
3697 	if (netif_running(netdev)) {
3698 		rtnl_lock();
3699 		be_open(netdev);
3700 		rtnl_unlock();
3701 	}
3702 	netif_device_attach(netdev);
3703 
3704 	if (adapter->wol)
3705 		be_setup_wol(adapter, false);
3706 
3707 	schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
3708 	return 0;
3709 }
3710 
3711 /*
3712  * An FLR will stop BE from DMAing any data.
3713  */
3714 static void be_shutdown(struct pci_dev *pdev)
3715 {
3716 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3717 
3718 	if (!adapter)
3719 		return;
3720 
3721 	cancel_delayed_work_sync(&adapter->work);
3722 
3723 	netif_device_detach(adapter->netdev);
3724 
3725 	if (adapter->wol)
3726 		be_setup_wol(adapter, true);
3727 
3728 	be_cmd_reset_function(adapter);
3729 
3730 	pci_disable_device(pdev);
3731 }
3732 
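/* EEH error_detected callback: mark the adapter as failed, detach and close
 * the netdev and tear down resources.  Request a slot reset unless the
 * failure is permanent.
 */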
3733 static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
3734 				pci_channel_state_t state)
3735 {
3736 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3737 	struct net_device *netdev =  adapter->netdev;
3738 
3739 	dev_err(&adapter->pdev->dev, "EEH error detected\n");
3740 
3741 	adapter->eeh_err = true;
3742 
3743 	netif_device_detach(netdev);
3744 
3745 	if (netif_running(netdev)) {
3746 		rtnl_lock();
3747 		be_close(netdev);
3748 		rtnl_unlock();
3749 	}
3750 	be_clear(adapter);
3751 
3752 	if (state == pci_channel_io_perm_failure)
3753 		return PCI_ERS_RESULT_DISCONNECT;
3754 
3755 	pci_disable_device(pdev);
3756 
3757 	return PCI_ERS_RESULT_NEED_RESET;
3758 }
3759 
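/* EEH slot_reset callback: clear the error flags, re-enable the device,
 * restore PCI state and verify firmware readiness via POST before reporting
 * the slot as recovered.
 */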
3760 static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
3761 {
3762 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3763 	int status;
3764 
3765 	dev_info(&adapter->pdev->dev, "EEH reset\n");
3766 	adapter->eeh_err = false;
3767 	adapter->ue_detected = false;
3768 	adapter->fw_timeout = false;
3769 
3770 	status = pci_enable_device(pdev);
3771 	if (status)
3772 		return PCI_ERS_RESULT_DISCONNECT;
3773 
3774 	pci_set_master(pdev);
3775 	pci_set_power_state(pdev, 0);
3776 	pci_restore_state(pdev);
3777 
3778 	/* Check if card is ok and fw is ready */
3779 	status = be_cmd_POST(adapter);
3780 	if (status)
3781 		return PCI_ERS_RESULT_DISCONNECT;
3782 
3783 	return PCI_ERS_RESULT_RECOVERED;
3784 }
3785 
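/* EEH resume callback: re-init firmware, rebuild the adapter and reopen the
 * interface if it was running before the error.
 */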
3786 static void be_eeh_resume(struct pci_dev *pdev)
3787 {
3788 	int status = 0;
3789 	struct be_adapter *adapter = pci_get_drvdata(pdev);
3790 	struct net_device *netdev =  adapter->netdev;
3791 
3792 	dev_info(&adapter->pdev->dev, "EEH resume\n");
3793 
3794 	pci_save_state(pdev);
3795 
3796 	/* tell fw we're ready to fire cmds */
3797 	status = be_cmd_fw_init(adapter);
3798 	if (status)
3799 		goto err;
3800 
3801 	status = be_setup(adapter);
3802 	if (status)
3803 		goto err;
3804 
3805 	if (netif_running(netdev)) {
3806 		status = be_open(netdev);
3807 		if (status)
3808 			goto err;
3809 	}
3810 	netif_device_attach(netdev);
3811 	return;
3812 err:
3813 	dev_err(&adapter->pdev->dev, "EEH resume failed\n");
3814 }
3815 
3816 static struct pci_error_handlers be_eeh_handlers = {
3817 	.error_detected = be_eeh_err_detected,
3818 	.slot_reset = be_eeh_reset,
3819 	.resume = be_eeh_resume,
3820 };
3821 
3822 static struct pci_driver be_driver = {
3823 	.name = DRV_NAME,
3824 	.id_table = be_dev_ids,
3825 	.probe = be_probe,
3826 	.remove = be_remove,
3827 	.suspend = be_suspend,
3828 	.resume = be_resume,
3829 	.shutdown = be_shutdown,
3830 	.err_handler = &be_eeh_handlers
3831 };
3832 
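/* Validate rx_frag_size (2048/4096/8192, falling back to 2048) before
 * registering the PCI driver.  For example, assuming the module is built
 * under its usual be2net name, parameters can be passed at load time:
 *   modprobe be2net rx_frag_size=4096 num_vfs=2
 */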
3833 static int __init be_init_module(void)
3834 {
3835 	if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
3836 	    rx_frag_size != 2048) {
3837 		printk(KERN_WARNING DRV_NAME
3838 			" : Module param rx_frag_size must be 2048/4096/8192."
3839 			" Using 2048\n");
3840 		rx_frag_size = 2048;
3841 	}
3842 
3843 	return pci_register_driver(&be_driver);
3844 }
3845 module_init(be_init_module);
3846 
3847 static void __exit be_exit_module(void)
3848 {
3849 	pci_unregister_driver(&be_driver);
3850 }
3851 module_exit(be_exit_module);
3852