// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2015 - 2022 Beijing WangXun Technology Co., Ltd. */

#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <linux/if_ether.h>
#include <linux/if_vlan.h>
#include <linux/iopoll.h>
#include <linux/pci.h>

#include "wx_type.h"
#include "wx_lib.h"
#include "wx_hw.h"

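/* Shared MDI access helper: programs WX_MSCA with the register, port and
 * device address, kicks off a read via WX_MSCC and polls the BUSY bit
 * until the transaction completes. Used by both the clause-22 and
 * clause-45 wrappers below.
 */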
static int wx_phy_read_reg_mdi(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	command = WX_MSCC_CMD(WX_MSCA_CMD_READ) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret) {
		wx_err(wx, "Mdio read c22 command did not complete.\n");
		return ret;
	}

	return (u16)rd32(wx, WX_MSCC);
}

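/* Write-side counterpart of wx_phy_read_reg_mdi(): same address cycle,
 * but the data word is carried in the WX_MSCC command itself.
 */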
static int wx_phy_write_reg_mdi(struct mii_bus *bus, int phy_addr,
				int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;
	u32 command, val;
	int ret;

	/* setup and write the address cycle command */
	command = WX_MSCA_RA(regnum) |
		  WX_MSCA_PA(phy_addr) |
		  WX_MSCA_DA(devnum);
	wr32(wx, WX_MSCA, command);

	command = value | WX_MSCC_CMD(WX_MSCA_CMD_WRITE) | WX_MSCC_BUSY;
	if (wx->mac.type == wx_mac_em)
		command |= WX_MDIO_CLK(6);
	wr32(wx, WX_MSCC, command);

	/* wait to complete */
	ret = read_poll_timeout(rd32, val, !(val & WX_MSCC_BUSY), 1000,
				100000, false, wx, WX_MSCC);
	if (ret)
		wx_err(wx, "Mdio write c22 command did not complete.\n");

	return ret;
}

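/* The c22/c45 wrappers differ only in what they write to
 * WX_MDIO_CLAUSE_SELECT before the transfer: 0xF appears to select
 * clause-22 framing and 0 clause-45 framing (inferred from the code
 * below; the datasheet bit layout is not shown here).
 */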
int wx_phy_read_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_read_reg_mdi(bus, phy_addr, 0, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c22);

int wx_phy_write_reg_mdi_c22(struct mii_bus *bus, int phy_addr, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0xF);
	return wx_phy_write_reg_mdi(bus, phy_addr, 0, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c22);

int wx_phy_read_reg_mdi_c45(struct mii_bus *bus, int phy_addr, int devnum, int regnum)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_read_reg_mdi(bus, phy_addr, devnum, regnum);
}
EXPORT_SYMBOL(wx_phy_read_reg_mdi_c45);

int wx_phy_write_reg_mdi_c45(struct mii_bus *bus, int phy_addr,
			     int devnum, int regnum, u16 value)
{
	struct wx *wx = bus->priv;

	wr32(wx, WX_MDIO_CLAUSE_SELECT, 0);
	return wx_phy_write_reg_mdi(bus, phy_addr, devnum, regnum, value);
}
EXPORT_SYMBOL(wx_phy_write_reg_mdi_c45);

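/* The queue interrupt mask is a 64-bit value split across two 32-bit
 * registers; only the sp and aml MACs expose the upper IMS/IMC register
 * for queue vectors 32-63.
 */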
static void wx_intr_disable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMS(0), mask);

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMS(1), mask);
		break;
	default:
		break;
	}
}

void wx_intr_enable(struct wx *wx, u64 qmask)
{
	u32 mask;

	mask = (qmask & U32_MAX);
	if (mask)
		wr32(wx, WX_PX_IMC(0), mask);

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		mask = (qmask >> 32);
		if (mask)
			wr32(wx, WX_PX_IMC(1), mask);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL(wx_intr_enable);

/**
 * wx_irq_disable - Mask off interrupt generation on the NIC
 * @wx: board private structure
 **/
void wx_irq_disable(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;

	wr32(wx, WX_PX_MISC_IEN, 0);
	wx_intr_disable(wx, WX_INTR_ALL);

	if (pdev->msix_enabled) {
		int vector;

		for (vector = 0; vector < wx->num_q_vectors; vector++)
			synchronize_irq(wx->msix_q_entries[vector].vector);

		synchronize_irq(wx->msix_entry->vector);
	} else {
		synchronize_irq(pdev->irq);
	}
}
EXPORT_SYMBOL(wx_irq_disable);

/* cmd_addr carries the extra operand for some commands:
 * 1. the sector address, when issuing an erase sector command
 * 2. the flash address, when issuing flash read/write commands
 */
static int wx_fmgr_cmd_op(struct wx *wx, u32 cmd, u32 cmd_addr)
{
	u32 cmd_val = 0, val = 0;

	cmd_val = WX_SPI_CMD_CMD(cmd) |
		  WX_SPI_CMD_CLK(WX_SPI_CLK_DIV) |
		  cmd_addr;
	wr32(wx, WX_SPI_CMD, cmd_val);

	return read_poll_timeout(rd32, val, (val & 0x1), 10, 100000,
				 false, wx, WX_SPI_STATUS);
}

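/* Issue a one-dword SPI flash read through the flash manager and fetch
 * the result from WX_SPI_DATA once WX_SPI_STATUS reports completion.
 */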
static int wx_flash_read_dword(struct wx *wx, u32 addr, u32 *data)
{
	int ret = 0;

	ret = wx_fmgr_cmd_op(wx, WX_SPI_CMD_READ_DWORD, addr);
	if (ret < 0)
		return ret;

	*data = rd32(wx, WX_SPI_DATA);

	return ret;
}

int wx_check_flash_load(struct wx *hw, u32 check_bit)
{
	u32 reg = 0;
	int err = 0;

	/* if flash is present */
	if (!(rd32(hw, WX_SPI_STATUS) &
	      WX_SPI_STATUS_FLASH_BYPASS)) {
		/* wait for hw to finish loading from flash */
		err = read_poll_timeout(rd32, reg, !(reg & check_bit), 20000, 2000000,
					false, hw, WX_SPI_ILDR_STATUS);
		if (err < 0)
			wx_err(hw, "Check flash load timeout.\n");
	}

	return err;
}
EXPORT_SYMBOL(wx_check_flash_load);

void wx_control_hw(struct wx *wx, bool drv)
{
	/* True : Let firmware know the driver has taken over
	 * False : Let firmware take over control of hw
	 */
	wr32m(wx, WX_CFG_PORT_CTL, WX_CFG_PORT_CTL_DRV_LOAD,
	      drv ? WX_CFG_PORT_CTL_DRV_LOAD : 0);
}
EXPORT_SYMBOL(wx_control_hw);

/**
 * wx_mng_present - returns 0 when management capability is present
 * @wx: pointer to hardware structure
 */
int wx_mng_present(struct wx *wx)
{
	u32 fwsm;

	fwsm = rd32(wx, WX_MIS_ST);
	if (fwsm & WX_MIS_ST_MNG_INIT_DN)
		return 0;
	else
		return -EACCES;
}
EXPORT_SYMBOL(wx_mng_present);

/* Software lock to be held while the software semaphore is being accessed. */
static DEFINE_MUTEX(wx_sw_sync_lock);

/**
 * wx_release_sw_sync - Release SW semaphore
 * @wx: pointer to hardware structure
 * @mask: Mask to specify which semaphore to release
 *
 * Releases the SW semaphore for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static void wx_release_sw_sync(struct wx *wx, u32 mask)
{
	mutex_lock(&wx_sw_sync_lock);
	wr32m(wx, WX_MNG_SWFW_SYNC, mask, 0);
	mutex_unlock(&wx_sw_sync_lock);
}

/**
 * wx_acquire_sw_sync - Acquire SW semaphore
 * @wx: pointer to hardware structure
 * @mask: Mask to specify which semaphore to acquire
 *
 * Acquires the SW semaphore for the specified
 * function (CSR, PHY0, PHY1, EEPROM, Flash)
 **/
static int wx_acquire_sw_sync(struct wx *wx, u32 mask)
{
	u32 sem = 0;
	int ret = 0;

	mutex_lock(&wx_sw_sync_lock);
	ret = read_poll_timeout(rd32, sem, !(sem & mask),
				5000, 2000000, false, wx, WX_MNG_SWFW_SYNC);
	if (!ret) {
		sem |= mask;
		wr32(wx, WX_MNG_SWFW_SYNC, sem);
	} else {
		wx_err(wx, "SW Semaphore not granted: 0x%x.\n", sem);
	}
	mutex_unlock(&wx_sw_sync_lock);

	return ret;
}

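/* A minimal usage sketch for the semaphore pair above, mirroring what
 * wx_read_ee_hostif() does later in this file (WX_MNG_SWFW_SYNC_SW_FLASH
 * is simply the mask relevant to that caller):
 *
 *	err = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
 *	if (err == 0) {
 *		... access the shared resource ...
 *		wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
 *	}
 */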
static int wx_host_interface_command_s(struct wx *wx, u32 *buffer,
				       u32 length, u32 timeout, bool return_data)
{
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	u32 hicr, i, bi, buf[64] = {};
	int status = 0;
	u32 dword_len;
	u16 buf_len;

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	if (status != 0)
		return status;

	dword_len = length >> 2;

	/* The device driver writes the relevant command block
	 * into the ram area.
	 */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_MNG_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		buf[i] = rd32a(wx, WX_MNG_MBOX, i);
	}
	/* Setting this bit tells the ARC that a new command is pending. */
	wr32m(wx, WX_MNG_MBOX_CTL,
	      WX_MNG_MBOX_CTL_SWRDY, WX_MNG_MBOX_CTL_SWRDY);

	status = read_poll_timeout(rd32, hicr, hicr & WX_MNG_MBOX_CTL_FWRDY, 1000,
				   timeout * 1000, false, wx, WX_MNG_MBOX_CTL);

	buf[0] = rd32(wx, WX_MNG_MBOX);
	if ((buf[0] & 0xff0000) >> 16 == 0x80) {
		wx_err(wx, "Unknown FW command: 0x%x\n", buffer[0] & 0xff);
		status = -EINVAL;
		goto rel_out;
	}

	/* Check command completion */
	if (status) {
		wx_err(wx, "Command failed with no valid status.\n");
		wx_dbg(wx, "write value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buffer[i]);
		wx_dbg(wx, "read value:\n");
		for (i = 0; i < dword_len; i++)
			wx_dbg(wx, "%x ", buf[i]);
		wx_dbg(wx, "\ncheck: %x %x\n", buffer[0] & 0xff, ~buf[0] >> 24);

		goto rel_out;
	}

	if (!return_data)
		goto rel_out;

	/* Calculate length in DWORDs */
	dword_len = hdr_size >> 2;

	/* first pull in the header so we know the buffer length */
	for (bi = 0; bi < dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

	/* If there is anything in the data position, pull it in */
	buf_len = ((struct wx_hic_hdr *)buffer)->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		status = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;

	/* Pull in the rest of the buffer (bi is where we left off) */
	for (; bi <= dword_len; bi++) {
		buffer[bi] = rd32a(wx, WX_MNG_MBOX, bi);
		le32_to_cpus(&buffer[bi]);
	}

rel_out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_MB);
	return status;
}

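/* Poll helper for the SW-FW mailbox ring: returns true once the header
 * read back from WX_FW2SW_MBOX carries the command we sent and the
 * sequence index we are waiting for.
 */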
static bool wx_poll_fw_reply(struct wx *wx, u32 *buffer, u8 send_cmd)
{
	u32 dword_len = sizeof(struct wx_hic_hdr) >> 2;
	struct wx_hic_hdr *recv_hdr;
	u32 i;

	/* read hdr */
	for (i = 0; i < dword_len; i++) {
		buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
		le32_to_cpus(&buffer[i]);
	}

	/* check hdr */
	recv_hdr = (struct wx_hic_hdr *)buffer;
	if (recv_hdr->cmd == send_cmd &&
	    recv_hdr->index == wx->swfw_index)
		return true;

	return false;
}

static int wx_host_interface_command_r(struct wx *wx, u32 *buffer,
				       u32 length, u32 timeout, bool return_data)
{
	struct wx_hic_hdr *hdr = (struct wx_hic_hdr *)buffer;
	u32 hdr_size = sizeof(struct wx_hic_hdr);
	bool busy, reply;
	u32 dword_len;
	u16 buf_len;
	int err = 0;
	u8 send_cmd;
	u32 i;

	/* wait to get lock */
	might_sleep();
	err = read_poll_timeout(test_and_set_bit, busy, !busy, 1000, timeout * 1000,
				false, WX_STATE_SWFW_BUSY, wx->state);
	if (err)
		return err;

	/* index is a unique seq id for each mbox message */
	hdr->index = wx->swfw_index;
	send_cmd = hdr->cmd;

	dword_len = length >> 2;
	/* write data to SW-FW mbox array */
	for (i = 0; i < dword_len; i++) {
		wr32a(wx, WX_SW2FW_MBOX, i, (__force u32)cpu_to_le32(buffer[i]));
		/* write flush */
		rd32a(wx, WX_SW2FW_MBOX, i);
	}

	/* generate interrupt to notify FW */
	wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, 0);
	wr32m(wx, WX_SW2FW_MBOX_CMD, WX_SW2FW_MBOX_CMD_VLD, WX_SW2FW_MBOX_CMD_VLD);

	/* poll for a reply from FW */
	err = read_poll_timeout(wx_poll_fw_reply, reply, reply, 2000,
				timeout * 1000, true, wx, buffer, send_cmd);
	if (err) {
		wx_err(wx, "Polling for FW reply timed out, cmd: 0x%x, index: %d\n",
		       send_cmd, wx->swfw_index);
		goto rel_out;
	}

	if (hdr->cmd_or_resp.ret_status == 0x80) {
		wx_err(wx, "Unknown FW command: 0x%x\n", send_cmd);
		err = -EINVAL;
		goto rel_out;
	}

	/* expect no reply from FW then return */
	if (!return_data)
		goto rel_out;

	/* If there is anything in the data position, pull it in */
	buf_len = hdr->buf_len;
	if (buf_len == 0)
		goto rel_out;

	if (length < buf_len + hdr_size) {
		wx_err(wx, "Buffer not large enough for reply message.\n");
		err = -EFAULT;
		goto rel_out;
	}

	/* Calculate length in DWORDs, add 3 for odd lengths */
	dword_len = (buf_len + 3) >> 2;
	for (i = hdr_size >> 2; i <= dword_len; i++) {
		buffer[i] = rd32a(wx, WX_FW2SW_MBOX, i);
		le32_to_cpus(&buffer[i]);
	}

rel_out:
	/* bump index; it stands in for wx_hic_hdr.checksum */
	if (wx->swfw_index == WX_HIC_HDR_INDEX_MAX)
		wx->swfw_index = 0;
	else
		wx->swfw_index++;

	clear_bit(WX_STATE_SWFW_BUSY, wx->state);
	return err;
}

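/* Note on sequencing: in ring mode the driver reuses the checksum byte
 * of struct wx_hic_hdr as a sequence index (wx->swfw_index) so replies
 * can be matched to requests; it wraps after WX_HIC_HDR_INDEX_MAX.
 */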
/**
 * wx_host_interface_command - Issue command to manageability block
 * @wx: pointer to the HW structure
 * @buffer: contains the command to write and where the return status will
 *          be placed
 * @length: length of buffer, must be multiple of 4 bytes
 * @timeout: time in ms to wait for command completion
 * @return_data: read and return data from the buffer (true) or not (false)
 *               Needed because FW structures are big endian and decoding of
 *               these fields can be 8 bit or 16 bit based on command. Decoding
 *               is not easily understood without making a table of commands.
 *               So we will leave this up to the caller to read back the data
 *               in these cases.
 **/
int wx_host_interface_command(struct wx *wx, u32 *buffer,
			      u32 length, u32 timeout, bool return_data)
{
	if (length == 0 || length > WX_HI_MAX_BLOCK_BYTE_LENGTH) {
		wx_err(wx, "Buffer length failure buffersize=%d.\n", length);
		return -EINVAL;
	}

	/* Calculate length in DWORDs. We must be DWORD aligned */
	if ((length % (sizeof(u32))) != 0) {
		wx_err(wx, "Buffer length failure, not aligned to dword");
		return -EINVAL;
	}

	if (test_bit(WX_FLAG_SWFW_RING, wx->flags))
		return wx_host_interface_command_r(wx, buffer, length,
						   timeout, return_data);

	return wx_host_interface_command_s(wx, buffer, length, timeout, return_data);
}
EXPORT_SYMBOL(wx_host_interface_command);

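/**
 * wx_set_pps - configure the PPS output via a host interface command
 * @wx: pointer to hardware structure
 * @enable: enable or disable the PPS signal
 * @nsec: nanosecond part of the PPS start time
 * @cycles: cycle count matching @nsec
 *
 * Builds an FW_PPS_SET_CMD message and hands it to the firmware. The
 * parameter descriptions are inferred from the field names.
 **/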
int wx_set_pps(struct wx *wx, bool enable, u64 nsec, u64 cycles)
{
	struct wx_hic_set_pps pps_cmd;

	pps_cmd.hdr.cmd = FW_PPS_SET_CMD;
	pps_cmd.hdr.buf_len = FW_PPS_SET_LEN;
	pps_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED;
	pps_cmd.lan_id = wx->bus.func;
	pps_cmd.enable = (u8)enable;
	pps_cmd.nsec = nsec;
	pps_cmd.cycles = cycles;
	pps_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM;

	return wx_host_interface_command(wx, (u32 *)&pps_cmd,
					 sizeof(pps_cmd),
					 WX_HI_COMMAND_TIMEOUT,
					 false);
}

/**
 * wx_read_ee_hostif_data - Read EEPROM word using a host interface cmd
 * assuming that the semaphore is already obtained.
 * @wx: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the hostif.
 **/
static int wx_read_ee_hostif_data(struct wx *wx, u16 offset, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	int status;

	buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
	buffer.hdr.req.buf_lenh = 0;
	buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
	buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

	/* convert offset from words to bytes */
	buffer.address = (__force u32)cpu_to_be32(offset * 2);
	/* one word */
	buffer.length = (__force u16)cpu_to_be16(sizeof(u16));

	status = wx_host_interface_command(wx, (u32 *)&buffer, sizeof(buffer),
					   WX_HI_COMMAND_TIMEOUT, false);

	if (status != 0)
		return status;

	if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
		*data = (u16)rd32a(wx, WX_MNG_MBOX, FW_NVM_DATA_OFFSET);
	else
		*data = (u16)rd32a(wx, WX_FW2SW_MBOX, FW_NVM_DATA_OFFSET);

	return status;
}

/**
 * wx_read_ee_hostif - Read EEPROM word using a host interface cmd
 * @wx: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @data: word read from the EEPROM
 *
 * Reads a 16 bit word from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif(struct wx *wx, u16 offset, u16 *data)
{
	int status = 0;

	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status == 0) {
		status = wx_read_ee_hostif_data(wx, offset, data);
		wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	}

	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif);

/**
 * wx_read_ee_hostif_buffer - Read EEPROM word(s) using hostif
 * @wx: pointer to hardware structure
 * @offset: offset of word in the EEPROM to read
 * @words: number of words
 * @data: word(s) read from the EEPROM
 *
 * Reads 16 bit word(s) from the EEPROM using the hostif.
 **/
int wx_read_ee_hostif_buffer(struct wx *wx,
			     u16 offset, u16 words, u16 *data)
{
	struct wx_hic_read_shadow_ram buffer;
	u32 current_word = 0;
	u16 words_to_read;
	u32 value = 0;
	int status;
	u32 mbox;
	u32 i;

	/* Take semaphore for the entire operation. */
	status = wx_acquire_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	if (status != 0)
		return status;

	while (words) {
		if (words > FW_MAX_READ_BUFFER_SIZE / 2)
			words_to_read = FW_MAX_READ_BUFFER_SIZE / 2;
		else
			words_to_read = words;

		buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD;
		buffer.hdr.req.buf_lenh = 0;
		buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN;
		buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM;

		/* convert offset from words to bytes */
		buffer.address = (__force u32)cpu_to_be32((offset + current_word) * 2);
		buffer.length = (__force u16)cpu_to_be16(words_to_read * 2);

		status = wx_host_interface_command(wx, (u32 *)&buffer,
						   sizeof(buffer),
						   WX_HI_COMMAND_TIMEOUT,
						   false);

		if (status != 0) {
			wx_err(wx, "Host interface command failed\n");
			goto out;
		}

		if (!test_bit(WX_FLAG_SWFW_RING, wx->flags))
			mbox = WX_MNG_MBOX;
		else
			mbox = WX_FW2SW_MBOX;
		for (i = 0; i < words_to_read; i++) {
			u32 reg = mbox + (FW_NVM_DATA_OFFSET << 2) + 2 * i;

			value = rd32(wx, reg);
			data[current_word] = (u16)(value & 0xffff);
			current_word++;
			i++;
			if (i < words_to_read) {
				value >>= 16;
				data[current_word] = (u16)(value & 0xffff);
				current_word++;
			}
		}
		words -= words_to_read;
	}

out:
	wx_release_sw_sync(wx, WX_MNG_SWFW_SYNC_SW_FLASH);
	return status;
}
EXPORT_SYMBOL(wx_read_ee_hostif_buffer);

/**
 * wx_init_eeprom_params - Initialize EEPROM params
 * @wx: pointer to hardware structure
 *
 * Initializes the EEPROM parameters wx_eeprom_info within the
 * wx_hw struct in order to set up EEPROM access.
 **/
void wx_init_eeprom_params(struct wx *wx)
{
	struct wx_eeprom_info *eeprom = &wx->eeprom;
	u16 eeprom_size;
	u16 data = 0x80;

	if (eeprom->type == wx_eeprom_uninitialized) {
		eeprom->semaphore_delay = 10;
		eeprom->type = wx_eeprom_none;

		if (!(rd32(wx, WX_SPI_STATUS) &
		      WX_SPI_STATUS_FLASH_BYPASS)) {
			eeprom->type = wx_flash;

			eeprom_size = 4096;
			eeprom->word_size = eeprom_size >> 1;

			wx_dbg(wx, "Eeprom params: type = %d, size = %d\n",
			       eeprom->type, eeprom->word_size);
		}
	}

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		if (wx_read_ee_hostif(wx, WX_SW_REGION_PTR, &data)) {
			wx_err(wx, "NVM Read Error\n");
			return;
		}
		data = data >> 1;
		break;
	default:
		break;
	}

	eeprom->sw_region_offset = data;
}
EXPORT_SYMBOL(wx_init_eeprom_params);

/**
 * wx_get_mac_addr - Generic get MAC address
 * @wx: pointer to hardware structure
 * @mac_addr: Adapter MAC address
 *
 * Reads the adapter's MAC address from the first Receive Address Register
 * (RAR0). A reset of the adapter must be performed prior to calling this
 * function in order for the MAC address to have been loaded from the EEPROM
 * into RAR0.
 **/
void wx_get_mac_addr(struct wx *wx, u8 *mac_addr)
{
	u32 rar_high;
	u32 rar_low;
	u16 i;

	wr32(wx, WX_PSR_MAC_SWC_IDX, 0);
	rar_high = rd32(wx, WX_PSR_MAC_SWC_AD_H);
	rar_low = rd32(wx, WX_PSR_MAC_SWC_AD_L);

	for (i = 0; i < 2; i++)
		mac_addr[i] = (u8)(rar_high >> (1 - i) * 8);

	for (i = 0; i < 4; i++)
		mac_addr[i + 2] = (u8)(rar_low >> (3 - i) * 8);
}
EXPORT_SYMBOL(wx_get_mac_addr);

/**
 * wx_set_rar - Set Rx address register
 * @wx: pointer to hardware structure
 * @index: Receive address register to write
 * @addr: Address to put into receive address register
 * @pools: VMDq "set" or "pool" index
 * @enable_addr: set flag that address is active
 *
 * Puts an ethernet address into a receive address register.
 **/
static int wx_set_rar(struct wx *wx, u32 index, u8 *addr, u64 pools,
		      u32 enable_addr)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 rar_low, rar_high;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* select the MAC address */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	/* setup VMDq pool mapping */
	wr32(wx, WX_PSR_MAC_SWC_VM_L, pools & 0xFFFFFFFF);

	switch (wx->mac.type) {
	case wx_mac_sp:
	case wx_mac_aml:
		wr32(wx, WX_PSR_MAC_SWC_VM_H, pools >> 32);
		break;
	default:
		break;
	}

	/* HW expects these in little endian so we reverse the byte
	 * order from network order (big endian) to little endian
	 *
	 * Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	rar_low = ((u32)addr[5] |
		   ((u32)addr[4] << 8) |
		   ((u32)addr[3] << 16) |
		   ((u32)addr[2] << 24));
	rar_high = ((u32)addr[1] |
		    ((u32)addr[0] << 8));
	if (enable_addr != 0)
		rar_high |= WX_PSR_MAC_SWC_AD_H_AV;

	wr32(wx, WX_PSR_MAC_SWC_AD_L, rar_low);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      rar_high);

	return 0;
}

/**
 * wx_clear_rar - Remove Rx address register
 * @wx: pointer to hardware structure
 * @index: Receive address register to write
 *
 * Clears an ethernet address from a receive address register.
 **/
static int wx_clear_rar(struct wx *wx, u32 index)
{
	u32 rar_entries = wx->mac.num_rar_entries;

	/* Make sure we are using a valid rar index range */
	if (index >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", index);
		return -EINVAL;
	}

	/* Some parts put the VMDq setting in the extra RAH bits,
	 * so save everything except the lower 16 bits that hold part
	 * of the address and the address valid bit.
	 */
	wr32(wx, WX_PSR_MAC_SWC_IDX, index);

	wr32(wx, WX_PSR_MAC_SWC_VM_L, 0);
	wr32(wx, WX_PSR_MAC_SWC_VM_H, 0);

	wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
	wr32m(wx, WX_PSR_MAC_SWC_AD_H,
	      (WX_PSR_MAC_SWC_AD_H_AD(U16_MAX) |
	       WX_PSR_MAC_SWC_AD_H_ADTYPE(1) |
	       WX_PSR_MAC_SWC_AD_H_AV),
	      0);

	return 0;
}

/**
 * wx_clear_vmdq - Disassociate a VMDq pool index from a rx address
 * @wx: pointer to hardware struct
 * @rar: receive address register index to disassociate
 * @vmdq: VMDq pool index to remove from the rar
 **/
static int wx_clear_vmdq(struct wx *wx, u32 rar, u32 __maybe_unused vmdq)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 mpsar_lo, mpsar_hi;

	/* Make sure we are using a valid rar index range */
	if (rar >= rar_entries) {
		wx_err(wx, "RAR index %d is out of range.\n", rar);
		return -EINVAL;
	}

	wr32(wx, WX_PSR_MAC_SWC_IDX, rar);
	mpsar_lo = rd32(wx, WX_PSR_MAC_SWC_VM_L);
	mpsar_hi = rd32(wx, WX_PSR_MAC_SWC_VM_H);

	if (!mpsar_lo && !mpsar_hi)
		return 0;

	/* was that the last pool using this rar? */
	if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
		wx_clear_rar(wx, rar);

	return 0;
}

/**
 * wx_init_uta_tables - Initialize the Unicast Table Array
 * @wx: pointer to hardware structure
 **/
static void wx_init_uta_tables(struct wx *wx)
{
	int i;

	wx_dbg(wx, " Clearing UTA\n");

	for (i = 0; i < 128; i++)
		wr32(wx, WX_PSR_UC_TBL(i), 0);
}

/**
 * wx_init_rx_addrs - Initializes receive address filters.
 * @wx: pointer to hardware structure
 *
 * Places the MAC address in receive address register 0 and clears the rest
 * of the receive address registers. Clears the multicast table. Assumes
 * the receiver is in reset when the routine is called.
 **/
void wx_init_rx_addrs(struct wx *wx)
{
	u32 rar_entries = wx->mac.num_rar_entries;
	u32 psrctl;
	int i;

	/* If the current mac address is valid, assume it is a software override
	 * to the permanent address.
	 * Otherwise, use the permanent address from the eeprom.
	 */
	if (!is_valid_ether_addr(wx->mac.addr)) {
		/* Get the MAC address from RAR0 for later reference */
		wx_get_mac_addr(wx, wx->mac.addr);
		wx_dbg(wx, "Keeping Current RAR0 Addr = %pM\n", wx->mac.addr);
	} else {
		/* Setup the receive address. */
		wx_dbg(wx, "Overriding MAC Address in RAR[0]\n");
		wx_dbg(wx, "New MAC Addr = %pM\n", wx->mac.addr);

		wx_set_rar(wx, 0, wx->mac.addr, 0, WX_PSR_MAC_SWC_AD_H_AV);

		switch (wx->mac.type) {
		case wx_mac_sp:
		case wx_mac_aml:
			/* clear VMDq pool/queue selection for RAR 0 */
			wx_clear_vmdq(wx, 0, WX_CLEAR_VMDQ_ALL);
			break;
		default:
			break;
		}
	}

	/* Zero out the other receive addresses. */
	wx_dbg(wx, "Clearing RAR[1-%d]\n", rar_entries - 1);
	for (i = 1; i < rar_entries; i++) {
		wr32(wx, WX_PSR_MAC_SWC_IDX, i);
		wr32(wx, WX_PSR_MAC_SWC_AD_L, 0);
		wr32(wx, WX_PSR_MAC_SWC_AD_H, 0);
	}

	/* Clear the MTA */
	wx->addr_ctrl.mta_in_use = 0;
	psrctl = rd32(wx, WX_PSR_CTL);
	psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
	psrctl |= wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT;
	wr32(wx, WX_PSR_CTL, psrctl);
	wx_dbg(wx, " Clearing MTA\n");
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32(wx, WX_PSR_MC_TBL(i), 0);

	wx_init_uta_tables(wx);
}
EXPORT_SYMBOL(wx_init_rx_addrs);

static void wx_sync_mac_table(struct wx *wx)
{
	int i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_MODIFIED) {
			if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
				wx_set_rar(wx, i,
					   wx->mac_table[i].addr,
					   wx->mac_table[i].pools,
					   WX_PSR_MAC_SWC_AD_H_AV);
			} else {
				wx_clear_rar(wx, i);
			}
			wx->mac_table[i].state &= ~(WX_MAC_STATE_MODIFIED);
		}
	}
}

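/* Software MAC table model: entries marked WX_MAC_STATE_MODIFIED are
 * pushed to hardware by wx_sync_mac_table() above - written to a RAR if
 * also WX_MAC_STATE_IN_USE, cleared otherwise.
 */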
/* this function destroys the first RAR entry */
void wx_mac_set_default_filter(struct wx *wx, u8 *addr)
{
	memcpy(&wx->mac_table[0].addr, addr, ETH_ALEN);
	wx->mac_table[0].pools = 1ULL;
	wx->mac_table[0].state = (WX_MAC_STATE_DEFAULT | WX_MAC_STATE_IN_USE);
	wx_set_rar(wx, 0, wx->mac_table[0].addr,
		   wx->mac_table[0].pools,
		   WX_PSR_MAC_SWC_AD_H_AV);
}
EXPORT_SYMBOL(wx_mac_set_default_filter);

void wx_flush_sw_mac_table(struct wx *wx)
{
	u32 i;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!(wx->mac_table[i].state & WX_MAC_STATE_IN_USE))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
		memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		wx->mac_table[i].pools = 0;
	}
	wx_sync_mac_table(wx);
}
EXPORT_SYMBOL(wx_flush_sw_mac_table);

static int wx_add_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE) {
			if (ether_addr_equal(addr, wx->mac_table[i].addr)) {
				if (wx->mac_table[i].pools != (1ULL << pool)) {
					memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
					wx->mac_table[i].pools |= (1ULL << pool);
					wx_sync_mac_table(wx);
					return i;
				}
			}
		}

		if (wx->mac_table[i].state & WX_MAC_STATE_IN_USE)
			continue;
		wx->mac_table[i].state |= (WX_MAC_STATE_MODIFIED |
					   WX_MAC_STATE_IN_USE);
		memcpy(wx->mac_table[i].addr, addr, ETH_ALEN);
		wx->mac_table[i].pools |= (1ULL << pool);
		wx_sync_mac_table(wx);
		return i;
	}
	return -ENOMEM;
}

static int wx_del_mac_filter(struct wx *wx, u8 *addr, u16 pool)
{
	u32 i;

	if (is_zero_ether_addr(addr))
		return -EINVAL;

	/* search table for addr, if found, set to 0 and sync */
	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (!ether_addr_equal(addr, wx->mac_table[i].addr))
			continue;

		wx->mac_table[i].state |= WX_MAC_STATE_MODIFIED;
		wx->mac_table[i].pools &= ~(1ULL << pool);
		if (!wx->mac_table[i].pools) {
			wx->mac_table[i].state &= ~WX_MAC_STATE_IN_USE;
			memset(wx->mac_table[i].addr, 0, ETH_ALEN);
		}
		wx_sync_mac_table(wx);
		return 0;
	}
	return -ENOMEM;
}

static int wx_available_rars(struct wx *wx)
{
	u32 i, count = 0;

	for (i = 0; i < wx->mac.num_rar_entries; i++) {
		if (wx->mac_table[i].state == 0)
			count++;
	}

	return count;
}

/**
 * wx_write_uc_addr_list - write unicast addresses to RAR table
 * @netdev: network interface device structure
 * @pool: index for mac table
 *
 * Writes unicast address list to the RAR table.
 * Returns: -ENOMEM on failure/insufficient address space
 *          0 on no addresses written
 *          X on writing X addresses to the RAR table
 **/
static int wx_write_uc_addr_list(struct net_device *netdev, int pool)
{
	struct wx *wx = netdev_priv(netdev);
	int count = 0;

	/* return ENOMEM indicating insufficient memory for addresses */
	if (netdev_uc_count(netdev) > wx_available_rars(wx))
		return -ENOMEM;

	if (!netdev_uc_empty(netdev)) {
		struct netdev_hw_addr *ha;

		netdev_for_each_uc_addr(ha, netdev) {
			wx_del_mac_filter(wx, ha->addr, pool);
			wx_add_mac_filter(wx, ha->addr, pool);
			count++;
		}
	}
	return count;
}

/**
 * wx_mta_vector - Determines bit-vector in multicast table to set
 * @wx: pointer to private structure
 * @mc_addr: the multicast address
 *
 * Extracts the 12 bits, from a multicast address, to determine which
 * bit-vector to set in the multicast table. The hardware uses 12 bits, from
 * incoming rx multicast addresses, to determine the bit-vector to check in
 * the MTA. Which of the 4 combinations of 12 bits the hardware uses is set
 * by the MO field of the MCSTCTRL. The MO field is set during initialization
 * to mc_filter_type.
 **/
static u32 wx_mta_vector(struct wx *wx, u8 *mc_addr)
{
	u32 vector = 0;

	switch (wx->mac.mc_filter_type) {
	case 0: /* use bits [47:36] of the address */
		vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4));
		break;
	case 1: /* use bits [46:35] of the address */
		vector = ((mc_addr[4] >> 3) | (((u16)mc_addr[5]) << 5));
		break;
	case 2: /* use bits [45:34] of the address */
		vector = ((mc_addr[4] >> 2) | (((u16)mc_addr[5]) << 6));
		break;
	case 3: /* use bits [43:32] of the address */
		vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8));
		break;
	default: /* Invalid mc_filter_type */
		wx_err(wx, "MC filter type param set incorrectly\n");
		break;
	}

	/* vector can only be 12-bits or boundary will be exceeded */
	vector &= 0xFFF;
	return vector;
}

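/* Worked example for filter type 0 (bits [47:36]): for the address
 * 01:00:5e:00:00:01, mc_addr[4] = 0x00 and mc_addr[5] = 0x01, so
 * vector = (0x00 >> 4) | (0x01 << 4) = 0x010; wx_set_mta() below then
 * sets bit 16 (0x010 & 0x1F) of MTA register 0 (0x010 >> 5).
 */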
/**
 * wx_set_mta - Set bit-vector in multicast table
 * @wx: pointer to private structure
 * @mc_addr: Multicast address
 *
 * Sets the bit-vector in the multicast table.
 **/
static void wx_set_mta(struct wx *wx, u8 *mc_addr)
{
	u32 vector, vector_bit, vector_reg;

	wx->addr_ctrl.mta_in_use++;

	vector = wx_mta_vector(wx, mc_addr);
	wx_dbg(wx, " bit-vector = 0x%03X\n", vector);

	/* The MTA is a register array of 128 32-bit registers. It is treated
	 * like an array of 4096 bits. We want to set bit
	 * BitArray[vector_value]. So we figure out what register the bit is
	 * in, read it, OR in the new bit, then write back the new value. The
	 * register is determined by the upper 7 bits of the vector value and
	 * the bit within that register is determined by the lower 5 bits of
	 * the value.
	 */
	vector_reg = (vector >> 5) & 0x7F;
	vector_bit = vector & 0x1F;
	wx->mac.mta_shadow[vector_reg] |= (1 << vector_bit);
}

/**
 * wx_update_mc_addr_list - Updates MAC list of multicast addresses
 * @wx: pointer to private structure
 * @netdev: pointer to net device structure
 *
 * The given list replaces any existing list. Clears the MC addrs from receive
 * address registers and the multicast table. Uses unused receive address
 * registers for the first multicast addresses, and hashes the rest into the
 * multicast table.
 **/
static void wx_update_mc_addr_list(struct wx *wx, struct net_device *netdev)
{
	struct netdev_hw_addr *ha;
	u32 i, psrctl;

	/* Set the new number of MC addresses that we are being requested to
	 * use.
	 */
	wx->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev);
	wx->addr_ctrl.mta_in_use = 0;

	/* Clear mta_shadow */
	wx_dbg(wx, " Clearing MTA\n");
	memset(&wx->mac.mta_shadow, 0, sizeof(wx->mac.mta_shadow));

	/* Update mta_shadow */
	netdev_for_each_mc_addr(ha, netdev) {
		wx_dbg(wx, " Adding the multicast addresses:\n");
		wx_set_mta(wx, ha->addr);
	}

	/* Enable mta */
	for (i = 0; i < wx->mac.mcft_size; i++)
		wr32a(wx, WX_PSR_MC_TBL(0), i,
		      wx->mac.mta_shadow[i]);

	if (wx->addr_ctrl.mta_in_use > 0) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl &= ~(WX_PSR_CTL_MO | WX_PSR_CTL_MFE);
		psrctl |= WX_PSR_CTL_MFE |
			  (wx->mac.mc_filter_type << WX_PSR_CTL_MO_SHIFT);
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_dbg(wx, "Update mc addr list Complete\n");
}

/**
 * wx_write_mc_addr_list - write multicast addresses to MTA
 * @netdev: network interface device structure
 *
 * Writes multicast address list to the MTA hash table.
 * Returns: 0 on no addresses written
 *          X on writing X addresses to MTA
 **/
static int wx_write_mc_addr_list(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);

	if (!netif_running(netdev))
		return 0;

	wx_update_mc_addr_list(wx, netdev);

	return netdev_mc_count(netdev);
}

/**
 * wx_set_mac - Change the Ethernet Address of the NIC
 * @netdev: network interface device structure
 * @p: pointer to an address structure
 *
 * Returns 0 on success, negative on failure
 **/
int wx_set_mac(struct net_device *netdev, void *p)
{
	struct wx *wx = netdev_priv(netdev);
	struct sockaddr *addr = p;
	int retval;

	retval = eth_prepare_mac_addr_change(netdev, addr);
	if (retval)
		return retval;

	wx_del_mac_filter(wx, wx->mac.addr, 0);
	eth_hw_addr_set(netdev, addr->sa_data);
	memcpy(wx->mac.addr, addr->sa_data, netdev->addr_len);

	wx_mac_set_default_filter(wx, wx->mac.addr);

	return 0;
}
EXPORT_SYMBOL(wx_set_mac);

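/* Stop the Rx data path: drop WX_RDB_PB_CTL_RXEN, remembering (and
 * clearing) PSR_CTL.SW_EN so wx_enable_rx() can restore it, and disable
 * the MAC receiver unless NC-SI or WoL still needs it.
 */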
void wx_disable_rx(struct wx *wx)
{
	u32 pfdtxgswc;
	u32 rxctrl;

	rxctrl = rd32(wx, WX_RDB_PB_CTL);
	if (rxctrl & WX_RDB_PB_CTL_RXEN) {
		pfdtxgswc = rd32(wx, WX_PSR_CTL);
		if (pfdtxgswc & WX_PSR_CTL_SW_EN) {
			pfdtxgswc &= ~WX_PSR_CTL_SW_EN;
			wr32(wx, WX_PSR_CTL, pfdtxgswc);
			wx->mac.set_lben = true;
		} else {
			wx->mac.set_lben = false;
		}
		rxctrl &= ~WX_RDB_PB_CTL_RXEN;
		wr32(wx, WX_RDB_PB_CTL, rxctrl);

		if (!(((wx->subsystem_device_id & WX_NCSI_MASK) == WX_NCSI_SUP) ||
		      ((wx->subsystem_device_id & WX_WOL_MASK) == WX_WOL_SUP))) {
			/* disable mac receiver */
			wr32m(wx, WX_MAC_RX_CFG,
			      WX_MAC_RX_CFG_RE, 0);
		}
	}
}
EXPORT_SYMBOL(wx_disable_rx);

static void wx_enable_rx(struct wx *wx)
{
	u32 psrctl;

	/* enable mac receiver */
	wr32m(wx, WX_MAC_RX_CFG,
	      WX_MAC_RX_CFG_RE, WX_MAC_RX_CFG_RE);

	wr32m(wx, WX_RDB_PB_CTL,
	      WX_RDB_PB_CTL_RXEN, WX_RDB_PB_CTL_RXEN);

	if (wx->mac.set_lben) {
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_SW_EN;
		wr32(wx, WX_PSR_CTL, psrctl);
		wx->mac.set_lben = false;
	}
}

/**
 * wx_set_rxpba - Initialize Rx packet buffer
 * @wx: pointer to private structure
 **/
static void wx_set_rxpba(struct wx *wx)
{
	u32 rxpktsize, txpktsize, txpbthresh;
	u32 pbsize = wx->mac.rx_pb_size;

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		if (test_bit(WX_FLAG_FDIR_HASH, wx->flags) ||
		    test_bit(WX_FLAG_FDIR_PERFECT, wx->flags))
			pbsize -= 64; /* Default 64KB */
	}

	rxpktsize = pbsize << WX_RDB_PB_SZ_SHIFT;
	wr32(wx, WX_RDB_PB_SZ(0), rxpktsize);

	/* Only support an equally distributed Tx packet buffer strategy. */
	txpktsize = wx->mac.tx_pb_size;
	txpbthresh = (txpktsize / 1024) - WX_TXPKT_SIZE_MAX;
	wr32(wx, WX_TDB_PB_SZ(0), txpktsize);
	wr32(wx, WX_TDM_PB_THRE(0), txpbthresh);
}

#define WX_ETH_FRAMING 20

/**
 * wx_hpbthresh - calculate high water mark for flow control
 *
 * @wx: board private structure to calculate for
 **/
static int wx_hpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	int link, tc, kb, marker;
	u32 dv_id, rx_pba;

	/* Calculate max LAN frame size */
	link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + WX_ETH_FRAMING;
	tc = link;

	/* Calculate delay value for device */
	dv_id = WX_DV(link, tc);

	/* Delay value is calculated in bit times, convert to KB */
	kb = WX_BT2KB(dv_id);
	rx_pba = rd32(wx, WX_RDB_PB_SZ(0)) >> WX_RDB_PB_SZ_SHIFT;

	marker = rx_pba - kb;

	/* It is possible that the packet buffer is not large enough
	 * to provide required headroom. In this case warn the user
	 * and do the best we can.
	 */
	if (marker < 0) {
		dev_warn(&wx->pdev->dev,
			 "Packet Buffer can not provide enough headroom to support flow control. Decrease MTU or number of traffic classes\n");
		marker = tc + 1;
	}

	return marker;
}

/**
 * wx_lpbthresh - calculate low water mark for flow control
 *
 * @wx: board private structure to calculate for
 **/
static int wx_lpbthresh(struct wx *wx)
{
	struct net_device *dev = wx->netdev;
	u32 dv_id;
	int tc;

	/* Calculate max LAN frame size */
	tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN;

	/* Calculate delay value for device */
	dv_id = WX_LOW_DV(tc);

	/* Delay value is calculated in bit times, convert to KB */
	return WX_BT2KB(dv_id);
}

/**
 * wx_pbthresh_setup - calculate and set up high and low water marks
 *
 * @wx: board private structure to calculate for
 **/
static void wx_pbthresh_setup(struct wx *wx)
{
	wx->fc.high_water = wx_hpbthresh(wx);
	wx->fc.low_water = wx_lpbthresh(wx);

	/* Low water marks must not be larger than high water marks */
	if (wx->fc.low_water > wx->fc.high_water)
		wx->fc.low_water = 0;
}

static void wx_configure_port(struct wx *wx)
{
	u32 value, i;

	value = WX_CFG_PORT_CTL_D_VLAN | WX_CFG_PORT_CTL_QINQ;
	wr32m(wx, WX_CFG_PORT_CTL,
	      WX_CFG_PORT_CTL_D_VLAN |
	      WX_CFG_PORT_CTL_QINQ,
	      value);

	wr32(wx, WX_CFG_TAG_TPID(0),
	     ETH_P_8021Q | ETH_P_8021AD << 16);
	wx->tpid[0] = ETH_P_8021Q;
	wx->tpid[1] = ETH_P_8021AD;
	for (i = 1; i < 4; i++)
		wr32(wx, WX_CFG_TAG_TPID(i),
		     ETH_P_8021Q | ETH_P_8021Q << 16);
	for (i = 2; i < 8; i++)
		wx->tpid[i] = ETH_P_8021Q;
}

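/* Each WX_CFG_TAG_TPID register packs two 16-bit TPIDs, which is why the
 * loop above writes ETH_P_8021Q into both halves for entries 1-3 while
 * entry 0 carries 802.1Q in the low half and 802.1AD in the high half.
 */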
/**
 * wx_disable_sec_rx_path - Stops the receive data path
 * @wx: pointer to private structure
 *
 * Stops the receive data path and waits for the HW to internally empty
 * the Rx security block.
 **/
int wx_disable_sec_rx_path(struct wx *wx)
{
	u32 secrx;

	wr32m(wx, WX_RSC_CTL,
	      WX_RSC_CTL_RX_DIS, WX_RSC_CTL_RX_DIS);

	return read_poll_timeout(rd32, secrx, secrx & WX_RSC_ST_RSEC_RDY,
				 1000, 40000, false, wx, WX_RSC_ST);
}
EXPORT_SYMBOL(wx_disable_sec_rx_path);

/**
 * wx_enable_sec_rx_path - Enables the receive data path
 * @wx: pointer to private structure
 *
 * Enables the receive data path.
 **/
void wx_enable_sec_rx_path(struct wx *wx)
{
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_RX_DIS, 0);
	WX_WRITE_FLUSH(wx);
}
EXPORT_SYMBOL(wx_enable_sec_rx_path);

static void wx_vlan_strip_control(struct wx *wx, bool enable)
{
	int i, j;

	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *ring = wx->rx_ring[i];

		j = ring->reg_idx;
		wr32m(wx, WX_PX_RR_CFG(j), WX_PX_RR_CFG_VLAN,
		      enable ? WX_PX_RR_CFG_VLAN : 0);
	}
}

void wx_set_rx_mode(struct net_device *netdev)
{
	struct wx *wx = netdev_priv(netdev);
	netdev_features_t features;
	u32 fctrl, vmolr, vlnctrl;
	int count;

	features = netdev->features;

	/* Check for Promiscuous and All Multicast modes */
	fctrl = rd32(wx, WX_PSR_CTL);
	fctrl &= ~(WX_PSR_CTL_UPE | WX_PSR_CTL_MPE);
	vmolr = rd32(wx, WX_PSR_VM_L2CTL(0));
	vmolr &= ~(WX_PSR_VM_L2CTL_UPE |
		   WX_PSR_VM_L2CTL_MPE |
		   WX_PSR_VM_L2CTL_ROPE |
		   WX_PSR_VM_L2CTL_ROMPE);
	vlnctrl = rd32(wx, WX_PSR_VLAN_CTL);
	vlnctrl &= ~(WX_PSR_VLAN_CTL_VFE | WX_PSR_VLAN_CTL_CFIEN);

	/* set all bits that we expect to always be set */
	fctrl |= WX_PSR_CTL_BAM | WX_PSR_CTL_MFE;
	vmolr |= WX_PSR_VM_L2CTL_BAM |
		 WX_PSR_VM_L2CTL_AUPE |
		 WX_PSR_VM_L2CTL_VACC;
	vlnctrl |= WX_PSR_VLAN_CTL_VFE;

	wx->addr_ctrl.user_set_promisc = false;
	if (netdev->flags & IFF_PROMISC) {
		wx->addr_ctrl.user_set_promisc = true;
		fctrl |= WX_PSR_CTL_UPE | WX_PSR_CTL_MPE;
		/* the PF doesn't want packets routed to the VFs, so clear UPE */
		vmolr |= WX_PSR_VM_L2CTL_MPE;
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
	}

	if (netdev->flags & IFF_ALLMULTI) {
		fctrl |= WX_PSR_CTL_MPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	if (netdev->features & NETIF_F_RXALL) {
		vmolr |= (WX_PSR_VM_L2CTL_UPE | WX_PSR_VM_L2CTL_MPE);
		vlnctrl &= ~WX_PSR_VLAN_CTL_VFE;
		/* receive bad packets */
		wr32m(wx, WX_RSC_CTL,
		      WX_RSC_CTL_SAVE_MAC_ERR,
		      WX_RSC_CTL_SAVE_MAC_ERR);
	} else {
		vmolr |= WX_PSR_VM_L2CTL_ROPE | WX_PSR_VM_L2CTL_ROMPE;
	}

	/* Write addresses to available RAR registers; if there is not
	 * sufficient space to store all the addresses then enable
	 * unicast promiscuous mode
	 */
	count = wx_write_uc_addr_list(netdev, 0);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROPE;
		vmolr |= WX_PSR_VM_L2CTL_UPE;
	}

	/* Write addresses to the MTA; if the attempt fails
	 * then we should just turn on promiscuous mode so
	 * that we can at least receive multicast traffic
	 */
	count = wx_write_mc_addr_list(netdev);
	if (count < 0) {
		vmolr &= ~WX_PSR_VM_L2CTL_ROMPE;
		vmolr |= WX_PSR_VM_L2CTL_MPE;
	}

	wr32(wx, WX_PSR_VLAN_CTL, vlnctrl);
	wr32(wx, WX_PSR_CTL, fctrl);
	wr32(wx, WX_PSR_VM_L2CTL(0), vmolr);

	if ((features & NETIF_F_HW_VLAN_CTAG_RX) &&
	    (features & NETIF_F_HW_VLAN_STAG_RX))
		wx_vlan_strip_control(wx, true);
	else
		wx_vlan_strip_control(wx, false);
}
EXPORT_SYMBOL(wx_set_rx_mode);

static void wx_set_rx_buffer_len(struct wx *wx)
{
	struct net_device *netdev = wx->netdev;
	u32 mhadd, max_frame;

	max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
	/* adjust max frame to be at least the size of a standard frame */
	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);

	mhadd = rd32(wx, WX_PSR_MAX_SZ);
	if (max_frame != mhadd)
		wr32(wx, WX_PSR_MAX_SZ, max_frame);
}

/**
 * wx_change_mtu - Change the Maximum Transfer Unit
 * @netdev: network interface device structure
 * @new_mtu: new value for maximum frame size
 *
 * Returns 0 on success, negative on failure
 **/
int wx_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct wx *wx = netdev_priv(netdev);

	WRITE_ONCE(netdev->mtu, new_mtu);
	wx_set_rx_buffer_len(wx);

	return 0;
}
EXPORT_SYMBOL(wx_change_mtu);

/* Disable the specified rx queue */
void wx_disable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	/* write value back with RRCFG.EN bit cleared */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, 0);

	/* the hardware may take up to 100us to really disable the rx queue */
	ret = read_poll_timeout(rd32, rxdctl, !(rxdctl & WX_PX_RR_CFG_RR_EN),
				10, 100, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not cleared within the polling period\n",
		       reg_idx);
	}
}
EXPORT_SYMBOL(wx_disable_rx_queue);

static void wx_enable_rx_queue(struct wx *wx, struct wx_ring *ring)
{
	u8 reg_idx = ring->reg_idx;
	u32 rxdctl;
	int ret;

	ret = read_poll_timeout(rd32, rxdctl, rxdctl & WX_PX_RR_CFG_RR_EN,
				1000, 10000, true, wx, WX_PX_RR_CFG(reg_idx));

	if (ret == -ETIMEDOUT) {
		/* Just for information */
		wx_err(wx,
		       "RRCFG.EN on Rx queue %d not set within the polling period\n",
		       reg_idx);
	}
}

static void wx_configure_srrctl(struct wx *wx,
				struct wx_ring *rx_ring)
{
	u16 reg_idx = rx_ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl &= ~(WX_PX_RR_CFG_RR_HDR_SZ |
		    WX_PX_RR_CFG_RR_BUF_SZ |
		    WX_PX_RR_CFG_SPLIT_MODE);
	/* configure header buffer length, needed for RSC */
	srrctl |= WX_RXBUFFER_256 << WX_PX_RR_CFG_BHDRSIZE_SHIFT;

	/* configure the packet buffer length */
	srrctl |= WX_RX_BUFSZ >> WX_PX_RR_CFG_BSIZEPKT_SHIFT;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}

static void wx_configure_tx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u32 txdctl = WX_PX_TR_CFG_ENABLE;
	u8 reg_idx = ring->reg_idx;
	u64 tdba = ring->dma;
	int ret;

	/* disable queue to avoid issues while updating state */
	wr32(wx, WX_PX_TR_CFG(reg_idx), WX_PX_TR_CFG_SWFLSH);
	WX_WRITE_FLUSH(wx);

	wr32(wx, WX_PX_TR_BAL(reg_idx), tdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_TR_BAH(reg_idx), upper_32_bits(tdba));

	/* reset head and tail pointers */
	wr32(wx, WX_PX_TR_RP(reg_idx), 0);
	wr32(wx, WX_PX_TR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_TR_WP(reg_idx);

	if (ring->count < WX_MAX_TXD)
		txdctl |= ring->count / 128 << WX_PX_TR_CFG_TR_SIZE_SHIFT;
	txdctl |= 0x20 << WX_PX_TR_CFG_WTHRESH_SHIFT;

	ring->atr_count = 0;
	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags) &&
	    test_bit(WX_FLAG_FDIR_HASH, wx->flags))
		ring->atr_sample_rate = wx->atr_sample_rate;
	else
		ring->atr_sample_rate = 0;

	/* reinitialize tx_buffer_info */
	memset(ring->tx_buffer_info, 0,
	       sizeof(struct wx_tx_buffer) * ring->count);

	/* enable queue */
	wr32(wx, WX_PX_TR_CFG(reg_idx), txdctl);

	/* poll to verify queue is enabled */
	ret = read_poll_timeout(rd32, txdctl, txdctl & WX_PX_TR_CFG_ENABLE,
				1000, 10000, true, wx, WX_PX_TR_CFG(reg_idx));
	if (ret == -ETIMEDOUT)
		wx_err(wx, "Could not enable Tx Queue %d\n", reg_idx);
}

static void wx_configure_rx_ring(struct wx *wx,
				 struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	union wx_rx_desc *rx_desc;
	u64 rdba = ring->dma;
	u32 rxdctl;

	/* disable queue to avoid issues while updating state */
	rxdctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	wx_disable_rx_queue(wx, ring);

	wr32(wx, WX_PX_RR_BAL(reg_idx), rdba & DMA_BIT_MASK(32));
	wr32(wx, WX_PX_RR_BAH(reg_idx), upper_32_bits(rdba));

	if (ring->count == WX_MAX_RXD)
		rxdctl |= 0 << WX_PX_RR_CFG_RR_SIZE_SHIFT;
	else
		rxdctl |= (ring->count / 128) << WX_PX_RR_CFG_RR_SIZE_SHIFT;

	rxdctl |= 0x1 << WX_PX_RR_CFG_RR_THER_SHIFT;
	wr32(wx, WX_PX_RR_CFG(reg_idx), rxdctl);

	/* reset head and tail pointers */
	wr32(wx, WX_PX_RR_RP(reg_idx), 0);
	wr32(wx, WX_PX_RR_WP(reg_idx), 0);
	ring->tail = wx->hw_addr + WX_PX_RR_WP(reg_idx);

	wx_configure_srrctl(wx, ring);

	/* initialize rx_buffer_info */
	memset(ring->rx_buffer_info, 0,
	       sizeof(struct wx_rx_buffer) * ring->count);

	/* initialize Rx descriptor 0 */
	rx_desc = WX_RX_DESC(ring, 0);
	rx_desc->wb.upper.length = 0;

	/* enable receive descriptor ring */
	wr32m(wx, WX_PX_RR_CFG(reg_idx),
	      WX_PX_RR_CFG_RR_EN, WX_PX_RR_CFG_RR_EN);

	wx_enable_rx_queue(wx, ring);
	wx_alloc_rx_buffers(ring, wx_desc_unused(ring));
}

/**
 * wx_configure_tx - Configure Transmit Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Tx unit of the MAC after a reset.
 **/
static void wx_configure_tx(struct wx *wx)
{
	u32 i;

	/* TDM_CTL.TE must be set before Tx queues are enabled */
	wr32m(wx, WX_TDM_CTL,
	      WX_TDM_CTL_TE, WX_TDM_CTL_TE);

	/* Setup the HW Tx Head and Tail descriptor pointers */
	for (i = 0; i < wx->num_tx_queues; i++)
		wx_configure_tx_ring(wx, wx->tx_ring[i]);

	wr32m(wx, WX_TSC_BUF_AE, WX_TSC_BUF_AE_THR, 0x10);

	if (wx->mac.type == wx_mac_em)
		wr32m(wx, WX_TSC_CTL, WX_TSC_CTL_TX_DIS | WX_TSC_CTL_TSEC_DIS, 0x1);

	/* enable mac transmitter */
	wr32m(wx, WX_MAC_TX_CFG,
	      WX_MAC_TX_CFG_TE, WX_MAC_TX_CFG_TE);
}

static void wx_restore_vlan(struct wx *wx)
{
	u16 vid = 1;

	wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), 0);

	for_each_set_bit_from(vid, wx->active_vlans, VLAN_N_VID)
		wx_vlan_rx_add_vid(wx->netdev, htons(ETH_P_8021Q), vid);
}

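/* RETA packing example: entry i lands in byte (i & 3) of RSSTBL register
 * (i >> 2), so entries 0-3 share one 32-bit register; e.g. indices
 * {1, 2, 3, 0} are written as the single word 0x00030201.
 */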
wx_store_reta(struct wx * wx)1782 static void wx_store_reta(struct wx *wx)
1783 {
1784 u8 *indir_tbl = wx->rss_indir_tbl;
1785 u32 reta = 0;
1786 u32 i;
1787
1788 /* Fill out the redirection table as follows:
1789 * - 8 bit wide entries containing 4 bit RSS index
1790 */
1791 for (i = 0; i < WX_MAX_RETA_ENTRIES; i++) {
1792 reta |= indir_tbl[i] << (i & 0x3) * 8;
1793 if ((i & 3) == 3) {
1794 wr32(wx, WX_RDB_RSSTBL(i >> 2), reta);
1795 reta = 0;
1796 }
1797 }
1798 }
1799
wx_setup_reta(struct wx * wx)1800 static void wx_setup_reta(struct wx *wx)
1801 {
1802 u16 rss_i = wx->ring_feature[RING_F_RSS].indices;
1803 u32 random_key_size = WX_RSS_KEY_SIZE / 4;
1804 u32 i, j;
1805
1806 /* Fill out hash function seeds */
1807 for (i = 0; i < random_key_size; i++)
1808 wr32(wx, WX_RDB_RSSRK(i), wx->rss_key[i]);
1809
1810 /* Fill out redirection table */
1811 memset(wx->rss_indir_tbl, 0, sizeof(wx->rss_indir_tbl));
1812
1813 for (i = 0, j = 0; i < WX_MAX_RETA_ENTRIES; i++, j++) {
1814 if (j == rss_i)
1815 j = 0;
1816
1817 wx->rss_indir_tbl[i] = j;
1818 }
1819
1820 wx_store_reta(wx);
1821 }
1822
static void wx_setup_mrqc(struct wx *wx)
{
	u32 rss_field = 0;

	/* Disable checksum indication in descriptors; this enables RSS hash */
	wr32m(wx, WX_PSR_CTL, WX_PSR_CTL_PCSD, WX_PSR_CTL_PCSD);

	/* Perform hash on these packet types */
	rss_field = WX_RDB_RA_CTL_RSS_IPV4 |
		    WX_RDB_RA_CTL_RSS_IPV4_TCP |
		    WX_RDB_RA_CTL_RSS_IPV4_UDP |
		    WX_RDB_RA_CTL_RSS_IPV6 |
		    WX_RDB_RA_CTL_RSS_IPV6_TCP |
		    WX_RDB_RA_CTL_RSS_IPV6_UDP;

	/* rss_key is a heap-allocated u32 array, so fill the full key size
	 * rather than sizeof() the pointer
	 */
	netdev_rss_key_fill(wx->rss_key, WX_RSS_KEY_SIZE);

	wx_setup_reta(wx);

	if (wx->rss_enabled)
		rss_field |= WX_RDB_RA_CTL_RSS_EN;

	wr32(wx, WX_RDB_RA_CTL, rss_field);
}

/**
 * wx_configure_rx - Configure Receive Unit after Reset
 * @wx: pointer to private structure
 *
 * Configure the Rx unit of the MAC after a reset.
 **/
void wx_configure_rx(struct wx *wx)
{
	u32 psrtype, i;
	int ret;

	wx_disable_rx(wx);

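	/* select which header layers (L2/L3/L4 and tunnel) the packet
	 * parser decodes for receive classification
	 */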
	psrtype = WX_RDB_PL_CFG_L4HDR |
		  WX_RDB_PL_CFG_L3HDR |
		  WX_RDB_PL_CFG_L2HDR |
		  WX_RDB_PL_CFG_TUN_TUNHDR;
	wr32(wx, WX_RDB_PL_CFG(0), psrtype);

	/* enable hw crc stripping */
	wr32m(wx, WX_RSC_CTL, WX_RSC_CTL_CRC_STRIP, WX_RSC_CTL_CRC_STRIP);

	if (test_bit(WX_FLAG_RSC_CAPABLE, wx->flags)) {
		u32 psrctl;

		/* RSC Setup */
		psrctl = rd32(wx, WX_PSR_CTL);
		psrctl |= WX_PSR_CTL_RSC_ACK; /* Disable RSC for ACK packets */
		psrctl |= WX_PSR_CTL_RSC_DIS;
		wr32(wx, WX_PSR_CTL, psrctl);
	}

	wx_setup_mrqc(wx);

	/* set_rx_buffer_len must be called before ring initialization */
	wx_set_rx_buffer_len(wx);

	/* Setup the HW Rx Head and Tail Descriptor Pointers and
	 * the Base and Length of the Rx Descriptor Ring
	 */
	for (i = 0; i < wx->num_rx_queues; i++)
		wx_configure_rx_ring(wx, wx->rx_ring[i]);

	/* Disable the security engine before enabling receives, so that it
	 * cannot block traffic
	 */
	ret = wx_disable_sec_rx_path(wx);
	if (ret < 0)
		wx_err(wx, "The register status is abnormal, please check device.\n");

	wx_enable_rx(wx);
	wx_enable_sec_rx_path(wx);
}
EXPORT_SYMBOL(wx_configure_rx);

static void wx_configure_isb(struct wx *wx)
{
	/* set ISB Address */
	wr32(wx, WX_PX_ISB_ADDR_L, wx->isb_dma & DMA_BIT_MASK(32));
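	/* the upper half is only meaningful when dma_addr_t is 64 bits wide */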
	if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
		wr32(wx, WX_PX_ISB_ADDR_H, upper_32_bits(wx->isb_dma));
}

void wx_configure(struct wx *wx)
{
	wx_set_rxpba(wx);
	wx_pbthresh_setup(wx);
	wx_configure_port(wx);

	wx_set_rx_mode(wx->netdev);
	wx_restore_vlan(wx);

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags))
		wx->configure_fdir(wx);

	wx_configure_tx(wx);
	wx_configure_rx(wx);
	wx_configure_isb(wx);
}
EXPORT_SYMBOL(wx_configure);

/**
 * wx_disable_pcie_master - Disable PCI-express master access
 * @wx: pointer to hardware structure
 *
 * Disables PCI-Express master access and verifies there are no pending
 * requests.
 **/
int wx_disable_pcie_master(struct wx *wx)
{
	int status = 0;
	u32 val;

	/* Always clear the bus-master bit to ensure any future transactions
	 * are blocked
	 */
	pci_clear_master(wx->pdev);

	/* Exit if master requests are already blocked */
	if (!(rd32(wx, WX_PX_TRANSACTION_PENDING)))
		return 0;

	/* Poll for master request bit to clear */
	status = read_poll_timeout(rd32, val, !val, 100, WX_PCI_MASTER_DISABLE_TIMEOUT,
				   false, wx, WX_PX_TRANSACTION_PENDING);
	if (status < 0)
		wx_err(wx, "PCIe transaction pending bit did not clear.\n");

	return status;
}
EXPORT_SYMBOL(wx_disable_pcie_master);

/**
 * wx_stop_adapter - Generic stop Tx/Rx units
 * @wx: pointer to hardware structure
 *
 * Sets the adapter_stopped flag within the wx structure. Clears interrupts,
 * disables transmit and receive units. The adapter_stopped flag is used by
 * the shared code and drivers to determine if the adapter is in a stopped
 * state and should not touch the hardware.
 **/
int wx_stop_adapter(struct wx *wx)
{
	u16 i;

	/* Set the adapter_stopped flag so other driver functions stop touching
	 * the hardware
	 */
	wx->adapter_stopped = true;

	/* Disable the receive unit */
	wx_disable_rx(wx);

	/* Set interrupt mask to stop interrupts from being generated */
	wx_intr_disable(wx, WX_INTR_ALL);

	/* Clear any pending interrupts, flush previous writes */
	wr32(wx, WX_PX_MISC_IC, 0xffffffff);
	wr32(wx, WX_BME_CTL, 0x3);

	/* Disable the transmit unit. Each queue must be disabled. */
	for (i = 0; i < wx->mac.max_tx_queues; i++) {
		wr32m(wx, WX_PX_TR_CFG(i),
		      WX_PX_TR_CFG_SWFLSH | WX_PX_TR_CFG_ENABLE,
		      WX_PX_TR_CFG_SWFLSH);
	}

	/* Disable the receive unit by stopping each queue */
	for (i = 0; i < wx->mac.max_rx_queues; i++) {
		wr32m(wx, WX_PX_RR_CFG(i),
		      WX_PX_RR_CFG_RR_EN, 0);
	}

	/* flush all queue disables */
	WX_WRITE_FLUSH(wx);

	/* Prevent the PCI-E bus from hanging by disabling PCI-E master
	 * access and verify no pending requests
	 */
	return wx_disable_pcie_master(wx);
}
EXPORT_SYMBOL(wx_stop_adapter);

void wx_reset_misc(struct wx *wx)
{
	int i;

	/* allow receiving packets larger than 2048 bytes */
	wr32m(wx, WX_MAC_RX_CFG, WX_MAC_RX_CFG_JE, WX_MAC_RX_CFG_JE);

	/* clear counters on read */
	wr32m(wx, WX_MMC_CONTROL,
	      WX_MMC_CONTROL_RSTONRD, WX_MMC_CONTROL_RSTONRD);

	wr32m(wx, WX_MAC_RX_FLOW_CTRL,
	      WX_MAC_RX_FLOW_CTRL_RFE, WX_MAC_RX_FLOW_CTRL_RFE);

	wr32(wx, WX_MAC_PKT_FLT, WX_MAC_PKT_FLT_PR);

	wr32m(wx, WX_MIS_RST_ST,
	      WX_MIS_RST_ST_RST_INIT, 0x1E00);

	/* errata 4: initialize mng flex tbl and wakeup flex tbl */
	wr32(wx, WX_PSR_MNG_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(wx, WX_PSR_MNG_FLEX_DW_L(i), 0);
		wr32(wx, WX_PSR_MNG_FLEX_DW_H(i), 0);
		wr32(wx, WX_PSR_MNG_FLEX_MSK(i), 0);
	}
	wr32(wx, WX_PSR_LAN_FLEX_SEL, 0);
	for (i = 0; i < 16; i++) {
		wr32(wx, WX_PSR_LAN_FLEX_DW_L(i), 0);
		wr32(wx, WX_PSR_LAN_FLEX_DW_H(i), 0);
		wr32(wx, WX_PSR_LAN_FLEX_MSK(i), 0);
	}

	/* set pause frame dst mac addr */
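	/* 01:80:C2:00:00:01 is the IEEE 802.3x MAC control pause address,
	 * split across the low and high register halves below
	 */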
	wr32(wx, WX_RDB_PFCMACDAL, 0xC2000001);
	wr32(wx, WX_RDB_PFCMACDAH, 0x0180);
}
EXPORT_SYMBOL(wx_reset_misc);

/**
 * wx_get_pcie_msix_counts - Gets MSI-X vector count
 * @wx: pointer to hardware structure
 * @msix_count: number of MSI-X interrupts that can be obtained
 * @max_msix_count: maximum number of MSI-X interrupts the MAC supports
 *
 * Read PCIe configuration space, and get the MSI-X vector count from
 * the capabilities table.
 **/
int wx_get_pcie_msix_counts(struct wx *wx, u16 *msix_count, u16 max_msix_count)
{
	struct pci_dev *pdev = wx->pdev;
	struct device *dev = &pdev->dev;
	int pos;

	*msix_count = 1;
	pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
	if (!pos) {
		dev_err(dev, "Unable to find MSI-X Capabilities\n");
		return -EINVAL;
	}
	pci_read_config_word(pdev,
			     pos + PCI_MSIX_FLAGS,
			     msix_count);
	*msix_count &= WX_PCIE_MSIX_TBL_SZ_MASK;
	/* MSI-X count is zero-based in HW */
	*msix_count += 1;
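	/* e.g. a raw table-size field of 63 reports 64 implemented vectors */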

	if (*msix_count > max_msix_count)
		*msix_count = max_msix_count;

	return 0;
}
EXPORT_SYMBOL(wx_get_pcie_msix_counts);

/**
 * wx_init_rss_key - Initialize wx RSS key
 * @wx: device handle
 *
 * Allocates and initializes the RSS key if it is not allocated.
 **/
static int wx_init_rss_key(struct wx *wx)
{
	u32 *rss_key;

	if (!wx->rss_key) {
		rss_key = kzalloc(WX_RSS_KEY_SIZE, GFP_KERNEL);
		if (unlikely(!rss_key))
			return -ENOMEM;

		netdev_rss_key_fill(rss_key, WX_RSS_KEY_SIZE);
		wx->rss_key = rss_key;
	}

	return 0;
}

int wx_sw_init(struct wx *wx)
{
	struct pci_dev *pdev = wx->pdev;
	u32 ssid = 0;
	int err = 0;

	wx->vendor_id = pdev->vendor;
	wx->device_id = pdev->device;
	wx->revision_id = pdev->revision;
	wx->oem_svid = pdev->subsystem_vendor;
	wx->oem_ssid = pdev->subsystem_device;
	wx->bus.device = PCI_SLOT(pdev->devfn);
	wx->bus.func = PCI_FUNC(pdev->devfn);

	if (wx->oem_svid == PCI_VENDOR_ID_WANGXUN) {
		wx->subsystem_vendor_id = pdev->subsystem_vendor;
		wx->subsystem_device_id = pdev->subsystem_device;
	} else {
		err = wx_flash_read_dword(wx, 0xfffdc, &ssid);
		if (err < 0) {
			wx_err(wx, "read of internal subsystem device id failed\n");
			return err;
		}

		wx->subsystem_device_id = swab16((u16)ssid);
	}

	err = wx_init_rss_key(wx);
	if (err < 0) {
		wx_err(wx, "rss key allocation failed\n");
		return err;
	}

	wx->mac_table = kcalloc(wx->mac.num_rar_entries,
				sizeof(struct wx_mac_addr),
				GFP_KERNEL);
	if (!wx->mac_table) {
		wx_err(wx, "mac_table allocation failed\n");
		kfree(wx->rss_key);
		return -ENOMEM;
	}

	bitmap_zero(wx->state, WX_STATE_NBITS);
	bitmap_zero(wx->flags, WX_PF_FLAGS_NBITS);
	wx->misc_irq_domain = false;

	return 0;
}
EXPORT_SYMBOL(wx_sw_init);

/**
 * wx_find_vlvf_slot - find the VLAN id or the first empty slot
 * @wx: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 *
 * Return the VLVF index where this VLAN id should be placed.
 **/
static int wx_find_vlvf_slot(struct wx *wx, u32 vlan)
{
	u32 bits = 0, first_empty_slot = 0;
	int regindex;

	/* short cut the special case */
	if (vlan == 0)
		return 0;

	/* Search for the vlan id in the VLVF entries. Save off the first empty
	 * slot found along the way
	 */
	for (regindex = 1; regindex < WX_PSR_VLAN_SWC_ENTRIES; regindex++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, regindex);
		bits = rd32(wx, WX_PSR_VLAN_SWC);
		if (!bits && !(first_empty_slot))
			first_empty_slot = regindex;
		else if ((bits & 0x0FFF) == vlan)
			break;
	}

	if (regindex >= WX_PSR_VLAN_SWC_ENTRIES) {
		if (first_empty_slot)
			regindex = first_empty_slot;
		else
			regindex = -ENOMEM;
	}

	return regindex;
}

/**
 * wx_set_vlvf - Set VLAN Pool Filter
 * @wx: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
 * @vfta_changed: pointer to boolean flag which indicates whether VFTA
 * should be changed
 *
 * Turn on/off specified bit in VLVF table.
 **/
static int wx_set_vlvf(struct wx *wx, u32 vlan, u32 vind, bool vlan_on,
		       bool *vfta_changed)
{
	int vlvf_index;
	u32 vt, bits;

	/* If VT Mode is set
	 * Either vlan_on
	 *   make sure the vlan is in VLVF
	 *   set the vind bit in the matching VLVFB
	 * Or !vlan_on
	 *   clear the pool bit and possibly the vind
	 */
	vt = rd32(wx, WX_CFG_PORT_CTL);
	if (!(vt & WX_CFG_PORT_CTL_NUM_VT_MASK))
		return 0;

	vlvf_index = wx_find_vlvf_slot(wx, vlan);
	if (vlvf_index < 0)
		return vlvf_index;

	wr32(wx, WX_PSR_VLAN_SWC_IDX, vlvf_index);
	if (vlan_on) {
		/* set the pool bit */
		if (vind < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits |= (1 << vind);
			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
		} else {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits |= (1 << (vind - 32));
			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
		}
	} else {
		/* clear the pool bit */
		if (vind < 32) {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_L);
			bits &= ~(1 << vind);
			wr32(wx, WX_PSR_VLAN_SWC_VM_L, bits);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_H);
		} else {
			bits = rd32(wx, WX_PSR_VLAN_SWC_VM_H);
			bits &= ~(1 << (vind - 32));
			wr32(wx, WX_PSR_VLAN_SWC_VM_H, bits);
			bits |= rd32(wx, WX_PSR_VLAN_SWC_VM_L);
		}
	}

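	/* keep the VLVF entry enabled only while at least one pool still
	 * references this VLAN
	 */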
	if (bits) {
		wr32(wx, WX_PSR_VLAN_SWC, (WX_PSR_VLAN_SWC_VIEN | vlan));
		if (!vlan_on && vfta_changed)
			*vfta_changed = false;
	} else {
		wr32(wx, WX_PSR_VLAN_SWC, 0);
	}

	return 0;
}

/**
 * wx_set_vfta - Set VLAN filter table
 * @wx: pointer to hardware structure
 * @vlan: VLAN id to write to VLAN filter
 * @vind: VMDq output index that maps queue to VLAN id in VLVFB
 * @vlan_on: boolean flag to turn on/off VLAN in VLVF
 *
 * Turn on/off specified VLAN in the VLAN filter table.
 **/
static int wx_set_vfta(struct wx *wx, u32 vlan, u32 vind, bool vlan_on)
{
	u32 bitindex, vfta, targetbit;
	bool vfta_changed = false;
	int regindex, ret;

	/* this is a 2 part operation - first the VFTA, then the
	 * VLVF and VLVFB if VT Mode is set
	 * We don't write the VFTA until we know the VLVF part succeeded.
	 */

	/* Part 1
	 * The VFTA is a bitstring made up of 128 32-bit registers
	 * that enable the particular VLAN id, much like the MTA:
	 * bits[11-5]: which register
	 * bits[4-0]: which bit in the register
	 */
	regindex = (vlan >> 5) & 0x7F;
	bitindex = vlan & 0x1F;
	targetbit = (1 << bitindex);
	/* errata 5 */
	vfta = wx->mac.vft_shadow[regindex];
	if (vlan_on) {
		if (!(vfta & targetbit)) {
			vfta |= targetbit;
			vfta_changed = true;
		}
	} else {
		if ((vfta & targetbit)) {
			vfta &= ~targetbit;
			vfta_changed = true;
		}
	}
	/* Part 2
	 * Call wx_set_vlvf to set VLVFB and VLVF
	 */
	ret = wx_set_vlvf(wx, vlan, vind, vlan_on, &vfta_changed);
	if (ret != 0)
		return ret;

	if (vfta_changed)
		wr32(wx, WX_PSR_VLAN_TBL(regindex), vfta);
	wx->mac.vft_shadow[regindex] = vfta;

	return 0;
}

/**
 * wx_clear_vfta - Clear VLAN filter table
 * @wx: pointer to hardware structure
 *
 * Clears the VLAN filter table, and the VMDq index associated with the filter
 **/
static void wx_clear_vfta(struct wx *wx)
{
	u32 offset;

	for (offset = 0; offset < wx->mac.vft_size; offset++) {
		wr32(wx, WX_PSR_VLAN_TBL(offset), 0);
		wx->mac.vft_shadow[offset] = 0;
	}

	for (offset = 0; offset < WX_PSR_VLAN_SWC_ENTRIES; offset++) {
		wr32(wx, WX_PSR_VLAN_SWC_IDX, offset);
		wr32(wx, WX_PSR_VLAN_SWC, 0);
		wr32(wx, WX_PSR_VLAN_SWC_VM_L, 0);
		wr32(wx, WX_PSR_VLAN_SWC_VM_H, 0);
	}
}

int wx_vlan_rx_add_vid(struct net_device *netdev,
		       __be16 proto, u16 vid)
{
	struct wx *wx = netdev_priv(netdev);

	/* add VID to filter table */
	wx_set_vfta(wx, vid, VMDQ_P(0), true);
	set_bit(vid, wx->active_vlans);

	return 0;
}
EXPORT_SYMBOL(wx_vlan_rx_add_vid);

int wx_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
{
	struct wx *wx = netdev_priv(netdev);

	/* remove VID from filter table */
	if (vid)
		wx_set_vfta(wx, vid, VMDQ_P(0), false);
	clear_bit(vid, wx->active_vlans);

	return 0;
}
EXPORT_SYMBOL(wx_vlan_rx_kill_vid);

static void wx_enable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl |= WX_PX_RR_CFG_DROP_EN;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}

static void wx_disable_rx_drop(struct wx *wx, struct wx_ring *ring)
{
	u16 reg_idx = ring->reg_idx;
	u32 srrctl;

	srrctl = rd32(wx, WX_PX_RR_CFG(reg_idx));
	srrctl &= ~WX_PX_RR_CFG_DROP_EN;

	wr32(wx, WX_PX_RR_CFG(reg_idx), srrctl);
}

int wx_fc_enable(struct wx *wx, bool tx_pause, bool rx_pause)
{
	u16 pause_time = WX_DEFAULT_FCPAUSE;
	u32 mflcn_reg, fccfg_reg, reg;
	u32 fcrtl, fcrth;
	int i;

	/* Low water mark of zero causes XOFF floods */
	if (tx_pause && wx->fc.high_water) {
		if (!wx->fc.low_water || wx->fc.low_water >= wx->fc.high_water) {
			wx_err(wx, "Invalid water mark configuration\n");
			return -EINVAL;
		}
	}

	/* Disable any previous flow control settings */
	mflcn_reg = rd32(wx, WX_MAC_RX_FLOW_CTRL);
	mflcn_reg &= ~WX_MAC_RX_FLOW_CTRL_RFE;

	fccfg_reg = rd32(wx, WX_RDB_RFCC);
	fccfg_reg &= ~WX_RDB_RFCC_RFCE_802_3X;

	if (rx_pause)
		mflcn_reg |= WX_MAC_RX_FLOW_CTRL_RFE;
	if (tx_pause)
		fccfg_reg |= WX_RDB_RFCC_RFCE_802_3X;

	/* Set 802.3x based flow control settings. */
	wr32(wx, WX_MAC_RX_FLOW_CTRL, mflcn_reg);
	wr32(wx, WX_RDB_RFCC, fccfg_reg);

	/* Set up and enable Rx high/low water mark thresholds, enable XON. */
	if (tx_pause && wx->fc.high_water) {
		fcrtl = (wx->fc.low_water << 10) | WX_RDB_RFCL_XONE;
		wr32(wx, WX_RDB_RFCL, fcrtl);
		fcrth = (wx->fc.high_water << 10) | WX_RDB_RFCH_XOFFE;
	} else {
		wr32(wx, WX_RDB_RFCL, 0);
		/* In order to prevent Tx hangs when the internal Tx
		 * switch is enabled we must set the high water mark
		 * to the Rx packet buffer size - 24KB. This allows
		 * the Tx switch to function even under heavy Rx
		 * workloads.
		 */
		fcrth = rd32(wx, WX_RDB_PB_SZ(0)) - 24576;
	}

	wr32(wx, WX_RDB_RFCH, fcrth);

	/* Configure pause time */
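	/* multiplying by 0x00010001 replicates the 16-bit pause time into
	 * both halves of the 32-bit register
	 */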
	reg = pause_time * 0x00010001;
	wr32(wx, WX_RDB_RFCV, reg);

	/* Configure flow control refresh threshold value */
	wr32(wx, WX_RDB_RFCRT, pause_time / 2);

	/* We should set the drop enable bit if:
	 * Number of Rx queues > 1 and flow control is disabled
	 *
	 * This allows us to avoid head of line blocking for security
	 * and performance reasons.
	 */
	if (wx->num_rx_queues > 1 && !tx_pause) {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_enable_rx_drop(wx, wx->rx_ring[i]);
	} else {
		for (i = 0; i < wx->num_rx_queues; i++)
			wx_disable_rx_drop(wx, wx->rx_ring[i]);
	}

	return 0;
}
EXPORT_SYMBOL(wx_fc_enable);

/**
 * wx_update_stats - Update the board statistics counters.
 * @wx: board private structure
 **/
void wx_update_stats(struct wx *wx)
{
	struct wx_hw_stats *hwstats = &wx->stats;
	u64 non_eop_descs = 0, alloc_rx_buff_failed = 0;
	u64 hw_csum_rx_good = 0, hw_csum_rx_error = 0;
	u64 restart_queue = 0, tx_busy = 0;
	u32 i;

	/* gather some stats to the wx struct that are per queue */
	for (i = 0; i < wx->num_rx_queues; i++) {
		struct wx_ring *rx_ring = wx->rx_ring[i];

		non_eop_descs += rx_ring->rx_stats.non_eop_descs;
		alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
		hw_csum_rx_good += rx_ring->rx_stats.csum_good_cnt;
		hw_csum_rx_error += rx_ring->rx_stats.csum_err;
	}
	wx->non_eop_descs = non_eop_descs;
	wx->alloc_rx_buff_failed = alloc_rx_buff_failed;
	wx->hw_csum_rx_error = hw_csum_rx_error;
	wx->hw_csum_rx_good = hw_csum_rx_good;

	for (i = 0; i < wx->num_tx_queues; i++) {
		struct wx_ring *tx_ring = wx->tx_ring[i];

		restart_queue += tx_ring->tx_stats.restart_queue;
		tx_busy += tx_ring->tx_stats.tx_busy;
	}
	wx->restart_queue = restart_queue;
	wx->tx_busy = tx_busy;

	hwstats->gprc += rd32(wx, WX_RDM_PKT_CNT);
	hwstats->gptc += rd32(wx, WX_TDM_PKT_CNT);
	hwstats->gorc += rd64(wx, WX_RDM_BYTE_CNT_LSB);
	hwstats->gotc += rd64(wx, WX_TDM_BYTE_CNT_LSB);
	hwstats->tpr += rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
	hwstats->tpt += rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
	hwstats->crcerrs += rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
	hwstats->rlec += rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
	hwstats->bprc += rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
	hwstats->bptc += rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
	hwstats->mprc += rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
	hwstats->mptc += rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
	hwstats->roc += rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
	hwstats->ruc += rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
	hwstats->lxonoffrxc += rd32(wx, WX_MAC_LXONOFFRXC);
	hwstats->lxontxc += rd32(wx, WX_RDB_LXONTXC);
	hwstats->lxofftxc += rd32(wx, WX_RDB_LXOFFTXC);
	hwstats->o2bgptc += rd32(wx, WX_TDM_OS2BMC_CNT);
	hwstats->b2ospc += rd32(wx, WX_MNG_BMC2OS_CNT);
	hwstats->o2bspc += rd32(wx, WX_MNG_OS2BMC_CNT);
	hwstats->b2ogprc += rd32(wx, WX_RDM_BMC2OS_CNT);
	hwstats->rdmdrop += rd32(wx, WX_RDM_DRP_PKT);

	if (test_bit(WX_FLAG_FDIR_CAPABLE, wx->flags)) {
		hwstats->fdirmatch += rd32(wx, WX_RDB_FDIR_MATCH);
		hwstats->fdirmiss += rd32(wx, WX_RDB_FDIR_MISS);
	}

	for (i = 0; i < wx->mac.max_rx_queues; i++)
		hwstats->qmprc += rd32(wx, WX_PX_MPRC(i));
}
EXPORT_SYMBOL(wx_update_stats);

/**
 * wx_clear_hw_cntrs - Generic clear hardware counters
 * @wx: board private structure
 *
 * Clears all hardware statistics counters by reading them from the hardware;
 * the statistics counters are clear-on-read.
 **/
void wx_clear_hw_cntrs(struct wx *wx)
{
	u16 i = 0;

	for (i = 0; i < wx->mac.max_rx_queues; i++)
		wr32(wx, WX_PX_MPRC(i), 0);

	rd32(wx, WX_RDM_PKT_CNT);
	rd32(wx, WX_TDM_PKT_CNT);
	rd64(wx, WX_RDM_BYTE_CNT_LSB);
	rd32(wx, WX_TDM_BYTE_CNT_LSB);
	rd32(wx, WX_RDM_DRP_PKT);
	rd32(wx, WX_RX_UNDERSIZE_FRAMES_GOOD);
	rd32(wx, WX_RX_OVERSIZE_FRAMES_GOOD);
	rd64(wx, WX_RX_FRAME_CNT_GOOD_BAD_L);
	rd64(wx, WX_TX_FRAME_CNT_GOOD_BAD_L);
	rd64(wx, WX_RX_MC_FRAMES_GOOD_L);
	rd64(wx, WX_TX_MC_FRAMES_GOOD_L);
	rd64(wx, WX_RX_BC_FRAMES_GOOD_L);
	rd64(wx, WX_TX_BC_FRAMES_GOOD_L);
	rd64(wx, WX_RX_CRC_ERROR_FRAMES_L);
	rd64(wx, WX_RX_LEN_ERROR_FRAMES_L);
	rd32(wx, WX_RDB_LXONTXC);
	rd32(wx, WX_RDB_LXOFFTXC);
	rd32(wx, WX_MAC_LXONOFFRXC);
}
EXPORT_SYMBOL(wx_clear_hw_cntrs);

/**
 * wx_start_hw - Prepare hardware for Tx/Rx
 * @wx: pointer to hardware structure
 *
 * Starts the hardware using the generic start_hw function.
 * Then performs revision-specific operations, if any.
 **/
void wx_start_hw(struct wx *wx)
{
	int i;

	/* Clear the VLAN filter table */
	wx_clear_vfta(wx);
	WX_WRITE_FLUSH(wx);
	/* Clear the rate limiters */
	for (i = 0; i < wx->mac.max_tx_queues; i++) {
		wr32(wx, WX_TDM_RP_IDX, i);
		wr32(wx, WX_TDM_RP_RATE, 0);
	}
}
EXPORT_SYMBOL(wx_start_hw);

MODULE_LICENSE("GPL");