// SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-3-Clause)
/*
 * Copyright (c) 2014-2025, Advanced Micro Devices, Inc.
 * Copyright (c) 2014, Synopsys, Inc.
 * All rights reserved
 */

#include <linux/phy.h>
#include <linux/mdio.h>
#include <linux/clk.h>
#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/crc32poly.h>
#include <linux/pci.h>

#include "xgbe.h"
#include "xgbe-common.h"
#include "xgbe-smn.h"

static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
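        /* Worst-case frame size on the wire; e.g. a 1500-byte MTU gives
         * 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) = 1522.
         */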
        return pdata->netdev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
}

static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
                                      unsigned int usec)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_usec_to_riwt\n");

        rate = pdata->sysclk_rate;

        /*
         * Convert the input usec value to the watchdog timer value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
         */
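        /* For example, assuming a 125 MHz system clock, 200 usec
         * converts to (200 * 125) / 256 = 97 watchdog units.
         */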
        ret = (usec * (rate / 1000000)) / 256;

        DBGPR("<--xgbe_usec_to_riwt\n");

        return ret;
}

static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
                                      unsigned int riwt)
{
        unsigned long rate;
        unsigned int ret;

        DBGPR("-->xgbe_riwt_to_usec\n");

        rate = pdata->sysclk_rate;

        /*
         * Convert the input watchdog timer value to the usec value. Each
         * watchdog timer value is equivalent to 256 clock cycles.
         * Calculate the required value as:
         *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
         */
        ret = (riwt * 256) / (rate / 1000000);

        DBGPR("<--xgbe_riwt_to_usec\n");

        return ret;
}

static int xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
        unsigned int pblx8, pbl;
        unsigned int i;

        pblx8 = DMA_PBL_X8_DISABLE;
        pbl = pdata->pbl;

        if (pdata->pbl > 32) {
                pblx8 = DMA_PBL_X8_ENABLE;
                pbl >>= 3;
        }
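        /* For example, a configured PBL of 64 is beyond the 32-beat
         * field maximum, so it is programmed as PBL = 8 (64 >> 3) with
         * the x8 multiplier enabled.
         */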

        for (i = 0; i < pdata->channel_count; i++) {
                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
                                       pblx8);

                if (pdata->channel[i]->tx_ring)
                        XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
                                               PBL, pbl);

                if (pdata->channel[i]->rx_ring)
                        XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
                                               PBL, pbl);
        }

        return 0;
}

static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
                                       pdata->tx_osp_mode);
        }

        return 0;
}

static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

        return 0;
}

static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

        return 0;
}

static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

        return 0;
}

static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
                                    unsigned int val)
{
        unsigned int i;

        for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

        return 0;
}

static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
                                       pdata->rx_riwt);
        }

        return 0;
}

static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
        return 0;
}

static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
                                       pdata->rx_buf_size);
        }
}

static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->tx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
        }
}

static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
        }

        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
        pdata->sph = true;
}

static void xgbe_disable_sph_mode(struct xgbe_prv_data *pdata)
{
        unsigned int i;

        for (i = 0; i < pdata->channel_count; i++) {
                if (!pdata->channel[i]->rx_ring)
                        break;

                XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 0);
        }
        pdata->sph = false;
}

static int xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
                              unsigned int index, unsigned int val)
{
        unsigned int wait;
        int ret = 0;

        mutex_lock(&pdata->rss_mutex);

        if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
                ret = -EBUSY;
                goto unlock;
        }

        XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

        wait = 1000;
        while (wait--) {
                if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
                        goto unlock;

                usleep_range(1000, 1500);
        }

        ret = -EBUSY;

unlock:
        mutex_unlock(&pdata->rss_mutex);

        return ret;
}

static int xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
        unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(u32);
        unsigned int *key = (unsigned int *)&pdata->rss_key;
        int ret;

        while (key_regs--) {
                ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
                                         key_regs, *key++);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
        unsigned int i;
        int ret;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
                ret = xgbe_write_rss_reg(pdata,
                                         XGBE_RSS_LOOKUP_TABLE_TYPE, i,
                                         pdata->rss_table[i]);
                if (ret)
                        return ret;
        }

        return 0;
}

static int xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const u8 *key)
{
        memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

        return xgbe_write_rss_hash_key(pdata);
}

static int xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata,
                                     const u32 *table)
{
        unsigned int i;

        for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
                XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

        return xgbe_write_rss_lookup_table(pdata);
}

static int xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        /* Program the hash key */
        ret = xgbe_write_rss_hash_key(pdata);
        if (ret)
                return ret;

        /* Program the lookup table */
        ret = xgbe_write_rss_lookup_table(pdata);
        if (ret)
                return ret;

        /* Set the RSS options */
        XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

        /* Enable RSS */
        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

        return 0;
}

static int xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.rss)
                return -EOPNOTSUPP;

        XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

        return 0;
}

static void xgbe_config_rss(struct xgbe_prv_data *pdata)
{
        int ret;

        if (!pdata->hw_feat.rss)
                return;

        if (pdata->netdev->features & NETIF_F_RXHASH)
                ret = xgbe_enable_rss(pdata);
        else
                ret = xgbe_disable_rss(pdata);

        if (ret)
                netdev_err(pdata->netdev,
                           "error configuring RSS, RSS disabled\n");
}

static bool xgbe_is_pfc_queue(struct xgbe_prv_data *pdata,
                              unsigned int queue)
{
        unsigned int prio, tc;

        for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
                /* Does this queue handle the priority? */
                if (pdata->prio2q_map[prio] != queue)
                        continue;

                /* Get the Traffic Class for this priority */
                tc = pdata->ets->prio_tc[prio];

                /* Check if PFC is enabled for this traffic class */
                if (pdata->pfc->pfc_en & (1 << tc))
                        return true;
        }

        return false;
}

static void xgbe_set_vxlan_id(struct xgbe_prv_data *pdata)
{
        /* Program the VXLAN port */
        XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, pdata->vxlan_port);

        netif_dbg(pdata, drv, pdata->netdev, "VXLAN tunnel id set to %hx\n",
                  pdata->vxlan_port);
}

static void xgbe_enable_vxlan(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.vxn)
                return;

        /* Program the VXLAN port */
        xgbe_set_vxlan_id(pdata);

        /* Allow for IPv6/UDP zero-checksum VXLAN packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 1);

        /* Enable VXLAN tunneling mode */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNM, 0);
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 1);

        netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration enabled\n");
}

static void xgbe_disable_vxlan(struct xgbe_prv_data *pdata)
{
        if (!pdata->hw_feat.vxn)
                return;

        /* Disable tunneling mode */
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, VNE, 0);

        /* Clear IPv6/UDP zero-checksum VXLAN packets setting */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VUCC, 0);

        /* Clear the VXLAN port */
        XGMAC_IOWRITE_BITS(pdata, MAC_TIR, TNID, 0);

        netif_dbg(pdata, drv, pdata->netdev, "VXLAN acceleration disabled\n");
}

static unsigned int xgbe_get_fc_queue_count(struct xgbe_prv_data *pdata)
{
        unsigned int max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;

        /* From MAC ver 30H the TFCR is per priority, instead of per queue */
        if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) >= 0x30)
                return max_q_count;
        else
                return min_t(unsigned int, pdata->tx_q_count, max_q_count);
}

static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        unsigned int reg, reg_val;
        unsigned int i, q_count;

        /* Clear MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

        /* Clear MAC flow control */
        q_count = xgbe_get_fc_queue_count(pdata);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;
        struct ieee_ets *ets = pdata->ets;
        unsigned int reg, reg_val;
        unsigned int i, q_count;

        /* Set MTL flow control */
        for (i = 0; i < pdata->rx_q_count; i++) {
                unsigned int ehfc = 0;

                if (pdata->rx_rfd[i]) {
                        /* Flow control thresholds are established */
                        if (pfc && ets) {
                                if (xgbe_is_pfc_queue(pdata, i))
                                        ehfc = 1;
                        } else {
                                ehfc = 1;
                        }
                }

                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

                netif_dbg(pdata, drv, pdata->netdev,
                          "flow control %s for RXq%u\n",
                          ehfc ? "enabled" : "disabled", i);
        }

        /* Set MAC flow control */
        q_count = xgbe_get_fc_queue_count(pdata);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);

                /* Enable transmit flow control */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);
                /* Set pause time */
                XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

                XGMAC_IOWRITE(pdata, reg, reg_val);

                reg += MAC_QTFCR_INC;
        }

        return 0;
}

static int xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

        return 0;
}

static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

        return 0;
}

static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->tx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_tx_flow_control(pdata);
        else
                xgbe_disable_tx_flow_control(pdata);

        return 0;
}

static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        if (pdata->rx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_rx_flow_control(pdata);
        else
                xgbe_disable_rx_flow_control(pdata);

        return 0;
}

static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
        struct ieee_pfc *pfc = pdata->pfc;

        xgbe_config_tx_flow_control(pdata);
        xgbe_config_rx_flow_control(pdata);

        XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
                           (pfc && pfc->pfc_en) ? 1 : 0);
}

static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
        struct xgbe_channel *channel;
        unsigned int i, ver;

        /* Set the interrupt mode if supported */
        if (pdata->channel_irq_mode)
                XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
                                   pdata->channel_irq_mode);

        ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);

        for (i = 0; i < pdata->channel_count; i++) {
                channel = pdata->channel[i];

                /* Clear all the interrupts which are set */
                XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
                                  XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

                /* Clear all interrupt enable bits */
                channel->curr_ier = 0;

                /* Enable the following interrupts:
                 *   NIE  - Normal Interrupt Summary Enable
                 *   AIE  - Abnormal Interrupt Summary Enable
                 *   FBEE - Fatal Bus Error Enable
                 */
                if (ver < 0x21) {
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
                } else {
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
                }
                XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

                if (channel->tx_ring) {
                        /* Enable the following Tx interrupts:
                         *   TIE - Transmit Interrupt Enable (unless using
                         *         per-channel interrupts in edge-triggered
                         *         mode)
                         */
                        if (!pdata->per_channel_irq || pdata->channel_irq_mode)
                                XGMAC_SET_BITS(channel->curr_ier,
                                               DMA_CH_IER, TIE, 1);
                }
                if (channel->rx_ring) {
                        /* Enable the following Rx interrupts:
                         *   RBUE - Receive Buffer Unavailable Enable
                         *   RIE  - Receive Interrupt Enable (unless using
                         *          per-channel interrupts in edge-triggered
                         *          mode)
                         */
                        XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
                        if (!pdata->per_channel_irq || pdata->channel_irq_mode)
                                XGMAC_SET_BITS(channel->curr_ier,
                                               DMA_CH_IER, RIE, 1);
                }

                XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
        }
}

static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mtl_q_isr;
        unsigned int q_count, i;

        q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
        for (i = 0; i < q_count; i++) {
                /* Clear all the interrupts which are set */
                mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

                /* No MTL interrupts to be enabled */
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
}

static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int mac_ier = 0;

        /* Enable Timestamp interrupt */
        XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

        XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

        /* Enable all counter interrupts */
        XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
        XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

        /* Enable MDIO single command completion interrupt */
        XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}

static void xgbe_enable_ecc_interrupts(struct xgbe_prv_data *pdata)
{
        unsigned int ecc_isr, ecc_ier = 0;

        if (!pdata->vdata->ecc_support)
                return;

        /* Clear all the interrupts which are set */
        ecc_isr = XP_IOREAD(pdata, XP_ECC_ISR);
        XP_IOWRITE(pdata, XP_ECC_ISR, ecc_isr);

        /* Enable ECC interrupts */
        XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 1);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 1);

        XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_ded(struct xgbe_prv_data *pdata)
{
        unsigned int ecc_ier;

        ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

        /* Disable ECC DED interrupts */
        XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_DED, 0);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_DED, 0);
        XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_DED, 0);

        XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static void xgbe_disable_ecc_sec(struct xgbe_prv_data *pdata,
                                 enum xgbe_ecc_sec sec)
{
        unsigned int ecc_ier;

        ecc_ier = XP_IOREAD(pdata, XP_ECC_IER);

        /* Disable ECC SEC interrupt */
        switch (sec) {
        case XGBE_ECC_SEC_TX:
                XP_SET_BITS(ecc_ier, XP_ECC_IER, TX_SEC, 0);
                break;
        case XGBE_ECC_SEC_RX:
                XP_SET_BITS(ecc_ier, XP_ECC_IER, RX_SEC, 0);
                break;
        case XGBE_ECC_SEC_DESC:
                XP_SET_BITS(ecc_ier, XP_ECC_IER, DESC_SEC, 0);
                break;
        }

        XP_IOWRITE(pdata, XP_ECC_IER, ecc_ier);
}

static int xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
        unsigned int ss;

        switch (speed) {
        case SPEED_10:
                ss = 0x07;
                break;
        case SPEED_1000:
                ss = 0x03;
                break;
        case SPEED_2500:
                ss = 0x02;
                break;
        case SPEED_10000:
                ss = 0x00;
                break;
        default:
                return -EINVAL;
        }

        if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
                XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

        return 0;
}

static int xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        /* Put the VLAN tag in the Rx descriptor */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

        /* Don't check the VLAN type */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

        /* Check only C-TAG (0x8100) packets */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

        /* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

        /* Enable VLAN tag stripping */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

        return 0;
}

static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

        return 0;
}

static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Enable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

        /* Enable VLAN Hash Table filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

        /* Disable VLAN tag inverse matching */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

        /* Only filter on the lower 12-bits of the VLAN tag */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

        /* In order for the VLAN Hash Table filtering to be effective,
         * the VLAN tag identifier in the VLAN Tag Register must not
         * be zero. Set the VLAN tag identifier to "1" to enable the
         * VLAN Hash Table filtering. This implies that a VLAN tag of
         * 1 will always pass filtering.
         */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

        return 0;
}

static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
        /* Disable VLAN filtering */
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

        return 0;
}

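/* Bit-serial little-endian CRC32 over the low 12 bits of the VLAN ID
 * (get_bitmask_order(VLAN_VID_MASK) bits); a software mirror of the
 * hash the hardware applies for VLAN hash-table filtering.
 */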
static u32 xgbe_vid_crc32_le(__le16 vid_le)
{
        u32 crc = ~0;
        u32 temp = 0;
        unsigned char *data = (unsigned char *)&vid_le;
        unsigned char data_byte = 0;
        int i, bits;

        bits = get_bitmask_order(VLAN_VID_MASK);
        for (i = 0; i < bits; i++) {
                if ((i % 8) == 0)
                        data_byte = data[i / 8];

                temp = ((crc & 1) ^ data_byte) & 1;
                crc >>= 1;
                data_byte >>= 1;

                if (temp)
                        crc ^= CRC32_POLY_LE;
        }

        return crc;
}

static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
        u32 crc;
        u16 vid;
        __le16 vid_le;
        u16 vlan_hash_table = 0;

        /* Generate the VLAN Hash Table value */
        for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
                /* Get the CRC32 value of the VLAN ID */
                vid_le = cpu_to_le16(vid);
                crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
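                /* Keep only the top four bits of the bit-reversed CRC
                 * (the shift by 28); they select one of the 16 bins in
                 * the VLAN hash table.
                 */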

                vlan_hash_table |= (1 << crc);
        }

        /* Set the VLAN Hash Table filtering register */
        XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

        return 0;
}

static int xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata,
                                     unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s promiscuous mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

        /* Hardware will still perform VLAN filtering in promiscuous mode */
        if (enable) {
                xgbe_disable_rx_vlan_filtering(pdata);
        } else {
                if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
                        xgbe_enable_rx_vlan_filtering(pdata);
        }

        return 0;
}

static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
                                       unsigned int enable)
{
        unsigned int val = enable ? 1 : 0;

        if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
                return 0;

        netif_dbg(pdata, drv, pdata->netdev, "%s allmulti mode\n",
                  enable ? "entering" : "leaving");
        XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

        return 0;
}

static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
                             struct netdev_hw_addr *ha, unsigned int *mac_reg)
{
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;

        mac_addr_lo = 0;
        mac_addr_hi = 0;

        if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
                mac_addr[2] = ha->addr[2];
                mac_addr[3] = ha->addr[3];
                mac_addr = (u8 *)&mac_addr_hi;
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];

                netif_dbg(pdata, drv, pdata->netdev,
                          "adding mac address %pM at %#x\n",
                          ha->addr, *mac_reg);

                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
        }

        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
        *mac_reg += MAC_MACA_INC;
        XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
        *mac_reg += MAC_MACA_INC;
}

static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int mac_reg;
        unsigned int addn_macs;

        mac_reg = MAC_MACA1HR;
        addn_macs = pdata->hw_feat.addn_mac;

        if (netdev_uc_count(netdev) > addn_macs) {
                xgbe_set_promiscuous_mode(pdata, 1);
        } else {
                netdev_for_each_uc_addr(ha, netdev) {
                        xgbe_set_mac_reg(pdata, ha, &mac_reg);
                        addn_macs--;
                }

                if (netdev_mc_count(netdev) > addn_macs) {
                        xgbe_set_all_multicast_mode(pdata, 1);
                } else {
                        netdev_for_each_mc_addr(ha, netdev) {
                                xgbe_set_mac_reg(pdata, ha, &mac_reg);
                                addn_macs--;
                        }
                }
        }

        /* Clear remaining additional MAC address entries */
        while (addn_macs--)
                xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        struct netdev_hw_addr *ha;
        unsigned int hash_reg;
        unsigned int hash_table_shift, hash_table_count;
        u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
        u32 crc;
        unsigned int i;

        hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
        hash_table_count = pdata->hw_feat.hash_table_size / 32;
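        /* For example, a 256-entry hash table yields a shift of
         * 26 - (256 >> 7) = 24: the top 8 CRC bits index 256 bit
         * positions spread over 256 / 32 = 8 registers.
         */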
        memset(hash_table, 0, sizeof(hash_table));

        /* Build the MAC Hash Table register values */
        netdev_for_each_uc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        netdev_for_each_mc_addr(ha, netdev) {
                crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
                crc >>= hash_table_shift;
                hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }

        /* Set the MAC Hash Table registers */
        hash_reg = MAC_HTR0;
        for (i = 0; i < hash_table_count; i++) {
                XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
                hash_reg += MAC_HTR_INC;
        }
}

static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
        if (pdata->hw_feat.hash_table_size)
                xgbe_set_mac_hash_table(pdata);
        else
                xgbe_set_mac_addn_addrs(pdata);

        return 0;
}

static int xgbe_set_mac_address(struct xgbe_prv_data *pdata, const u8 *addr)
{
        unsigned int mac_addr_hi, mac_addr_lo;

        mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
        mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
                      (addr[1] << 8) | (addr[0] << 0);
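        /* For example, the address 00:11:22:33:44:55 packs as
         * mac_addr_hi = 0x5544 and mac_addr_lo = 0x33221100.
         */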

        XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
        XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

        return 0;
}

static int xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
        struct net_device *netdev = pdata->netdev;
        unsigned int pr_mode, am_mode;

        pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
        am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);

        xgbe_set_promiscuous_mode(pdata, pr_mode);
        xgbe_set_all_multicast_mode(pdata, am_mode);

        xgbe_add_mac_addresses(pdata);

        return 0;
}

static int xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
        unsigned int reg;

        if (gpio > 15)
                return -EINVAL;

        reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

        reg &= ~(1 << (gpio + 16));
        XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

        return 0;
}

static int xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
        unsigned int reg;

        if (gpio > 15)
                return -EINVAL;

        reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

        reg |= (1 << (gpio + 16));
        XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

        return 0;
}

static unsigned int xgbe_get_mmd_address(struct xgbe_prv_data *pdata,
                                         int mmd_reg)
{
        return (mmd_reg & XGBE_ADDR_C45) ?
                mmd_reg & ~XGBE_ADDR_C45 :
                (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);
}

static void xgbe_get_pcs_index_and_offset(struct xgbe_prv_data *pdata,
                                          unsigned int mmd_address,
                                          unsigned int *index,
                                          unsigned int *offset)
{
        /* The PCS registers are accessed using mmio. The underlying
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 16-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 1 bit and reading 16 bits of data.
         */
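        /* For example, assuming a 4 KB window (xpcs_window_mask of
         * 0xfff), mmd_address 0x10000 becomes 0x20000 after the shift;
         * the window-select index is then 0x20000 and the offset is the
         * window base itself.
         */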
        mmd_address <<= 1;
        *index = mmd_address & ~pdata->xpcs_window_mask;
        *offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);
}

static int xgbe_read_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
                                 int mmd_reg)
{
        unsigned int mmd_address, index, offset;
        u32 smn_address;
        int mmd_data;
        int ret;

        mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

        xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);

        smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
        ret = amd_smn_write(0, smn_address, index);
        if (ret)
                return ret;

        ret = amd_smn_read(0, pdata->smn_base + offset, &mmd_data);
        if (ret)
                return ret;

        mmd_data = (offset % 4) ? FIELD_GET(XGBE_GEN_HI_MASK, mmd_data) :
                                  FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);

        return mmd_data;
}

static void xgbe_write_mmd_regs_v3(struct xgbe_prv_data *pdata, int prtad,
                                   int mmd_reg, int mmd_data)
{
        unsigned int pci_mmd_data, hi_mask, lo_mask;
        unsigned int mmd_address, index, offset;
        struct pci_dev *dev;
        u32 smn_address;
        int ret;

        dev = pdata->pcidev;
        mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

        xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);

        smn_address = pdata->smn_base + pdata->xpcs_window_sel_reg;
        ret = amd_smn_write(0, smn_address, index);
        if (ret) {
                pci_err(dev, "Failed to write data 0x%x\n", index);
                return;
        }

        ret = amd_smn_read(0, pdata->smn_base + offset, &pci_mmd_data);
        if (ret) {
                pci_err(dev, "Failed to read data\n");
                return;
        }

        if (offset % 4) {
                hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK, mmd_data);
                lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, pci_mmd_data);
        } else {
                hi_mask = FIELD_PREP(XGBE_GEN_HI_MASK,
                                     FIELD_GET(XGBE_GEN_HI_MASK, pci_mmd_data));
                lo_mask = FIELD_GET(XGBE_GEN_LO_MASK, mmd_data);
        }

        pci_mmd_data = hi_mask | lo_mask;

        ret = amd_smn_write(0, smn_address, index);
        if (ret) {
                pci_err(dev, "Failed to write data 0x%x\n", index);
                return;
        }

        ret = amd_smn_write(0, (pdata->smn_base + offset), pci_mmd_data);
        if (ret) {
                pci_err(dev, "Failed to write data 0x%x\n", pci_mmd_data);
                return;
        }
}

static int xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
                                 int mmd_reg)
{
        unsigned int mmd_address, index, offset;
        unsigned long flags;
        int mmd_data;

        mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

        xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);

        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        mmd_data = XPCS16_IOREAD(pdata, offset);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

        return mmd_data;
}

static void xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad,
                                   int mmd_reg, int mmd_data)
{
        unsigned long flags;
        unsigned int mmd_address, index, offset;

        mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

        xgbe_get_pcs_index_and_offset(pdata, mmd_address, &index, &offset);

        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
        XPCS16_IOWRITE(pdata, offset, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
                                 int mmd_reg)
{
        unsigned long flags;
        unsigned int mmd_address;
        int mmd_data;

        mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and reading 32 bits of data.
         */
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
        mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

        return mmd_data;
}

static void xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad,
                                   int mmd_reg, int mmd_data)
{
        unsigned int mmd_address;
        unsigned long flags;

        mmd_address = xgbe_get_mmd_address(pdata, mmd_reg);

        /* The PCS registers are accessed using mmio. The underlying APB3
         * management interface uses indirect addressing to access the MMD
         * register sets. This requires accessing of the PCS register in two
         * phases, an address phase and a data phase.
         *
         * The mmio interface is based on 32-bit offsets and values. All
         * register offsets must therefore be adjusted by left shifting the
         * offset 2 bits and writing 32 bits of data.
         */
        spin_lock_irqsave(&pdata->xpcs_lock, flags);
        XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
        XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
        spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}

static int xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                              int mmd_reg)
{
        switch (pdata->vdata->xpcs_access) {
        case XGBE_XPCS_ACCESS_V1:
                return xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg);

        case XGBE_XPCS_ACCESS_V2:
        default:
                return xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg);

        case XGBE_XPCS_ACCESS_V3:
                return xgbe_read_mmd_regs_v3(pdata, prtad, mmd_reg);
        }
}

static void xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad,
                                int mmd_reg, int mmd_data)
{
        switch (pdata->vdata->xpcs_access) {
        case XGBE_XPCS_ACCESS_V1:
                return xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg, mmd_data);

        case XGBE_XPCS_ACCESS_V3:
                return xgbe_write_mmd_regs_v3(pdata, prtad, mmd_reg, mmd_data);

        case XGBE_XPCS_ACCESS_V2:
        default:
                return xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg, mmd_data);
        }
}

static unsigned int xgbe_create_mdio_sca_c22(int port, int reg)
{
        unsigned int mdio_sca;

        mdio_sca = 0;
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);

        return mdio_sca;
}

static unsigned int xgbe_create_mdio_sca_c45(int port, unsigned int da, int reg)
{
        unsigned int mdio_sca;

        mdio_sca = 0;
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
        XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);

        return mdio_sca;
}

static int xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata,
                                   unsigned int mdio_sca, u16 val)
{
        unsigned int mdio_sccd;

        reinit_completion(&pdata->mdio_complete);

        XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

        mdio_sccd = 0;
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
        XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

        if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
                netdev_err(pdata->netdev, "mdio write operation timed out\n");
                return -ETIMEDOUT;
        }

        return 0;
}

static int xgbe_write_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
                                       int reg, u16 val)
{
        unsigned int mdio_sca;

        mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);

        return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
}

static int xgbe_write_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
                                       int devad, int reg, u16 val)
{
        unsigned int mdio_sca;

        mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);

        return xgbe_write_ext_mii_regs(pdata, mdio_sca, val);
}

static int xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata,
                                  unsigned int mdio_sca)
{
        unsigned int mdio_sccd;

        reinit_completion(&pdata->mdio_complete);

        XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

        mdio_sccd = 0;
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
        XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
        XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

        if (!wait_for_completion_timeout(&pdata->mdio_complete, HZ)) {
                netdev_err(pdata->netdev, "mdio read operation timed out\n");
                return -ETIMEDOUT;
        }

        return XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA);
}

static int xgbe_read_ext_mii_regs_c22(struct xgbe_prv_data *pdata, int addr,
                                      int reg)
{
        unsigned int mdio_sca;

        mdio_sca = xgbe_create_mdio_sca_c22(addr, reg);

        return xgbe_read_ext_mii_regs(pdata, mdio_sca);
}

static int xgbe_read_ext_mii_regs_c45(struct xgbe_prv_data *pdata, int addr,
                                      int devad, int reg)
{
        unsigned int mdio_sca;

        mdio_sca = xgbe_create_mdio_sca_c45(addr, devad, reg);

        return xgbe_read_ext_mii_regs(pdata, mdio_sca);
}

static int xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
                                 enum xgbe_mdio_mode mode)
{
        unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

        switch (mode) {
        case XGBE_MDIO_MODE_CL22:
                if (port > XGMAC_MAX_C22_PORT)
                        return -EINVAL;
                reg_val |= (1 << port);
                break;
        case XGBE_MDIO_MODE_CL45:
                break;
        default:
                return -EINVAL;
        }

        XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

        return 0;
}

static int xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
        return !XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN);
}

static int xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

        return 0;
}

static int xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
        XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

        return 0;
}

static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;

        /* Reset the Tx descriptor
         * Set buffer 1 (lo) address to zero
         * Set buffer 1 (hi) address to zero
         * Reset all other control bits (IC, TTSE, B2L & B1L)
         * Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
         */
        rdesc->desc0 = 0;
        rdesc->desc1 = 0;
        rdesc->desc2 = 0;
        rdesc->desc3 = 0;

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}

static void xgbe_tx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        int i;
        int start_index = ring->cur;

        DBGPR("-->tx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Tx descriptor */
                xgbe_tx_desc_reset(rdata);
        }

        /* Update the total number of Tx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--tx_desc_init\n");
}

static void xgbe_rx_desc_reset(struct xgbe_prv_data *pdata,
                               struct xgbe_ring_data *rdata, unsigned int index)
{
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
        unsigned int rx_usecs = pdata->rx_usecs;
        unsigned int rx_frames = pdata->rx_frames;
        unsigned int inte;
        dma_addr_t hdr_dma, buf_dma;

        if (!rx_usecs && !rx_frames) {
                /* No coalescing, interrupt for every descriptor */
                inte = 1;
        } else {
                /* Set interrupt based on Rx frame coalescing setting */
                if (rx_frames && !((index + 1) % rx_frames))
                        inte = 1;
                else
                        inte = 0;
        }
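        /* For example, with rx_frames = 16 and rx_usecs = 0, only every
         * sixteenth descriptor (index 15, 31, 47, ...) is armed to
         * raise an interrupt.
         */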

        /* Reset the Rx descriptor
         * Set buffer 1 (lo) address to header dma address (lo)
         * Set buffer 1 (hi) address to header dma address (hi)
         * Set buffer 2 (lo) address to buffer dma address (lo)
         * Set buffer 2 (hi) address to buffer dma address (hi) and
         * set control bits OWN and INTE
         */
        hdr_dma = rdata->rx.hdr.dma_base + rdata->rx.hdr.dma_off;
        buf_dma = rdata->rx.buf.dma_base + rdata->rx.buf.dma_off;
        rdesc->desc0 = cpu_to_le32(lower_32_bits(hdr_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(hdr_dma));
        rdesc->desc2 = cpu_to_le32(lower_32_bits(buf_dma));
        rdesc->desc3 = cpu_to_le32(upper_32_bits(buf_dma));

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, INTE, inte);

        /* Since the Rx DMA engine is likely running, make sure everything
         * is written to the descriptor(s) before setting the OWN bit
         * for the descriptor
         */
        dma_wmb();

        XGMAC_SET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN, 1);

        /* Make sure ownership is written to the descriptor */
        dma_wmb();
}

static void xgbe_rx_desc_init(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        unsigned int start_index = ring->cur;
        unsigned int i;

        DBGPR("-->rx_desc_init\n");

        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
                rdata = XGBE_GET_DESC_DATA(ring, i);

                /* Initialize Rx descriptor */
                xgbe_rx_desc_reset(pdata, rdata, i);
        }

        /* Update the total number of Rx descriptors */
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

        /* Update the starting address of descriptor ring */
        rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Update the Rx Descriptor Tail Pointer */
        rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        DBGPR("<--rx_desc_init\n");
}

static void xgbe_tx_start_xmit(struct xgbe_channel *channel,
                               struct xgbe_ring *ring)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring_data *rdata;

        /* Make sure everything is written before the register write */
        wmb();

        /* Issue a poll command to Tx DMA by writing address
         * of next immediate free descriptor */
        rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));

        /* Start the Tx timer */
        if (pdata->tx_usecs && !channel->tx_timer_active) {
                channel->tx_timer_active = 1;
                mod_timer(&channel->tx_timer,
                          jiffies + usecs_to_jiffies(pdata->tx_usecs));
        }

        ring->tx.xmit_more = 0;
}

static void xgbe_dev_xmit(struct xgbe_channel *channel)
{
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_ring *ring = channel->tx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
        unsigned int tx_packets, tx_bytes;
        unsigned int csum, tso, vlan, vxlan;
        unsigned int tso_context, vlan_context;
        unsigned int tx_set_ic;
        int start_index = ring->cur;
        int cur_index = ring->cur;
        int i;

        DBGPR("-->xgbe_dev_xmit\n");

        tx_packets = packet->tx_packets;
        tx_bytes = packet->tx_bytes;

        csum = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              CSUM_ENABLE);
        tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                             TSO_ENABLE);
        vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                              VLAN_CTAG);
        vxlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
                               VXLAN);

        if (tso && (packet->mss != ring->tx.cur_mss))
                tso_context = 1;
        else
                tso_context = 0;

        if (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag))
                vlan_context = 1;
        else
                vlan_context = 0;

        /* Determine if an interrupt should be generated for this Tx:
         *   Interrupt:
         *     - Tx frame count exceeds the frame count setting
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set exceeds the frame count setting
         *   No interrupt:
         *     - No frame count setting specified (ethtool -C ethX tx-frames 0)
         *     - Addition of Tx frame count to the frame count since the
         *       last interrupt was set does not exceed the frame count setting
         */
        ring->coalesce_count += tx_packets;
        if (!pdata->tx_frames)
                tx_set_ic = 0;
        else if (tx_packets > pdata->tx_frames)
                tx_set_ic = 1;
        else if ((ring->coalesce_count % pdata->tx_frames) < tx_packets)
                tx_set_ic = 1;
        else
                tx_set_ic = 0;
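        /* For example, with tx-frames set to 64 an interrupt is
         * requested whenever coalesce_count crosses a multiple of 64,
         * or immediately if a single submission carries more than 64
         * packets.
         */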

        rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        rdesc = rdata->rdesc;

        /* Create a context descriptor if this is a TSO and/or VLAN packet */
        if (tso_context || vlan_context) {
                if (tso_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "TSO context descriptor, mss=%u\n",
                                  packet->mss);

                        /* Set the MSS size */
                        XGMAC_SET_BITS_LE(rdesc->desc2, TX_CONTEXT_DESC2,
                                          MSS, packet->mss);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Indicate this descriptor contains the MSS */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          TCMSSV, 1);

                        ring->tx.cur_mss = packet->mss;
                }

                if (vlan_context) {
                        netif_dbg(pdata, tx_queued, pdata->netdev,
                                  "VLAN context descriptor, ctag=%u\n",
                                  packet->vlan_ctag);

                        /* Mark it as a CONTEXT descriptor */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          CTXT, 1);

                        /* Set the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VT, packet->vlan_ctag);

                        /* Indicate this descriptor contains the VLAN tag */
                        XGMAC_SET_BITS_LE(rdesc->desc3, TX_CONTEXT_DESC3,
                                          VLTV, 1);

                        ring->tx.cur_vlan_ctag = packet->vlan_ctag;
                }

                cur_index++;
                rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                rdesc = rdata->rdesc;
        }

        /* Update buffer address (for TSO this is the header) */
        rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
        rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));

        /* Update the buffer length */
        XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
                          rdata->skb_dma_len);

        /* VLAN tag insertion check */
        if (vlan)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
                                  TX_NORMAL_DESC2_VLAN_INSERT);

        /* Timestamp enablement check */
        if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);

        /* Mark it as First Descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FD, 1);

        /* Mark it as a NORMAL descriptor */
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);

        /* Set OWN bit if not the first descriptor */
        if (cur_index != start_index)
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);

        if (tso) {
                /* Enable TSO */
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TSE, 1);
                XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPPL,
                                  packet->tcp_payload_len);
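                /* TCPHDRLEN is presumably expressed in 32-bit words,
                 * hence the division of the header length by 4 below.
                 */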
1729 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, TCPHDRLEN,
1730 packet->tcp_header_len / 4);
1731
1732 pdata->ext_stats.tx_tso_packets += tx_packets;
1733 } else {
1734 /* Enable CRC and Pad Insertion */
1735 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CPC, 0);
1736
1737 /* Enable HW CSUM */
1738 if (csum)
1739 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1740 CIC, 0x3);
1741
1742 /* Set the total length to be transmitted */
1743 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, FL,
1744 packet->length);
1745 }
1746
1747 if (vxlan) {
1748 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, VNP,
1749 TX_NORMAL_DESC3_VXLAN_PACKET);
1750
1751 pdata->ext_stats.tx_vxlan_packets += packet->tx_packets;
1752 }
1753
1754 for (i = cur_index - start_index + 1; i < packet->rdesc_count; i++) {
1755 cur_index++;
1756 rdata = XGBE_GET_DESC_DATA(ring, cur_index);
1757 rdesc = rdata->rdesc;
1758
1759 /* Update buffer address */
1760 rdesc->desc0 = cpu_to_le32(lower_32_bits(rdata->skb_dma));
1761 rdesc->desc1 = cpu_to_le32(upper_32_bits(rdata->skb_dma));
1762
1763 /* Update the buffer length */
1764 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, HL_B1L,
1765 rdata->skb_dma_len);
1766
1767 /* Set OWN bit */
1768 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1769
1770 /* Mark it as NORMAL descriptor */
1771 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT, 0);
1772
1773 /* Enable HW CSUM */
1774 if (csum)
1775 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3,
1776 CIC, 0x3);
1777 }
1778
1779 /* Set LAST bit for the last descriptor */
1780 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD, 1);
1781
1782 /* Set IC bit based on Tx coalescing settings */
1783 if (tx_set_ic)
1784 XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
1785
1786 /* Save the Tx info to report back during cleanup */
1787 rdata->tx.packets = tx_packets;
1788 rdata->tx.bytes = tx_bytes;
1789
1790 pdata->ext_stats.txq_packets[channel->queue_index] += tx_packets;
1791 pdata->ext_stats.txq_bytes[channel->queue_index] += tx_bytes;
1792
1793 /* In case the Tx DMA engine is running, make sure everything
1794 * is written to the descriptor(s) before setting the OWN bit
1795 * for the first descriptor
1796 */
1797 dma_wmb();
1798
1799 /* Set OWN bit for the first descriptor */
1800 rdata = XGBE_GET_DESC_DATA(ring, start_index);
1801 rdesc = rdata->rdesc;
1802 XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
1803
1804 if (netif_msg_tx_queued(pdata))
1805 xgbe_dump_tx_desc(pdata, ring, start_index,
1806 packet->rdesc_count, 1);
1807
1808 /* Make sure ownership is written to the descriptor */
1809 smp_wmb();
1810
1811 ring->cur = cur_index + 1;
1812 if (!netdev_xmit_more() ||
1813 netif_xmit_stopped(netdev_get_tx_queue(pdata->netdev,
1814 channel->queue_index)))
1815 xgbe_tx_start_xmit(channel, ring);
1816 else
1817 ring->tx.xmit_more = 1;
1818
1819 DBGPR(" %s: descriptors %u to %u written\n",
1820 channel->name, start_index & (ring->rdesc_count - 1),
1821 (ring->cur - 1) & (ring->rdesc_count - 1));
1822
1823 DBGPR("<--xgbe_dev_xmit\n");
1824 }
1825
1826 static int xgbe_dev_read(struct xgbe_channel *channel)

1827 {
1828 struct xgbe_prv_data *pdata = channel->pdata;
1829 struct xgbe_ring *ring = channel->rx_ring;
1830 struct xgbe_ring_data *rdata;
1831 struct xgbe_ring_desc *rdesc;
1832 struct xgbe_packet_data *packet = &ring->packet_data;
1833 struct net_device *netdev = pdata->netdev;
1834 unsigned int err, etlt, l34t;
1835
1836 DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
1837
1838 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1839 rdesc = rdata->rdesc;
1840
1841 /* Check for data availability */
1842 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
1843 return 1;
1844
1845 /* Make sure descriptor fields are read after reading the OWN bit */
1846 dma_rmb();
1847
1848 if (netif_msg_rx_status(pdata))
1849 xgbe_dump_rx_desc(pdata, ring, ring->cur);
1850
1851 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
1852 /* Timestamp Context Descriptor */
1853 xgbe_get_rx_tstamp(packet, rdesc);
1854
1855 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1856 CONTEXT, 1);
1857 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1858 CONTEXT_NEXT, 0);
1859 return 0;
1860 }
1861
1862 /* Normal Descriptor, be sure Context Descriptor bit is off */
1863 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
1864
1865 /* Indicate if a Context Descriptor is next */
1866 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
1867 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1868 CONTEXT_NEXT, 1);
1869
1870 /* Get the header length */
1871 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
1872 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1873 FIRST, 1);
1874 rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
1875 RX_NORMAL_DESC2, HL);
1876 if (rdata->rx.hdr_len)
1877 pdata->ext_stats.rx_split_header_packets++;
1878 } else {
1879 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1880 FIRST, 0);
1881 }
1882
1883 /* Get the RSS hash */
1884 if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
1885 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1886 RSS_HASH, 1);
1887
1888 packet->rss_hash = le32_to_cpu(rdesc->desc1);
1889
1890 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
1891 switch (l34t) {
1892 case RX_DESC3_L34T_IPV4_TCP:
1893 case RX_DESC3_L34T_IPV4_UDP:
1894 case RX_DESC3_L34T_IPV6_TCP:
1895 case RX_DESC3_L34T_IPV6_UDP:
1896 packet->rss_hash_type = PKT_HASH_TYPE_L4;
1897 break;
1898 default:
1899 packet->rss_hash_type = PKT_HASH_TYPE_L3;
1900 }
1901 }
1902
1903 /* Not all the data has been transferred for this packet */
1904 if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD))
1905 return 0;
1906
1907 /* This is the last of the data for this packet */
1908 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1909 LAST, 1);
1910
1911 /* Get the packet length */
1912 rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
1913
1914 /* Set checksum done indicator as appropriate */
1915 if (netdev->features & NETIF_F_RXCSUM) {
1916 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1917 CSUM_DONE, 1);
1918 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1919 TNPCSUM_DONE, 1);
1920 }
1921
1922 /* Set the tunneled packet indicator */
1923 if (XGMAC_GET_BITS_LE(rdesc->desc2, RX_NORMAL_DESC2, TNP)) {
1924 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1925 TNP, 1);
1926 pdata->ext_stats.rx_vxlan_packets++;
1927
1928 l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
1929 switch (l34t) {
1930 case RX_DESC3_L34T_IPV4_UNKNOWN:
1931 case RX_DESC3_L34T_IPV6_UNKNOWN:
1932 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1933 TNPCSUM_DONE, 0);
1934 break;
1935 }
1936 }
1937
1938 /* Check for errors (only valid in last descriptor) */
1939 err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
1940 etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
1941 netif_dbg(pdata, rx_status, netdev, "err=%u, etlt=%#x\n", err, etlt);
1942
1943 if (!err || !etlt) {
1944 /* No error if err is 0 or etlt is 0 */
1945 if ((etlt == 0x09) &&
1946 (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
1947 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1948 VLAN_CTAG, 1);
1949 packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
1950 RX_NORMAL_DESC0,
1951 OVT);
1952 netif_dbg(pdata, rx_status, netdev, "vlan-ctag=%#06x\n",
1953 packet->vlan_ctag);
1954 }
1955 } else {
1956 unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
1957 RX_PACKET_ATTRIBUTES, TNP);
1958
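/* An etlt of 0x05 or 0x06 indicates an IP header or payload
 * checksum error; for tunneled packets, 0x09 or 0x0a indicates a
 * checksum error in the encapsulated frame.
 */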
1959 if ((etlt == 0x05) || (etlt == 0x06)) {
1960 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1961 CSUM_DONE, 0);
1962 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1963 TNPCSUM_DONE, 0);
1964 pdata->ext_stats.rx_csum_errors++;
1965 } else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
1966 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1967 CSUM_DONE, 0);
1968 XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
1969 TNPCSUM_DONE, 0);
1970 pdata->ext_stats.rx_vxlan_csum_errors++;
1971 } else {
1972 XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
1973 FRAME, 1);
1974 }
1975 }
1976
1977 pdata->ext_stats.rxq_packets[channel->queue_index]++;
1978 pdata->ext_stats.rxq_bytes[channel->queue_index] += rdata->rx.len;
1979
1980 DBGPR("<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n", channel->name,
1981 ring->cur & (ring->rdesc_count - 1), ring->cur);
1982
1983 return 0;
1984 }
1985
1986 static int xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
1987 {
1988 /* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
1989 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT);
1990 }
1991
1992 static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
1993 {
1994 /* Rx and Tx share LD bit, so check TDES3.LD bit */
1995 return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
1996 }
1997
1998 static int xgbe_enable_int(struct xgbe_channel *channel,
1999 enum xgbe_int int_id)
2000 {
2001 switch (int_id) {
2002 case XGMAC_INT_DMA_CH_SR_TI:
2003 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
2004 break;
2005 case XGMAC_INT_DMA_CH_SR_TPS:
2006 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
2007 break;
2008 case XGMAC_INT_DMA_CH_SR_TBU:
2009 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
2010 break;
2011 case XGMAC_INT_DMA_CH_SR_RI:
2012 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
2013 break;
2014 case XGMAC_INT_DMA_CH_SR_RBU:
2015 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
2016 break;
2017 case XGMAC_INT_DMA_CH_SR_RPS:
2018 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
2019 break;
2020 case XGMAC_INT_DMA_CH_SR_TI_RI:
2021 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
2022 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
2023 break;
2024 case XGMAC_INT_DMA_CH_SR_FBE:
2025 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
2026 break;
2027 case XGMAC_INT_DMA_ALL:
2028 channel->curr_ier |= channel->saved_ier;
2029 break;
2030 default:
2031 return -1;
2032 }
2033
2034 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
2035
2036 return 0;
2037 }
2038
2039 static int xgbe_disable_int(struct xgbe_channel *channel,
2040 enum xgbe_int int_id)
2041 {
2042 switch (int_id) {
2043 case XGMAC_INT_DMA_CH_SR_TI:
2044 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
2045 break;
2046 case XGMAC_INT_DMA_CH_SR_TPS:
2047 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
2048 break;
2049 case XGMAC_INT_DMA_CH_SR_TBU:
2050 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
2051 break;
2052 case XGMAC_INT_DMA_CH_SR_RI:
2053 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
2054 break;
2055 case XGMAC_INT_DMA_CH_SR_RBU:
2056 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
2057 break;
2058 case XGMAC_INT_DMA_CH_SR_RPS:
2059 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
2060 break;
2061 case XGMAC_INT_DMA_CH_SR_TI_RI:
2062 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
2063 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
2064 break;
2065 case XGMAC_INT_DMA_CH_SR_FBE:
2066 XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
2067 break;
2068 case XGMAC_INT_DMA_ALL:
2069 channel->saved_ier = channel->curr_ier;
2070 channel->curr_ier = 0;
2071 break;
2072 default:
2073 return -1;
2074 }
2075
2076 XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
2077
2078 return 0;
2079 }
2080
2081 static int __xgbe_exit(struct xgbe_prv_data *pdata)
2082 {
2083 unsigned int count = 2000;
2084
2085 DBGPR("-->xgbe_exit\n");
2086
2087 /* Issue a software reset */
2088 XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
2089 usleep_range(10, 15);
2090
2091 /* Poll until the software reset self-clears */
2092 while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
2093 usleep_range(500, 600);
2094
2095 if (!count)
2096 return -EBUSY;
2097
2098 DBGPR("<--xgbe_exit\n");
2099
2100 return 0;
2101 }
2102
2103 static int xgbe_exit(struct xgbe_prv_data *pdata)
2104 {
2105 int ret;
2106
2107 /* To guard against possible incorrectly generated interrupts,
2108 * issue the software reset twice.
2109 */
2110 ret = __xgbe_exit(pdata);
2111 if (ret)
2112 return ret;
2113
2114 return __xgbe_exit(pdata);
2115 }
2116
2117 static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
2118 {
2119 unsigned int i, count;
2120
2121 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
2122 return 0;
2123
2124 for (i = 0; i < pdata->tx_q_count; i++)
2125 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
2126
2127 /* Poll until the flush completes for each queue */
2128 for (i = 0; i < pdata->tx_q_count; i++) {
2129 count = 2000;
2130 while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
2131 MTL_Q_TQOMR, FTQ))
2132 usleep_range(500, 600);
2133
2134 if (!count)
2135 return -EBUSY;
2136 }
2137
2138 return 0;
2139 }
2140
2141 static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
2142 {
2143 unsigned int sbmr;
2144
2145 sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);
2146
2147 /* Set enhanced addressing mode */
2148 XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);
2149
2150 /* Set the System Bus mode */
2151 XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
2152 XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
2153 XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
2154 XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
2155 XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);
2156
2157 XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);
2158
2159 /* Set descriptor fetching threshold */
2160 if (pdata->vdata->tx_desc_prefetch)
2161 XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
2162 pdata->vdata->tx_desc_prefetch);
2163
2164 if (pdata->vdata->rx_desc_prefetch)
2165 XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
2166 pdata->vdata->rx_desc_prefetch);
2167 }
2168
2169 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
2170 {
2171 XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
2172 XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
2173 if (pdata->awarcr)
2174 XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
2175 }
2176
2177 static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
2178 {
2179 unsigned int i;
2180
2181 /* Set Tx to weighted round robin scheduling algorithm */
2182 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
2183
2184 /* Set Tx traffic classes to use WRR algorithm with equal weights */
2185 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2186 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2187 MTL_TSA_ETS);
2188 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
2189 }
2190
2191 /* Set Rx to strict priority algorithm */
2192 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
2193 }
2194
2195 static void xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
2196 unsigned int queue,
2197 unsigned int q_fifo_size)
2198 {
2199 unsigned int frame_fifo_size;
2200 unsigned int rfa, rfd;
2201
2202 frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
2203
2204 if (pdata->pfcq[queue] && (q_fifo_size > pdata->pfc_rfa)) {
2205 /* PFC is active for this queue */
2206 rfa = pdata->pfc_rfa;
2207 rfd = rfa + frame_fifo_size;
2208 if (rfd > XGMAC_FLOW_CONTROL_MAX)
2209 rfd = XGMAC_FLOW_CONTROL_MAX;
2210 if (rfa >= XGMAC_FLOW_CONTROL_MAX)
2211 rfa = XGMAC_FLOW_CONTROL_MAX - XGMAC_FLOW_CONTROL_UNIT;
2212 } else {
2213 /* This path deals with just maximum frame sizes which are
2214 * limited to a jumbo frame of 9,000 (plus headers, etc.)
2215 * so we can never exceed the maximum allowable RFA/RFD
2216 * values.
2217 */
2218 if (q_fifo_size <= 2048) {
2219 /* Set rx_rfa and rx_rfd to zero to signal no flow control */
2220 pdata->rx_rfa[queue] = 0;
2221 pdata->rx_rfd[queue] = 0;
2222 return;
2223 }
2224
2225 if (q_fifo_size <= 4096) {
2226 /* Between 2048 and 4096 */
2227 pdata->rx_rfa[queue] = 0; /* Full - 1024 bytes */
2228 pdata->rx_rfd[queue] = 1; /* Full - 1536 bytes */
2229 return;
2230 }
2231
2232 if (q_fifo_size <= frame_fifo_size) {
2233 /* Between 4096 and max-frame */
2234 pdata->rx_rfa[queue] = 2; /* Full - 2048 bytes */
2235 pdata->rx_rfd[queue] = 5; /* Full - 3584 bytes */
2236 return;
2237 }
2238
2239 if (q_fifo_size <= (frame_fifo_size * 3)) {
2240 /* Between max-frame and 3 max-frames,
2241 * trigger if we get just over a frame of data and
2242 * resume when we have just under half a frame left.
2243 */
2244 rfa = q_fifo_size - frame_fifo_size;
2245 rfd = rfa + (frame_fifo_size / 2);
2246 } else {
2247 /* Above 3 max-frames - trigger when just over
2248 * 2 frames of space available
2249 */
2250 rfa = frame_fifo_size * 2;
2251 rfa += XGMAC_FLOW_CONTROL_UNIT;
2252 rfd = rfa + frame_fifo_size;
2253 }
2254 }
2255
2256 pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
2257 pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
2258 }
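/* Worked example for the thresholds above (illustrative; the exact
 * unit and alignment constants are defined in xgbe.h): with a 9,000
 * byte MTU the max frame is 9,022 bytes, which aligns up to roughly
 * 9 KB of fifo assuming 512 byte flow-control units. A 32 KB queue
 * fifo holds more than three such frames, so flow control asserts
 * once free space drops below about two frames plus one unit and
 * de-asserts once roughly one further max frame of space is free.
 */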
2259
2260 static void xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
2261 unsigned int *fifo)
2262 {
2263 unsigned int q_fifo_size;
2264 unsigned int i;
2265
2266 for (i = 0; i < pdata->rx_q_count; i++) {
2267 q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;
2268
2269 xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
2270 }
2271 }
2272
2273 static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
2274 {
2275 unsigned int i;
2276
2277 for (i = 0; i < pdata->rx_q_count; i++) {
2278 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
2279 pdata->rx_rfa[i]);
2280 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
2281 pdata->rx_rfd[i]);
2282 }
2283 }
2284
2285 static unsigned int xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
2286 {
2287 /* The configured value may not be the actual amount of fifo RAM */
2288 return min_t(unsigned int, pdata->tx_max_fifo_size,
2289 pdata->hw_feat.tx_fifo_size);
2290 }
2291
2292 static unsigned int xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
2293 {
2294 /* The configured value may not be the actual amount of fifo RAM */
2295 return min_t(unsigned int, pdata->rx_max_fifo_size,
2296 pdata->hw_feat.rx_fifo_size);
2297 }
2298
2299 static void xgbe_calculate_equal_fifo(unsigned int fifo_size,
2300 unsigned int queue_count,
2301 unsigned int *fifo)
2302 {
2303 unsigned int q_fifo_size;
2304 unsigned int p_fifo;
2305 unsigned int i;
2306
2307 q_fifo_size = fifo_size / queue_count;
2308
2309 /* Calculate the fifo setting by dividing the queue's fifo size
2310 * by the fifo allocation increment (with 0 representing the
2311 * base allocation increment so decrement the result by 1).
2312 */
2313 p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
2314 if (p_fifo)
2315 p_fifo--;
2316
2317 /* Distribute the fifo equally amongst the queues */
2318 for (i = 0; i < queue_count; i++)
2319 fifo[i] = p_fifo;
2320 }
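/* For example (assuming a 256 byte XGMAC_FIFO_UNIT): a 64 KB fifo
 * shared equally by four queues gives 16 KB per queue, programmed
 * as (16384 / 256) - 1 = 63.
 */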
2321
2322 static unsigned int xgbe_set_nonprio_fifos(unsigned int fifo_size,
2323 unsigned int queue_count,
2324 unsigned int *fifo)
2325 {
2326 unsigned int i;
2327
2328 BUILD_BUG_ON_NOT_POWER_OF_2(XGMAC_FIFO_MIN_ALLOC);
2329
2330 if (queue_count <= IEEE_8021QAZ_MAX_TCS)
2331 return fifo_size;
2332
2333 /* Rx queues 9 and up are for specialized packets,
2334 * such as PTP or DCB control packets, etc. and
2335 * don't require a large fifo
2336 */
2337 for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
2338 fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
2339 fifo_size -= XGMAC_FIFO_MIN_ALLOC;
2340 }
2341
2342 return fifo_size;
2343 }
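/* For example, with 12 Rx queues the four queues beyond the eight
 * 802.1Qaz traffic classes each keep only the minimum allocation of
 * XGMAC_FIFO_MIN_ALLOC bytes, and the reduced fifo size is returned
 * for distribution across the eight priority queues.
 */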
2344
2345 static unsigned int xgbe_get_pfc_delay(struct xgbe_prv_data *pdata)
2346 {
2347 unsigned int delay;
2348
2349 /* If a delay has been provided, use that */
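/* (the delay is presumed to be specified in bit times, hence the
 * division by 8 below to convert to bytes)
 */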
2350 if (pdata->pfc->delay)
2351 return pdata->pfc->delay / 8;
2352
2353 /* Allow for two maximum size frames */
2354 delay = xgbe_get_max_frame(pdata);
2355 delay += XGMAC_ETH_PREAMBLE;
2356 delay *= 2;
2357
2358 /* Allow for PFC frame */
2359 delay += XGMAC_PFC_DATA_LEN;
2360 delay += ETH_HLEN + ETH_FCS_LEN;
2361 delay += XGMAC_ETH_PREAMBLE;
2362
2363 /* Allow for miscellaneous delays (LPI exit, cable, etc.) */
2364 delay += XGMAC_PFC_DELAYS;
2365
2366 return delay;
2367 }
2368
2369 static unsigned int xgbe_get_pfc_queues(struct xgbe_prv_data *pdata)
2370 {
2371 unsigned int count, prio_queues;
2372 unsigned int i;
2373
2374 if (!pdata->pfc->pfc_en)
2375 return 0;
2376
2377 count = 0;
2378 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2379 for (i = 0; i < prio_queues; i++) {
2380 if (!xgbe_is_pfc_queue(pdata, i))
2381 continue;
2382
2383 pdata->pfcq[i] = 1;
2384 count++;
2385 }
2386
2387 return count;
2388 }
2389
2390 static void xgbe_calculate_dcb_fifo(struct xgbe_prv_data *pdata,
2391 unsigned int fifo_size,
2392 unsigned int *fifo)
2393 {
2394 unsigned int q_fifo_size, rem_fifo, addn_fifo;
2395 unsigned int prio_queues;
2396 unsigned int pfc_count;
2397 unsigned int i;
2398
2399 q_fifo_size = XGMAC_FIFO_ALIGN(xgbe_get_max_frame(pdata));
2400 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2401 pfc_count = xgbe_get_pfc_queues(pdata);
2402
2403 if (!pfc_count || ((q_fifo_size * prio_queues) > fifo_size)) {
2404 /* No traffic classes with PFC enabled or can't do lossless */
2405 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2406 return;
2407 }
2408
2409 /* Calculate how much fifo we have to play with */
2410 rem_fifo = fifo_size - (q_fifo_size * prio_queues);
2411
2412 /* Calculate how much more than base fifo PFC needs, which also
2413 * becomes the threshold activation point (RFA)
2414 */
2415 pdata->pfc_rfa = xgbe_get_pfc_delay(pdata);
2416 pdata->pfc_rfa = XGMAC_FLOW_CONTROL_ALIGN(pdata->pfc_rfa);
2417
2418 if (pdata->pfc_rfa > q_fifo_size) {
2419 addn_fifo = pdata->pfc_rfa - q_fifo_size;
2420 addn_fifo = XGMAC_FIFO_ALIGN(addn_fifo);
2421 } else {
2422 addn_fifo = 0;
2423 }
2424
2425 /* Calculate DCB fifo settings:
2426 * - distribute remaining fifo between the VLAN priority
2427 * queues based on traffic class PFC enablement and overall
2428 * priority (0 is lowest priority, so start at highest)
2429 */
2430 i = prio_queues;
2431 while (i > 0) {
2432 i--;
2433
2434 fifo[i] = (q_fifo_size / XGMAC_FIFO_UNIT) - 1;
2435
2436 if (!pdata->pfcq[i] || !addn_fifo)
2437 continue;
2438
2439 if (addn_fifo > rem_fifo) {
2440 netdev_warn(pdata->netdev,
2441 "RXq%u cannot set needed fifo size\n", i);
2442 if (!rem_fifo)
2443 continue;
2444
2445 addn_fifo = rem_fifo;
2446 }
2447
2448 fifo[i] += (addn_fifo / XGMAC_FIFO_UNIT);
2449 rem_fifo -= addn_fifo;
2450 }
2451
2452 if (rem_fifo) {
2453 unsigned int inc_fifo = rem_fifo / prio_queues;
2454
2455 /* Distribute remaining fifo across queues */
2456 for (i = 0; i < prio_queues; i++)
2457 fifo[i] += (inc_fifo / XGMAC_FIFO_UNIT);
2458 }
2459 }
2460
2461 static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
2462 {
2463 unsigned int fifo_size;
2464 unsigned int fifo[XGBE_MAX_QUEUES];
2465 unsigned int i;
2466
2467 fifo_size = xgbe_get_tx_fifo_size(pdata);
2468
2469 xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);
2470
2471 for (i = 0; i < pdata->tx_q_count; i++)
2472 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
2473
2474 netif_info(pdata, drv, pdata->netdev,
2475 "%d Tx hardware queues, %d byte fifo per queue\n",
2476 pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2477 }
2478
2479 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
2480 {
2481 unsigned int fifo_size;
2482 unsigned int fifo[XGBE_MAX_QUEUES];
2483 unsigned int prio_queues;
2484 unsigned int i;
2485
2486 /* Clear any DCB related fifo/queue information */
2487 memset(pdata->pfcq, 0, sizeof(pdata->pfcq));
2488 pdata->pfc_rfa = 0;
2489
2490 fifo_size = xgbe_get_rx_fifo_size(pdata);
2491 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2492
2493 /* Assign a minimum fifo to the non-VLAN priority queues */
2494 fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);
2495
2496 if (pdata->pfc && pdata->ets)
2497 xgbe_calculate_dcb_fifo(pdata, fifo_size, fifo);
2498 else
2499 xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);
2500
2501 for (i = 0; i < pdata->rx_q_count; i++)
2502 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
2503
2504 xgbe_calculate_flow_control_threshold(pdata, fifo);
2505 xgbe_config_flow_control_threshold(pdata);
2506
2507 if (pdata->pfc && pdata->ets && pdata->pfc->pfc_en) {
2508 netif_info(pdata, drv, pdata->netdev,
2509 "%u Rx hardware queues\n", pdata->rx_q_count);
2510 for (i = 0; i < pdata->rx_q_count; i++)
2511 netif_info(pdata, drv, pdata->netdev,
2512 "RxQ%u, %u byte fifo queue\n", i,
2513 ((fifo[i] + 1) * XGMAC_FIFO_UNIT));
2514 } else {
2515 netif_info(pdata, drv, pdata->netdev,
2516 "%u Rx hardware queues, %u byte fifo per queue\n",
2517 pdata->rx_q_count,
2518 ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
2519 }
2520 }
2521
2522 static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
2523 {
2524 unsigned int qptc, qptc_extra, queue;
2525 unsigned int prio_queues;
2526 unsigned int ppq, ppq_extra, prio;
2527 unsigned int mask;
2528 unsigned int i, j, reg, reg_val;
2529
2530 /* Map the MTL Tx Queues to Traffic Classes
2531 * Note: Tx Queues >= Traffic Classes
2532 */
2533 qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
2534 qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
2535
2536 for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
2537 for (j = 0; j < qptc; j++) {
2538 netif_dbg(pdata, drv, pdata->netdev,
2539 "TXq%u mapped to TC%u\n", queue, i);
2540 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2541 Q2TCMAP, i);
2542 pdata->q2tc_map[queue++] = i;
2543 }
2544
2545 if (i < qptc_extra) {
2546 netif_dbg(pdata, drv, pdata->netdev,
2547 "TXq%u mapped to TC%u\n", queue, i);
2548 XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
2549 Q2TCMAP, i);
2550 pdata->q2tc_map[queue++] = i;
2551 }
2552 }
2553
2554 /* Map the 8 VLAN priority values to available MTL Rx queues */
2555 prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
2556 ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
2557 ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
2558
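/* Each Rx queue takes an 8-bit priority mask, with MAC_RQC2_Q_PER_REG
 * masks packed into each 32-bit register, hence the shift below by
 * (i % MAC_RQC2_Q_PER_REG) * 8.
 */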
2559 reg = MAC_RQC2R;
2560 reg_val = 0;
2561 for (i = 0, prio = 0; i < prio_queues;) {
2562 mask = 0;
2563 for (j = 0; j < ppq; j++) {
2564 netif_dbg(pdata, drv, pdata->netdev,
2565 "PRIO%u mapped to RXq%u\n", prio, i);
2566 mask |= (1 << prio);
2567 pdata->prio2q_map[prio++] = i;
2568 }
2569
2570 if (i < ppq_extra) {
2571 netif_dbg(pdata, drv, pdata->netdev,
2572 "PRIO%u mapped to RXq%u\n", prio, i);
2573 mask |= (1 << prio);
2574 pdata->prio2q_map[prio++] = i;
2575 }
2576
2577 reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
2578
2579 if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
2580 continue;
2581
2582 XGMAC_IOWRITE(pdata, reg, reg_val);
2583 reg += MAC_RQC2_INC;
2584 reg_val = 0;
2585 }
2586
2587 /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
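/* (writing 0x80 into each queue's byte of the MTL_RQDCM registers
 * sets the dynamic-mapping bit; MTL_RQDCM_Q_PER_REG queue bytes are
 * packed per register)
 */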
2588 reg = MTL_RQDCM0R;
2589 reg_val = 0;
2590 for (i = 0; i < pdata->rx_q_count;) {
2591 reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
2592
2593 if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
2594 continue;
2595
2596 XGMAC_IOWRITE(pdata, reg, reg_val);
2597
2598 reg += MTL_RQDCM_INC;
2599 reg_val = 0;
2600 }
2601 }
2602
2603 static void xgbe_config_tc(struct xgbe_prv_data *pdata)
2604 {
2605 unsigned int offset, queue, prio;
2606 u8 i;
2607
2608 netdev_reset_tc(pdata->netdev);
2609 if (!pdata->num_tcs)
2610 return;
2611
2612 netdev_set_num_tc(pdata->netdev, pdata->num_tcs);
2613
2614 for (i = 0, queue = 0, offset = 0; i < pdata->num_tcs; i++) {
2615 while ((queue < pdata->tx_q_count) &&
2616 (pdata->q2tc_map[queue] == i))
2617 queue++;
2618
2619 netif_dbg(pdata, drv, pdata->netdev, "TC%u using TXq%u-%u\n",
2620 i, offset, queue - 1);
2621 netdev_set_tc_queue(pdata->netdev, i, queue - offset, offset);
2622 offset = queue;
2623 }
2624
2625 if (!pdata->ets)
2626 return;
2627
2628 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++)
2629 netdev_set_prio_tc_map(pdata->netdev, prio,
2630 pdata->ets->prio_tc[prio]);
2631 }
2632
2633 static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
2634 {
2635 struct ieee_ets *ets = pdata->ets;
2636 unsigned int total_weight, min_weight, weight;
2637 unsigned int mask, reg, reg_val;
2638 unsigned int i, prio;
2639
2640 if (!ets)
2641 return;
2642
2643 /* Set Tx to deficit weighted round robin scheduling algorithm (when
2644 * traffic class is using ETS algorithm)
2645 */
2646 XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
2647
2648 /* Set Traffic Class algorithms */
2649 total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
2650 min_weight = total_weight / 100;
2651 if (!min_weight)
2652 min_weight = 1;
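/* Example: with a 1500 byte MTU and 4 traffic classes, total_weight
 * is 6000 and min_weight is 60; a class granted 25% of the bandwidth
 * gets a DWRR quantum of 1500.
 */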
2653
2654 for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
2655 /* Map the priorities to the traffic class */
2656 mask = 0;
2657 for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
2658 if (ets->prio_tc[prio] == i)
2659 mask |= (1 << prio);
2660 }
2661 mask &= 0xff;
2662
2663 netif_dbg(pdata, drv, pdata->netdev, "TC%u PRIO mask=%#x\n",
2664 i, mask);
2665 reg = MTL_TCPM0R + (MTL_TCPM_INC * (i / MTL_TCPM_TC_PER_REG));
2666 reg_val = XGMAC_IOREAD(pdata, reg);
2667
2668 reg_val &= ~(0xff << ((i % MTL_TCPM_TC_PER_REG) << 3));
2669 reg_val |= (mask << ((i % MTL_TCPM_TC_PER_REG) << 3));
2670
2671 XGMAC_IOWRITE(pdata, reg, reg_val);
2672
2673 /* Set the traffic class algorithm */
2674 switch (ets->tc_tsa[i]) {
2675 case IEEE_8021QAZ_TSA_STRICT:
2676 netif_dbg(pdata, drv, pdata->netdev,
2677 "TC%u using SP\n", i);
2678 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2679 MTL_TSA_SP);
2680 break;
2681 case IEEE_8021QAZ_TSA_ETS:
2682 weight = total_weight * ets->tc_tx_bw[i] / 100;
2683 weight = clamp(weight, min_weight, total_weight);
2684
2685 netif_dbg(pdata, drv, pdata->netdev,
2686 "TC%u using DWRR (weight %u)\n", i, weight);
2687 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
2688 MTL_TSA_ETS);
2689 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
2690 weight);
2691 break;
2692 }
2693 }
2694
2695 xgbe_config_tc(pdata);
2696 }
2697
2698 static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
2699 {
2700 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2701 /* Just stop the Tx queues while Rx fifo is changed */
2702 netif_tx_stop_all_queues(pdata->netdev);
2703
2704 /* Suspend Rx so that fifo's can be adjusted */
2705 pdata->hw_if.disable_rx(pdata);
2706 }
2707
2708 xgbe_config_rx_fifo_size(pdata);
2709 xgbe_config_flow_control(pdata);
2710
2711 if (!test_bit(XGBE_DOWN, &pdata->dev_state)) {
2712 /* Resume Rx */
2713 pdata->hw_if.enable_rx(pdata);
2714
2715 /* Resume Tx queues */
2716 netif_tx_start_all_queues(pdata->netdev);
2717 }
2718 }
2719
2720 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
2721 {
2722 xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
2723
2724 /* Filtering is done using perfect filtering and hash filtering */
2725 if (pdata->hw_feat.hash_table_size) {
2726 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
2727 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
2728 XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
2729 }
2730 }
2731
2732 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
2733 {
2734 unsigned int val;
2735
2736 if (pdata->netdev->mtu > XGMAC_JUMBO_PACKET_MTU) {
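/* Beyond the jumbo limit, program the giant packet size limit and
 * disable the Rx watchdog (WD) and Tx jabber (JD) timers so that
 * oversized frames are not truncated.
 */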
2737 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSL,
2738 XGMAC_GIANT_PACKET_MTU);
2739 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 1);
2740 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 1);
2741 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 1);
2742 } else {
2743 val = pdata->netdev->mtu > XGMAC_STD_PACKET_MTU ? 1 : 0;
2744 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, GPSLCE, 0);
2745 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, WD, 0);
2746 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, JD, 0);
2747 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
2748 }
2749 }
2750
2751 static void xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
2752 {
2753 xgbe_set_speed(pdata, pdata->phy_speed);
2754 }
2755
2756 static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
2757 {
2758 if (pdata->netdev->features & NETIF_F_RXCSUM)
2759 xgbe_enable_rx_csum(pdata);
2760 else
2761 xgbe_disable_rx_csum(pdata);
2762 }
2763
2764 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
2765 {
2766 /* Indicate that VLAN Tx CTAGs come from context descriptors */
2767 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
2768 XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
2769
2770 /* Set the current VLAN Hash Table register value */
2771 xgbe_update_vlan_hash_table(pdata);
2772
2773 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
2774 xgbe_enable_rx_vlan_filtering(pdata);
2775 else
2776 xgbe_disable_rx_vlan_filtering(pdata);
2777
2778 if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
2779 xgbe_enable_rx_vlan_stripping(pdata);
2780 else
2781 xgbe_disable_rx_vlan_stripping(pdata);
2782 }
2783
2784 static u64 xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
2785 {
2786 bool read_hi;
2787 u64 val;
2788
2789 if (pdata->vdata->mmc_64bit) {
2790 switch (reg_lo) {
2791 /* These registers are always 32 bit */
2792 case MMC_RXRUNTERROR:
2793 case MMC_RXJABBERERROR:
2794 case MMC_RXUNDERSIZE_G:
2795 case MMC_RXOVERSIZE_G:
2796 case MMC_RXWATCHDOGERROR:
2797 case MMC_RXALIGNMENTERROR:
2798 read_hi = false;
2799 break;
2800
2801 default:
2802 read_hi = true;
2803 }
2804 } else {
2805 switch (reg_lo) {
2806 /* These registers are always 64 bit */
2807 case MMC_TXOCTETCOUNT_GB_LO:
2808 case MMC_TXOCTETCOUNT_G_LO:
2809 case MMC_RXOCTETCOUNT_GB_LO:
2810 case MMC_RXOCTETCOUNT_G_LO:
2811 read_hi = true;
2812 break;
2813
2814 default:
2815 read_hi = false;
2816 }
2817 }
2818
2819 val = XGMAC_IOREAD(pdata, reg_lo);
2820
2821 if (read_hi)
2822 val |= ((u64)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);
2823
2824 return val;
2825 }
2826
2827 static void xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
2828 {
2829 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2830 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);
2831
2832 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
2833 stats->txoctetcount_gb +=
2834 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
2835
2836 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
2837 stats->txframecount_gb +=
2838 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
2839
2840 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
2841 stats->txbroadcastframes_g +=
2842 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
2843
2844 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
2845 stats->txmulticastframes_g +=
2846 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
2847
2848 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
2849 stats->tx64octets_gb +=
2850 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
2851
2852 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
2853 stats->tx65to127octets_gb +=
2854 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
2855
2856 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
2857 stats->tx128to255octets_gb +=
2858 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
2859
2860 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
2861 stats->tx256to511octets_gb +=
2862 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
2863
2864 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
2865 stats->tx512to1023octets_gb +=
2866 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
2867
2868 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
2869 stats->tx1024tomaxoctets_gb +=
2870 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
2871
2872 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
2873 stats->txunicastframes_gb +=
2874 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
2875
2876 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
2877 stats->txmulticastframes_gb +=
2878 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
2879
2880 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
2881 stats->txbroadcastframes_g +=
2882 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
2883
2884 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
2885 stats->txunderflowerror +=
2886 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
2887
2888 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
2889 stats->txoctetcount_g +=
2890 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
2891
2892 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
2893 stats->txframecount_g +=
2894 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
2895
2896 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
2897 stats->txpauseframes +=
2898 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
2899
2900 if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
2901 stats->txvlanframes_g +=
2902 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
2903 }
2904
2905 static void xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
2906 {
2907 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2908 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);
2909
2910 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
2911 stats->rxframecount_gb +=
2912 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
2913
2914 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
2915 stats->rxoctetcount_gb +=
2916 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
2917
2918 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
2919 stats->rxoctetcount_g +=
2920 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
2921
2922 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
2923 stats->rxbroadcastframes_g +=
2924 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
2925
2926 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
2927 stats->rxmulticastframes_g +=
2928 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
2929
2930 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
2931 stats->rxcrcerror +=
2932 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
2933
2934 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
2935 stats->rxrunterror +=
2936 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
2937
2938 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
2939 stats->rxjabbererror +=
2940 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
2941
2942 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
2943 stats->rxundersize_g +=
2944 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
2945
2946 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
2947 stats->rxoversize_g +=
2948 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
2949
2950 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
2951 stats->rx64octets_gb +=
2952 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
2953
2954 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
2955 stats->rx65to127octets_gb +=
2956 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
2957
2958 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
2959 stats->rx128to255octets_gb +=
2960 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
2961
2962 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
2963 stats->rx256to511octets_gb +=
2964 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
2965
2966 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
2967 stats->rx512to1023octets_gb +=
2968 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
2969
2970 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
2971 stats->rx1024tomaxoctets_gb +=
2972 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
2973
2974 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
2975 stats->rxunicastframes_g +=
2976 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
2977
2978 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
2979 stats->rxlengtherror +=
2980 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
2981
2982 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
2983 stats->rxoutofrangetype +=
2984 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
2985
2986 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
2987 stats->rxpauseframes +=
2988 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
2989
2990 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
2991 stats->rxfifooverflow +=
2992 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
2993
2994 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
2995 stats->rxvlanframes_gb +=
2996 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
2997
2998 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
2999 stats->rxwatchdogerror +=
3000 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
3001
3002 if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXALIGNMENTERROR))
3003 stats->rxalignmenterror +=
3004 xgbe_mmc_read(pdata, MMC_RXALIGNMENTERROR);
3005 }
3006
3007 static void xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
3008 {
3009 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
3010
3011 /* Freeze counters */
3012 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);
3013
3014 stats->txoctetcount_gb +=
3015 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);
3016
3017 stats->txframecount_gb +=
3018 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);
3019
3020 stats->txbroadcastframes_g +=
3021 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);
3022
3023 stats->txmulticastframes_g +=
3024 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);
3025
3026 stats->tx64octets_gb +=
3027 xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);
3028
3029 stats->tx65to127octets_gb +=
3030 xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);
3031
3032 stats->tx128to255octets_gb +=
3033 xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);
3034
3035 stats->tx256to511octets_gb +=
3036 xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);
3037
3038 stats->tx512to1023octets_gb +=
3039 xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);
3040
3041 stats->tx1024tomaxoctets_gb +=
3042 xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);
3043
3044 stats->txunicastframes_gb +=
3045 xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);
3046
3047 stats->txmulticastframes_gb +=
3048 xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);
3049
3050 stats->txbroadcastframes_g +=
3051 xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);
3052
3053 stats->txunderflowerror +=
3054 xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);
3055
3056 stats->txoctetcount_g +=
3057 xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);
3058
3059 stats->txframecount_g +=
3060 xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);
3061
3062 stats->txpauseframes +=
3063 xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);
3064
3065 stats->txvlanframes_g +=
3066 xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
3067
3068 stats->rxframecount_gb +=
3069 xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);
3070
3071 stats->rxoctetcount_gb +=
3072 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);
3073
3074 stats->rxoctetcount_g +=
3075 xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);
3076
3077 stats->rxbroadcastframes_g +=
3078 xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);
3079
3080 stats->rxmulticastframes_g +=
3081 xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);
3082
3083 stats->rxcrcerror +=
3084 xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);
3085
3086 stats->rxrunterror +=
3087 xgbe_mmc_read(pdata, MMC_RXRUNTERROR);
3088
3089 stats->rxjabbererror +=
3090 xgbe_mmc_read(pdata, MMC_RXJABBERERROR);
3091
3092 stats->rxundersize_g +=
3093 xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);
3094
3095 stats->rxoversize_g +=
3096 xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);
3097
3098 stats->rx64octets_gb +=
3099 xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);
3100
3101 stats->rx65to127octets_gb +=
3102 xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);
3103
3104 stats->rx128to255octets_gb +=
3105 xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);
3106
3107 stats->rx256to511octets_gb +=
3108 xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);
3109
3110 stats->rx512to1023octets_gb +=
3111 xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);
3112
3113 stats->rx1024tomaxoctets_gb +=
3114 xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);
3115
3116 stats->rxunicastframes_g +=
3117 xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);
3118
3119 stats->rxlengtherror +=
3120 xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);
3121
3122 stats->rxoutofrangetype +=
3123 xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);
3124
3125 stats->rxpauseframes +=
3126 xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);
3127
3128 stats->rxfifooverflow +=
3129 xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);
3130
3131 stats->rxvlanframes_gb +=
3132 xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);
3133
3134 stats->rxwatchdogerror +=
3135 xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
3136
3137 stats->rxalignmenterror +=
3138 xgbe_mmc_read(pdata, MMC_RXALIGNMENTERROR);
3139
3140 /* Un-freeze counters */
3141 XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
3142 }
3143
3144 static void xgbe_config_mmc(struct xgbe_prv_data *pdata)
3145 {
3146 /* Set counters to reset on read */
3147 XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);
3148
3149 /* Reset the counters */
3150 XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
3151 }
3152
3153 static void xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata,
3154 unsigned int queue)
3155 {
3156 unsigned int tx_status;
3157 unsigned long tx_timeout;
3158
3159 /* The Tx engine cannot be stopped if it is actively processing
3160 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
3161 * wait forever though...
3162 */
3163 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3164 while (time_before(jiffies, tx_timeout)) {
3165 tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
3166 if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
3167 (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
3168 break;
3169
3170 usleep_range(500, 1000);
3171 }
3172
3173 if (!time_before(jiffies, tx_timeout))
3174 netdev_info(pdata->netdev,
3175 "timed out waiting for Tx queue %u to empty\n",
3176 queue);
3177 }
3178
3179 static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
3180 unsigned int queue)
3181 {
3182 unsigned int tx_dsr, tx_pos, tx_qidx;
3183 unsigned int tx_status;
3184 unsigned long tx_timeout;
3185
3186 if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
3187 return xgbe_txq_prepare_tx_stop(pdata, queue);
3188
3189 /* Calculate the status register to read and the position within */
3190 if (queue < DMA_DSRX_FIRST_QUEUE) {
3191 tx_dsr = DMA_DSR0;
3192 tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
3193 } else {
3194 tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;
3195
3196 tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
3197 tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
3198 DMA_DSRX_TPS_START;
3199 }
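/* Queues below DMA_DSRX_FIRST_QUEUE are reported in DMA_DSR0; the
 * remaining queues are packed DMA_DSRX_QPR per register starting at
 * DMA_DSR1, each status field occupying DMA_DSR_Q_WIDTH bits.
 */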
3200
3201 /* The Tx engine cannot be stopped if it is actively processing
3202 * descriptors. Wait for the Tx engine to enter the stopped or
3203 * suspended state. Don't wait forever though...
3204 */
3205 tx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3206 while (time_before(jiffies, tx_timeout)) {
3207 tx_status = XGMAC_IOREAD(pdata, tx_dsr);
3208 tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
3209 if ((tx_status == DMA_TPS_STOPPED) ||
3210 (tx_status == DMA_TPS_SUSPENDED))
3211 break;
3212
3213 usleep_range(500, 1000);
3214 }
3215
3216 if (!time_before(jiffies, tx_timeout))
3217 netdev_info(pdata->netdev,
3218 "timed out waiting for Tx DMA channel %u to stop\n",
3219 queue);
3220 }
3221
3222 static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
3223 {
3224 unsigned int i;
3225
3226 /* Enable each Tx DMA channel */
3227 for (i = 0; i < pdata->channel_count; i++) {
3228 if (!pdata->channel[i]->tx_ring)
3229 break;
3230
3231 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
3232 }
3233
3234 /* Enable each Tx queue */
3235 for (i = 0; i < pdata->tx_q_count; i++)
3236 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
3237 MTL_Q_ENABLED);
3238
3239 /* Enable MAC Tx */
3240 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3241 }
3242
3243 static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
3244 {
3245 unsigned int i;
3246
3247 /* Prepare for Tx DMA channel stop */
3248 for (i = 0; i < pdata->tx_q_count; i++)
3249 xgbe_prepare_tx_stop(pdata, i);
3250
3251 /* Disable MAC Tx */
3252 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3253
3254 /* Disable each Tx queue */
3255 for (i = 0; i < pdata->tx_q_count; i++)
3256 XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
3257
3258 /* Disable each Tx DMA channel */
3259 for (i = 0; i < pdata->channel_count; i++) {
3260 if (!pdata->channel[i]->tx_ring)
3261 break;
3262
3263 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
3264 }
3265 }
3266
3267 static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
3268 unsigned int queue)
3269 {
3270 unsigned int rx_status;
3271 unsigned long rx_timeout;
3272
3273 /* The Rx engine cannot be stopped if it is actively processing
3274 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
3275 * wait forever though...
3276 */
3277 rx_timeout = jiffies + (XGBE_DMA_STOP_TIMEOUT * HZ);
3278 while (time_before(jiffies, rx_timeout)) {
3279 rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
3280 if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
3281 (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
3282 break;
3283
3284 usleep_range(500, 1000);
3285 }
3286
3287 if (!time_before(jiffies, rx_timeout))
3288 netdev_info(pdata->netdev,
3289 "timed out waiting for Rx queue %u to empty\n",
3290 queue);
3291 }
3292
3293 static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
3294 {
3295 unsigned int reg_val, i;
3296
3297 /* Enable each Rx DMA channel */
3298 for (i = 0; i < pdata->channel_count; i++) {
3299 if (!pdata->channel[i]->rx_ring)
3300 break;
3301
3302 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
3303 }
3304
3305 /* Enable each Rx queue */
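/* (each queue has a 2-bit enable field in MAC_RQC0R; the value 0x02
 * enables the queue for generic/DCB traffic)
 */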
3306 reg_val = 0;
3307 for (i = 0; i < pdata->rx_q_count; i++)
3308 reg_val |= (0x02 << (i << 1));
3309 XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
3310
3311 /* Enable MAC Rx */
3312 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
3313 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
3314 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
3315 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
3316 }
3317
3318 static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
3319 {
3320 unsigned int i;
3321
3322 /* Disable MAC Rx */
3323 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
3324 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
3325 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
3326 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);
3327
3328 /* Prepare for Rx DMA channel stop */
3329 for (i = 0; i < pdata->rx_q_count; i++)
3330 xgbe_prepare_rx_stop(pdata, i);
3331
3332 /* Disable each Rx queue */
3333 XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
3334
3335 /* Disable each Rx DMA channel */
3336 for (i = 0; i < pdata->channel_count; i++) {
3337 if (!pdata->channel[i]->rx_ring)
3338 break;
3339
3340 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
3341 }
3342 }
3343
3344 static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
3345 {
3346 unsigned int i;
3347
3348 /* Enable each Tx DMA channel */
3349 for (i = 0; i < pdata->channel_count; i++) {
3350 if (!pdata->channel[i]->tx_ring)
3351 break;
3352
3353 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
3354 }
3355
3356 /* Enable MAC Tx */
3357 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
3358 }
3359
3360 static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
3361 {
3362 unsigned int i;
3363
3364 /* Prepare for Tx DMA channel stop */
3365 for (i = 0; i < pdata->tx_q_count; i++)
3366 xgbe_prepare_tx_stop(pdata, i);
3367
3368 /* Disable MAC Tx */
3369 XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
3370
3371 /* Disable each Tx DMA channel */
3372 for (i = 0; i < pdata->channel_count; i++) {
3373 if (!pdata->channel[i]->tx_ring)
3374 break;
3375
3376 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
3377 }
3378 }
3379
3380 static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
3381 {
3382 unsigned int i;
3383
3384 /* Enable each Rx DMA channel */
3385 for (i = 0; i < pdata->channel_count; i++) {
3386 if (!pdata->channel[i]->rx_ring)
3387 break;
3388
3389 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
3390 }
3391 }
3392
3393 static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
3394 {
3395 unsigned int i;
3396
3397 /* Disable each Rx DMA channel */
3398 for (i = 0; i < pdata->channel_count; i++) {
3399 if (!pdata->channel[i]->rx_ring)
3400 break;
3401
3402 XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
3403 }
3404 }
3405
3406 static int xgbe_init(struct xgbe_prv_data *pdata)
3407 {
3408 struct xgbe_desc_if *desc_if = &pdata->desc_if;
3409 int ret;
3410
3411 DBGPR("-->xgbe_init\n");
3412
3413 /* Flush Tx queues */
3414 ret = xgbe_flush_tx_queues(pdata);
3415 if (ret) {
3416 netdev_err(pdata->netdev, "error flushing TX queues\n");
3417 return ret;
3418 }
3419
3420 /*
3421 * Initialize DMA related features
3422 */
3423 xgbe_config_dma_bus(pdata);
3424 xgbe_config_dma_cache(pdata);
3425 xgbe_config_osp_mode(pdata);
3426 xgbe_config_pbl_val(pdata);
3427 xgbe_config_rx_coalesce(pdata);
3428 xgbe_config_tx_coalesce(pdata);
3429 xgbe_config_rx_buffer_size(pdata);
3430 xgbe_config_tso_mode(pdata);
3431
3432 if (pdata->netdev->features & NETIF_F_RXCSUM) {
3433 xgbe_config_sph_mode(pdata);
3434 xgbe_config_rss(pdata);
3435 }
3436
3437 desc_if->wrapper_tx_desc_init(pdata);
3438 desc_if->wrapper_rx_desc_init(pdata);
3439 xgbe_enable_dma_interrupts(pdata);
3440
3441 /*
3442 * Initialize MTL related features
3443 */
3444 xgbe_config_mtl_mode(pdata);
3445 xgbe_config_queue_mapping(pdata);
3446 xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
3447 xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
3448 xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
3449 xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
3450 xgbe_config_tx_fifo_size(pdata);
3451 xgbe_config_rx_fifo_size(pdata);
3452 /* TODO: Error Packet and undersized good Packet forwarding enable
3453  * (FEP and FUP)
3454  */
3455 xgbe_config_dcb_tc(pdata);
3456 xgbe_enable_mtl_interrupts(pdata);
3457
3458 /*
3459 * Initialize MAC related features
3460 */
3461 xgbe_config_mac_address(pdata);
3462 xgbe_config_rx_mode(pdata);
3463 xgbe_config_jumbo_enable(pdata);
3464 xgbe_config_flow_control(pdata);
3465 xgbe_config_mac_speed(pdata);
3466 xgbe_config_checksum_offload(pdata);
3467 xgbe_config_vlan_support(pdata);
3468 xgbe_config_mmc(pdata);
3469 xgbe_enable_mac_interrupts(pdata);
3470
3471 /*
3472 * Initialize ECC related features
3473 */
3474 xgbe_enable_ecc_interrupts(pdata);
3475
3476 DBGPR("<--xgbe_init\n");
3477
3478 return 0;
3479 }
3480
3481 void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
3482 {
3483 DBGPR("-->xgbe_init_function_ptrs\n");
3484
3485 hw_if->tx_complete = xgbe_tx_complete;
3486
3487 hw_if->set_mac_address = xgbe_set_mac_address;
3488 hw_if->config_rx_mode = xgbe_config_rx_mode;
3489
3490 hw_if->enable_rx_csum = xgbe_enable_rx_csum;
3491 hw_if->disable_rx_csum = xgbe_disable_rx_csum;
3492
3493 hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
3494 hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
3495 hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
3496 hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
3497 hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
3498
3499 hw_if->read_mmd_regs = xgbe_read_mmd_regs;
3500 hw_if->write_mmd_regs = xgbe_write_mmd_regs;
3501
3502 hw_if->set_speed = xgbe_set_speed;
3503
3504 hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
3505 hw_if->read_ext_mii_regs_c22 = xgbe_read_ext_mii_regs_c22;
3506 hw_if->write_ext_mii_regs_c22 = xgbe_write_ext_mii_regs_c22;
3507 hw_if->read_ext_mii_regs_c45 = xgbe_read_ext_mii_regs_c45;
3508 hw_if->write_ext_mii_regs_c45 = xgbe_write_ext_mii_regs_c45;
3509
3510 hw_if->set_gpio = xgbe_set_gpio;
3511 hw_if->clr_gpio = xgbe_clr_gpio;
3512
3513 hw_if->enable_tx = xgbe_enable_tx;
3514 hw_if->disable_tx = xgbe_disable_tx;
3515 hw_if->enable_rx = xgbe_enable_rx;
3516 hw_if->disable_rx = xgbe_disable_rx;
3517
3518 hw_if->powerup_tx = xgbe_powerup_tx;
3519 hw_if->powerdown_tx = xgbe_powerdown_tx;
3520 hw_if->powerup_rx = xgbe_powerup_rx;
3521 hw_if->powerdown_rx = xgbe_powerdown_rx;
3522
3523 hw_if->dev_xmit = xgbe_dev_xmit;
3524 hw_if->dev_read = xgbe_dev_read;
3525 hw_if->enable_int = xgbe_enable_int;
3526 hw_if->disable_int = xgbe_disable_int;
3527 hw_if->init = xgbe_init;
3528 hw_if->exit = xgbe_exit;
3529
3530 /* Descriptor related Sequences have to be initialized here */
3531 hw_if->tx_desc_init = xgbe_tx_desc_init;
3532 hw_if->rx_desc_init = xgbe_rx_desc_init;
3533 hw_if->tx_desc_reset = xgbe_tx_desc_reset;
3534 hw_if->rx_desc_reset = xgbe_rx_desc_reset;
3535 hw_if->is_last_desc = xgbe_is_last_desc;
3536 hw_if->is_context_desc = xgbe_is_context_desc;
3537 hw_if->tx_start_xmit = xgbe_tx_start_xmit;
3538
3539 /* For FLOW ctrl */
3540 hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
3541 hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;
3542
3543 /* For RX coalescing */
3544 hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
3545 hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
3546 hw_if->usec_to_riwt = xgbe_usec_to_riwt;
3547 hw_if->riwt_to_usec = xgbe_riwt_to_usec;
3548
3549 /* For RX and TX threshold config */
3550 hw_if->config_rx_threshold = xgbe_config_rx_threshold;
3551 hw_if->config_tx_threshold = xgbe_config_tx_threshold;
3552
3553 /* For RX and TX Store and Forward Mode config */
3554 hw_if->config_rsf_mode = xgbe_config_rsf_mode;
3555 hw_if->config_tsf_mode = xgbe_config_tsf_mode;
3556
3557 /* For TX DMA Operating on Second Frame config */
3558 hw_if->config_osp_mode = xgbe_config_osp_mode;
3559
3560 /* For MMC statistics support */
3561 hw_if->tx_mmc_int = xgbe_tx_mmc_int;
3562 hw_if->rx_mmc_int = xgbe_rx_mmc_int;
3563 hw_if->read_mmc_stats = xgbe_read_mmc_stats;
3564
3565 /* For Data Center Bridging config */
3566 hw_if->config_tc = xgbe_config_tc;
3567 hw_if->config_dcb_tc = xgbe_config_dcb_tc;
3568 hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
3569
3570 /* For Receive Side Scaling */
3571 hw_if->enable_rss = xgbe_enable_rss;
3572 hw_if->disable_rss = xgbe_disable_rss;
3573 hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
3574 hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;
3575
3576 /* For ECC */
3577 hw_if->disable_ecc_ded = xgbe_disable_ecc_ded;
3578 hw_if->disable_ecc_sec = xgbe_disable_ecc_sec;
3579
3580 /* For VXLAN */
3581 hw_if->enable_vxlan = xgbe_enable_vxlan;
3582 hw_if->disable_vxlan = xgbe_disable_vxlan;
3583 hw_if->set_vxlan_id = xgbe_set_vxlan_id;
3584
3585 /* For Split Header */
3586 hw_if->enable_sph = xgbe_config_sph_mode;
3587 hw_if->disable_sph = xgbe_disable_sph_mode;
3588
3589 DBGPR("<--xgbe_init_function_ptrs\n");
3590 }
3591
3592 int xgbe_enable_mac_loopback(struct xgbe_prv_data *pdata)
3593 {
3594 /* Enable MAC loopback mode */
3595 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 1);
3596
3597 /* Wait for loopback to stabilize */
3598 usleep_range(10, 15);
3599
3600 return 0;
3601 }
3602
3603 void xgbe_disable_mac_loopback(struct xgbe_prv_data *pdata)
3604 {
3605 /* Disable MAC loopback mode */
3606 XGMAC_IOWRITE_BITS(pdata, MAC_RCR, LM, 0);
3607 }
3608