1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/prefetch.h>
33 #include <linux/pinctrl/consumer.h>
34 #ifdef CONFIG_DEBUG_FS
35 #include <linux/debugfs.h>
36 #include <linux/seq_file.h>
37 #endif /* CONFIG_DEBUG_FS */
38 #include <linux/net_tstamp.h>
39 #include <linux/phylink.h>
40 #include <linux/udp.h>
41 #include <linux/bpf_trace.h>
42 #include <net/page_pool/helpers.h>
43 #include <net/pkt_cls.h>
44 #include <net/xdp_sock_drv.h>
45 #include "stmmac_ptp.h"
46 #include "stmmac.h"
47 #include "stmmac_xdp.h"
48 #include <linux/reset.h>
49 #include <linux/of_mdio.h>
50 #include "dwmac1000.h"
51 #include "dwxgmac2.h"
52 #include "hwif.h"
53
54 /* As long as the interface is active, we keep the timestamping counter enabled
55 * with fine resolution and binary rollover. This avoids non-monotonic behavior
56 * (clock jumps) when changing timestamping settings at runtime.
57 */
58 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCFUPDT | \
59 PTP_TCR_TSCTRLSSR)
60
61 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
63
64 /* Module parameters */
65 #define TX_TIMEO 5000
66 static int watchdog = TX_TIMEO;
67 module_param(watchdog, int, 0644);
68 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
69
70 static int debug = -1;
71 module_param(debug, int, 0644);
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
73
74 static int phyaddr = -1;
75 module_param(phyaddr, int, 0444);
76 MODULE_PARM_DESC(phyaddr, "Physical device address");
77
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
80
81 /* Limit to make sure XDP TX and slow path can coexist */
82 #define STMMAC_XSK_TX_BUDGET_MAX 256
83 #define STMMAC_TX_XSK_AVAIL 16
84 #define STMMAC_RX_FILL_BATCH 16
85
86 #define STMMAC_XDP_PASS 0
87 #define STMMAC_XDP_CONSUMED BIT(0)
88 #define STMMAC_XDP_TX BIT(1)
89 #define STMMAC_XDP_REDIRECT BIT(2)
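/* Verdict flags accumulated while running an XDP program on the RX path;
 * they record whether a buffer was passed to the stack, consumed, sent back
 * out on a TX queue, or redirected to another device/AF_XDP socket.
 */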
90
91 static int flow_ctrl = FLOW_AUTO;
92 module_param(flow_ctrl, int, 0644);
93 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off]");
94
95 static int pause = PAUSE_TIME;
96 module_param(pause, int, 0644);
97 MODULE_PARM_DESC(pause, "Flow Control Pause Time");
98
99 #define TC_DEFAULT 64
100 static int tc = TC_DEFAULT;
101 module_param(tc, int, 0644);
102 MODULE_PARM_DESC(tc, "DMA threshold control value");
103
104 #define DEFAULT_BUFSIZE 1536
105 static int buf_sz = DEFAULT_BUFSIZE;
106 module_param(buf_sz, int, 0644);
107 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
108
109 #define STMMAC_RX_COPYBREAK 256
110
111 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
112 NETIF_MSG_LINK | NETIF_MSG_IFUP |
113 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
114
115 #define STMMAC_DEFAULT_LPI_TIMER 1000
116 static int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
117 module_param(eee_timer, int, 0644);
118 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
119 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
120
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
122 * but allows the user to force use of chain mode instead of ring mode
123 */
124 static unsigned int chain_mode;
125 module_param(chain_mode, int, 0444);
126 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
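/* The module parameters above can be overridden at load time, e.g.
 * (assuming the usual stmmac.ko module name; values are purely
 * illustrative):
 *   modprobe stmmac buf_sz=4096 eee_timer=2000 chain_mode=1
 * or, when built in, via stmmac.buf_sz=4096 on the kernel command line.
 */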
127
128 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
129 /* For MSI interrupts handling */
130 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
131 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
132 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
133 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
136 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
139 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
140 u32 rxmode, u32 chan);
141
142 #ifdef CONFIG_DEBUG_FS
143 static const struct net_device_ops stmmac_netdev_ops;
144 static void stmmac_init_fs(struct net_device *dev);
145 static void stmmac_exit_fs(struct net_device *dev);
146 #endif
147
148 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
149
150 int stmmac_bus_clks_config(struct stmmac_priv *priv, bool enabled)
151 {
152 int ret = 0;
153
154 if (enabled) {
155 ret = clk_prepare_enable(priv->plat->stmmac_clk);
156 if (ret)
157 return ret;
158 ret = clk_prepare_enable(priv->plat->pclk);
159 if (ret) {
160 clk_disable_unprepare(priv->plat->stmmac_clk);
161 return ret;
162 }
163 if (priv->plat->clks_config) {
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled);
165 if (ret) {
166 clk_disable_unprepare(priv->plat->stmmac_clk);
167 clk_disable_unprepare(priv->plat->pclk);
168 return ret;
169 }
170 }
171 } else {
172 clk_disable_unprepare(priv->plat->stmmac_clk);
173 clk_disable_unprepare(priv->plat->pclk);
174 if (priv->plat->clks_config)
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled);
176 }
177
178 return ret;
179 }
180 EXPORT_SYMBOL_GPL(stmmac_bus_clks_config);
181
182 /**
183 * stmmac_verify_args - verify the driver parameters.
184 * Description: it checks the driver parameters and sets a default in case of
185 * errors.
186 */
187 static void stmmac_verify_args(void)
188 {
189 if (unlikely(watchdog < 0))
190 watchdog = TX_TIMEO;
191 if (unlikely((buf_sz < DEFAULT_BUFSIZE) || (buf_sz > BUF_SIZE_16KiB)))
192 buf_sz = DEFAULT_BUFSIZE;
193 if (unlikely(flow_ctrl > 1))
194 flow_ctrl = FLOW_AUTO;
195 else if (likely(flow_ctrl < 0))
196 flow_ctrl = FLOW_OFF;
197 if (unlikely((pause < 0) || (pause > 0xffff)))
198 pause = PAUSE_TIME;
199 if (eee_timer < 0)
200 eee_timer = STMMAC_DEFAULT_LPI_TIMER;
201 }
202
203 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
204 {
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
207 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
208 u32 queue;
209
210 for (queue = 0; queue < maxq; queue++) {
211 struct stmmac_channel *ch = &priv->channel[queue];
212
213 if (stmmac_xdp_is_enabled(priv) &&
214 test_bit(queue, priv->af_xdp_zc_qps)) {
215 napi_disable(&ch->rxtx_napi);
216 continue;
217 }
218
219 if (queue < rx_queues_cnt)
220 napi_disable(&ch->rx_napi);
221 if (queue < tx_queues_cnt)
222 napi_disable(&ch->tx_napi);
223 }
224 }
225
226 /**
227 * stmmac_disable_all_queues - Disable all queues
228 * @priv: driver private structure
229 */
230 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
231 {
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
233 struct stmmac_rx_queue *rx_q;
234 u32 queue;
235
236 /* synchronize_rcu() needed for pending XDP buffers to drain */
237 for (queue = 0; queue < rx_queues_cnt; queue++) {
238 rx_q = &priv->dma_conf.rx_queue[queue];
239 if (rx_q->xsk_pool) {
240 synchronize_rcu();
241 break;
242 }
243 }
244
245 __stmmac_disable_all_queues(priv);
246 }
247
248 /**
249 * stmmac_enable_all_queues - Enable all queues
250 * @priv: driver private structure
251 */
252 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
253 {
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
256 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
257 u32 queue;
258
259 for (queue = 0; queue < maxq; queue++) {
260 struct stmmac_channel *ch = &priv->channel[queue];
261
262 if (stmmac_xdp_is_enabled(priv) &&
263 test_bit(queue, priv->af_xdp_zc_qps)) {
264 napi_enable(&ch->rxtx_napi);
265 continue;
266 }
267
268 if (queue < rx_queues_cnt)
269 napi_enable(&ch->rx_napi);
270 if (queue < tx_queues_cnt)
271 napi_enable(&ch->tx_napi);
272 }
273 }
274
275 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
276 {
277 if (!test_bit(STMMAC_DOWN, &priv->state) &&
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
279 queue_work(priv->wq, &priv->service_task);
280 }
281
282 static void stmmac_global_err(struct stmmac_priv *priv)
283 {
284 netif_carrier_off(priv->dev);
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state);
286 stmmac_service_event_schedule(priv);
287 }
288
289 /**
290 * stmmac_clk_csr_set - dynamically set the MDC clock
291 * @priv: driver private structure
292 * Description: this is to dynamically set the MDC clock according to the csr
293 * clock input.
294 * Note:
295 * If a specific clk_csr value is passed from the platform
296 * this means that the CSR Clock Range selection cannot be
297 * changed at run-time and it is fixed (as reported in the driver
298 * documentation). Otherwise, the driver will try to set the MDC
299 * clock dynamically according to the actual clock input.
300 */
301 static void stmmac_clk_csr_set(struct stmmac_priv *priv)
302 {
303 u32 clk_rate;
304
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk);
306
307 /* Platform provided default clk_csr would be assumed valid
308 * for all other cases except for the below mentioned ones.
309 * For values higher than the IEEE 802.3 specified frequency
310 * we cannot estimate the proper divider, since the frequency of
311 * clk_csr_i is not known. So we do not change the default
312 * divider.
313 */
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) {
315 if (clk_rate < CSR_F_35M)
316 priv->clk_csr = STMMAC_CSR_20_35M;
317 else if ((clk_rate >= CSR_F_35M) && (clk_rate < CSR_F_60M))
318 priv->clk_csr = STMMAC_CSR_35_60M;
319 else if ((clk_rate >= CSR_F_60M) && (clk_rate < CSR_F_100M))
320 priv->clk_csr = STMMAC_CSR_60_100M;
321 else if ((clk_rate >= CSR_F_100M) && (clk_rate < CSR_F_150M))
322 priv->clk_csr = STMMAC_CSR_100_150M;
323 else if ((clk_rate >= CSR_F_150M) && (clk_rate < CSR_F_250M))
324 priv->clk_csr = STMMAC_CSR_150_250M;
325 else if ((clk_rate >= CSR_F_250M) && (clk_rate <= CSR_F_300M))
326 priv->clk_csr = STMMAC_CSR_250_300M;
327 }
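/* Example: a 75 MHz csr clock falls in the 60-100 MHz range above and
 * selects STMMAC_CSR_60_100M; the MAC then derives MDC using the divider
 * associated with that range (roughly csr_clk / 42 on typical Synopsys
 * cores, per the databook).
 */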
328
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) {
330 if (clk_rate > 160000000)
331 priv->clk_csr = 0x03;
332 else if (clk_rate > 80000000)
333 priv->clk_csr = 0x02;
334 else if (clk_rate > 40000000)
335 priv->clk_csr = 0x01;
336 else
337 priv->clk_csr = 0;
338 }
339
340 if (priv->plat->has_xgmac) {
341 if (clk_rate > 400000000)
342 priv->clk_csr = 0x5;
343 else if (clk_rate > 350000000)
344 priv->clk_csr = 0x4;
345 else if (clk_rate > 300000000)
346 priv->clk_csr = 0x3;
347 else if (clk_rate > 250000000)
348 priv->clk_csr = 0x2;
349 else if (clk_rate > 150000000)
350 priv->clk_csr = 0x1;
351 else
352 priv->clk_csr = 0x0;
353 }
354 }
355
356 static void print_pkt(unsigned char *buf, int len)
357 {
358 pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
359 print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
360 }
361
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
363 {
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
365 u32 avail;
366
367 if (tx_q->dirty_tx > tx_q->cur_tx)
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1;
369 else
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
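/* Illustrative numbers: with dma_tx_size = 512, dirty_tx = 5 and
 * cur_tx = 510, 505 descriptors are still in use, so
 * avail = 512 - 510 + 5 - 1 = 6 free slots; the minus one keeps a single
 * descriptor in reserve, a common way to tell a full ring from an empty one.
 */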
371
372 return avail;
373 }
374
375 /**
376 * stmmac_rx_dirty - Get RX queue dirty
377 * @priv: driver private structure
378 * @queue: RX queue index
379 */
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
381 {
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
383 u32 dirty;
384
385 if (rx_q->dirty_rx <= rx_q->cur_rx)
386 dirty = rx_q->cur_rx - rx_q->dirty_rx;
387 else
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
389
390 return dirty;
391 }
392
393 static void stmmac_lpi_entry_timer_config(struct stmmac_priv *priv, bool en)
394 {
395 int tx_lpi_timer;
396
397 /* Clear/set the SW EEE timer flag based on LPI ET enablement */
398 priv->eee_sw_timer_en = en ? 0 : 1;
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0;
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer);
401 }
402
403 /**
404 * stmmac_enable_eee_mode - check and enter LPI mode
405 * @priv: driver private structure
406 * Description: this function checks that the TX path is idle and, if so,
407 * enters LPI mode when EEE is enabled.
408 */
409 static int stmmac_enable_eee_mode(struct stmmac_priv *priv)
410 {
411 u32 tx_cnt = priv->plat->tx_queues_to_use;
412 u32 queue;
413
414 /* check if all TX queues have finished their work */
415 for (queue = 0; queue < tx_cnt; queue++) {
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
417
418 if (tx_q->dirty_tx != tx_q->cur_tx)
419 return -EBUSY; /* still unfinished work */
420 }
421
422 /* Check and enter in LPI mode */
423 if (!priv->tx_path_in_lpi_mode)
424 stmmac_set_eee_mode(priv, priv->hw,
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING);
426 return 0;
427 }
428
429 /**
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
431 * @priv: driver private structure
432 * Description: this function exits and disables EEE when the LPI state
433 * is active. It is called from the xmit path.
434 */
435 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
436 {
437 if (!priv->eee_sw_timer_en) {
438 stmmac_lpi_entry_timer_config(priv, 0);
439 return;
440 }
441
442 stmmac_reset_eee_mode(priv, priv->hw);
443 del_timer_sync(&priv->eee_ctrl_timer);
444 priv->tx_path_in_lpi_mode = false;
445 }
446
447 /**
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
449 * @t: timer_list struct containing private info
450 * Description:
451 * if there is no data transfer and if we are not in LPI state,
452 * then the MAC transmitter can be moved to the LPI state.
453 */
454 static void stmmac_eee_ctrl_timer(struct timer_list *t)
455 {
456 struct stmmac_priv *priv = from_timer(priv, t, eee_ctrl_timer);
457
458 if (stmmac_enable_eee_mode(priv))
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
460 }
461
462 /**
463 * stmmac_eee_init - init EEE
464 * @priv: driver private structure
465 * Description:
466 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
467 * can also manage EEE, this function enables the LPI state and starts the
468 * related timer.
469 */
470 bool stmmac_eee_init(struct stmmac_priv *priv)
471 {
472 int eee_tw_timer = priv->eee_tw_timer;
473
474 /* Using PCS we cannot deal with the PHY registers at this stage,
475 * so we do not support extra features like EEE.
476 */
477 if (priv->hw->pcs == STMMAC_PCS_TBI ||
478 priv->hw->pcs == STMMAC_PCS_RTBI)
479 return false;
480
481 /* Check if MAC core supports the EEE feature. */
482 if (!priv->dma_cap.eee)
483 return false;
484
485 mutex_lock(&priv->lock);
486
487 /* Check if it needs to be deactivated */
488 if (!priv->eee_active) {
489 if (priv->eee_enabled) {
490 netdev_dbg(priv->dev, "disable EEE\n");
491 stmmac_lpi_entry_timer_config(priv, 0);
492 del_timer_sync(&priv->eee_ctrl_timer);
493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer);
494 if (priv->hw->xpcs)
495 xpcs_config_eee(priv->hw->xpcs,
496 priv->plat->mult_fact_100ns,
497 false);
498 }
499 mutex_unlock(&priv->lock);
500 return false;
501 }
502
503 if (priv->eee_active && !priv->eee_enabled) {
504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);
505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
506 eee_tw_timer);
507 if (priv->hw->xpcs)
508 xpcs_config_eee(priv->hw->xpcs,
509 priv->plat->mult_fact_100ns,
510 true);
511 }
512
513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) {
514 del_timer_sync(&priv->eee_ctrl_timer);
515 priv->tx_path_in_lpi_mode = false;
516 stmmac_lpi_entry_timer_config(priv, 1);
517 } else {
518 stmmac_lpi_entry_timer_config(priv, 0);
519 mod_timer(&priv->eee_ctrl_timer,
520 STMMAC_LPI_T(priv->tx_lpi_timer));
521 }
522
523 mutex_unlock(&priv->lock);
524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");
525 return true;
526 }
527
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
529 * @priv: driver private structure
530 * @p : descriptor pointer
531 * @skb : the socket buffer
532 * Description:
533 * This function reads the timestamp from the descriptor, performs some
534 * sanity checks and passes it to the stack.
535 */
536 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
537 struct dma_desc *p, struct sk_buff *skb)
538 {
539 struct skb_shared_hwtstamps shhwtstamp;
540 bool found = false;
541 u64 ns = 0;
542
543 if (!priv->hwts_tx_en)
544 return;
545
546 /* exit if skb doesn't support hw tstamp */
547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
548 return;
549
550 /* check tx tstamp status */
551 if (stmmac_get_tx_timestamp_status(priv, p)) {
552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
553 found = true;
554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
555 found = true;
556 }
557
558 if (found) {
559 ns -= priv->plat->cdc_error_adj;
560
561 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
562 shhwtstamp.hwtstamp = ns_to_ktime(ns);
563
564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
565 /* pass tstamp to stack */
566 skb_tstamp_tx(skb, &shhwtstamp);
567 }
568 }
569
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
571 * @priv: driver private structure
572 * @p : descriptor pointer
573 * @np : next descriptor pointer
574 * @skb : the socket buffer
575 * Description:
576 * This function reads the received packet's timestamp from the descriptor,
577 * performs some sanity checks and passes it to the stack.
578 */
579 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
580 struct dma_desc *np, struct sk_buff *skb)
581 {
582 struct skb_shared_hwtstamps *shhwtstamp = NULL;
583 struct dma_desc *desc = p;
584 u64 ns = 0;
585
586 if (!priv->hwts_rx_en)
587 return;
588 /* For GMAC4, the valid timestamp is from CTX next desc. */
589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
590 desc = np;
591
592 /* Check if timestamp is available */
593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
595
596 ns -= priv->plat->cdc_error_adj;
597
598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
599 shhwtstamp = skb_hwtstamps(skb);
600 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
601 shhwtstamp->hwtstamp = ns_to_ktime(ns);
602 } else {
603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
604 }
605 }
606
607 /**
608 * stmmac_hwtstamp_set - control hardware timestamping.
609 * @dev: device pointer.
610 * @ifr: An IOCTL specific structure, that can contain a pointer to
611 * a proprietary structure used to pass information to the driver.
612 * Description:
613 * This function configures the MAC to enable/disable timestamping of both
614 * outgoing (TX) and incoming (RX) packets based on user input.
615 * Return Value:
616 * 0 on success and an appropriate -ve integer on failure.
617 */
618 static int stmmac_hwtstamp_set(struct net_device *dev, struct ifreq *ifr)
619 {
620 struct stmmac_priv *priv = netdev_priv(dev);
621 struct hwtstamp_config config;
622 u32 ptp_v2 = 0;
623 u32 tstamp_all = 0;
624 u32 ptp_over_ipv4_udp = 0;
625 u32 ptp_over_ipv6_udp = 0;
626 u32 ptp_over_ethernet = 0;
627 u32 snap_type_sel = 0;
628 u32 ts_master_en = 0;
629 u32 ts_event_en = 0;
630
631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
632 netdev_alert(priv->dev, "No support for HW time stamping\n");
633 priv->hwts_tx_en = 0;
634 priv->hwts_rx_en = 0;
635
636 return -EOPNOTSUPP;
637 }
638
639 if (copy_from_user(&config, ifr->ifr_data,
640 sizeof(config)))
641 return -EFAULT;
642
643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
644 __func__, config.flags, config.tx_type, config.rx_filter);
645
646 if (config.tx_type != HWTSTAMP_TX_OFF &&
647 config.tx_type != HWTSTAMP_TX_ON)
648 return -ERANGE;
649
650 if (priv->adv_ts) {
651 switch (config.rx_filter) {
652 case HWTSTAMP_FILTER_NONE:
653 /* do not time stamp any incoming packet */
654 config.rx_filter = HWTSTAMP_FILTER_NONE;
655 break;
656
657 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
658 /* PTP v1, UDP, any kind of event packet */
659 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
660 /* 'xmac' hardware can support Sync, Pdelay_Req and
661 * Pdelay_resp by setting bit14 and bits17/16 to 01
662 * This leaves Delay_Req timestamps out.
663 * Enable all events *and* general purpose message
664 * timestamping
665 */
666 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
667 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
668 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
669 break;
670
671 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
672 /* PTP v1, UDP, Sync packet */
673 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
674 /* take time stamp for SYNC messages only */
675 ts_event_en = PTP_TCR_TSEVNTENA;
676
677 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
678 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
679 break;
680
681 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
682 /* PTP v1, UDP, Delay_req packet */
683 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
684 /* take time stamp for Delay_Req messages only */
685 ts_master_en = PTP_TCR_TSMSTRENA;
686 ts_event_en = PTP_TCR_TSEVNTENA;
687
688 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
689 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
690 break;
691
692 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
693 /* PTP v2, UDP, any kind of event packet */
694 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
695 ptp_v2 = PTP_TCR_TSVER2ENA;
696 /* take time stamp for all event messages */
697 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
698
699 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
700 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
701 break;
702
703 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
704 /* PTP v2, UDP, Sync packet */
705 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
706 ptp_v2 = PTP_TCR_TSVER2ENA;
707 /* take time stamp for SYNC messages only */
708 ts_event_en = PTP_TCR_TSEVNTENA;
709
710 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
711 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
712 break;
713
714 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
715 /* PTP v2, UDP, Delay_req packet */
716 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
717 ptp_v2 = PTP_TCR_TSVER2ENA;
718 /* take time stamp for Delay_Req messages only */
719 ts_master_en = PTP_TCR_TSMSTRENA;
720 ts_event_en = PTP_TCR_TSEVNTENA;
721
722 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
723 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
724 break;
725
726 case HWTSTAMP_FILTER_PTP_V2_EVENT:
727 /* PTP v2/IEEE 802.1AS, any layer, any kind of event packet */
728 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
729 ptp_v2 = PTP_TCR_TSVER2ENA;
730 snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
731 if (priv->synopsys_id < DWMAC_CORE_4_10)
732 ts_event_en = PTP_TCR_TSEVNTENA;
733 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
734 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
735 ptp_over_ethernet = PTP_TCR_TSIPENA;
736 break;
737
738 case HWTSTAMP_FILTER_PTP_V2_SYNC:
739 /* PTP v2/IEEE 802.1AS, any layer, Sync packet */
740 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
741 ptp_v2 = PTP_TCR_TSVER2ENA;
742 /* take time stamp for SYNC messages only */
743 ts_event_en = PTP_TCR_TSEVNTENA;
744
745 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
746 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
747 ptp_over_ethernet = PTP_TCR_TSIPENA;
748 break;
749
750 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
751 /* PTP v2/IEEE 802.1AS, any layer, Delay_req packet */
752 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
753 ptp_v2 = PTP_TCR_TSVER2ENA;
754 /* take time stamp for Delay_Req messages only */
755 ts_master_en = PTP_TCR_TSMSTRENA;
756 ts_event_en = PTP_TCR_TSEVNTENA;
757
758 ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
759 ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
760 ptp_over_ethernet = PTP_TCR_TSIPENA;
761 break;
762
763 case HWTSTAMP_FILTER_NTP_ALL:
764 case HWTSTAMP_FILTER_ALL:
765 /* time stamp any incoming packet */
766 config.rx_filter = HWTSTAMP_FILTER_ALL;
767 tstamp_all = PTP_TCR_TSENALL;
768 break;
769
770 default:
771 return -ERANGE;
772 }
773 } else {
774 switch (config.rx_filter) {
775 case HWTSTAMP_FILTER_NONE:
776 config.rx_filter = HWTSTAMP_FILTER_NONE;
777 break;
778 default:
779 /* PTP v1, UDP, any kind of event packet */
780 config.rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
781 break;
782 }
783 }
784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1);
785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON;
786
787 priv->systime_flags = STMMAC_HWTS_ACTIVE;
788
789 if (priv->hwts_tx_en || priv->hwts_rx_en) {
790 priv->systime_flags |= tstamp_all | ptp_v2 |
791 ptp_over_ethernet | ptp_over_ipv6_udp |
792 ptp_over_ipv4_udp | ts_event_en |
793 ts_master_en | snap_type_sel;
794 }
795
796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);
797
798 memcpy(&priv->tstamp_config, &config, sizeof(config));
799
800 return copy_to_user(ifr->ifr_data, &config,
801 sizeof(config)) ? -EFAULT : 0;
802 }
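/* Userspace typically reaches this handler through the SIOCSHWTSTAMP ioctl,
 * e.g. (sketch, error handling omitted; "eth0" and sock_fd are placeholders):
 *
 *   struct hwtstamp_config cfg = {
 *       .tx_type   = HWTSTAMP_TX_ON,
 *       .rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT,
 *   };
 *   struct ifreq ifr = { 0 };
 *
 *   strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
 *   ifr.ifr_data = (void *)&cfg;
 *   ioctl(sock_fd, SIOCSHWTSTAMP, &ifr);
 */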
803
804 /**
805 * stmmac_hwtstamp_get - read hardware timestamping.
806 * @dev: device pointer.
807 * @ifr: An IOCTL specific structure, that can contain a pointer to
808 * a proprietary structure used to pass information to the driver.
809 * Description:
810 * This function obtains the current hardware timestamping settings
811 * as requested.
812 */
813 static int stmmac_hwtstamp_get(struct net_device *dev, struct ifreq *ifr)
814 {
815 struct stmmac_priv *priv = netdev_priv(dev);
816 struct hwtstamp_config *config = &priv->tstamp_config;
817
818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
819 return -EOPNOTSUPP;
820
821 return copy_to_user(ifr->ifr_data, config,
822 sizeof(*config)) ? -EFAULT : 0;
823 }
824
825 /**
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
827 * @priv: driver private structure
828 * @systime_flags: timestamping flags
829 * Description:
830 * Initialize hardware counter for packet timestamping.
831 * This is valid as long as the interface is open and not suspended.
832 * It is rerun after resuming from suspend, in which case the timestamping
833 * flags updated by stmmac_hwtstamp_set() also need to be restored.
834 */
835 int stmmac_init_tstamp_counter(struct stmmac_priv *priv, u32 systime_flags)
836 {
837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
838 struct timespec64 now;
839 u32 sec_inc = 0;
840 u64 temp = 0;
841
842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
843 return -EOPNOTSUPP;
844
845 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
846 priv->systime_flags = systime_flags;
847
848 /* program Sub Second Increment reg */
849 stmmac_config_sub_second_increment(priv, priv->ptpaddr,
850 priv->plat->clk_ptp_rate,
851 xmac, &sec_inc);
852 temp = div_u64(1000000000ULL, sec_inc);
853
854 /* Store sub second increment for later use */
855 priv->sub_second_inc = sec_inc;
856
857 /* calculate default addend value:
858 * formula is:
859 * addend = (2^32)/freq_div_ratio;
860 * where freq_div_ratio = 1e9ns/sec_inc
861 */
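/* Worked example with purely illustrative numbers: if sec_inc = 10 ns and
 * clk_ptp_rate = 200 MHz, the code below computes
 * addend = ((1e9 / 10) << 32) / 200e6 = 2^31 = 0x80000000,
 * so the 32-bit accumulator wraps (advancing the counter by sec_inc) every
 * second PTP clock cycle, i.e. every 10 ns.
 */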
862 temp = (u64)(temp << 32);
863 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
864 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
865
866 /* initialize system time */
867 ktime_get_real_ts64(&now);
868
869 /* lower 32 bits of tv_sec are safe until y2106 */
870 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);
871
872 return 0;
873 }
874 EXPORT_SYMBOL_GPL(stmmac_init_tstamp_counter);
875
876 /**
877 * stmmac_init_ptp - init PTP
878 * @priv: driver private structure
879 * Description: this is to verify whether the HW supports PTPv1 or PTPv2.
880 * This is done by looking at the HW cap. register.
881 * This function also registers the ptp driver.
882 */
883 static int stmmac_init_ptp(struct stmmac_priv *priv)
884 {
885 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
886 int ret;
887
888 if (priv->plat->ptp_clk_freq_config)
889 priv->plat->ptp_clk_freq_config(priv);
890
891 ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE);
892 if (ret)
893 return ret;
894
895 priv->adv_ts = 0;
896 /* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
897 if (xmac && priv->dma_cap.atime_stamp)
898 priv->adv_ts = 1;
899 /* Dwmac 3.x core with extend_desc can support adv_ts */
900 else if (priv->extend_desc && priv->dma_cap.atime_stamp)
901 priv->adv_ts = 1;
902
903 if (priv->dma_cap.time_stamp)
904 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");
905
906 if (priv->adv_ts)
907 netdev_info(priv->dev,
908 "IEEE 1588-2008 Advanced Timestamp supported\n");
909
910 priv->hwts_tx_en = 0;
911 priv->hwts_rx_en = 0;
912
913 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
914 stmmac_hwtstamp_correct_latency(priv, priv);
915
916 return 0;
917 }
918
919 static void stmmac_release_ptp(struct stmmac_priv *priv)
920 {
921 clk_disable_unprepare(priv->plat->clk_ptp_ref);
922 stmmac_ptp_unregister(priv);
923 }
924
925 /**
926 * stmmac_mac_flow_ctrl - Configure flow control in all queues
927 * @priv: driver private structure
928 * @duplex: duplex passed to the next function
929 * Description: It is used for configuring the flow control in all queues
930 */
931 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex)
932 {
933 u32 tx_cnt = priv->plat->tx_queues_to_use;
934
935 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl,
936 priv->pause, tx_cnt);
937 }
938
939 static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
940 phy_interface_t interface)
941 {
942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
943
944 if (priv->hw->xpcs)
945 return &priv->hw->xpcs->pcs;
946
947 if (priv->hw->lynx_pcs)
948 return priv->hw->lynx_pcs;
949
950 return NULL;
951 }
952
953 static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
954 const struct phylink_link_state *state)
955 {
956 /* Nothing to do, xpcs_config() handles everything */
957 }
958
959 static void stmmac_fpe_link_state_handle(struct stmmac_priv *priv, bool is_up)
960 {
961 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
962 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
963 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
964 bool *hs_enable = &fpe_cfg->hs_enable;
965
966 if (is_up && *hs_enable) {
967 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg,
968 MPACKET_VERIFY);
969 } else {
970 *lo_state = FPE_STATE_OFF;
971 *lp_state = FPE_STATE_OFF;
972 }
973 }
974
975 static void stmmac_mac_link_down(struct phylink_config *config,
976 unsigned int mode, phy_interface_t interface)
977 {
978 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
979
980 stmmac_mac_set(priv, priv->ioaddr, false);
981 priv->eee_active = false;
982 priv->tx_lpi_enabled = false;
983 priv->eee_enabled = stmmac_eee_init(priv);
984 stmmac_set_eee_pls(priv, priv->hw, false);
985
986 if (priv->dma_cap.fpesel)
987 stmmac_fpe_link_state_handle(priv, false);
988 }
989
990 static void stmmac_mac_link_up(struct phylink_config *config,
991 struct phy_device *phy,
992 unsigned int mode, phy_interface_t interface,
993 int speed, int duplex,
994 bool tx_pause, bool rx_pause)
995 {
996 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
997 u32 old_ctrl, ctrl;
998
999 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
1000 priv->plat->serdes_powerup)
1001 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
1002
1003 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
1004 ctrl = old_ctrl & ~priv->hw->link.speed_mask;
1005
1006 if (interface == PHY_INTERFACE_MODE_USXGMII) {
1007 switch (speed) {
1008 case SPEED_10000:
1009 ctrl |= priv->hw->link.xgmii.speed10000;
1010 break;
1011 case SPEED_5000:
1012 ctrl |= priv->hw->link.xgmii.speed5000;
1013 break;
1014 case SPEED_2500:
1015 ctrl |= priv->hw->link.xgmii.speed2500;
1016 break;
1017 default:
1018 return;
1019 }
1020 } else if (interface == PHY_INTERFACE_MODE_XLGMII) {
1021 switch (speed) {
1022 case SPEED_100000:
1023 ctrl |= priv->hw->link.xlgmii.speed100000;
1024 break;
1025 case SPEED_50000:
1026 ctrl |= priv->hw->link.xlgmii.speed50000;
1027 break;
1028 case SPEED_40000:
1029 ctrl |= priv->hw->link.xlgmii.speed40000;
1030 break;
1031 case SPEED_25000:
1032 ctrl |= priv->hw->link.xlgmii.speed25000;
1033 break;
1034 case SPEED_10000:
1035 ctrl |= priv->hw->link.xgmii.speed10000;
1036 break;
1037 case SPEED_2500:
1038 ctrl |= priv->hw->link.speed2500;
1039 break;
1040 case SPEED_1000:
1041 ctrl |= priv->hw->link.speed1000;
1042 break;
1043 default:
1044 return;
1045 }
1046 } else {
1047 switch (speed) {
1048 case SPEED_2500:
1049 ctrl |= priv->hw->link.speed2500;
1050 break;
1051 case SPEED_1000:
1052 ctrl |= priv->hw->link.speed1000;
1053 break;
1054 case SPEED_100:
1055 ctrl |= priv->hw->link.speed100;
1056 break;
1057 case SPEED_10:
1058 ctrl |= priv->hw->link.speed10;
1059 break;
1060 default:
1061 return;
1062 }
1063 }
1064
1065 priv->speed = speed;
1066
1067 if (priv->plat->fix_mac_speed)
1068 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);
1069
1070 if (!duplex)
1071 ctrl &= ~priv->hw->link.duplex;
1072 else
1073 ctrl |= priv->hw->link.duplex;
1074
1075 /* Flow Control operation */
1076 if (rx_pause && tx_pause)
1077 priv->flow_ctrl = FLOW_AUTO;
1078 else if (rx_pause && !tx_pause)
1079 priv->flow_ctrl = FLOW_RX;
1080 else if (!rx_pause && tx_pause)
1081 priv->flow_ctrl = FLOW_TX;
1082 else
1083 priv->flow_ctrl = FLOW_OFF;
1084
1085 stmmac_mac_flow_ctrl(priv, duplex);
1086
1087 if (ctrl != old_ctrl)
1088 writel(ctrl, priv->ioaddr + MAC_CTRL_REG);
1089
1090 stmmac_mac_set(priv, priv->ioaddr, true);
1091 if (phy && priv->dma_cap.eee) {
1092 priv->eee_active =
1093 phy_init_eee(phy, !(priv->plat->flags &
1094 STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) >= 0;
1095 priv->eee_enabled = stmmac_eee_init(priv);
1096 priv->tx_lpi_enabled = priv->eee_enabled;
1097 stmmac_set_eee_pls(priv, priv->hw, true);
1098 }
1099
1100 if (priv->dma_cap.fpesel)
1101 stmmac_fpe_link_state_handle(priv, true);
1102
1103 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
1104 stmmac_hwtstamp_correct_latency(priv, priv);
1105 }
1106
1107 static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
1108 .mac_select_pcs = stmmac_mac_select_pcs,
1109 .mac_config = stmmac_mac_config,
1110 .mac_link_down = stmmac_mac_link_down,
1111 .mac_link_up = stmmac_mac_link_up,
1112 };
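/* phylink drives these callbacks: mac_select_pcs picks the optional XPCS or
 * Lynx PCS instance, mac_config runs when the interface/mode is resolved,
 * and mac_link_up/mac_link_down run on link transitions, programming speed,
 * duplex, flow control and EEE state as implemented above.
 */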
1113
1114 /**
1115 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1116 * @priv: driver private structure
1117 * Description: this is to verify whether the HW supports the PCS.
1118 * The Physical Coding Sublayer (PCS) interface can be used when the MAC is
1119 * configured for the TBI, RTBI, or SGMII PHY interface.
1120 */
1121 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1122 {
1123 int interface = priv->plat->mac_interface;
1124
1125 if (priv->dma_cap.pcs) {
1126 if ((interface == PHY_INTERFACE_MODE_RGMII) ||
1127 (interface == PHY_INTERFACE_MODE_RGMII_ID) ||
1128 (interface == PHY_INTERFACE_MODE_RGMII_RXID) ||
1129 (interface == PHY_INTERFACE_MODE_RGMII_TXID)) {
1130 netdev_dbg(priv->dev, "PCS RGMII support enabled\n");
1131 priv->hw->pcs = STMMAC_PCS_RGMII;
1132 } else if (interface == PHY_INTERFACE_MODE_SGMII) {
1133 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1134 priv->hw->pcs = STMMAC_PCS_SGMII;
1135 }
1136 }
1137 }
1138
1139 /**
1140 * stmmac_init_phy - PHY initialization
1141 * @dev: net device structure
1142 * Description: it initializes the driver's PHY state, and attaches the PHY
1143 * to the mac driver.
1144 * Return value:
1145 * 0 on success
1146 */
1147 static int stmmac_init_phy(struct net_device *dev)
1148 {
1149 struct stmmac_priv *priv = netdev_priv(dev);
1150 struct fwnode_handle *phy_fwnode;
1151 struct fwnode_handle *fwnode;
1152 int ret;
1153
1154 if (!phylink_expects_phy(priv->phylink))
1155 return 0;
1156
1157 fwnode = priv->plat->port_node;
1158 if (!fwnode)
1159 fwnode = dev_fwnode(priv->device);
1160
1161 if (fwnode)
1162 phy_fwnode = fwnode_get_phy_node(fwnode);
1163 else
1164 phy_fwnode = NULL;
1165
1166 /* Some DT bindings do not set up the PHY handle. Let's try to
1167 * parse it manually.
1168 */
1169 if (!phy_fwnode || IS_ERR(phy_fwnode)) {
1170 int addr = priv->plat->phy_addr;
1171 struct phy_device *phydev;
1172
1173 if (addr < 0) {
1174 netdev_err(priv->dev, "no phy found\n");
1175 return -ENODEV;
1176 }
1177
1178 phydev = mdiobus_get_phy(priv->mii, addr);
1179 if (!phydev) {
1180 netdev_err(priv->dev, "no phy at addr %d\n", addr);
1181 return -ENODEV;
1182 }
1183
1184 ret = phylink_connect_phy(priv->phylink, phydev);
1185 } else {
1186 fwnode_handle_put(phy_fwnode);
1187 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0);
1188 }
1189
1190 if (!priv->plat->pmt) {
1191 struct ethtool_wolinfo wol = { .cmd = ETHTOOL_GWOL };
1192
1193 phylink_ethtool_get_wol(priv->phylink, &wol);
1194 device_set_wakeup_capable(priv->device, !!wol.supported);
1195 device_set_wakeup_enable(priv->device, !!wol.wolopts);
1196 }
1197
1198 return ret;
1199 }
1200
1201 static void stmmac_set_half_duplex(struct stmmac_priv *priv)
1202 {
1203 /* Half-duplex can only work with a single TX queue */
1204 if (priv->plat->tx_queues_to_use > 1)
1205 priv->phylink_config.mac_capabilities &=
1206 ~(MAC_10HD | MAC_100HD | MAC_1000HD);
1207 else
1208 priv->phylink_config.mac_capabilities |=
1209 (MAC_10HD | MAC_100HD | MAC_1000HD);
1210 }
1211
1212 static int stmmac_phy_setup(struct stmmac_priv *priv)
1213 {
1214 struct stmmac_mdio_bus_data *mdio_bus_data;
1215 int mode = priv->plat->phy_interface;
1216 struct fwnode_handle *fwnode;
1217 struct phylink *phylink;
1218 int max_speed;
1219
1220 priv->phylink_config.dev = &priv->dev->dev;
1221 priv->phylink_config.type = PHYLINK_NETDEV;
1222 priv->phylink_config.mac_managed_pm = true;
1223
1224 mdio_bus_data = priv->plat->mdio_bus_data;
1225 if (mdio_bus_data)
1226 priv->phylink_config.ovr_an_inband =
1227 mdio_bus_data->xpcs_an_inband;
1228
1229 /* Set the platform/firmware specified interface mode. Note, phylink
1230 * deals with the PHY interface mode, not the MAC interface mode.
1231 */
1232 __set_bit(mode, priv->phylink_config.supported_interfaces);
1233
1234 /* If we have an xpcs, it defines which PHY interfaces are supported. */
1235 if (priv->hw->xpcs)
1236 xpcs_get_interfaces(priv->hw->xpcs,
1237 priv->phylink_config.supported_interfaces);
1238
1239 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE |
1240 MAC_10FD | MAC_100FD |
1241 MAC_1000FD;
1242
1243 stmmac_set_half_duplex(priv);
1244
1245 /* Get the MAC specific capabilities */
1246 stmmac_mac_phylink_get_caps(priv);
1247
1248 max_speed = priv->plat->max_speed;
1249 if (max_speed)
1250 phylink_limit_mac_speed(&priv->phylink_config, max_speed);
1251
1252 fwnode = priv->plat->port_node;
1253 if (!fwnode)
1254 fwnode = dev_fwnode(priv->device);
1255
1256 phylink = phylink_create(&priv->phylink_config, fwnode,
1257 mode, &stmmac_phylink_mac_ops);
1258 if (IS_ERR(phylink))
1259 return PTR_ERR(phylink);
1260
1261 priv->phylink = phylink;
1262 return 0;
1263 }
1264
1265 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1266 struct stmmac_dma_conf *dma_conf)
1267 {
1268 u32 rx_cnt = priv->plat->rx_queues_to_use;
1269 unsigned int desc_size;
1270 void *head_rx;
1271 u32 queue;
1272
1273 /* Display RX rings */
1274 for (queue = 0; queue < rx_cnt; queue++) {
1275 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1276
1277 pr_info("\tRX Queue %u rings\n", queue);
1278
1279 if (priv->extend_desc) {
1280 head_rx = (void *)rx_q->dma_erx;
1281 desc_size = sizeof(struct dma_extended_desc);
1282 } else {
1283 head_rx = (void *)rx_q->dma_rx;
1284 desc_size = sizeof(struct dma_desc);
1285 }
1286
1287 /* Display RX ring */
1288 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1289 rx_q->dma_rx_phy, desc_size);
1290 }
1291 }
1292
1293 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1294 struct stmmac_dma_conf *dma_conf)
1295 {
1296 u32 tx_cnt = priv->plat->tx_queues_to_use;
1297 unsigned int desc_size;
1298 void *head_tx;
1299 u32 queue;
1300
1301 /* Display TX rings */
1302 for (queue = 0; queue < tx_cnt; queue++) {
1303 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1304
1305 pr_info("\tTX Queue %d rings\n", queue);
1306
1307 if (priv->extend_desc) {
1308 head_tx = (void *)tx_q->dma_etx;
1309 desc_size = sizeof(struct dma_extended_desc);
1310 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1311 head_tx = (void *)tx_q->dma_entx;
1312 desc_size = sizeof(struct dma_edesc);
1313 } else {
1314 head_tx = (void *)tx_q->dma_tx;
1315 desc_size = sizeof(struct dma_desc);
1316 }
1317
1318 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1319 tx_q->dma_tx_phy, desc_size);
1320 }
1321 }
1322
1323 static void stmmac_display_rings(struct stmmac_priv *priv,
1324 struct stmmac_dma_conf *dma_conf)
1325 {
1326 /* Display RX ring */
1327 stmmac_display_rx_rings(priv, dma_conf);
1328
1329 /* Display TX ring */
1330 stmmac_display_tx_rings(priv, dma_conf);
1331 }
1332
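/* Map the MTU to a DMA buffer size. Illustrative mapping derived from the
 * thresholds below: an MTU of 1500 keeps DEFAULT_BUFSIZE (1536 bytes), an
 * MTU of 3000 selects BUF_SIZE_4KiB, and an MTU of 8 KiB or more selects
 * BUF_SIZE_16KiB.
 */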
1333 static int stmmac_set_bfsize(int mtu, int bufsize)
1334 {
1335 int ret = bufsize;
1336
1337 if (mtu >= BUF_SIZE_8KiB)
1338 ret = BUF_SIZE_16KiB;
1339 else if (mtu >= BUF_SIZE_4KiB)
1340 ret = BUF_SIZE_8KiB;
1341 else if (mtu >= BUF_SIZE_2KiB)
1342 ret = BUF_SIZE_4KiB;
1343 else if (mtu > DEFAULT_BUFSIZE)
1344 ret = BUF_SIZE_2KiB;
1345 else
1346 ret = DEFAULT_BUFSIZE;
1347
1348 return ret;
1349 }
1350
1351 /**
1352 * stmmac_clear_rx_descriptors - clear RX descriptors
1353 * @priv: driver private structure
1354 * @dma_conf: structure to take the dma data
1355 * @queue: RX queue index
1356 * Description: this function is called to clear the RX descriptors,
1357 * whether basic or extended descriptors are used.
1358 */
1359 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1360 struct stmmac_dma_conf *dma_conf,
1361 u32 queue)
1362 {
1363 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1364 int i;
1365
1366 /* Clear the RX descriptors */
1367 for (i = 0; i < dma_conf->dma_rx_size; i++)
1368 if (priv->extend_desc)
1369 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
1370 priv->use_riwt, priv->mode,
1371 (i == dma_conf->dma_rx_size - 1),
1372 dma_conf->dma_buf_sz);
1373 else
1374 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
1375 priv->use_riwt, priv->mode,
1376 (i == dma_conf->dma_rx_size - 1),
1377 dma_conf->dma_buf_sz);
1378 }
1379
1380 /**
1381 * stmmac_clear_tx_descriptors - clear tx descriptors
1382 * @priv: driver private structure
1383 * @dma_conf: structure to take the dma data
1384 * @queue: TX queue index.
1385 * Description: this function is called to clear the TX descriptors,
1386 * whether basic or extended descriptors are used.
1387 */
1388 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1389 struct stmmac_dma_conf *dma_conf,
1390 u32 queue)
1391 {
1392 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1393 int i;
1394
1395 /* Clear the TX descriptors */
1396 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1397 int last = (i == (dma_conf->dma_tx_size - 1));
1398 struct dma_desc *p;
1399
1400 if (priv->extend_desc)
1401 p = &tx_q->dma_etx[i].basic;
1402 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1403 p = &tx_q->dma_entx[i].basic;
1404 else
1405 p = &tx_q->dma_tx[i];
1406
1407 stmmac_init_tx_desc(priv, p, priv->mode, last);
1408 }
1409 }
1410
1411 /**
1412 * stmmac_clear_descriptors - clear descriptors
1413 * @priv: driver private structure
1414 * @dma_conf: structure to take the dma data
1415 * Description: this function is called to clear the TX and RX descriptors,
1416 * whether basic or extended descriptors are used.
1417 */
1418 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1419 struct stmmac_dma_conf *dma_conf)
1420 {
1421 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1422 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1423 u32 queue;
1424
1425 /* Clear the RX descriptors */
1426 for (queue = 0; queue < rx_queue_cnt; queue++)
1427 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1428
1429 /* Clear the TX descriptors */
1430 for (queue = 0; queue < tx_queue_cnt; queue++)
1431 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1432 }
1433
1434 /**
1435 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1436 * @priv: driver private structure
1437 * @dma_conf: structure to take the dma data
1438 * @p: descriptor pointer
1439 * @i: descriptor index
1440 * @flags: gfp flag
1441 * @queue: RX queue index
1442 * Description: this function is called to allocate a receive buffer, perform
1443 * the DMA mapping and init the descriptor.
1444 */
1445 static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
1446 struct stmmac_dma_conf *dma_conf,
1447 struct dma_desc *p,
1448 int i, gfp_t flags, u32 queue)
1449 {
1450 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1451 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1452 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
1453
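/* Restrict allocations to the 32-bit addressable zone when the host DMA
 * width is at most 32 bits.
 */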
1454 if (priv->dma_cap.host_dma_width <= 32)
1455 gfp |= GFP_DMA32;
1456
1457 if (!buf->page) {
1458 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1459 if (!buf->page)
1460 return -ENOMEM;
1461 buf->page_offset = stmmac_rx_offset(priv);
1462 }
1463
1464 if (priv->sph && !buf->sec_page) {
1465 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
1466 if (!buf->sec_page)
1467 return -ENOMEM;
1468
1469 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
1470 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
1471 } else {
1472 buf->sec_page = NULL;
1473 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
1474 }
1475
1476 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
1477
1478 stmmac_set_desc_addr(priv, p, buf->addr);
1479 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
1480 stmmac_init_desc3(priv, p);
1481
1482 return 0;
1483 }
1484
1485 /**
1486 * stmmac_free_rx_buffer - free RX dma buffers
1487 * @priv: private structure
1488 * @rx_q: RX queue
1489 * @i: buffer index.
1490 */
1491 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1492 struct stmmac_rx_queue *rx_q,
1493 int i)
1494 {
1495 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1496
1497 if (buf->page)
1498 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1499 buf->page = NULL;
1500
1501 if (buf->sec_page)
1502 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1503 buf->sec_page = NULL;
1504 }
1505
1506 /**
1507 * stmmac_free_tx_buffer - free TX dma buffers
1508 * @priv: private structure
1509 * @dma_conf: structure to take the dma data
1510 * @queue: TX queue index
1511 * @i: buffer index.
1512 */
1513 static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
1514 struct stmmac_dma_conf *dma_conf,
1515 u32 queue, int i)
1516 {
1517 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1518
1519 if (tx_q->tx_skbuff_dma[i].buf &&
1520 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
1521 if (tx_q->tx_skbuff_dma[i].map_as_page)
1522 dma_unmap_page(priv->device,
1523 tx_q->tx_skbuff_dma[i].buf,
1524 tx_q->tx_skbuff_dma[i].len,
1525 DMA_TO_DEVICE);
1526 else
1527 dma_unmap_single(priv->device,
1528 tx_q->tx_skbuff_dma[i].buf,
1529 tx_q->tx_skbuff_dma[i].len,
1530 DMA_TO_DEVICE);
1531 }
1532
1533 if (tx_q->xdpf[i] &&
1534 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
1535 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
1536 xdp_return_frame(tx_q->xdpf[i]);
1537 tx_q->xdpf[i] = NULL;
1538 }
1539
1540 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
1541 tx_q->xsk_frames_done++;
1542
1543 if (tx_q->tx_skbuff[i] &&
1544 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
1545 dev_kfree_skb_any(tx_q->tx_skbuff[i]);
1546 tx_q->tx_skbuff[i] = NULL;
1547 }
1548
1549 tx_q->tx_skbuff_dma[i].buf = 0;
1550 tx_q->tx_skbuff_dma[i].map_as_page = false;
1551 }
1552
1553 /**
1554 * dma_free_rx_skbufs - free RX dma buffers
1555 * @priv: private structure
1556 * @dma_conf: structure to take the dma data
1557 * @queue: RX queue index
1558 */
1559 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1560 struct stmmac_dma_conf *dma_conf,
1561 u32 queue)
1562 {
1563 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1564 int i;
1565
1566 for (i = 0; i < dma_conf->dma_rx_size; i++)
1567 stmmac_free_rx_buffer(priv, rx_q, i);
1568 }
1569
1570 static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
1571 struct stmmac_dma_conf *dma_conf,
1572 u32 queue, gfp_t flags)
1573 {
1574 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1575 int i;
1576
1577 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1578 struct dma_desc *p;
1579 int ret;
1580
1581 if (priv->extend_desc)
1582 p = &((rx_q->dma_erx + i)->basic);
1583 else
1584 p = rx_q->dma_rx + i;
1585
1586 ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
1587 queue);
1588 if (ret)
1589 return ret;
1590
1591 rx_q->buf_alloc_num++;
1592 }
1593
1594 return 0;
1595 }
1596
1597 /**
1598 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1599 * @priv: private structure
1600 * @dma_conf: structure to take the dma data
1601 * @queue: RX queue index
1602 */
1603 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1604 struct stmmac_dma_conf *dma_conf,
1605 u32 queue)
1606 {
1607 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1608 int i;
1609
1610 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1611 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1612
1613 if (!buf->xdp)
1614 continue;
1615
1616 xsk_buff_free(buf->xdp);
1617 buf->xdp = NULL;
1618 }
1619 }
1620
1621 static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
1622 struct stmmac_dma_conf *dma_conf,
1623 u32 queue)
1624 {
1625 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1626 int i;
1627
1628 /* struct stmmac_xdp_buff uses the cb field (maximum size of 24 bytes)
1629 * in struct xdp_buff_xsk to stash driver-specific information. Thus,
1630 * use this macro to make sure there are no size violations.
1631 */
1632 XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);
1633
1634 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1635 struct stmmac_rx_buffer *buf;
1636 dma_addr_t dma_addr;
1637 struct dma_desc *p;
1638
1639 if (priv->extend_desc)
1640 p = (struct dma_desc *)(rx_q->dma_erx + i);
1641 else
1642 p = rx_q->dma_rx + i;
1643
1644 buf = &rx_q->buf_pool[i];
1645
1646 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
1647 if (!buf->xdp)
1648 return -ENOMEM;
1649
1650 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
1651 stmmac_set_desc_addr(priv, p, dma_addr);
1652 rx_q->buf_alloc_num++;
1653 }
1654
1655 return 0;
1656 }
1657
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
1659 {
1660 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps))
1661 return NULL;
1662
1663 return xsk_get_pool_from_qid(priv->dev, queue);
1664 }
1665
1666 /**
1667 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1668 * @priv: driver private structure
1669 * @dma_conf: structure to take the dma data
1670 * @queue: RX queue index
1671 * @flags: gfp flag.
1672 * Description: this function initializes the DMA RX descriptors
1673 * and allocates the socket buffers. It supports the chained and ring
1674 * modes.
1675 */
1676 static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
1677 struct stmmac_dma_conf *dma_conf,
1678 u32 queue, gfp_t flags)
1679 {
1680 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1681 int ret;
1682
1683 netif_dbg(priv, probe, priv->dev,
1684 "(%s) dma_rx_phy=0x%08x\n", __func__,
1685 (u32)rx_q->dma_rx_phy);
1686
1687 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1688
1689 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);
1690
1691 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1692
1693 if (rx_q->xsk_pool) {
1694 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1695 MEM_TYPE_XSK_BUFF_POOL,
1696 NULL));
1697 netdev_info(priv->dev,
1698 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
1699 rx_q->queue_index);
1700 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
1701 } else {
1702 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
1703 MEM_TYPE_PAGE_POOL,
1704 rx_q->page_pool));
1705 netdev_info(priv->dev,
1706 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
1707 rx_q->queue_index);
1708 }
1709
1710 if (rx_q->xsk_pool) {
1711 /* RX XDP ZC buffer pool may not be populated, e.g.
1712 * xdpsock TX-only.
1713 */
1714 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
1715 } else {
1716 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
1717 if (ret < 0)
1718 return -ENOMEM;
1719 }
1720
1721 /* Setup the chained descriptor addresses */
1722 if (priv->mode == STMMAC_CHAIN_MODE) {
1723 if (priv->extend_desc)
1724 stmmac_mode_init(priv, rx_q->dma_erx,
1725 rx_q->dma_rx_phy,
1726 dma_conf->dma_rx_size, 1);
1727 else
1728 stmmac_mode_init(priv, rx_q->dma_rx,
1729 rx_q->dma_rx_phy,
1730 dma_conf->dma_rx_size, 0);
1731 }
1732
1733 return 0;
1734 }
1735
1736 static int init_dma_rx_desc_rings(struct net_device *dev,
1737 struct stmmac_dma_conf *dma_conf,
1738 gfp_t flags)
1739 {
1740 struct stmmac_priv *priv = netdev_priv(dev);
1741 u32 rx_count = priv->plat->rx_queues_to_use;
1742 int queue;
1743 int ret;
1744
1745 /* RX INITIALIZATION */
1746 netif_dbg(priv, probe, priv->dev,
1747 "SKB addresses:\nskb\t\tskb data\tdma data\n");
1748
1749 for (queue = 0; queue < rx_count; queue++) {
1750 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
1751 if (ret)
1752 goto err_init_rx_buffers;
1753 }
1754
1755 return 0;
1756
1757 err_init_rx_buffers:
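/* Unwind the queues that were already set up, freeing either the XSK
 * buffers or the page-pool buffers depending on how each queue was
 * populated.
 */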
1758 while (queue >= 0) {
1759 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1760
1761 if (rx_q->xsk_pool)
1762 dma_free_rx_xskbufs(priv, dma_conf, queue);
1763 else
1764 dma_free_rx_skbufs(priv, dma_conf, queue);
1765
1766 rx_q->buf_alloc_num = 0;
1767 rx_q->xsk_pool = NULL;
1768
1769 queue--;
1770 }
1771
1772 return ret;
1773 }
1774
1775 /**
1776 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1777 * @priv: driver private structure
1778 * @dma_conf: structure to take the dma data
1779 * @queue: TX queue index
1780 * Description: this function initializes the DMA TX descriptors
1781 * and allocates the socket buffers. It supports the chained and ring
1782 * modes.
1783 */
1784 static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
1785 struct stmmac_dma_conf *dma_conf,
1786 u32 queue)
1787 {
1788 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1789 int i;
1790
1791 netif_dbg(priv, probe, priv->dev,
1792 "(%s) dma_tx_phy=0x%08x\n", __func__,
1793 (u32)tx_q->dma_tx_phy);
1794
1795 /* Setup the chained descriptor addresses */
1796 if (priv->mode == STMMAC_CHAIN_MODE) {
1797 if (priv->extend_desc)
1798 stmmac_mode_init(priv, tx_q->dma_etx,
1799 tx_q->dma_tx_phy,
1800 dma_conf->dma_tx_size, 1);
1801 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
1802 stmmac_mode_init(priv, tx_q->dma_tx,
1803 tx_q->dma_tx_phy,
1804 dma_conf->dma_tx_size, 0);
1805 }
1806
1807 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);
1808
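/* Clear every TX descriptor and reset the per-entry bookkeeping; the
 * descriptor layout depends on whether extended or TBS (enhanced)
 * descriptors are in use for this queue.
 */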
1809 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1810 struct dma_desc *p;
1811
1812 if (priv->extend_desc)
1813 p = &((tx_q->dma_etx + i)->basic);
1814 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
1815 p = &((tx_q->dma_entx + i)->basic);
1816 else
1817 p = tx_q->dma_tx + i;
1818
1819 stmmac_clear_desc(priv, p);
1820
1821 tx_q->tx_skbuff_dma[i].buf = 0;
1822 tx_q->tx_skbuff_dma[i].map_as_page = false;
1823 tx_q->tx_skbuff_dma[i].len = 0;
1824 tx_q->tx_skbuff_dma[i].last_segment = false;
1825 tx_q->tx_skbuff[i] = NULL;
1826 }
1827
1828 return 0;
1829 }
1830
1831 static int init_dma_tx_desc_rings(struct net_device *dev,
1832 struct stmmac_dma_conf *dma_conf)
1833 {
1834 struct stmmac_priv *priv = netdev_priv(dev);
1835 u32 tx_queue_cnt;
1836 u32 queue;
1837
1838 tx_queue_cnt = priv->plat->tx_queues_to_use;
1839
1840 for (queue = 0; queue < tx_queue_cnt; queue++)
1841 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1842
1843 return 0;
1844 }
1845
1846 /**
1847 * init_dma_desc_rings - init the RX/TX descriptor rings
1848 * @dev: net device structure
1849 * @dma_conf: structure to take the dma data
1850 * @flags: gfp flag.
1851 * Description: this function initializes the DMA RX/TX descriptors
1852 * and allocates the socket buffers. It supports the chained and ring
1853 * modes.
1854 */
1855 static int init_dma_desc_rings(struct net_device *dev,
1856 struct stmmac_dma_conf *dma_conf,
1857 gfp_t flags)
1858 {
1859 struct stmmac_priv *priv = netdev_priv(dev);
1860 int ret;
1861
1862 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
1863 if (ret)
1864 return ret;
1865
1866 ret = init_dma_tx_desc_rings(dev, dma_conf);
1867
1868 stmmac_clear_descriptors(priv, dma_conf);
1869
1870 if (netif_msg_hw(priv))
1871 stmmac_display_rings(priv, dma_conf);
1872
1873 return ret;
1874 }
1875
1876 /**
1877 * dma_free_tx_skbufs - free TX dma buffers
1878 * @priv: private structure
1879 * @dma_conf: structure to take the dma data
1880 * @queue: TX queue index
1881 */
1882 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
1883 struct stmmac_dma_conf *dma_conf,
1884 u32 queue)
1885 {
1886 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1887 int i;
1888
1889 tx_q->xsk_frames_done = 0;
1890
1891 for (i = 0; i < dma_conf->dma_tx_size; i++)
1892 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
1893
1894 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
1895 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
1896 tx_q->xsk_frames_done = 0;
1897 tx_q->xsk_pool = NULL;
1898 }
1899 }
1900
1901 /**
1902 * stmmac_free_tx_skbufs - free TX skb buffers
1903 * @priv: private structure
1904 */
1905 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
1906 {
1907 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1908 u32 queue;
1909
1910 for (queue = 0; queue < tx_queue_cnt; queue++)
1911 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
1912 }
1913
1914 /**
1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1916 * @priv: private structure
1917 * @dma_conf: structure to take the dma data
1918 * @queue: RX queue index
1919 */
1920 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
1921 struct stmmac_dma_conf *dma_conf,
1922 u32 queue)
1923 {
1924 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1925
1926 /* Release the DMA RX socket buffers */
1927 if (rx_q->xsk_pool)
1928 dma_free_rx_xskbufs(priv, dma_conf, queue);
1929 else
1930 dma_free_rx_skbufs(priv, dma_conf, queue);
1931
1932 rx_q->buf_alloc_num = 0;
1933 rx_q->xsk_pool = NULL;
1934
1935 /* Free DMA regions of consistent memory previously allocated */
1936 if (!priv->extend_desc)
1937 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1938 sizeof(struct dma_desc),
1939 rx_q->dma_rx, rx_q->dma_rx_phy);
1940 else
1941 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
1942 sizeof(struct dma_extended_desc),
1943 rx_q->dma_erx, rx_q->dma_rx_phy);
1944
1945 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
1946 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
1947
1948 kfree(rx_q->buf_pool);
1949 if (rx_q->page_pool)
1950 page_pool_destroy(rx_q->page_pool);
1951 }
1952
1953 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
1954 struct stmmac_dma_conf *dma_conf)
1955 {
1956 u32 rx_count = priv->plat->rx_queues_to_use;
1957 u32 queue;
1958
1959 /* Free RX queue resources */
1960 for (queue = 0; queue < rx_count; queue++)
1961 __free_dma_rx_desc_resources(priv, dma_conf, queue);
1962 }
1963
1964 /**
1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1966 * @priv: private structure
1967 * @dma_conf: structure to take the dma data
1968 * @queue: TX queue index
1969 */
1970 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
1971 struct stmmac_dma_conf *dma_conf,
1972 u32 queue)
1973 {
1974 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1975 size_t size;
1976 void *addr;
1977
1978 /* Release the DMA TX socket buffers */
1979 dma_free_tx_skbufs(priv, dma_conf, queue);
1980
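/* Pick the descriptor flavour used by this queue so that the right
 * amount of coherent memory is released.
 */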
1981 if (priv->extend_desc) {
1982 size = sizeof(struct dma_extended_desc);
1983 addr = tx_q->dma_etx;
1984 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1985 size = sizeof(struct dma_edesc);
1986 addr = tx_q->dma_entx;
1987 } else {
1988 size = sizeof(struct dma_desc);
1989 addr = tx_q->dma_tx;
1990 }
1991
1992 size *= dma_conf->dma_tx_size;
1993
1994 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
1995
1996 kfree(tx_q->tx_skbuff_dma);
1997 kfree(tx_q->tx_skbuff);
1998 }
1999
2000 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2001 struct stmmac_dma_conf *dma_conf)
2002 {
2003 u32 tx_count = priv->plat->tx_queues_to_use;
2004 u32 queue;
2005
2006 /* Free TX queue resources */
2007 for (queue = 0; queue < tx_count; queue++)
2008 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2009 }
2010
2011 /**
2012 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2013 * @priv: private structure
2014 * @dma_conf: structure to take the dma data
2015 * @queue: RX queue index
2016 * Description: according to which descriptor type is in use (extended or
2017 * basic), this function allocates the RX path resources for the given
2018 * queue. The RX buffers are pre-allocated from a page pool in order to
2019 * allow the zero-copy mechanism.
2020 */
2021 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2022 struct stmmac_dma_conf *dma_conf,
2023 u32 queue)
2024 {
2025 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2026 struct stmmac_channel *ch = &priv->channel[queue];
2027 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2028 struct page_pool_params pp_params = { 0 };
2029 unsigned int num_pages;
2030 unsigned int napi_id;
2031 int ret;
2032
2033 rx_q->queue_index = queue;
2034 rx_q->priv_data = priv;
2035
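/* Size the page pool with one buffer per descriptor; the page order is
 * derived from the configured DMA buffer size.
 */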
2036 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2037 pp_params.pool_size = dma_conf->dma_rx_size;
2038 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE);
2039 pp_params.order = ilog2(num_pages);
2040 pp_params.nid = dev_to_node(priv->device);
2041 pp_params.dev = priv->device;
2042 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2043 pp_params.offset = stmmac_rx_offset(priv);
2044 pp_params.max_len = STMMAC_MAX_RX_BUF_SIZE(num_pages);
2045
2046 rx_q->page_pool = page_pool_create(&pp_params);
2047 if (IS_ERR(rx_q->page_pool)) {
2048 ret = PTR_ERR(rx_q->page_pool);
2049 rx_q->page_pool = NULL;
2050 return ret;
2051 }
2052
2053 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
2054 sizeof(*rx_q->buf_pool),
2055 GFP_KERNEL);
2056 if (!rx_q->buf_pool)
2057 return -ENOMEM;
2058
2059 if (priv->extend_desc) {
2060 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2061 dma_conf->dma_rx_size *
2062 sizeof(struct dma_extended_desc),
2063 &rx_q->dma_rx_phy,
2064 GFP_KERNEL);
2065 if (!rx_q->dma_erx)
2066 return -ENOMEM;
2067
2068 } else {
2069 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2070 dma_conf->dma_rx_size *
2071 sizeof(struct dma_desc),
2072 &rx_q->dma_rx_phy,
2073 GFP_KERNEL);
2074 if (!rx_q->dma_rx)
2075 return -ENOMEM;
2076 }
2077
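/* Register the xdp_rxq against the NAPI instance that will service this
 * queue: the combined rx/tx NAPI for AF_XDP zero-copy queues, the plain
 * RX NAPI otherwise.
 */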
2078 if (stmmac_xdp_is_enabled(priv) &&
2079 test_bit(queue, priv->af_xdp_zc_qps))
2080 napi_id = ch->rxtx_napi.napi_id;
2081 else
2082 napi_id = ch->rx_napi.napi_id;
2083
2084 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2085 rx_q->queue_index,
2086 napi_id);
2087 if (ret) {
2088 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2089 return -EINVAL;
2090 }
2091
2092 return 0;
2093 }
2094
2095 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2096 struct stmmac_dma_conf *dma_conf)
2097 {
2098 u32 rx_count = priv->plat->rx_queues_to_use;
2099 u32 queue;
2100 int ret;
2101
2102 /* RX queues buffers and DMA */
2103 for (queue = 0; queue < rx_count; queue++) {
2104 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2105 if (ret)
2106 goto err_dma;
2107 }
2108
2109 return 0;
2110
2111 err_dma:
2112 free_dma_rx_desc_resources(priv, dma_conf);
2113
2114 return ret;
2115 }
2116
2117 /**
2118 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2119 * @priv: private structure
2120 * @dma_conf: structure to take the dma data
2121 * @queue: TX queue index
2122 * Description: according to which descriptor type is in use (extended,
2123 * enhanced or basic), this function allocates the TX path resources for
2124 * the given queue: the tx_skbuff bookkeeping arrays and the coherent DMA
2125 * area that holds the descriptor ring.
2126 */
2127 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2128 struct stmmac_dma_conf *dma_conf,
2129 u32 queue)
2130 {
2131 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2132 size_t size;
2133 void *addr;
2134
2135 tx_q->queue_index = queue;
2136 tx_q->priv_data = priv;
2137
2138 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size,
2139 sizeof(*tx_q->tx_skbuff_dma),
2140 GFP_KERNEL);
2141 if (!tx_q->tx_skbuff_dma)
2142 return -ENOMEM;
2143
2144 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size,
2145 sizeof(struct sk_buff *),
2146 GFP_KERNEL);
2147 if (!tx_q->tx_skbuff)
2148 return -ENOMEM;
2149
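/* The descriptor size depends on the flavour in use: extended, enhanced
 * (TBS) or basic.
 */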
2150 if (priv->extend_desc)
2151 size = sizeof(struct dma_extended_desc);
2152 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2153 size = sizeof(struct dma_edesc);
2154 else
2155 size = sizeof(struct dma_desc);
2156
2157 size *= dma_conf->dma_tx_size;
2158
2159 addr = dma_alloc_coherent(priv->device, size,
2160 &tx_q->dma_tx_phy, GFP_KERNEL);
2161 if (!addr)
2162 return -ENOMEM;
2163
2164 if (priv->extend_desc)
2165 tx_q->dma_etx = addr;
2166 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2167 tx_q->dma_entx = addr;
2168 else
2169 tx_q->dma_tx = addr;
2170
2171 return 0;
2172 }
2173
2174 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2175 struct stmmac_dma_conf *dma_conf)
2176 {
2177 u32 tx_count = priv->plat->tx_queues_to_use;
2178 u32 queue;
2179 int ret;
2180
2181 /* TX queues buffers and DMA */
2182 for (queue = 0; queue < tx_count; queue++) {
2183 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2184 if (ret)
2185 goto err_dma;
2186 }
2187
2188 return 0;
2189
2190 err_dma:
2191 free_dma_tx_desc_resources(priv, dma_conf);
2192 return ret;
2193 }
2194
2195 /**
2196 * alloc_dma_desc_resources - alloc TX/RX resources.
2197 * @priv: private structure
2198 * @dma_conf: structure to take the dma data
2199 * Description: according to which descriptor type is in use (extended or
2200 * basic), this function allocates the resources for the TX and RX paths.
2201 * In case of reception, for example, it pre-allocates the RX buffers in
2202 * order to allow the zero-copy mechanism.
2203 */
2204 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2205 struct stmmac_dma_conf *dma_conf)
2206 {
2207 /* RX Allocation */
2208 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2209
2210 if (ret)
2211 return ret;
2212
2213 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2214
2215 return ret;
2216 }
2217
2218 /**
2219 * free_dma_desc_resources - free dma desc resources
2220 * @priv: private structure
2221 * @dma_conf: structure to take the dma data
2222 */
2223 static void free_dma_desc_resources(struct stmmac_priv *priv,
2224 struct stmmac_dma_conf *dma_conf)
2225 {
2226 /* Release the DMA TX socket buffers */
2227 free_dma_tx_desc_resources(priv, dma_conf);
2228
2229 /* Release the DMA RX socket buffers later
2230 * to ensure all pending XDP_TX buffers are returned.
2231 */
2232 free_dma_rx_desc_resources(priv, dma_conf);
2233 }
2234
2235 /**
2236 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2237 * @priv: driver private structure
2238 * Description: It is used for enabling the rx queues in the MAC
2239 */
2240 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2241 {
2242 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2243 int queue;
2244 u8 mode;
2245
2246 for (queue = 0; queue < rx_queues_count; queue++) {
2247 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2248 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2249 }
2250 }
2251
2252 /**
2253 * stmmac_start_rx_dma - start RX DMA channel
2254 * @priv: driver private structure
2255 * @chan: RX channel index
2256 * Description:
2257 * This starts a RX DMA channel
2258 */
2259 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2260 {
2261 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2262 stmmac_start_rx(priv, priv->ioaddr, chan);
2263 }
2264
2265 /**
2266 * stmmac_start_tx_dma - start TX DMA channel
2267 * @priv: driver private structure
2268 * @chan: TX channel index
2269 * Description:
2270 * This starts a TX DMA channel
2271 */
2272 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2273 {
2274 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2275 stmmac_start_tx(priv, priv->ioaddr, chan);
2276 }
2277
2278 /**
2279 * stmmac_stop_rx_dma - stop RX DMA channel
2280 * @priv: driver private structure
2281 * @chan: RX channel index
2282 * Description:
2283 * This stops a RX DMA channel
2284 */
2285 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2286 {
2287 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2288 stmmac_stop_rx(priv, priv->ioaddr, chan);
2289 }
2290
2291 /**
2292 * stmmac_stop_tx_dma - stop TX DMA channel
2293 * @priv: driver private structure
2294 * @chan: TX channel index
2295 * Description:
2296 * This stops a TX DMA channel
2297 */
2298 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2299 {
2300 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2301 stmmac_stop_tx(priv, priv->ioaddr, chan);
2302 }
2303
2304 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2305 {
2306 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2307 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2308 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2309 u32 chan;
2310
2311 for (chan = 0; chan < dma_csr_ch; chan++) {
2312 struct stmmac_channel *ch = &priv->channel[chan];
2313 unsigned long flags;
2314
2315 spin_lock_irqsave(&ch->lock, flags);
2316 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2317 spin_unlock_irqrestore(&ch->lock, flags);
2318 }
2319 }
2320
2321 /**
2322 * stmmac_start_all_dma - start all RX and TX DMA channels
2323 * @priv: driver private structure
2324 * Description:
2325 * This starts all the RX and TX DMA channels
2326 */
2327 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2328 {
2329 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2330 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2331 u32 chan = 0;
2332
2333 for (chan = 0; chan < rx_channels_count; chan++)
2334 stmmac_start_rx_dma(priv, chan);
2335
2336 for (chan = 0; chan < tx_channels_count; chan++)
2337 stmmac_start_tx_dma(priv, chan);
2338 }
2339
2340 /**
2341 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2342 * @priv: driver private structure
2343 * Description:
2344 * This stops the RX and TX DMA channels
2345 */
2346 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2347 {
2348 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2349 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2350 u32 chan = 0;
2351
2352 for (chan = 0; chan < rx_channels_count; chan++)
2353 stmmac_stop_rx_dma(priv, chan);
2354
2355 for (chan = 0; chan < tx_channels_count; chan++)
2356 stmmac_stop_tx_dma(priv, chan);
2357 }
2358
2359 /**
2360 * stmmac_dma_operation_mode - HW DMA operation mode
2361 * @priv: driver private structure
2362 * Description: it is used for configuring the DMA operation mode register in
2363 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2364 */
2365 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2366 {
2367 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2368 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2369 int rxfifosz = priv->plat->rx_fifo_size;
2370 int txfifosz = priv->plat->tx_fifo_size;
2371 u32 txmode = 0;
2372 u32 rxmode = 0;
2373 u32 chan = 0;
2374 u8 qmode = 0;
2375
2376 if (rxfifosz == 0)
2377 rxfifosz = priv->dma_cap.rx_fifo_size;
2378 if (txfifosz == 0)
2379 txfifosz = priv->dma_cap.tx_fifo_size;
2380
2381 /* Adjust for real per queue fifo size */
2382 rxfifosz /= rx_channels_count;
2383 txfifosz /= tx_channels_count;
2384
2385 if (priv->plat->force_thresh_dma_mode) {
2386 txmode = tc;
2387 rxmode = tc;
2388 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2389 /*
2390 * In case of GMAC, SF mode can be enabled
2391 * to perform the TX COE in HW. This depends on:
2392 * 1) TX COE is actually supported
2393 * 2) there is no buggy Jumbo frame support that requires
2394 * checksum insertion to be skipped in the TDES.
2395 */
2396 txmode = SF_DMA_MODE;
2397 rxmode = SF_DMA_MODE;
2398 priv->xstats.threshold = SF_DMA_MODE;
2399 } else {
2400 txmode = tc;
2401 rxmode = SF_DMA_MODE;
2402 }
2403
2404 /* configure all channels */
2405 for (chan = 0; chan < rx_channels_count; chan++) {
2406 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2407 u32 buf_size;
2408
2409 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2410
2411 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2412 rxfifosz, qmode);
2413
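/* For AF_XDP zero-copy queues the DMA buffer size must match the frame
 * size of the XSK pool.
 */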
2414 if (rx_q->xsk_pool) {
2415 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2416 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2417 buf_size,
2418 chan);
2419 } else {
2420 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2421 priv->dma_conf.dma_buf_sz,
2422 chan);
2423 }
2424 }
2425
2426 for (chan = 0; chan < tx_channels_count; chan++) {
2427 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2428
2429 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2430 txfifosz, qmode);
2431 }
2432 }
2433
2434 static void stmmac_xsk_request_timestamp(void *_priv)
2435 {
2436 struct stmmac_metadata_request *meta_req = _priv;
2437
2438 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2439 *meta_req->set_ic = true;
2440 }
2441
2442 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2443 {
2444 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2445 struct stmmac_priv *priv = tx_compl->priv;
2446 struct dma_desc *desc = tx_compl->desc;
2447 bool found = false;
2448 u64 ns = 0;
2449
2450 if (!priv->hwts_tx_en)
2451 return 0;
2452
2453 /* check tx tstamp status */
2454 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2455 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2456 found = true;
2457 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2458 found = true;
2459 }
2460
2461 if (found) {
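/* Compensate for the known CDC (clock domain crossing) error
 * before reporting the timestamp.
 */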
2462 ns -= priv->plat->cdc_error_adj;
2463 return ns_to_ktime(ns);
2464 }
2465
2466 return 0;
2467 }
2468
2469 static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
2470 .tmo_request_timestamp = stmmac_xsk_request_timestamp,
2471 .tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
2472 };
2473
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
2475 {
2476 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
2477 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2478 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2479 struct xsk_buff_pool *pool = tx_q->xsk_pool;
2480 unsigned int entry = tx_q->cur_tx;
2481 struct dma_desc *tx_desc = NULL;
2482 struct xdp_desc xdp_desc;
2483 bool work_done = true;
2484 u32 tx_set_ic_bit = 0;
2485
2486 /* Avoids TX time-out as we are sharing with slow path */
2487 txq_trans_cond_update(nq);
2488
2489 budget = min(budget, stmmac_tx_avail(priv, queue));
2490
2491 while (budget-- > 0) {
2492 struct stmmac_metadata_request meta_req;
2493 struct xsk_tx_metadata *meta = NULL;
2494 dma_addr_t dma_addr;
2495 bool set_ic;
2496
2497 /* We are sharing with the slow path and stop XSK TX desc submission when
2498 * the available TX ring space is below the threshold.
2499 */
2500 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
2501 !netif_carrier_ok(priv->dev)) {
2502 work_done = false;
2503 break;
2504 }
2505
2506 if (!xsk_tx_peek_desc(pool, &xdp_desc))
2507 break;
2508
2509 if (likely(priv->extend_desc))
2510 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
2511 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2512 tx_desc = &tx_q->dma_entx[entry].basic;
2513 else
2514 tx_desc = tx_q->dma_tx + entry;
2515
2516 dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
2517 meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
2518 xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);
2519
2520 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;
2521
2522 /* To return the XDP buffer to the XSK pool, we simply call
2523 * xsk_tx_completed(), so we don't need to fill up
2524 * 'buf' and 'xdpf'.
2525 */
2526 tx_q->tx_skbuff_dma[entry].buf = 0;
2527 tx_q->xdpf[entry] = NULL;
2528
2529 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2530 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
2531 tx_q->tx_skbuff_dma[entry].last_segment = true;
2532 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2533
2534 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
2535
2536 tx_q->tx_count_frames++;
2537
2538 if (!priv->tx_coal_frames[queue])
2539 set_ic = false;
2540 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
2541 set_ic = true;
2542 else
2543 set_ic = false;
2544
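/* Let the XSK TX metadata hooks (e.g. a timestamp request) force an
 * interrupt-on-completion for this descriptor.
 */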
2545 meta_req.priv = priv;
2546 meta_req.tx_desc = tx_desc;
2547 meta_req.set_ic = &set_ic;
2548 xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
2549 &meta_req);
2550 if (set_ic) {
2551 tx_q->tx_count_frames = 0;
2552 stmmac_set_tx_ic(priv, tx_desc);
2553 tx_set_ic_bit++;
2554 }
2555
2556 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
2557 true, priv->mode, true, true,
2558 xdp_desc.len);
2559
2560 stmmac_enable_dma_transmission(priv, priv->ioaddr);
2561
2562 xsk_tx_metadata_to_compl(meta,
2563 &tx_q->tx_skbuff_dma[entry].xsk_meta);
2564
2565 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
2566 entry = tx_q->cur_tx;
2567 }
2568 u64_stats_update_begin(&txq_stats->napi_syncp);
2569 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
2570 u64_stats_update_end(&txq_stats->napi_syncp);
2571
2572 if (tx_desc) {
2573 stmmac_flush_tx_descriptors(priv, queue);
2574 xsk_tx_release(pool);
2575 }
2576
2577 /* Return true if both of the following conditions are met:
2578 * a) TX budget is still available
2579 * b) work_done == true, i.e. the XSK TX desc peek ran empty (no more
2580 * pending XSK TX frames to transmit)
2581 */
2582 return !!budget && work_done;
2583 }
2584
2585 static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
2586 {
2587 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
2588 tc += 64;
2589
2590 if (priv->plat->force_thresh_dma_mode)
2591 stmmac_set_dma_operation_mode(priv, tc, tc, chan);
2592 else
2593 stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
2594 chan);
2595
2596 priv->xstats.threshold = tc;
2597 }
2598 }
2599
2600 /**
2601 * stmmac_tx_clean - to manage the transmission completion
2602 * @priv: driver private structure
2603 * @budget: napi budget limiting this functions packet handling
2604 * @queue: TX queue index
2605 * @pending_packets: signal to arm the TX coal timer
2606 * Description: it reclaims the transmit resources after transmission completes.
2607 * If some packets still need to be handled due to TX coalescing, set
2608 * pending_packets to true to make NAPI arm the TX coal timer.
2609 */
2610 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2611 bool *pending_packets)
2612 {
2613 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2614 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2615 unsigned int bytes_compl = 0, pkts_compl = 0;
2616 unsigned int entry, xmits = 0, count = 0;
2617 u32 tx_packets = 0, tx_errors = 0;
2618
2619 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2620
2621 tx_q->xsk_frames_done = 0;
2622
2623 entry = tx_q->dirty_tx;
2624
2625 /* Try to clean all TX complete frame in 1 shot */
2626 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2627 struct xdp_frame *xdpf;
2628 struct sk_buff *skb;
2629 struct dma_desc *p;
2630 int status;
2631
2632 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2633 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2634 xdpf = tx_q->xdpf[entry];
2635 skb = NULL;
2636 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2637 xdpf = NULL;
2638 skb = tx_q->tx_skbuff[entry];
2639 } else {
2640 xdpf = NULL;
2641 skb = NULL;
2642 }
2643
2644 if (priv->extend_desc)
2645 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2646 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2647 p = &tx_q->dma_entx[entry].basic;
2648 else
2649 p = tx_q->dma_tx + entry;
2650
2651 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2652 /* Check if the descriptor is owned by the DMA */
2653 if (unlikely(status & tx_dma_own))
2654 break;
2655
2656 count++;
2657
2658 /* Make sure descriptor fields are read after reading
2659 * the own bit.
2660 */
2661 dma_rmb();
2662
2663 /* Just consider the last segment and ...*/
2664 if (likely(!(status & tx_not_ls))) {
2665 /* ... verify the status error condition */
2666 if (unlikely(status & tx_err)) {
2667 tx_errors++;
2668 if (unlikely(status & tx_err_bump_tc))
2669 stmmac_bump_dma_threshold(priv, queue);
2670 } else {
2671 tx_packets++;
2672 }
2673 if (skb) {
2674 stmmac_get_tx_hwtstamp(priv, p, skb);
2675 } else if (tx_q->xsk_pool &&
2676 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2677 struct stmmac_xsk_tx_complete tx_compl = {
2678 .priv = priv,
2679 .desc = p,
2680 };
2681
2682 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2683 &stmmac_xsk_tx_metadata_ops,
2684 &tx_compl);
2685 }
2686 }
2687
2688 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2689 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2690 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2691 dma_unmap_page(priv->device,
2692 tx_q->tx_skbuff_dma[entry].buf,
2693 tx_q->tx_skbuff_dma[entry].len,
2694 DMA_TO_DEVICE);
2695 else
2696 dma_unmap_single(priv->device,
2697 tx_q->tx_skbuff_dma[entry].buf,
2698 tx_q->tx_skbuff_dma[entry].len,
2699 DMA_TO_DEVICE);
2700 tx_q->tx_skbuff_dma[entry].buf = 0;
2701 tx_q->tx_skbuff_dma[entry].len = 0;
2702 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2703 }
2704
2705 stmmac_clean_desc3(priv, tx_q, p);
2706
2707 tx_q->tx_skbuff_dma[entry].last_segment = false;
2708 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2709
2710 if (xdpf &&
2711 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2712 xdp_return_frame_rx_napi(xdpf);
2713 tx_q->xdpf[entry] = NULL;
2714 }
2715
2716 if (xdpf &&
2717 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2718 xdp_return_frame(xdpf);
2719 tx_q->xdpf[entry] = NULL;
2720 }
2721
2722 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2723 tx_q->xsk_frames_done++;
2724
2725 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2726 if (likely(skb)) {
2727 pkts_compl++;
2728 bytes_compl += skb->len;
2729 dev_consume_skb_any(skb);
2730 tx_q->tx_skbuff[entry] = NULL;
2731 }
2732 }
2733
2734 stmmac_release_tx_desc(priv, p, priv->mode);
2735
2736 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2737 }
2738 tx_q->dirty_tx = entry;
2739
2740 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2741 pkts_compl, bytes_compl);
2742
2743 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2744 queue))) &&
2745 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2746
2747 netif_dbg(priv, tx_done, priv->dev,
2748 "%s: restart transmit\n", __func__);
2749 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2750 }
2751
2752 if (tx_q->xsk_pool) {
2753 bool work_done;
2754
2755 if (tx_q->xsk_frames_done)
2756 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2757
2758 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2759 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2760
2761 /* For XSK TX, we try to send as many as possible.
2762 * If XSK work done (XSK TX desc empty and budget still
2763 * available), return "budget - 1" to reenable TX IRQ.
2764 * Else, return "budget" to make NAPI continue polling.
2765 */
2766 work_done = stmmac_xdp_xmit_zc(priv, queue,
2767 STMMAC_XSK_TX_BUDGET_MAX);
2768 if (work_done)
2769 xmits = budget - 1;
2770 else
2771 xmits = budget;
2772 }
2773
2774 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode &&
2775 priv->eee_sw_timer_en) {
2776 if (stmmac_enable_eee_mode(priv))
2777 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
2778 }
2779
2780 /* We still have pending packets, let's call for a new scheduling */
2781 if (tx_q->dirty_tx != tx_q->cur_tx)
2782 *pending_packets = true;
2783
2784 u64_stats_update_begin(&txq_stats->napi_syncp);
2785 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2786 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2787 u64_stats_inc(&txq_stats->napi.tx_clean);
2788 u64_stats_update_end(&txq_stats->napi_syncp);
2789
2790 priv->xstats.tx_errors += tx_errors;
2791
2792 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2793
2794 /* Combine decisions from TX clean and XSK TX */
2795 return max(count, xmits);
2796 }
2797
2798 /**
2799 * stmmac_tx_err - to manage the tx error
2800 * @priv: driver private structure
2801 * @chan: channel index
2802 * Description: it cleans the descriptors and restarts the transmission
2803 * in case of transmission errors.
2804 */
2805 static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
2806 {
2807 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2808
2809 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));
2810
2811 stmmac_stop_tx_dma(priv, chan);
2812 dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
2813 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
2814 stmmac_reset_tx_queue(priv, chan);
2815 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
2816 tx_q->dma_tx_phy, chan);
2817 stmmac_start_tx_dma(priv, chan);
2818
2819 priv->xstats.tx_errors++;
2820 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
2821 }
2822
2823 /**
2824 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2825 * @priv: driver private structure
2826 * @txmode: TX operating mode
2827 * @rxmode: RX operating mode
2828 * @chan: channel index
2829 * Description: it is used for configuring the DMA operation mode at
2830 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2831 * mode.
2832 */
2833 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
2834 u32 rxmode, u32 chan)
2835 {
2836 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2837 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2838 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2839 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2840 int rxfifosz = priv->plat->rx_fifo_size;
2841 int txfifosz = priv->plat->tx_fifo_size;
2842
2843 if (rxfifosz == 0)
2844 rxfifosz = priv->dma_cap.rx_fifo_size;
2845 if (txfifosz == 0)
2846 txfifosz = priv->dma_cap.tx_fifo_size;
2847
2848 /* Adjust for real per queue fifo size */
2849 rxfifosz /= rx_channels_count;
2850 txfifosz /= tx_channels_count;
2851
2852 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
2853 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
2854 }
2855
2856 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
2857 {
2858 int ret;
2859
2860 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
2861 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
2862 if (ret && (ret != -EINVAL)) {
2863 stmmac_global_err(priv);
2864 return true;
2865 }
2866
2867 return false;
2868 }
2869
2870 static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
2871 {
2872 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
2873 &priv->xstats, chan, dir);
2874 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2875 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
2876 struct stmmac_channel *ch = &priv->channel[chan];
2877 struct napi_struct *rx_napi;
2878 struct napi_struct *tx_napi;
2879 unsigned long flags;
2880
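/* Queues backed by an XSK pool are serviced by the combined rx/tx NAPI. */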
2881 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
2882 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
2883
2884 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
2885 if (napi_schedule_prep(rx_napi)) {
2886 spin_lock_irqsave(&ch->lock, flags);
2887 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
2888 spin_unlock_irqrestore(&ch->lock, flags);
2889 __napi_schedule(rx_napi);
2890 }
2891 }
2892
2893 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
2894 if (napi_schedule_prep(tx_napi)) {
2895 spin_lock_irqsave(&ch->lock, flags);
2896 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
2897 spin_unlock_irqrestore(&ch->lock, flags);
2898 __napi_schedule(tx_napi);
2899 }
2900 }
2901
2902 return status;
2903 }
2904
2905 /**
2906 * stmmac_dma_interrupt - DMA ISR
2907 * @priv: driver private structure
2908 * Description: this is the DMA ISR. It is called by the main ISR.
2909 * It calls the dwmac dma routine and schedules the poll method in case
2910 * some work can be done.
2911 */
2912 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
2913 {
2914 u32 tx_channel_count = priv->plat->tx_queues_to_use;
2915 u32 rx_channel_count = priv->plat->rx_queues_to_use;
2916 u32 channels_to_check = tx_channel_count > rx_channel_count ?
2917 tx_channel_count : rx_channel_count;
2918 u32 chan;
2919 int status[max_t(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
2920
2921 /* Make sure we never check beyond our status buffer. */
2922 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
2923 channels_to_check = ARRAY_SIZE(status);
2924
2925 for (chan = 0; chan < channels_to_check; chan++)
2926 status[chan] = stmmac_napi_check(priv, chan,
2927 DMA_DIR_RXTX);
2928
2929 for (chan = 0; chan < tx_channel_count; chan++) {
2930 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
2931 /* Try to bump up the dma threshold on this failure */
2932 stmmac_bump_dma_threshold(priv, chan);
2933 } else if (unlikely(status[chan] == tx_hard_error)) {
2934 stmmac_tx_err(priv, chan);
2935 }
2936 }
2937 }
2938
2939 /**
2940 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
2941 * @priv: driver private structure
2942 * Description: this masks the MMC irq; the counters are in fact managed in SW.
2943 */
2944 static void stmmac_mmc_setup(struct stmmac_priv *priv)
2945 {
2946 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
2947 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
2948
2949 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
2950
2951 if (priv->dma_cap.rmon) {
2952 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
2953 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
2954 } else
2955 netdev_info(priv->dev, "No MAC Management Counters available\n");
2956 }
2957
2958 /**
2959 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2960 * @priv: driver private structure
2961 * Description:
2962 * newer GMAC chip generations have a register to indicate the
2963 * presence of the optional features/functions.
2964 * This can also be used to override the value passed through the
2965 * platform, which is necessary for old MAC10/100 and GMAC chips.
2966 */
2967 static int stmmac_get_hw_features(struct stmmac_priv *priv)
2968 {
2969 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
2970 }
2971
2972 /**
2973 * stmmac_check_ether_addr - check if the MAC addr is valid
2974 * @priv: driver private structure
2975 * Description:
2976 * it verifies that the MAC address is valid; in case of failure it
2977 * generates a random MAC address
2978 */
2979 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
2980 {
2981 u8 addr[ETH_ALEN];
2982
2983 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
2984 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
2985 if (is_valid_ether_addr(addr))
2986 eth_hw_addr_set(priv->dev, addr);
2987 else
2988 eth_hw_addr_random(priv->dev);
2989 dev_info(priv->device, "device MAC address %pM\n",
2990 priv->dev->dev_addr);
2991 }
2992 }
2993
2994 /**
2995 * stmmac_init_dma_engine - DMA init.
2996 * @priv: driver private structure
2997 * Description:
2998 * It inits the DMA by invoking the specific MAC/GMAC callback.
2999 * Some DMA parameters can be passed from the platform;
3000 * in case these are not passed, a default is kept for the MAC or GMAC.
3001 */
3002 static int stmmac_init_dma_engine(struct stmmac_priv *priv)
3003 {
3004 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3005 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3006 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
3007 struct stmmac_rx_queue *rx_q;
3008 struct stmmac_tx_queue *tx_q;
3009 u32 chan = 0;
3010 int atds = 0;
3011 int ret = 0;
3012
3013 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
3014 dev_err(priv->device, "Invalid DMA configuration\n");
3015 return -EINVAL;
3016 }
3017
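/* Extended descriptors in ring mode require the alternate descriptor
 * size (ATDS) to be signalled to the DMA engine.
 */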
3018 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
3019 atds = 1;
3020
3021 ret = stmmac_reset(priv, priv->ioaddr);
3022 if (ret) {
3023 dev_err(priv->device, "Failed to reset the dma\n");
3024 return ret;
3025 }
3026
3027 /* DMA Configuration */
3028 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds);
3029
3030 if (priv->plat->axi)
3031 stmmac_axi(priv, priv->ioaddr, priv->plat->axi);
3032
3033 /* DMA CSR Channel configuration */
3034 for (chan = 0; chan < dma_csr_ch; chan++) {
3035 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
3036 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
3037 }
3038
3039 /* DMA RX Channel Configuration */
3040 for (chan = 0; chan < rx_channels_count; chan++) {
3041 rx_q = &priv->dma_conf.rx_queue[chan];
3042
3043 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3044 rx_q->dma_rx_phy, chan);
3045
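/* Advance the RX tail pointer past the descriptors that already own a
 * buffer; for XSK queues this may be fewer than the full ring size.
 */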
3046 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
3047 (rx_q->buf_alloc_num *
3048 sizeof(struct dma_desc));
3049 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
3050 rx_q->rx_tail_addr, chan);
3051 }
3052
3053 /* DMA TX Channel Configuration */
3054 for (chan = 0; chan < tx_channels_count; chan++) {
3055 tx_q = &priv->dma_conf.tx_queue[chan];
3056
3057 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
3058 tx_q->dma_tx_phy, chan);
3059
3060 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
3061 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
3062 tx_q->tx_tail_addr, chan);
3063 }
3064
3065 return ret;
3066 }
3067
3068 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
3069 {
3070 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
3071 u32 tx_coal_timer = priv->tx_coal_timer[queue];
3072 struct stmmac_channel *ch;
3073 struct napi_struct *napi;
3074
3075 if (!tx_coal_timer)
3076 return;
3077
3078 ch = &priv->channel[tx_q->queue_index];
3079 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3080
3081 /* Arm timer only if napi is not already scheduled.
3082 * Try to cancel any timer if napi is scheduled, timer will be armed
3083 * again in the next scheduled napi.
3084 */
3085 if (unlikely(!napi_is_scheduled(napi)))
3086 hrtimer_start(&tx_q->txtimer,
3087 STMMAC_COAL_TIMER(tx_coal_timer),
3088 HRTIMER_MODE_REL);
3089 else
3090 hrtimer_try_to_cancel(&tx_q->txtimer);
3091 }
3092
3093 /**
3094 * stmmac_tx_timer - mitigation sw timer for tx.
3095 * @t: data pointer
3096 * Description:
3097 * This is the timer handler to directly invoke the stmmac_tx_clean.
3098 */
3099 static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
3100 {
3101 struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
3102 struct stmmac_priv *priv = tx_q->priv_data;
3103 struct stmmac_channel *ch;
3104 struct napi_struct *napi;
3105
3106 ch = &priv->channel[tx_q->queue_index];
3107 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;
3108
3109 if (likely(napi_schedule_prep(napi))) {
3110 unsigned long flags;
3111
3112 spin_lock_irqsave(&ch->lock, flags);
3113 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
3114 spin_unlock_irqrestore(&ch->lock, flags);
3115 __napi_schedule(napi);
3116 }
3117
3118 return HRTIMER_NORESTART;
3119 }
3120
3121 /**
3122 * stmmac_init_coalesce - init mitigation options.
3123 * @priv: driver private structure
3124 * Description:
3125 * This inits the coalesce parameters: i.e. timer rate,
3126 * timer handler and default threshold used for enabling the
3127 * interrupt on completion bit.
3128 */
3129 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3130 {
3131 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3132 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3133 u32 chan;
3134
3135 for (chan = 0; chan < tx_channel_count; chan++) {
3136 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3137
3138 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3139 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3140
3141 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3142 tx_q->txtimer.function = stmmac_tx_timer;
3143 }
3144
3145 for (chan = 0; chan < rx_channel_count; chan++)
3146 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3147 }
3148
3149 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3150 {
3151 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3152 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3153 u32 chan;
3154
3155 /* set TX ring length */
3156 for (chan = 0; chan < tx_channels_count; chan++)
3157 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3158 (priv->dma_conf.dma_tx_size - 1), chan);
3159
3160 /* set RX ring length */
3161 for (chan = 0; chan < rx_channels_count; chan++)
3162 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3163 (priv->dma_conf.dma_rx_size - 1), chan);
3164 }
3165
3166 /**
3167 * stmmac_set_tx_queue_weight - Set TX queue weight
3168 * @priv: driver private structure
3169 * Description: It is used for setting TX queues weight
3170 */
3171 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3172 {
3173 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3174 u32 weight;
3175 u32 queue;
3176
3177 for (queue = 0; queue < tx_queues_count; queue++) {
3178 weight = priv->plat->tx_queues_cfg[queue].weight;
3179 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3180 }
3181 }
3182
3183 /**
3184 * stmmac_configure_cbs - Configure CBS in TX queue
3185 * @priv: driver private structure
3186 * Description: It is used for configuring CBS in AVB TX queues
3187 */
3188 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3189 {
3190 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3191 u32 mode_to_use;
3192 u32 queue;
3193
3194 /* queue 0 is reserved for legacy traffic */
3195 for (queue = 1; queue < tx_queues_count; queue++) {
3196 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3197 if (mode_to_use == MTL_QUEUE_DCB)
3198 continue;
3199
3200 stmmac_config_cbs(priv, priv->hw,
3201 priv->plat->tx_queues_cfg[queue].send_slope,
3202 priv->plat->tx_queues_cfg[queue].idle_slope,
3203 priv->plat->tx_queues_cfg[queue].high_credit,
3204 priv->plat->tx_queues_cfg[queue].low_credit,
3205 queue);
3206 }
3207 }
3208
3209 /**
3210 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3211 * @priv: driver private structure
3212 * Description: It is used for mapping RX queues to RX dma channels
3213 */
3214 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3215 {
3216 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3217 u32 queue;
3218 u32 chan;
3219
3220 for (queue = 0; queue < rx_queues_count; queue++) {
3221 chan = priv->plat->rx_queues_cfg[queue].chan;
3222 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3223 }
3224 }
3225
3226 /**
3227 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3228 * @priv: driver private structure
3229 * Description: It is used for configuring the RX Queue Priority
3230 */
3231 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3232 {
3233 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3234 u32 queue;
3235 u32 prio;
3236
3237 for (queue = 0; queue < rx_queues_count; queue++) {
3238 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3239 continue;
3240
3241 prio = priv->plat->rx_queues_cfg[queue].prio;
3242 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3243 }
3244 }
3245
3246 /**
3247 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3248 * @priv: driver private structure
3249 * Description: It is used for configuring the TX Queue Priority
3250 */
3251 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3252 {
3253 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3254 u32 queue;
3255 u32 prio;
3256
3257 for (queue = 0; queue < tx_queues_count; queue++) {
3258 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3259 continue;
3260
3261 prio = priv->plat->tx_queues_cfg[queue].prio;
3262 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3263 }
3264 }
3265
3266 /**
3267 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3268 * @priv: driver private structure
3269 * Description: It is used for configuring the RX queue routing
3270 */
3271 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3272 {
3273 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3274 u32 queue;
3275 u8 packet;
3276
3277 for (queue = 0; queue < rx_queues_count; queue++) {
3278 /* no specific packet type routing specified for the queue */
3279 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3280 continue;
3281
3282 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3283 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3284 }
3285 }
3286
3287 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3288 {
3289 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3290 priv->rss.enable = false;
3291 return;
3292 }
3293
3294 if (priv->dev->features & NETIF_F_RXHASH)
3295 priv->rss.enable = true;
3296 else
3297 priv->rss.enable = false;
3298
3299 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3300 priv->plat->rx_queues_to_use);
3301 }
3302
3303 /**
3304 * stmmac_mtl_configuration - Configure MTL
3305 * @priv: driver private structure
3306 * Description: It is used for configuring MTL
3307 */
3308 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3309 {
3310 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3311 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3312
3313 if (tx_queues_count > 1)
3314 stmmac_set_tx_queue_weight(priv);
3315
3316 /* Configure MTL RX algorithms */
3317 if (rx_queues_count > 1)
3318 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3319 priv->plat->rx_sched_algorithm);
3320
3321 /* Configure MTL TX algorithms */
3322 if (tx_queues_count > 1)
3323 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3324 priv->plat->tx_sched_algorithm);
3325
3326 /* Configure CBS in AVB TX queues */
3327 if (tx_queues_count > 1)
3328 stmmac_configure_cbs(priv);
3329
3330 /* Map RX MTL to DMA channels */
3331 stmmac_rx_queue_dma_chan_map(priv);
3332
3333 /* Enable MAC RX Queues */
3334 stmmac_mac_enable_rx_queues(priv);
3335
3336 /* Set RX priorities */
3337 if (rx_queues_count > 1)
3338 stmmac_mac_config_rx_queues_prio(priv);
3339
3340 /* Set TX priorities */
3341 if (tx_queues_count > 1)
3342 stmmac_mac_config_tx_queues_prio(priv);
3343
3344 /* Set RX routing */
3345 if (rx_queues_count > 1)
3346 stmmac_mac_config_rx_queues_routing(priv);
3347
3348 /* Receive Side Scaling */
3349 if (rx_queues_count > 1)
3350 stmmac_mac_config_rss(priv);
3351 }
3352
3353 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3354 {
3355 if (priv->dma_cap.asp) {
3356 netdev_info(priv->dev, "Enabling Safety Features\n");
3357 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3358 priv->plat->safety_feat_cfg);
3359 } else {
3360 netdev_info(priv->dev, "No Safety Features support found\n");
3361 }
3362 }
3363
3364 static int stmmac_fpe_start_wq(struct stmmac_priv *priv)
3365 {
3366 char *name;
3367
3368 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
3369 clear_bit(__FPE_REMOVING, &priv->fpe_task_state);
3370
3371 name = priv->wq_name;
3372 sprintf(name, "%s-fpe", priv->dev->name);
3373
3374 priv->fpe_wq = create_singlethread_workqueue(name);
3375 if (!priv->fpe_wq) {
3376 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name);
3377
3378 return -ENOMEM;
3379 }
3380 netdev_info(priv->dev, "FPE workqueue started\n");
3381
3382 return 0;
3383 }
3384
3385 /**
3386 * stmmac_hw_setup - setup mac in a usable state.
3387 * @dev : pointer to the device structure.
3388 * @ptp_register: register PTP if set
3389 * Description:
3390 * this is the main function to set up the HW in a usable state: the
3391 * dma engine is reset, the core registers are configured (e.g. AXI,
3392 * Checksum features, timers) and the DMA is ready to start receiving
3393 * and transmitting.
3394 * Return value:
3395 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3396 * file on failure.
3397 */
3398 static int stmmac_hw_setup(struct net_device *dev, bool ptp_register)
3399 {
3400 struct stmmac_priv *priv = netdev_priv(dev);
3401 u32 rx_cnt = priv->plat->rx_queues_to_use;
3402 u32 tx_cnt = priv->plat->tx_queues_to_use;
3403 bool sph_en;
3404 u32 chan;
3405 int ret;
3406
3407 /* DMA initialization and SW reset */
3408 ret = stmmac_init_dma_engine(priv);
3409 if (ret < 0) {
3410 netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
3411 __func__);
3412 return ret;
3413 }
3414
3415 /* Copy the MAC addr into the HW */
3416 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
3417
3418 /* PS and related bits will be programmed according to the speed */
3419 if (priv->hw->pcs) {
3420 int speed = priv->plat->mac_port_sel_speed;
3421
3422 if ((speed == SPEED_10) || (speed == SPEED_100) ||
3423 (speed == SPEED_1000)) {
3424 priv->hw->ps = speed;
3425 } else {
3426 dev_warn(priv->device, "invalid port speed\n");
3427 priv->hw->ps = 0;
3428 }
3429 }
3430
3431 /* Initialize the MAC Core */
3432 stmmac_core_init(priv, priv->hw, dev);
3433
3434 /* Initialize MTL */
3435 stmmac_mtl_configuration(priv);
3436
3437 /* Initialize Safety Features */
3438 stmmac_safety_feat_configuration(priv);
3439
3440 ret = stmmac_rx_ipc(priv, priv->hw);
3441 if (!ret) {
3442 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
3443 priv->plat->rx_coe = STMMAC_RX_COE_NONE;
3444 priv->hw->rx_csum = 0;
3445 }
3446
3447 /* Enable the MAC Rx/Tx */
3448 stmmac_mac_set(priv, priv->ioaddr, true);
3449
3450 /* Set the HW DMA mode and the COE */
3451 stmmac_dma_operation_mode(priv);
3452
3453 stmmac_mmc_setup(priv);
3454
3455 if (ptp_register) {
3456 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
3457 if (ret < 0)
3458 netdev_warn(priv->dev,
3459 "failed to enable PTP reference clock: %pe\n",
3460 ERR_PTR(ret));
3461 }
3462
3463 ret = stmmac_init_ptp(priv);
3464 if (ret == -EOPNOTSUPP)
3465 netdev_info(priv->dev, "PTP not supported by HW\n");
3466 else if (ret)
3467 netdev_warn(priv->dev, "PTP init failed\n");
3468 else if (ptp_register)
3469 stmmac_ptp_register(priv);
3470
3471 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS;
3472
3473 /* Convert the timer from msec to usec */
3474 if (!priv->tx_lpi_timer)
3475 priv->tx_lpi_timer = eee_timer * 1000;
3476
3477 if (priv->use_riwt) {
3478 u32 queue;
3479
3480 for (queue = 0; queue < rx_cnt; queue++) {
3481 if (!priv->rx_riwt[queue])
3482 priv->rx_riwt[queue] = DEF_DMA_RIWT;
3483
3484 stmmac_rx_watchdog(priv, priv->ioaddr,
3485 priv->rx_riwt[queue], queue);
3486 }
3487 }
3488
3489 if (priv->hw->pcs)
3490 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0);
3491
3492 /* set TX and RX rings length */
3493 stmmac_set_rings_length(priv);
3494
3495 /* Enable TSO */
3496 if (priv->tso) {
3497 for (chan = 0; chan < tx_cnt; chan++) {
3498 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3499
3500 /* TSO and TBS cannot co-exist */
3501 if (tx_q->tbs & STMMAC_TBS_AVAIL)
3502 continue;
3503
3504 stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
3505 }
3506 }
3507
3508 /* Enable Split Header */
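/* Note: SPH is only turned on when RX checksum offload is active, i.e.
 * both priv->sph and hw->rx_csum must be set.
 */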
3509 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
3510 for (chan = 0; chan < rx_cnt; chan++)
3511 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
3512
3513
3514 /* VLAN Tag Insertion */
3515 if (priv->dma_cap.vlins)
3516 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);
3517
3518 /* TBS */
3519 for (chan = 0; chan < tx_cnt; chan++) {
3520 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3521 int enable = tx_q->tbs & STMMAC_TBS_AVAIL;
3522
3523 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
3524 }
3525
3526 /* Configure real RX and TX queues */
3527 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
3528 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);
3529
3530 /* Start the ball rolling... */
3531 stmmac_start_all_dma(priv);
3532
3533 stmmac_set_hw_vlan_mode(priv, priv->hw);
3534
3535 if (priv->dma_cap.fpesel) {
3536 stmmac_fpe_start_wq(priv);
3537
3538 if (priv->plat->fpe_cfg->enable)
3539 stmmac_fpe_handshake(priv, true);
3540 }
3541
3542 return 0;
3543 }
3544
3545 static void stmmac_hw_teardown(struct net_device *dev)
3546 {
3547 struct stmmac_priv *priv = netdev_priv(dev);
3548
3549 clk_disable_unprepare(priv->plat->clk_ptp_ref);
3550 }
3551
3552 static void stmmac_free_irq(struct net_device *dev,
3553 enum request_irq_err irq_err, int irq_idx)
3554 {
3555 struct stmmac_priv *priv = netdev_priv(dev);
3556 int j;
3557
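/* Teardown mirrors the request order in stmmac_request_irq_multi_msi():
 * each case falls through and releases everything that was requested
 * before it, so REQ_IRQ_ERR_ALL frees every line the driver owns.
 */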
3558 switch (irq_err) {
3559 case REQ_IRQ_ERR_ALL:
3560 irq_idx = priv->plat->tx_queues_to_use;
3561 fallthrough;
3562 case REQ_IRQ_ERR_TX:
3563 for (j = irq_idx - 1; j >= 0; j--) {
3564 if (priv->tx_irq[j] > 0) {
3565 irq_set_affinity_hint(priv->tx_irq[j], NULL);
3566 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
3567 }
3568 }
3569 irq_idx = priv->plat->rx_queues_to_use;
3570 fallthrough;
3571 case REQ_IRQ_ERR_RX:
3572 for (j = irq_idx - 1; j >= 0; j--) {
3573 if (priv->rx_irq[j] > 0) {
3574 irq_set_affinity_hint(priv->rx_irq[j], NULL);
3575 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
3576 }
3577 }
3578
3579 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
3580 free_irq(priv->sfty_ue_irq, dev);
3581 fallthrough;
3582 case REQ_IRQ_ERR_SFTY_UE:
3583 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
3584 free_irq(priv->sfty_ce_irq, dev);
3585 fallthrough;
3586 case REQ_IRQ_ERR_SFTY_CE:
3587 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq)
3588 free_irq(priv->lpi_irq, dev);
3589 fallthrough;
3590 case REQ_IRQ_ERR_LPI:
3591 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
3592 free_irq(priv->wol_irq, dev);
3593 fallthrough;
3594 case REQ_IRQ_ERR_WOL:
3595 free_irq(dev->irq, dev);
3596 fallthrough;
3597 case REQ_IRQ_ERR_MAC:
3598 case REQ_IRQ_ERR_NO:
3599 /* If the MAC IRQ request failed, there are no more IRQs to free */
3600 break;
3601 }
3602 }
3603
3604 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3605 {
3606 struct stmmac_priv *priv = netdev_priv(dev);
3607 enum request_irq_err irq_err;
3608 cpumask_t cpu_mask;
3609 int irq_idx = 0;
3610 char *int_name;
3611 int ret;
3612 int i;
3613
3614 /* For common interrupt */
3615 int_name = priv->int_name_mac;
3616 sprintf(int_name, "%s:%s", dev->name, "mac");
3617 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3618 0, int_name, dev);
3619 if (unlikely(ret < 0)) {
3620 netdev_err(priv->dev,
3621 "%s: alloc mac MSI %d (error: %d)\n",
3622 __func__, dev->irq, ret);
3623 irq_err = REQ_IRQ_ERR_MAC;
3624 goto irq_error;
3625 }
3626
3627 /* Request the Wake IRQ in case another line
3628 * is used for WoL
3629 */
3630 priv->wol_irq_disabled = true;
3631 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3632 int_name = priv->int_name_wol;
3633 sprintf(int_name, "%s:%s", dev->name, "wol");
3634 ret = request_irq(priv->wol_irq,
3635 stmmac_mac_interrupt,
3636 0, int_name, dev);
3637 if (unlikely(ret < 0)) {
3638 netdev_err(priv->dev,
3639 "%s: alloc wol MSI %d (error: %d)\n",
3640 __func__, priv->wol_irq, ret);
3641 irq_err = REQ_IRQ_ERR_WOL;
3642 goto irq_error;
3643 }
3644 }
3645
3646 /* Request the LPI IRQ in case another line
3647 * is used for LPI
3648 */
3649 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3650 int_name = priv->int_name_lpi;
3651 sprintf(int_name, "%s:%s", dev->name, "lpi");
3652 ret = request_irq(priv->lpi_irq,
3653 stmmac_mac_interrupt,
3654 0, int_name, dev);
3655 if (unlikely(ret < 0)) {
3656 netdev_err(priv->dev,
3657 "%s: alloc lpi MSI %d (error: %d)\n",
3658 __func__, priv->lpi_irq, ret);
3659 irq_err = REQ_IRQ_ERR_LPI;
3660 goto irq_error;
3661 }
3662 }
3663
3664 /* Request the Safety Feature Correctable Error line in
3665 * case another line is used
3666 */
3667 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3668 int_name = priv->int_name_sfty_ce;
3669 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3670 ret = request_irq(priv->sfty_ce_irq,
3671 stmmac_safety_interrupt,
3672 0, int_name, dev);
3673 if (unlikely(ret < 0)) {
3674 netdev_err(priv->dev,
3675 "%s: alloc sfty ce MSI %d (error: %d)\n",
3676 __func__, priv->sfty_ce_irq, ret);
3677 irq_err = REQ_IRQ_ERR_SFTY_CE;
3678 goto irq_error;
3679 }
3680 }
3681
3682 /* Request the Safety Feature Uncorrectable Error line in
3683 * case another line is used
3684 */
3685 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3686 int_name = priv->int_name_sfty_ue;
3687 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3688 ret = request_irq(priv->sfty_ue_irq,
3689 stmmac_safety_interrupt,
3690 0, int_name, dev);
3691 if (unlikely(ret < 0)) {
3692 netdev_err(priv->dev,
3693 "%s: alloc sfty ue MSI %d (error: %d)\n",
3694 __func__, priv->sfty_ue_irq, ret);
3695 irq_err = REQ_IRQ_ERR_SFTY_UE;
3696 goto irq_error;
3697 }
3698 }
3699
3700 /* Request Rx MSI irq */
3701 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3702 if (i >= MTL_MAX_RX_QUEUES)
3703 break;
3704 if (priv->rx_irq[i] == 0)
3705 continue;
3706
3707 int_name = priv->int_name_rx_irq[i];
3708 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3709 ret = request_irq(priv->rx_irq[i],
3710 stmmac_msi_intr_rx,
3711 0, int_name, &priv->dma_conf.rx_queue[i]);
3712 if (unlikely(ret < 0)) {
3713 netdev_err(priv->dev,
3714 "%s: alloc rx-%d MSI %d (error: %d)\n",
3715 __func__, i, priv->rx_irq[i], ret);
3716 irq_err = REQ_IRQ_ERR_RX;
3717 irq_idx = i;
3718 goto irq_error;
3719 }
3720 cpumask_clear(&cpu_mask);
3721 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3722 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask);
3723 }
3724
3725 /* Request Tx MSI irq */
3726 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3727 if (i >= MTL_MAX_TX_QUEUES)
3728 break;
3729 if (priv->tx_irq[i] == 0)
3730 continue;
3731
3732 int_name = priv->int_name_tx_irq[i];
3733 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3734 ret = request_irq(priv->tx_irq[i],
3735 stmmac_msi_intr_tx,
3736 0, int_name, &priv->dma_conf.tx_queue[i]);
3737 if (unlikely(ret < 0)) {
3738 netdev_err(priv->dev,
3739 "%s: alloc tx-%d MSI %d (error: %d)\n",
3740 __func__, i, priv->tx_irq[i], ret);
3741 irq_err = REQ_IRQ_ERR_TX;
3742 irq_idx = i;
3743 goto irq_error;
3744 }
3745 cpumask_clear(&cpu_mask);
3746 cpumask_set_cpu(i % num_online_cpus(), &cpu_mask);
3747 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask);
3748 }
3749
3750 return 0;
3751
3752 irq_error:
3753 stmmac_free_irq(dev, irq_err, irq_idx);
3754 return ret;
3755 }
3756
3757 static int stmmac_request_irq_single(struct net_device *dev)
3758 {
3759 struct stmmac_priv *priv = netdev_priv(dev);
3760 enum request_irq_err irq_err;
3761 int ret;
3762
3763 ret = request_irq(dev->irq, stmmac_interrupt,
3764 IRQF_SHARED, dev->name, dev);
3765 if (unlikely(ret < 0)) {
3766 netdev_err(priv->dev,
3767 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3768 __func__, dev->irq, ret);
3769 irq_err = REQ_IRQ_ERR_MAC;
3770 goto irq_error;
3771 }
3772
3773 /* Request the Wake IRQ in case another line
3774 * is used for WoL
3775 */
3776 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3777 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3778 IRQF_SHARED, dev->name, dev);
3779 if (unlikely(ret < 0)) {
3780 netdev_err(priv->dev,
3781 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3782 __func__, priv->wol_irq, ret);
3783 irq_err = REQ_IRQ_ERR_WOL;
3784 goto irq_error;
3785 }
3786 }
3787
3788 /* Request the IRQ lines */
3789 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) {
3790 ret = request_irq(priv->lpi_irq, stmmac_interrupt,
3791 IRQF_SHARED, dev->name, dev);
3792 if (unlikely(ret < 0)) {
3793 netdev_err(priv->dev,
3794 "%s: ERROR: allocating the LPI IRQ %d (%d)\n",
3795 __func__, priv->lpi_irq, ret);
3796 irq_err = REQ_IRQ_ERR_LPI;
3797 goto irq_error;
3798 }
3799 }
3800
3801 return 0;
3802
3803 irq_error:
3804 stmmac_free_irq(dev, irq_err, 0);
3805 return ret;
3806 }
3807
3808 static int stmmac_request_irq(struct net_device *dev)
3809 {
3810 struct stmmac_priv *priv = netdev_priv(dev);
3811 int ret;
3812
3813 /* Request the IRQ lines */
3814 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3815 ret = stmmac_request_irq_multi_msi(dev);
3816 else
3817 ret = stmmac_request_irq_single(dev);
3818
3819 return ret;
3820 }
3821
3822 /**
3823 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3824 * @priv: driver private structure
3825 * @mtu: MTU to setup the dma queue and buf with
3826 * Description: Allocate and generate a dma_conf based on the provided MTU.
3827 * Allocate the Tx/Rx DMA queues and init them.
3828 * Return value:
3829 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
3830 */
3831 static struct stmmac_dma_conf *
3832 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
3833 {
3834 struct stmmac_dma_conf *dma_conf;
3835 int chan, bfsize, ret;
3836
3837 dma_conf = kzalloc(sizeof(*dma_conf), GFP_KERNEL);
3838 if (!dma_conf) {
3839 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
3840 __func__);
3841 return ERR_PTR(-ENOMEM);
3842 }
3843
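/* First let stmmac_set_16kib_bfsize() decide whether this MTU needs
 * 16KiB buffers; otherwise fall back to the generic MTU-based sizing
 * in stmmac_set_bfsize().
 */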
3844 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
3845 if (bfsize < 0)
3846 bfsize = 0;
3847
3848 if (bfsize < BUF_SIZE_16KiB)
3849 bfsize = stmmac_set_bfsize(mtu, 0);
3850
3851 dma_conf->dma_buf_sz = bfsize;
3852 /* Choose the tx/rx size from the one already defined in the
3853 * priv struct, if any.
3854 */
3855 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
3856 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
3857
3858 if (!dma_conf->dma_tx_size)
3859 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
3860 if (!dma_conf->dma_rx_size)
3861 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
3862
3863 /* Earlier check for TBS */
3864 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
3865 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
3866 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
3867
3868 /* Setup per-TXQ tbs flag before TX descriptor alloc */
3869 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
3870 }
3871
3872 ret = alloc_dma_desc_resources(priv, dma_conf);
3873 if (ret < 0) {
3874 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
3875 __func__);
3876 goto alloc_error;
3877 }
3878
3879 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
3880 if (ret < 0) {
3881 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
3882 __func__);
3883 goto init_error;
3884 }
3885
3886 return dma_conf;
3887
3888 init_error:
3889 free_dma_desc_resources(priv, dma_conf);
3890 alloc_error:
3891 kfree(dma_conf);
3892 return ERR_PTR(ret);
3893 }
3894
3895 /**
3896 * __stmmac_open - open entry point of the driver
3897 * @dev : pointer to the device structure.
3898 * @dma_conf : structure to take the dma data
3899 * Description:
3900 * This function is the open entry point of the driver.
3901 * Return value:
3902 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3903 * file on failure.
3904 */
3905 static int __stmmac_open(struct net_device *dev,
3906 struct stmmac_dma_conf *dma_conf)
3907 {
3908 struct stmmac_priv *priv = netdev_priv(dev);
3909 int mode = priv->plat->phy_interface;
3910 u32 chan;
3911 int ret;
3912
3913 ret = pm_runtime_resume_and_get(priv->device);
3914 if (ret < 0)
3915 return ret;
3916
3917 if (priv->hw->pcs != STMMAC_PCS_TBI &&
3918 priv->hw->pcs != STMMAC_PCS_RTBI &&
3919 (!priv->hw->xpcs ||
3920 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) &&
3921 !priv->hw->lynx_pcs) {
3922 ret = stmmac_init_phy(dev);
3923 if (ret) {
3924 netdev_err(priv->dev,
3925 "%s: Cannot attach to PHY (error: %d)\n",
3926 __func__, ret);
3927 goto init_phy_error;
3928 }
3929 }
3930
3931 priv->rx_copybreak = STMMAC_RX_COPYBREAK;
3932
3933 buf_sz = dma_conf->dma_buf_sz;
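/* Carry over the TBS enable state of the current queues so that a
 * reconfiguration does not silently drop it.
 */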
3934 for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
3935 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
3936 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
3937 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));
3938
3939 stmmac_reset_queues_param(priv);
3940
3941 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
3942 priv->plat->serdes_powerup) {
3943 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv);
3944 if (ret < 0) {
3945 netdev_err(priv->dev, "%s: Serdes powerup failed\n",
3946 __func__);
3947 goto init_error;
3948 }
3949 }
3950
3951 ret = stmmac_hw_setup(dev, true);
3952 if (ret < 0) {
3953 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
3954 goto init_error;
3955 }
3956
3957 stmmac_init_coalesce(priv);
3958
3959 phylink_start(priv->phylink);
3960 /* We may have called phylink_speed_down before */
3961 phylink_speed_up(priv->phylink);
3962
3963 ret = stmmac_request_irq(dev);
3964 if (ret)
3965 goto irq_error;
3966
3967 stmmac_enable_all_queues(priv);
3968 netif_tx_start_all_queues(priv->dev);
3969 stmmac_enable_all_dma_irq(priv);
3970
3971 return 0;
3972
3973 irq_error:
3974 phylink_stop(priv->phylink);
3975
3976 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
3977 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
3978
3979 stmmac_hw_teardown(dev);
3980 init_error:
3981 phylink_disconnect_phy(priv->phylink);
3982 init_phy_error:
3983 pm_runtime_put(priv->device);
3984 return ret;
3985 }
3986
3987 static int stmmac_open(struct net_device *dev)
3988 {
3989 struct stmmac_priv *priv = netdev_priv(dev);
3990 struct stmmac_dma_conf *dma_conf;
3991 int ret;
3992
3993 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
3994 if (IS_ERR(dma_conf))
3995 return PTR_ERR(dma_conf);
3996
3997 ret = __stmmac_open(dev, dma_conf);
3998 if (ret)
3999 free_dma_desc_resources(priv, dma_conf);
4000
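/* On success __stmmac_open() copied *dma_conf into priv->dma_conf, so
 * the temporary structure can be freed unconditionally here.
 */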
4001 kfree(dma_conf);
4002 return ret;
4003 }
4004
4005 static void stmmac_fpe_stop_wq(struct stmmac_priv *priv)
4006 {
4007 set_bit(__FPE_REMOVING, &priv->fpe_task_state);
4008
4009 if (priv->fpe_wq) {
4010 destroy_workqueue(priv->fpe_wq);
4011 priv->fpe_wq = NULL;
4012 }
4013
4014 netdev_info(priv->dev, "FPE workqueue stop");
4015 }
4016
4017 /**
4018 * stmmac_release - close entry point of the driver
4019 * @dev : device pointer.
4020 * Description:
4021 * This is the stop entry point of the driver.
4022 */
4023 static int stmmac_release(struct net_device *dev)
4024 {
4025 struct stmmac_priv *priv = netdev_priv(dev);
4026 u32 chan;
4027
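/* Teardown roughly mirrors stmmac_open(): stop the PHY and the queues
 * first, then release IRQs and DMA resources, and finally the MAC.
 */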
4028 if (device_may_wakeup(priv->device))
4029 phylink_speed_down(priv->phylink, false);
4030 /* Stop and disconnect the PHY */
4031 phylink_stop(priv->phylink);
4032 phylink_disconnect_phy(priv->phylink);
4033
4034 stmmac_disable_all_queues(priv);
4035
4036 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
4037 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
4038
4039 netif_tx_disable(dev);
4040
4041 /* Free the IRQ lines */
4042 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
4043
4044 if (priv->eee_enabled) {
4045 priv->tx_path_in_lpi_mode = false;
4046 del_timer_sync(&priv->eee_ctrl_timer);
4047 }
4048
4049 /* Stop TX/RX DMA and clear the descriptors */
4050 stmmac_stop_all_dma(priv);
4051
4052 /* Release and free the Rx/Tx resources */
4053 free_dma_desc_resources(priv, &priv->dma_conf);
4054
4055 /* Disable the MAC Rx/Tx */
4056 stmmac_mac_set(priv, priv->ioaddr, false);
4057
4058 /* Power down the SerDes if present */
4059 if (priv->plat->serdes_powerdown)
4060 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv);
4061
4062 netif_carrier_off(dev);
4063
4064 stmmac_release_ptp(priv);
4065
4066 pm_runtime_put(priv->device);
4067
4068 if (priv->dma_cap.fpesel)
4069 stmmac_fpe_stop_wq(priv);
4070
4071 return 0;
4072 }
4073
4074 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4075 struct stmmac_tx_queue *tx_q)
4076 {
4077 u16 tag = 0x0, inner_tag = 0x0;
4078 u32 inner_type = 0x0;
4079 struct dma_desc *p;
4080
4081 if (!priv->dma_cap.vlins)
4082 return false;
4083 if (!skb_vlan_tag_present(skb))
4084 return false;
4085 if (skb->vlan_proto == htons(ETH_P_8021AD)) {
4086 inner_tag = skb_vlan_tag_get(skb);
4087 inner_type = STMMAC_VLAN_INSERT;
4088 }
4089
4090 tag = skb_vlan_tag_get(skb);
4091
4092 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4093 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4094 else
4095 p = &tx_q->dma_tx[tx_q->cur_tx];
4096
4097 if (stmmac_set_desc_vlan_tag(priv, p, tag, inner_tag, inner_type))
4098 return false;
4099
4100 stmmac_set_tx_owner(priv, p);
4101 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4102 return true;
4103 }
4104
4105 /**
4106 * stmmac_tso_allocator - fill descriptors for the TSO payload
4107 * @priv: driver private structure
4108 * @des: buffer start address
4109 * @total_len: total length to fill in descriptors
4110 * @last_segment: condition for the last descriptor
4111 * @queue: TX queue index
4112 * Description:
4113 * This function fills descriptors and requests new descriptors according to
4114 * the buffer length to fill
4115 */
4116 static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
4117 int total_len, bool last_segment, u32 queue)
4118 {
4119 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4120 struct dma_desc *desc;
4121 u32 buff_size;
4122 int tmp_len;
4123
4124 tmp_len = total_len;
4125
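/* Walk the payload in TSO_MAX_BUFF_SIZE chunks, taking a fresh
 * descriptor for each chunk; only the final chunk of the last segment
 * gets the last-segment flag.
 */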
4126 while (tmp_len > 0) {
4127 dma_addr_t curr_addr;
4128
4129 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4130 priv->dma_conf.dma_tx_size);
4131 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4132
4133 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4134 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4135 else
4136 desc = &tx_q->dma_tx[tx_q->cur_tx];
4137
4138 curr_addr = des + (total_len - tmp_len);
4139 if (priv->dma_cap.addr64 <= 32)
4140 desc->des0 = cpu_to_le32(curr_addr);
4141 else
4142 stmmac_set_desc_addr(priv, desc, curr_addr);
4143
4144 buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
4145 TSO_MAX_BUFF_SIZE : tmp_len;
4146
4147 stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
4148 0, 1,
4149 (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
4150 0, 0);
4151
4152 tmp_len -= TSO_MAX_BUFF_SIZE;
4153 }
4154 }
4155
4156 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4157 {
4158 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4159 int desc_size;
4160
4161 if (likely(priv->extend_desc))
4162 desc_size = sizeof(struct dma_extended_desc);
4163 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4164 desc_size = sizeof(struct dma_edesc);
4165 else
4166 desc_size = sizeof(struct dma_desc);
4167
4168 /* The own bit must be the latest setting done when preparing the
4169 * descriptor, and then a barrier is needed to make sure that
4170 * all is coherent before granting the DMA engine.
4171 */
4172 wmb();
4173
4174 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4175 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4176 }
4177
4178 /**
4179 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4180 * @skb : the socket buffer
4181 * @dev : device pointer
4182 * Description: this is the transmit function that is called on TSO frames
4183 * (support available on GMAC4 and newer chips).
4184 * The diagram below shows the ring programming in the case of TSO frames:
4185 *
4186 * First Descriptor
4187 * --------
4188 * | DES0 |---> buffer1 = L2/L3/L4 header
4189 * | DES1 |---> TCP Payload (can continue on next descr...)
4190 * | DES2 |---> buffer 1 and 2 len
4191 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4192 * --------
4193 * |
4194 * ...
4195 * |
4196 * --------
4197 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4198 * | DES1 | --|
4199 * | DES2 | --> buffer 1 and 2 len
4200 * | DES3 |
4201 * --------
4202 *
4203 * The MSS is fixed while TSO is enabled, so the TDES3 context field only needs programming when it changes.
4204 */
4205 static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
4206 {
4207 struct dma_desc *desc, *first, *mss_desc = NULL;
4208 struct stmmac_priv *priv = netdev_priv(dev);
4209 int nfrags = skb_shinfo(skb)->nr_frags;
4210 u32 queue = skb_get_queue_mapping(skb);
4211 unsigned int first_entry, tx_packets;
4212 struct stmmac_txq_stats *txq_stats;
4213 int tmp_pay_len = 0, first_tx;
4214 struct stmmac_tx_queue *tx_q;
4215 bool has_vlan, set_ic;
4216 u8 proto_hdr_len, hdr;
4217 u32 pay_len, mss;
4218 dma_addr_t des;
4219 int i;
4220
4221 tx_q = &priv->dma_conf.tx_queue[queue];
4222 txq_stats = &priv->xstats.txq_stats[queue];
4223 first_tx = tx_q->cur_tx;
4224
4225 /* Compute header lengths */
4226 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
4227 proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
4228 hdr = sizeof(struct udphdr);
4229 } else {
4230 proto_hdr_len = skb_tcp_all_headers(skb);
4231 hdr = tcp_hdrlen(skb);
4232 }
4233
4234 /* Descriptor availability based on the threshold should be safe enough */
4235 if (unlikely(stmmac_tx_avail(priv, queue) <
4236 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
4237 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4238 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4239 queue));
4240 /* This is a hard error, log it. */
4241 netdev_err(priv->dev,
4242 "%s: Tx Ring full when queue awake\n",
4243 __func__);
4244 }
4245 return NETDEV_TX_BUSY;
4246 }
4247
4248 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */
4249
4250 mss = skb_shinfo(skb)->gso_size;
4251
4252 /* set new MSS value if needed */
4253 if (mss != tx_q->mss) {
4254 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4255 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4256 else
4257 mss_desc = &tx_q->dma_tx[tx_q->cur_tx];
4258
4259 stmmac_set_mss(priv, mss_desc, mss);
4260 tx_q->mss = mss;
4261 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
4262 priv->dma_conf.dma_tx_size);
4263 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
4264 }
4265
4266 if (netif_msg_tx_queued(priv)) {
4267 pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
4268 __func__, hdr, proto_hdr_len, pay_len, mss);
4269 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
4270 skb->data_len);
4271 }
4272
4273 /* Check if VLAN can be inserted by HW */
4274 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4275
4276 first_entry = tx_q->cur_tx;
4277 WARN_ON(tx_q->tx_skbuff[first_entry]);
4278
4279 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4280 desc = &tx_q->dma_entx[first_entry].basic;
4281 else
4282 desc = &tx_q->dma_tx[first_entry];
4283 first = desc;
4284
4285 if (has_vlan)
4286 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4287
4288 /* first descriptor: fill Headers on Buf1 */
4289 des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
4290 DMA_TO_DEVICE);
4291 if (dma_mapping_error(priv->device, des))
4292 goto dma_map_err;
4293
4294 tx_q->tx_skbuff_dma[first_entry].buf = des;
4295 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb);
4296 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4297 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4298
4299 if (priv->dma_cap.addr64 <= 32) {
4300 first->des0 = cpu_to_le32(des);
4301
4302 /* Fill start of payload in buff2 of first descriptor */
4303 if (pay_len)
4304 first->des1 = cpu_to_le32(des + proto_hdr_len);
4305
4306 /* If needed take extra descriptors to fill the remaining payload */
4307 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE;
4308 } else {
4309 stmmac_set_desc_addr(priv, first, des);
4310 tmp_pay_len = pay_len;
4311 des += proto_hdr_len;
4312 pay_len = 0;
4313 }
4314
4315 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue);
4316
4317 /* Prepare fragments */
4318 for (i = 0; i < nfrags; i++) {
4319 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4320
4321 des = skb_frag_dma_map(priv->device, frag, 0,
4322 skb_frag_size(frag),
4323 DMA_TO_DEVICE);
4324 if (dma_mapping_error(priv->device, des))
4325 goto dma_map_err;
4326
4327 stmmac_tso_allocator(priv, des, skb_frag_size(frag),
4328 (i == nfrags - 1), queue);
4329
4330 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
4331 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
4332 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
4333 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4334 }
4335
4336 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;
4337
4338 /* Only the last descriptor gets to point to the skb. */
4339 tx_q->tx_skbuff[tx_q->cur_tx] = skb;
4340 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
4341
4342 /* Manage tx mitigation */
4343 tx_packets = (tx_q->cur_tx + 1) - first_tx;
4344 tx_q->tx_count_frames += tx_packets;
4345
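/* Decide whether this frame must raise a TX completion interrupt:
 * always when HW timestamping is requested, otherwise only when the
 * per-queue frame coalescing budget has been consumed.
 */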
4346 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4347 set_ic = true;
4348 else if (!priv->tx_coal_frames[queue])
4349 set_ic = false;
4350 else if (tx_packets > priv->tx_coal_frames[queue])
4351 set_ic = true;
4352 else if ((tx_q->tx_count_frames %
4353 priv->tx_coal_frames[queue]) < tx_packets)
4354 set_ic = true;
4355 else
4356 set_ic = false;
4357
4358 if (set_ic) {
4359 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4360 desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
4361 else
4362 desc = &tx_q->dma_tx[tx_q->cur_tx];
4363
4364 tx_q->tx_count_frames = 0;
4365 stmmac_set_tx_ic(priv, desc);
4366 }
4367
4368 /* We've used all descriptors we need for this skb, however,
4369 * advance cur_tx so that it references a fresh descriptor.
4370 * ndo_start_xmit will fill this descriptor the next time it's
4371 * called and stmmac_tx_clean may clean up to this descriptor.
4372 */
4373 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4374
4375 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4376 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4377 __func__);
4378 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4379 }
4380
4381 u64_stats_update_begin(&txq_stats->q_syncp);
4382 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4383 u64_stats_inc(&txq_stats->q.tx_tso_frames);
4384 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
4385 if (set_ic)
4386 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4387 u64_stats_update_end(&txq_stats->q_syncp);
4388
4389 if (priv->sarc_type)
4390 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4391
4392 skb_tx_timestamp(skb);
4393
4394 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4395 priv->hwts_tx_en)) {
4396 /* declare that device is doing timestamping */
4397 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4398 stmmac_enable_tx_timestamp(priv, first);
4399 }
4400
4401 /* Complete the first descriptor before granting the DMA */
4402 stmmac_prepare_tso_tx_desc(priv, first, 1,
4403 proto_hdr_len,
4404 pay_len,
4405 1, tx_q->tx_skbuff_dma[first_entry].last_segment,
4406 hdr / 4, (skb->len - proto_hdr_len));
4407
4408 /* If context desc is used to change MSS */
4409 if (mss_desc) {
4410 /* Make sure that first descriptor has been completely
4411 * written, including its own bit. This is because MSS is
4412 * actually before first descriptor, so we need to make
4413 * sure that MSS's own bit is the last thing written.
4414 */
4415 dma_wmb();
4416 stmmac_set_tx_owner(priv, mss_desc);
4417 }
4418
4419 if (netif_msg_pktdata(priv)) {
4420 pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
4421 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4422 tx_q->cur_tx, first, nfrags);
4423 pr_info(">>> frame to be transmitted: ");
4424 print_pkt(skb->data, skb_headlen(skb));
4425 }
4426
4427 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4428
4429 stmmac_flush_tx_descriptors(priv, queue);
4430 stmmac_tx_timer_arm(priv, queue);
4431
4432 return NETDEV_TX_OK;
4433
4434 dma_map_err:
4435 dev_err(priv->device, "Tx dma map failed\n");
4436 dev_kfree_skb(skb);
4437 priv->xstats.tx_dropped++;
4438 return NETDEV_TX_OK;
4439 }
4440
4441 /**
4442 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4443 * @skb: socket buffer to check
4444 *
4445 * Check if a packet has an ethertype that will trigger the IP header checks
4446 * and IP/TCP checksum engine of the stmmac core.
4447 *
4448 * Return: true if the ethertype can trigger the checksum engine, false
4449 * otherwise
4450 */
4451 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4452 {
4453 int depth = 0;
4454 __be16 proto;
4455
4456 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4457 &depth);
4458
4459 return (depth <= ETH_HLEN) &&
4460 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4461 }
4462
4463 /**
4464 * stmmac_xmit - Tx entry point of the driver
4465 * @skb : the socket buffer
4466 * @dev : device pointer
4467 * Description : this is the tx entry point of the driver.
4468 * It programs the chain or the ring and supports oversized frames
4469 * and SG feature.
4470 */
4471 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4472 {
4473 unsigned int first_entry, tx_packets, enh_desc;
4474 struct stmmac_priv *priv = netdev_priv(dev);
4475 unsigned int nopaged_len = skb_headlen(skb);
4476 int i, csum_insertion = 0, is_jumbo = 0;
4477 u32 queue = skb_get_queue_mapping(skb);
4478 int nfrags = skb_shinfo(skb)->nr_frags;
4479 int gso = skb_shinfo(skb)->gso_type;
4480 struct stmmac_txq_stats *txq_stats;
4481 struct dma_edesc *tbs_desc = NULL;
4482 struct dma_desc *desc, *first;
4483 struct stmmac_tx_queue *tx_q;
4484 bool has_vlan, set_ic;
4485 int entry, first_tx;
4486 dma_addr_t des;
4487
4488 tx_q = &priv->dma_conf.tx_queue[queue];
4489 txq_stats = &priv->xstats.txq_stats[queue];
4490 first_tx = tx_q->cur_tx;
4491
4492 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4493 stmmac_disable_eee_mode(priv);
4494
4495 /* Manage oversized TCP frames for GMAC4 device */
4496 if (skb_is_gso(skb) && priv->tso) {
4497 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4498 return stmmac_tso_xmit(skb, dev);
4499 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4))
4500 return stmmac_tso_xmit(skb, dev);
4501 }
4502
4503 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4504 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4505 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4506 queue));
4507 /* This is a hard error, log it. */
4508 netdev_err(priv->dev,
4509 "%s: Tx Ring full when queue awake\n",
4510 __func__);
4511 }
4512 return NETDEV_TX_BUSY;
4513 }
4514
4515 /* Check if VLAN can be inserted by HW */
4516 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4517
4518 entry = tx_q->cur_tx;
4519 first_entry = entry;
4520 WARN_ON(tx_q->tx_skbuff[first_entry]);
4521
4522 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4523 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4524 * queues. In that case, checksum offloading for those queues that don't
4525 * support tx coe needs to fallback to software checksum calculation.
4526 *
4527 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4528 * also have to be checksummed in software.
4529 */
4530 if (csum_insertion &&
4531 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4532 !stmmac_has_ip_ethertype(skb))) {
4533 if (unlikely(skb_checksum_help(skb)))
4534 goto dma_map_err;
4535 csum_insertion = !csum_insertion;
4536 }
4537
4538 if (likely(priv->extend_desc))
4539 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4540 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4541 desc = &tx_q->dma_entx[entry].basic;
4542 else
4543 desc = tx_q->dma_tx + entry;
4544
4545 first = desc;
4546
4547 if (has_vlan)
4548 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4549
4550 enh_desc = priv->plat->enh_desc;
4551 /* To program the descriptors according to the size of the frame */
4552 if (enh_desc)
4553 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4554
4555 if (unlikely(is_jumbo)) {
4556 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4557 if (unlikely(entry < 0) && (entry != -EINVAL))
4558 goto dma_map_err;
4559 }
4560
4561 for (i = 0; i < nfrags; i++) {
4562 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4563 int len = skb_frag_size(frag);
4564 bool last_segment = (i == (nfrags - 1));
4565
4566 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4567 WARN_ON(tx_q->tx_skbuff[entry]);
4568
4569 if (likely(priv->extend_desc))
4570 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4571 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4572 desc = &tx_q->dma_entx[entry].basic;
4573 else
4574 desc = tx_q->dma_tx + entry;
4575
4576 des = skb_frag_dma_map(priv->device, frag, 0, len,
4577 DMA_TO_DEVICE);
4578 if (dma_mapping_error(priv->device, des))
4579 goto dma_map_err; /* should reuse desc w/o issues */
4580
4581 tx_q->tx_skbuff_dma[entry].buf = des;
4582
4583 stmmac_set_desc_addr(priv, desc, des);
4584
4585 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4586 tx_q->tx_skbuff_dma[entry].len = len;
4587 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4588 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4589
4590 /* Prepare the descriptor and set the own bit too */
4591 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4592 priv->mode, 1, last_segment, skb->len);
4593 }
4594
4595 /* Only the last descriptor gets to point to the skb. */
4596 tx_q->tx_skbuff[entry] = skb;
4597 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4598
4599 /* According to the coalesce parameter the IC bit for the latest
4600 * segment is reset and the timer re-started to clean the tx status.
4601 * This approach takes care of the fragments: desc is the first
4602 * element in case of no SG.
4603 */
4604 tx_packets = (entry + 1) - first_tx;
4605 tx_q->tx_count_frames += tx_packets;
4606
4607 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4608 set_ic = true;
4609 else if (!priv->tx_coal_frames[queue])
4610 set_ic = false;
4611 else if (tx_packets > priv->tx_coal_frames[queue])
4612 set_ic = true;
4613 else if ((tx_q->tx_count_frames %
4614 priv->tx_coal_frames[queue]) < tx_packets)
4615 set_ic = true;
4616 else
4617 set_ic = false;
4618
4619 if (set_ic) {
4620 if (likely(priv->extend_desc))
4621 desc = &tx_q->dma_etx[entry].basic;
4622 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4623 desc = &tx_q->dma_entx[entry].basic;
4624 else
4625 desc = &tx_q->dma_tx[entry];
4626
4627 tx_q->tx_count_frames = 0;
4628 stmmac_set_tx_ic(priv, desc);
4629 }
4630
4631 /* We've used all descriptors we need for this skb, however,
4632 * advance cur_tx so that it references a fresh descriptor.
4633 * ndo_start_xmit will fill this descriptor the next time it's
4634 * called and stmmac_tx_clean may clean up to this descriptor.
4635 */
4636 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4637 tx_q->cur_tx = entry;
4638
4639 if (netif_msg_pktdata(priv)) {
4640 netdev_dbg(priv->dev,
4641 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4642 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4643 entry, first, nfrags);
4644
4645 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4646 print_pkt(skb->data, skb->len);
4647 }
4648
4649 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4650 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4651 __func__);
4652 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4653 }
4654
4655 u64_stats_update_begin(&txq_stats->q_syncp);
4656 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4657 if (set_ic)
4658 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4659 u64_stats_update_end(&txq_stats->q_syncp);
4660
4661 if (priv->sarc_type)
4662 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4663
4664 skb_tx_timestamp(skb);
4665
4666 /* Ready to fill the first descriptor and set the OWN bit w/o any
4667 * problems because all the descriptors are actually ready to be
4668 * passed to the DMA engine.
4669 */
4670 if (likely(!is_jumbo)) {
4671 bool last_segment = (nfrags == 0);
4672
4673 des = dma_map_single(priv->device, skb->data,
4674 nopaged_len, DMA_TO_DEVICE);
4675 if (dma_mapping_error(priv->device, des))
4676 goto dma_map_err;
4677
4678 tx_q->tx_skbuff_dma[first_entry].buf = des;
4679 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4680 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4681
4682 stmmac_set_desc_addr(priv, first, des);
4683
4684 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4685 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4686
4687 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4688 priv->hwts_tx_en)) {
4689 /* declare that device is doing timestamping */
4690 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4691 stmmac_enable_tx_timestamp(priv, first);
4692 }
4693
4694 /* Prepare the first descriptor setting the OWN bit too */
4695 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4696 csum_insertion, priv->mode, 0, last_segment,
4697 skb->len);
4698 }
4699
4700 if (tx_q->tbs & STMMAC_TBS_EN) {
4701 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4702
4703 tbs_desc = &tx_q->dma_entx[first_entry];
4704 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4705 }
4706
4707 stmmac_set_tx_owner(priv, first);
4708
4709 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4710
4711 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4712
4713 stmmac_flush_tx_descriptors(priv, queue);
4714 stmmac_tx_timer_arm(priv, queue);
4715
4716 return NETDEV_TX_OK;
4717
4718 dma_map_err:
4719 netdev_err(priv->dev, "Tx DMA map failed\n");
4720 dev_kfree_skb(skb);
4721 priv->xstats.tx_dropped++;
4722 return NETDEV_TX_OK;
4723 }
4724
4725 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4726 {
4727 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4728 __be16 vlan_proto = veth->h_vlan_proto;
4729 u16 vlanid;
4730
4731 if ((vlan_proto == htons(ETH_P_8021Q) &&
4732 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4733 (vlan_proto == htons(ETH_P_8021AD) &&
4734 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4735 /* pop the vlan tag */
4736 vlanid = ntohs(veth->h_vlan_TCI);
4737 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4738 skb_pull(skb, VLAN_HLEN);
4739 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4740 }
4741 }
4742
4743 /**
4744 * stmmac_rx_refill - refill the used preallocated RX buffers
4745 * @priv: driver private structure
4746 * @queue: RX queue index
4747 * Description: this reallocates the RX buffers for the reception process,
4748 * which is based on zero-copy.
4749 */
4750 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4751 {
4752 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4753 int dirty = stmmac_rx_dirty(priv, queue);
4754 unsigned int entry = rx_q->dirty_rx;
4755 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4756
4757 if (priv->dma_cap.host_dma_width <= 32)
4758 gfp |= GFP_DMA32;
4759
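/* Re-attach page pool buffers to each descriptor the DMA has handed
 * back, then advance dirty_rx and bump the tail pointer so the
 * hardware can reuse them.
 */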
4760 while (dirty-- > 0) {
4761 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4762 struct dma_desc *p;
4763 bool use_rx_wd;
4764
4765 if (priv->extend_desc)
4766 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4767 else
4768 p = rx_q->dma_rx + entry;
4769
4770 if (!buf->page) {
4771 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4772 if (!buf->page)
4773 break;
4774 }
4775
4776 if (priv->sph && !buf->sec_page) {
4777 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4778 if (!buf->sec_page)
4779 break;
4780
4781 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4782 }
4783
4784 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4785
4786 stmmac_set_desc_addr(priv, p, buf->addr);
4787 if (priv->sph)
4788 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4789 else
4790 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4791 stmmac_refill_desc3(priv, rx_q, p);
4792
4793 rx_q->rx_count_frames++;
4794 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4795 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4796 rx_q->rx_count_frames = 0;
4797
4798 use_rx_wd = !priv->rx_coal_frames[queue];
4799 use_rx_wd |= rx_q->rx_count_frames > 0;
4800 if (!priv->use_riwt)
4801 use_rx_wd = false;
4802
4803 dma_wmb();
4804 stmmac_set_rx_owner(priv, p, use_rx_wd);
4805
4806 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4807 }
4808 rx_q->dirty_rx = entry;
4809 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4810 (rx_q->dirty_rx * sizeof(struct dma_desc));
4811 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4812 }
4813
4814 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
4815 struct dma_desc *p,
4816 int status, unsigned int len)
4817 {
4818 unsigned int plen = 0, hlen = 0;
4819 int coe = priv->hw->rx_csum;
4820
4821 /* Not first descriptor, buffer is always zero */
4822 if (priv->sph && len)
4823 return 0;
4824
4825 /* First descriptor, get split header length */
4826 stmmac_get_rx_header_len(priv, p, &hlen);
4827 if (priv->sph && hlen) {
4828 priv->xstats.rx_split_hdr_pkt_n++;
4829 return hlen;
4830 }
4831
4832 /* First descriptor, not last descriptor and not split header */
4833 if (status & rx_not_ls)
4834 return priv->dma_conf.dma_buf_sz;
4835
4836 plen = stmmac_get_rx_frame_len(priv, p, coe);
4837
4838 /* First descriptor and last descriptor and not split header */
4839 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
4840 }
4841
4842 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
4843 struct dma_desc *p,
4844 int status, unsigned int len)
4845 {
4846 int coe = priv->hw->rx_csum;
4847 unsigned int plen = 0;
4848
4849 /* Not split header, buffer is not available */
4850 if (!priv->sph)
4851 return 0;
4852
4853 /* Not last descriptor */
4854 if (status & rx_not_ls)
4855 return priv->dma_conf.dma_buf_sz;
4856
4857 plen = stmmac_get_rx_frame_len(priv, p, coe);
4858
4859 /* Last descriptor */
4860 return plen - len;
4861 }
4862
4863 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
4864 struct xdp_frame *xdpf, bool dma_map)
4865 {
4866 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
4867 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4868 unsigned int entry = tx_q->cur_tx;
4869 struct dma_desc *tx_desc;
4870 dma_addr_t dma_addr;
4871 bool set_ic;
4872
4873 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
4874 return STMMAC_XDP_CONSUMED;
4875
4876 if (likely(priv->extend_desc))
4877 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4878 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4879 tx_desc = &tx_q->dma_entx[entry].basic;
4880 else
4881 tx_desc = tx_q->dma_tx + entry;
4882
4883 if (dma_map) {
4884 dma_addr = dma_map_single(priv->device, xdpf->data,
4885 xdpf->len, DMA_TO_DEVICE);
4886 if (dma_mapping_error(priv->device, dma_addr))
4887 return STMMAC_XDP_CONSUMED;
4888
4889 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
4890 } else {
4891 struct page *page = virt_to_page(xdpf->data);
4892
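/* XDP_TX frames still live in our page pool: the xdp_frame
 * metadata sits at the start of the buffer, followed by the
 * headroom and then the packet data.
 */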
4893 dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
4894 xdpf->headroom;
4895 dma_sync_single_for_device(priv->device, dma_addr,
4896 xdpf->len, DMA_BIDIRECTIONAL);
4897
4898 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
4899 }
4900
4901 tx_q->tx_skbuff_dma[entry].buf = dma_addr;
4902 tx_q->tx_skbuff_dma[entry].map_as_page = false;
4903 tx_q->tx_skbuff_dma[entry].len = xdpf->len;
4904 tx_q->tx_skbuff_dma[entry].last_segment = true;
4905 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
4906
4907 tx_q->xdpf[entry] = xdpf;
4908
4909 stmmac_set_desc_addr(priv, tx_desc, dma_addr);
4910
4911 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
4912 true, priv->mode, true, true,
4913 xdpf->len);
4914
4915 tx_q->tx_count_frames++;
4916
4917 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
4918 set_ic = true;
4919 else
4920 set_ic = false;
4921
4922 if (set_ic) {
4923 tx_q->tx_count_frames = 0;
4924 stmmac_set_tx_ic(priv, tx_desc);
4925 u64_stats_update_begin(&txq_stats->q_syncp);
4926 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4927 u64_stats_update_end(&txq_stats->q_syncp);
4928 }
4929
4930 stmmac_enable_dma_transmission(priv, priv->ioaddr);
4931
4932 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4933 tx_q->cur_tx = entry;
4934
4935 return STMMAC_XDP_TX;
4936 }
4937
4938 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
4939 int cpu)
4940 {
4941 int index = cpu;
4942
4943 if (unlikely(index < 0))
4944 index = 0;
4945
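/* Fold the CPU id onto the available TX queues (a cheap modulo), so
 * XDP transmissions from different CPUs spread across the queues.
 */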
4946 while (index >= priv->plat->tx_queues_to_use)
4947 index -= priv->plat->tx_queues_to_use;
4948
4949 return index;
4950 }
4951
4952 static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
4953 struct xdp_buff *xdp)
4954 {
4955 struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
4956 int cpu = smp_processor_id();
4957 struct netdev_queue *nq;
4958 int queue;
4959 int res;
4960
4961 if (unlikely(!xdpf))
4962 return STMMAC_XDP_CONSUMED;
4963
4964 queue = stmmac_xdp_get_tx_queue(priv, cpu);
4965 nq = netdev_get_tx_queue(priv->dev, queue);
4966
4967 __netif_tx_lock(nq, cpu);
4968 /* Avoids TX time-out as we are sharing with slow path */
4969 txq_trans_cond_update(nq);
4970
4971 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false);
4972 if (res == STMMAC_XDP_TX)
4973 stmmac_flush_tx_descriptors(priv, queue);
4974
4975 __netif_tx_unlock(nq);
4976
4977 return res;
4978 }
4979
4980 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
4981 struct bpf_prog *prog,
4982 struct xdp_buff *xdp)
4983 {
4984 u32 act;
4985 int res;
4986
4987 act = bpf_prog_run_xdp(prog, xdp);
4988 switch (act) {
4989 case XDP_PASS:
4990 res = STMMAC_XDP_PASS;
4991 break;
4992 case XDP_TX:
4993 res = stmmac_xdp_xmit_back(priv, xdp);
4994 break;
4995 case XDP_REDIRECT:
4996 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
4997 res = STMMAC_XDP_CONSUMED;
4998 else
4999 res = STMMAC_XDP_REDIRECT;
5000 break;
5001 default:
5002 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5003 fallthrough;
5004 case XDP_ABORTED:
5005 trace_xdp_exception(priv->dev, prog, act);
5006 fallthrough;
5007 case XDP_DROP:
5008 res = STMMAC_XDP_CONSUMED;
5009 break;
5010 }
5011
5012 return res;
5013 }
5014
5015 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5016 struct xdp_buff *xdp)
5017 {
5018 struct bpf_prog *prog;
5019 int res;
5020
5021 prog = READ_ONCE(priv->xdp_prog);
5022 if (!prog) {
5023 res = STMMAC_XDP_PASS;
5024 goto out;
5025 }
5026
5027 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5028 out:
5029 return ERR_PTR(-res);
5030 }
5031
5032 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5033 int xdp_status)
5034 {
5035 int cpu = smp_processor_id();
5036 int queue;
5037
5038 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5039
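/* Arm the TX timer for frames queued via XDP_TX and flush any
 * redirects batched during this NAPI poll.
 */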
5040 if (xdp_status & STMMAC_XDP_TX)
5041 stmmac_tx_timer_arm(priv, queue);
5042
5043 if (xdp_status & STMMAC_XDP_REDIRECT)
5044 xdp_do_flush();
5045 }
5046
5047 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5048 struct xdp_buff *xdp)
5049 {
5050 unsigned int metasize = xdp->data - xdp->data_meta;
5051 unsigned int datasize = xdp->data_end - xdp->data;
5052 struct sk_buff *skb;
5053
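/* Zero-copy RX cannot hand the XSK buffer to the stack directly, so
 * copy the frame (and its metadata) into a freshly allocated skb.
 */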
5054 skb = __napi_alloc_skb(&ch->rxtx_napi,
5055 xdp->data_end - xdp->data_hard_start,
5056 GFP_ATOMIC | __GFP_NOWARN);
5057 if (unlikely(!skb))
5058 return NULL;
5059
5060 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5061 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5062 if (metasize)
5063 skb_metadata_set(skb, metasize);
5064
5065 return skb;
5066 }
5067
5068 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
5069 struct dma_desc *p, struct dma_desc *np,
5070 struct xdp_buff *xdp)
5071 {
5072 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5073 struct stmmac_channel *ch = &priv->channel[queue];
5074 unsigned int len = xdp->data_end - xdp->data;
5075 enum pkt_hash_types hash_type;
5076 int coe = priv->hw->rx_csum;
5077 struct sk_buff *skb;
5078 u32 hash;
5079
5080 skb = stmmac_construct_skb_zc(ch, xdp);
5081 if (!skb) {
5082 priv->xstats.rx_dropped++;
5083 return;
5084 }
5085
5086 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5087 if (priv->hw->hw_vlan_en)
5088 /* MAC level stripping. */
5089 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5090 else
5091 /* Driver level stripping. */
5092 stmmac_rx_vlan(priv->dev, skb);
5093 skb->protocol = eth_type_trans(skb, priv->dev);
5094
5095 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5096 skb_checksum_none_assert(skb);
5097 else
5098 skb->ip_summed = CHECKSUM_UNNECESSARY;
5099
5100 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5101 skb_set_hash(skb, hash, hash_type);
5102
5103 skb_record_rx_queue(skb, queue);
5104 napi_gro_receive(&ch->rxtx_napi, skb);
5105
5106 u64_stats_update_begin(&rxq_stats->napi_syncp);
5107 u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
5108 u64_stats_add(&rxq_stats->napi.rx_bytes, len);
5109 u64_stats_update_end(&rxq_stats->napi_syncp);
5110 }
5111
5112 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
5113 {
5114 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5115 unsigned int entry = rx_q->dirty_rx;
5116 struct dma_desc *rx_desc = NULL;
5117 bool ret = true;
5118
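/* Re-arm up to 'budget' descriptors with fresh XSK buffers; report
 * failure (false) if the pool runs dry before we are done.
 */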
5119 budget = min(budget, stmmac_rx_dirty(priv, queue));
5120
5121 while (budget-- > 0 && entry != rx_q->cur_rx) {
5122 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
5123 dma_addr_t dma_addr;
5124 bool use_rx_wd;
5125
5126 if (!buf->xdp) {
5127 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
5128 if (!buf->xdp) {
5129 ret = false;
5130 break;
5131 }
5132 }
5133
5134 if (priv->extend_desc)
5135 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
5136 else
5137 rx_desc = rx_q->dma_rx + entry;
5138
5139 dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
5140 stmmac_set_desc_addr(priv, rx_desc, dma_addr);
5141 stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
5142 stmmac_refill_desc3(priv, rx_q, rx_desc);
5143
5144 rx_q->rx_count_frames++;
5145 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
5146 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
5147 rx_q->rx_count_frames = 0;
5148
5149 use_rx_wd = !priv->rx_coal_frames[queue];
5150 use_rx_wd |= rx_q->rx_count_frames > 0;
5151 if (!priv->use_riwt)
5152 use_rx_wd = false;
5153
5154 dma_wmb();
5155 stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);
5156
5157 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
5158 }
5159
5160 if (rx_desc) {
5161 rx_q->dirty_rx = entry;
5162 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
5163 (rx_q->dirty_rx * sizeof(struct dma_desc));
5164 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
5165 }
5166
5167 return ret;
5168 }
5169
5170 static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
5171 {
5172 /* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
5173 * to represent incoming packet, whereas cb field in the same structure
5174 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
5175 * is laid on top of xdp and cb fields of struct xdp_buff_xsk.
5176 */
5177 return (struct stmmac_xdp_buff *)xdp;
5178 }
5179
5180 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
5181 {
5182 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5183 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5184 unsigned int count = 0, error = 0, len = 0;
5185 int dirty = stmmac_rx_dirty(priv, queue);
5186 unsigned int next_entry = rx_q->cur_rx;
5187 u32 rx_errors = 0, rx_dropped = 0;
5188 unsigned int desc_size;
5189 struct bpf_prog *prog;
5190 bool failure = false;
5191 int xdp_status = 0;
5192 int status = 0;
5193
5194 if (netif_msg_rx_status(priv)) {
5195 void *rx_head;
5196
5197 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5198 if (priv->extend_desc) {
5199 rx_head = (void *)rx_q->dma_erx;
5200 desc_size = sizeof(struct dma_extended_desc);
5201 } else {
5202 rx_head = (void *)rx_q->dma_rx;
5203 desc_size = sizeof(struct dma_desc);
5204 }
5205
5206 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5207 rx_q->dma_rx_phy, desc_size);
5208 }
5209 while (count < limit) {
5210 struct stmmac_rx_buffer *buf;
5211 struct stmmac_xdp_buff *ctx;
5212 unsigned int buf1_len = 0;
5213 struct dma_desc *np, *p;
5214 int entry;
5215 int res;
5216
5217 if (!count && rx_q->state_saved) {
5218 error = rx_q->state.error;
5219 len = rx_q->state.len;
5220 } else {
5221 rx_q->state_saved = false;
5222 error = 0;
5223 len = 0;
5224 }
5225
5226 if (count >= limit)
5227 break;
5228
5229 read_again:
5230 buf1_len = 0;
5231 entry = next_entry;
5232 buf = &rx_q->buf_pool[entry];
5233
5234 if (dirty >= STMMAC_RX_FILL_BATCH) {
5235 failure = failure ||
5236 !stmmac_rx_refill_zc(priv, queue, dirty);
5237 dirty = 0;
5238 }
5239
5240 if (priv->extend_desc)
5241 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5242 else
5243 p = rx_q->dma_rx + entry;
5244
5245 /* read the status of the incoming frame */
5246 status = stmmac_rx_status(priv, &priv->xstats, p);
5247 /* check if managed by the DMA otherwise go ahead */
5248 if (unlikely(status & dma_own))
5249 break;
5250
5251 /* Prefetch the next RX descriptor */
5252 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5253 priv->dma_conf.dma_rx_size);
5254 next_entry = rx_q->cur_rx;
5255
5256 if (priv->extend_desc)
5257 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5258 else
5259 np = rx_q->dma_rx + next_entry;
5260
5261 prefetch(np);
5262
5263 /* Ensure a valid XSK buffer before proceeding */
5264 if (!buf->xdp)
5265 break;
5266
5267 if (priv->extend_desc)
5268 stmmac_rx_extended_status(priv, &priv->xstats,
5269 rx_q->dma_erx + entry);
5270 if (unlikely(status == discard_frame)) {
5271 xsk_buff_free(buf->xdp);
5272 buf->xdp = NULL;
5273 dirty++;
5274 error = 1;
5275 if (!priv->hwts_rx_en)
5276 rx_errors++;
5277 }
5278
5279 if (unlikely(error && (status & rx_not_ls)))
5280 goto read_again;
5281 if (unlikely(error)) {
5282 count++;
5283 continue;
5284 }
5285
5286 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */
5287 if (likely(status & rx_not_ls)) {
5288 xsk_buff_free(buf->xdp);
5289 buf->xdp = NULL;
5290 dirty++;
5291 count++;
5292 goto read_again;
5293 }
5294
5295 ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
5296 ctx->priv = priv;
5297 ctx->desc = p;
5298 ctx->ndesc = np;
5299
5300 /* XDP ZC frames only support primary buffers for now */
5301 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5302 len += buf1_len;
5303
5304 /* ACS is disabled; strip manually. */
5305 if (likely(!(status & rx_not_ls))) {
5306 buf1_len -= ETH_FCS_LEN;
5307 len -= ETH_FCS_LEN;
5308 }
5309
5310 /* RX buffer is good and fits into an XSK pool buffer */
5311 buf->xdp->data_end = buf->xdp->data + buf1_len;
5312 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool);
5313
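/* Run the attached XDP program; its verdict decides whether the frame is
 * passed to the stack, dropped, transmitted or redirected.
 */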
5314 prog = READ_ONCE(priv->xdp_prog);
5315 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);
5316
5317 switch (res) {
5318 case STMMAC_XDP_PASS:
5319 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
5320 xsk_buff_free(buf->xdp);
5321 break;
5322 case STMMAC_XDP_CONSUMED:
5323 xsk_buff_free(buf->xdp);
5324 rx_dropped++;
5325 break;
5326 case STMMAC_XDP_TX:
5327 case STMMAC_XDP_REDIRECT:
5328 xdp_status |= res;
5329 break;
5330 }
5331
5332 buf->xdp = NULL;
5333 dirty++;
5334 count++;
5335 }
5336
5337 if (status & rx_not_ls) {
5338 rx_q->state_saved = true;
5339 rx_q->state.error = error;
5340 rx_q->state.len = len;
5341 }
5342
5343 stmmac_finalize_xdp_rx(priv, xdp_status);
5344
5345 u64_stats_update_begin(&rxq_stats->napi_syncp);
5346 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5347 u64_stats_update_end(&rxq_stats->napi_syncp);
5348
5349 priv->xstats.rx_dropped += rx_dropped;
5350 priv->xstats.rx_errors += rx_errors;
5351
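/* If the XSK pool uses the need_wakeup protocol, tell user space to refill
 * the fill queue whenever buffers could not be replenished.
 */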
5352 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
5353 if (failure || stmmac_rx_dirty(priv, queue) > 0)
5354 xsk_set_rx_need_wakeup(rx_q->xsk_pool);
5355 else
5356 xsk_clear_rx_need_wakeup(rx_q->xsk_pool);
5357
5358 return (int)count;
5359 }
5360
5361 return failure ? limit : (int)count;
5362 }
5363
5364 /**
5365 * stmmac_rx - manage the receive process
5366 * @priv: driver private structure
5367 * @limit: napi budget
5368 * @queue: RX queue index.
5369 * Description : this is the function called by the napi poll method.
5370 * It gets all the frames inside the ring.
5371 */
5372 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5373 {
5374 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5375 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5376 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5377 struct stmmac_channel *ch = &priv->channel[queue];
5378 unsigned int count = 0, error = 0, len = 0;
5379 int status = 0, coe = priv->hw->rx_csum;
5380 unsigned int next_entry = rx_q->cur_rx;
5381 enum dma_data_direction dma_dir;
5382 unsigned int desc_size;
5383 struct sk_buff *skb = NULL;
5384 struct stmmac_xdp_buff ctx;
5385 int xdp_status = 0;
5386 int buf_sz;
5387
5388 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5389 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5390 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5391
5392 if (netif_msg_rx_status(priv)) {
5393 void *rx_head;
5394
5395 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5396 if (priv->extend_desc) {
5397 rx_head = (void *)rx_q->dma_erx;
5398 desc_size = sizeof(struct dma_extended_desc);
5399 } else {
5400 rx_head = (void *)rx_q->dma_rx;
5401 desc_size = sizeof(struct dma_desc);
5402 }
5403
5404 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5405 rx_q->dma_rx_phy, desc_size);
5406 }
5407 while (count < limit) {
5408 unsigned int buf1_len = 0, buf2_len = 0;
5409 enum pkt_hash_types hash_type;
5410 struct stmmac_rx_buffer *buf;
5411 struct dma_desc *np, *p;
5412 int entry;
5413 u32 hash;
5414
5415 if (!count && rx_q->state_saved) {
5416 skb = rx_q->state.skb;
5417 error = rx_q->state.error;
5418 len = rx_q->state.len;
5419 } else {
5420 rx_q->state_saved = false;
5421 skb = NULL;
5422 error = 0;
5423 len = 0;
5424 }
5425
5426 read_again:
5427 if (count >= limit)
5428 break;
5429
5430 buf1_len = 0;
5431 buf2_len = 0;
5432 entry = next_entry;
5433 buf = &rx_q->buf_pool[entry];
5434
5435 if (priv->extend_desc)
5436 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5437 else
5438 p = rx_q->dma_rx + entry;
5439
5440 /* read the status of the incoming frame */
5441 status = stmmac_rx_status(priv, &priv->xstats, p);
5442 /* check if managed by the DMA otherwise go ahead */
5443 if (unlikely(status & dma_own))
5444 break;
5445
5446 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5447 priv->dma_conf.dma_rx_size);
5448 next_entry = rx_q->cur_rx;
5449
5450 if (priv->extend_desc)
5451 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5452 else
5453 np = rx_q->dma_rx + next_entry;
5454
5455 prefetch(np);
5456
5457 if (priv->extend_desc)
5458 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5459 if (unlikely(status == discard_frame)) {
5460 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5461 buf->page = NULL;
5462 error = 1;
5463 if (!priv->hwts_rx_en)
5464 rx_errors++;
5465 }
5466
5467 if (unlikely(error && (status & rx_not_ls)))
5468 goto read_again;
5469 if (unlikely(error)) {
5470 dev_kfree_skb(skb);
5471 skb = NULL;
5472 count++;
5473 continue;
5474 }
5475
5476 /* Buffer is good. Go on. */
5477
5478 prefetch(page_address(buf->page) + buf->page_offset);
5479 if (buf->sec_page)
5480 prefetch(page_address(buf->sec_page));
5481
5482 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5483 len += buf1_len;
5484 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5485 len += buf2_len;
5486
5487 /* ACS is disabled; strip manually. */
5488 if (likely(!(status & rx_not_ls))) {
5489 if (buf2_len) {
5490 buf2_len -= ETH_FCS_LEN;
5491 len -= ETH_FCS_LEN;
5492 } else if (buf1_len) {
5493 buf1_len -= ETH_FCS_LEN;
5494 len -= ETH_FCS_LEN;
5495 }
5496 }
5497
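/* First buffer of a new frame: wrap the page in an xdp_buff and give an
 * attached XDP program a chance to act before any skb is allocated.
 */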
5498 if (!skb) {
5499 unsigned int pre_len, sync_len;
5500
5501 dma_sync_single_for_cpu(priv->device, buf->addr,
5502 buf1_len, dma_dir);
5503
5504 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq);
5505 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5506 buf->page_offset, buf1_len, true);
5507
5508 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5509 buf->page_offset;
5510
5511 ctx.priv = priv;
5512 ctx.desc = p;
5513 ctx.ndesc = np;
5514
5515 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5516 /* Due to xdp_adjust_tail(): the DMA sync for_device must
5517 * cover the maximum length the CPU touched.
5518 */
5519 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5520 buf->page_offset;
5521 sync_len = max(sync_len, pre_len);
5522
5523 /* For non-XDP_PASS verdicts */
5524 if (IS_ERR(skb)) {
5525 unsigned int xdp_res = -PTR_ERR(skb);
5526
5527 if (xdp_res & STMMAC_XDP_CONSUMED) {
5528 page_pool_put_page(rx_q->page_pool,
5529 virt_to_head_page(ctx.xdp.data),
5530 sync_len, true);
5531 buf->page = NULL;
5532 rx_dropped++;
5533
5534 /* Clear skb, as it was set to carry the
5535 * XDP verdict, not a real buffer.
5536 */
5537 skb = NULL;
5538
5539 if (unlikely((status & rx_not_ls)))
5540 goto read_again;
5541
5542 count++;
5543 continue;
5544 } else if (xdp_res & (STMMAC_XDP_TX |
5545 STMMAC_XDP_REDIRECT)) {
5546 xdp_status |= xdp_res;
5547 buf->page = NULL;
5548 skb = NULL;
5549 count++;
5550 continue;
5551 }
5552 }
5553 }
5554
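/* XDP_PASS or no XDP program: copy the (possibly adjusted) first buffer
 * into a freshly allocated skb and recycle the page.
 */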
5555 if (!skb) {
5556 /* XDP program may expand or reduce tail */
5557 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5558
5559 skb = napi_alloc_skb(&ch->rx_napi, buf1_len);
5560 if (!skb) {
5561 rx_dropped++;
5562 count++;
5563 goto drain_data;
5564 }
5565
5566 /* XDP program may adjust header */
5567 skb_copy_to_linear_data(skb, ctx.xdp.data, buf1_len);
5568 skb_put(skb, buf1_len);
5569
5570 /* Data payload copied into SKB, page ready for recycle */
5571 page_pool_recycle_direct(rx_q->page_pool, buf->page);
5572 buf->page = NULL;
5573 } else if (buf1_len) {
5574 dma_sync_single_for_cpu(priv->device, buf->addr,
5575 buf1_len, dma_dir);
5576 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5577 buf->page, buf->page_offset, buf1_len,
5578 priv->dma_conf.dma_buf_sz);
5579
5580 /* Data payload appended into SKB */
5581 skb_mark_for_recycle(skb);
5582 buf->page = NULL;
5583 }
5584
5585 if (buf2_len) {
5586 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5587 buf2_len, dma_dir);
5588 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5589 buf->sec_page, 0, buf2_len,
5590 priv->dma_conf.dma_buf_sz);
5591
5592 /* Data payload appended into SKB */
5593 skb_mark_for_recycle(skb);
5594 buf->sec_page = NULL;
5595 }
5596
5597 drain_data:
5598 if (likely(status & rx_not_ls))
5599 goto read_again;
5600 if (!skb)
5601 continue;
5602
5603 /* Got entire packet into SKB. Finish it. */
5604
5605 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5606
5607 if (priv->hw->hw_vlan_en)
5608 /* MAC level stripping. */
5609 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5610 else
5611 /* Driver level stripping. */
5612 stmmac_rx_vlan(priv->dev, skb);
5613
5614 skb->protocol = eth_type_trans(skb, priv->dev);
5615
5616 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
5617 skb_checksum_none_assert(skb);
5618 else
5619 skb->ip_summed = CHECKSUM_UNNECESSARY;
5620
5621 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5622 skb_set_hash(skb, hash, hash_type);
5623
5624 skb_record_rx_queue(skb, queue);
5625 napi_gro_receive(&ch->rx_napi, skb);
5626 skb = NULL;
5627
5628 rx_packets++;
5629 rx_bytes += len;
5630 count++;
5631 }
5632
5633 if (status & rx_not_ls || skb) {
5634 rx_q->state_saved = true;
5635 rx_q->state.skb = skb;
5636 rx_q->state.error = error;
5637 rx_q->state.len = len;
5638 }
5639
5640 stmmac_finalize_xdp_rx(priv, xdp_status);
5641
5642 stmmac_rx_refill(priv, queue);
5643
5644 u64_stats_update_begin(&rxq_stats->napi_syncp);
5645 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5646 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5647 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5648 u64_stats_update_end(&rxq_stats->napi_syncp);
5649
5650 priv->xstats.rx_dropped += rx_dropped;
5651 priv->xstats.rx_errors += rx_errors;
5652
5653 return count;
5654 }
5655
5656 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5657 {
5658 struct stmmac_channel *ch =
5659 container_of(napi, struct stmmac_channel, rx_napi);
5660 struct stmmac_priv *priv = ch->priv_data;
5661 struct stmmac_rxq_stats *rxq_stats;
5662 u32 chan = ch->index;
5663 int work_done;
5664
5665 rxq_stats = &priv->xstats.rxq_stats[chan];
5666 u64_stats_update_begin(&rxq_stats->napi_syncp);
5667 u64_stats_inc(&rxq_stats->napi.poll);
5668 u64_stats_update_end(&rxq_stats->napi_syncp);
5669
5670 work_done = stmmac_rx(priv, budget, chan);
5671 if (work_done < budget && napi_complete_done(napi, work_done)) {
5672 unsigned long flags;
5673
5674 spin_lock_irqsave(&ch->lock, flags);
5675 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5676 spin_unlock_irqrestore(&ch->lock, flags);
5677 }
5678
5679 return work_done;
5680 }
5681
5682 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5683 {
5684 struct stmmac_channel *ch =
5685 container_of(napi, struct stmmac_channel, tx_napi);
5686 struct stmmac_priv *priv = ch->priv_data;
5687 struct stmmac_txq_stats *txq_stats;
5688 bool pending_packets = false;
5689 u32 chan = ch->index;
5690 int work_done;
5691
5692 txq_stats = &priv->xstats.txq_stats[chan];
5693 u64_stats_update_begin(&txq_stats->napi_syncp);
5694 u64_stats_inc(&txq_stats->napi.poll);
5695 u64_stats_update_end(&txq_stats->napi_syncp);
5696
5697 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5698 work_done = min(work_done, budget);
5699
5700 if (work_done < budget && napi_complete_done(napi, work_done)) {
5701 unsigned long flags;
5702
5703 spin_lock_irqsave(&ch->lock, flags);
5704 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5705 spin_unlock_irqrestore(&ch->lock, flags);
5706 }
5707
5708 /* TX still has packets to handle, check if we need to arm the tx timer */
5709 if (pending_packets)
5710 stmmac_tx_timer_arm(priv, chan);
5711
5712 return work_done;
5713 }
5714
5715 static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
5716 {
5717 struct stmmac_channel *ch =
5718 container_of(napi, struct stmmac_channel, rxtx_napi);
5719 struct stmmac_priv *priv = ch->priv_data;
5720 bool tx_pending_packets = false;
5721 int rx_done, tx_done, rxtx_done;
5722 struct stmmac_rxq_stats *rxq_stats;
5723 struct stmmac_txq_stats *txq_stats;
5724 u32 chan = ch->index;
5725
5726 rxq_stats = &priv->xstats.rxq_stats[chan];
5727 u64_stats_update_begin(&rxq_stats->napi_syncp);
5728 u64_stats_inc(&rxq_stats->napi.poll);
5729 u64_stats_update_end(&rxq_stats->napi_syncp);
5730
5731 txq_stats = &priv->xstats.txq_stats[chan];
5732 u64_stats_update_begin(&txq_stats->napi_syncp);
5733 u64_stats_inc(&txq_stats->napi.poll);
5734 u64_stats_update_end(&txq_stats->napi_syncp);
5735
5736 tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
5737 tx_done = min(tx_done, budget);
5738
5739 rx_done = stmmac_rx_zc(priv, budget, chan);
5740
5741 rxtx_done = max(tx_done, rx_done);
5742
5743 /* If either TX or RX work is not complete, return budget
5744 * and keep polling
5745 */
5746 if (rxtx_done >= budget)
5747 return budget;
5748
5749 /* all work done, exit the polling mode */
5750 if (napi_complete_done(napi, rxtx_done)) {
5751 unsigned long flags;
5752
5753 spin_lock_irqsave(&ch->lock, flags);
5754 /* Both RX and TX work are complete,
5755 * so enable both RX & TX IRQs.
5756 */
5757 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
5758 spin_unlock_irqrestore(&ch->lock, flags);
5759 }
5760
5761 /* TX still has packets to handle, check if we need to arm the tx timer */
5762 if (tx_pending_packets)
5763 stmmac_tx_timer_arm(priv, chan);
5764
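/* Never report the full budget here: napi_complete_done() may already have
 * been called above, and NAPI requires a completed poll to return strictly
 * less work than its budget.
 */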
5765 return min(rxtx_done, budget - 1);
5766 }
5767
5768 /**
5769 * stmmac_tx_timeout
5770 * @dev : Pointer to net device structure
5771 * @txqueue: the index of the hanging transmit queue
5772 * Description: this function is called when a packet transmission fails to
5773 * complete within a reasonable time. The driver will mark the error in the
5774 * netdev structure and arrange for the device to be reset to a sane state
5775 * in order to transmit a new packet.
5776 */
5777 static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
5778 {
5779 struct stmmac_priv *priv = netdev_priv(dev);
5780
5781 stmmac_global_err(priv);
5782 }
5783
5784 /**
5785 * stmmac_set_rx_mode - entry point for multicast addressing
5786 * @dev : pointer to the device structure
5787 * Description:
5788 * This function is a driver entry point which gets called by the kernel
5789 * whenever multicast addresses must be enabled/disabled.
5790 * Return value:
5791 * void.
5792 */
5793 static void stmmac_set_rx_mode(struct net_device *dev)
5794 {
5795 struct stmmac_priv *priv = netdev_priv(dev);
5796
5797 stmmac_set_filter(priv, priv->hw, dev);
5798 }
5799
5800 /**
5801 * stmmac_change_mtu - entry point to change MTU size for the device.
5802 * @dev : device pointer.
5803 * @new_mtu : the new MTU size for the device.
5804 * Description: the Maximum Transmission Unit (MTU) is used by the network layer
5805 * to drive packet transmission. Ethernet has an MTU of 1500 octets
5806 * (ETH_DATA_LEN). This value can be changed with ifconfig.
5807 * Return value:
5808 * 0 on success and an appropriate (-)ve integer as defined in errno.h
5809 * file on failure.
5810 */
5811 static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
5812 {
5813 struct stmmac_priv *priv = netdev_priv(dev);
5814 int txfifosz = priv->plat->tx_fifo_size;
5815 struct stmmac_dma_conf *dma_conf;
5816 const int mtu = new_mtu;
5817 int ret;
5818
5819 if (txfifosz == 0)
5820 txfifosz = priv->dma_cap.tx_fifo_size;
5821
5822 txfifosz /= priv->plat->tx_queues_to_use;
5823
5824 if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
5825 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
5826 return -EINVAL;
5827 }
5828
5829 new_mtu = STMMAC_ALIGN(new_mtu);
5830
5831 /* Reject MTUs that do not fit the per-queue TX FIFO or exceed the 16KiB buffer limit */
5832 if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
5833 return -EINVAL;
5834
5835 if (netif_running(dev)) {
5836 netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
5837 /* Try to allocate the new DMA conf with the new mtu */
5838 dma_conf = stmmac_setup_dma_desc(priv, mtu);
5839 if (IS_ERR(dma_conf)) {
5840 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
5841 mtu);
5842 return PTR_ERR(dma_conf);
5843 }
5844
5845 stmmac_release(dev);
5846
5847 ret = __stmmac_open(dev, dma_conf);
5848 if (ret) {
5849 free_dma_desc_resources(priv, dma_conf);
5850 kfree(dma_conf);
5851 netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
5852 return ret;
5853 }
5854
5855 kfree(dma_conf);
5856
5857 stmmac_set_rx_mode(dev);
5858 }
5859
5860 dev->mtu = mtu;
5861 netdev_update_features(dev);
5862
5863 return 0;
5864 }
5865
5866 static netdev_features_t stmmac_fix_features(struct net_device *dev,
5867 netdev_features_t features)
5868 {
5869 struct stmmac_priv *priv = netdev_priv(dev);
5870
5871 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
5872 features &= ~NETIF_F_RXCSUM;
5873
5874 if (!priv->plat->tx_coe)
5875 features &= ~NETIF_F_CSUM_MASK;
5876
5877 /* Some GMAC devices have buggy Jumbo frame support that
5878 * requires Tx COE to be disabled for oversized frames
5879 * (due to limited buffer sizes). In this case we disable
5880 * the TX csum insertion in the TDES and do not use SF.
5881 */
5882 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN))
5883 features &= ~NETIF_F_CSUM_MASK;
5884
5885 /* Enable or disable TSO as requested by ethtool */
5886 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
5887 if (features & NETIF_F_TSO)
5888 priv->tso = true;
5889 else
5890 priv->tso = false;
5891 }
5892
5893 return features;
5894 }
5895
5896 static int stmmac_set_features(struct net_device *netdev,
5897 netdev_features_t features)
5898 {
5899 struct stmmac_priv *priv = netdev_priv(netdev);
5900
5901 /* Keep the COE Type if RX checksum offload is supported */
5902 if (features & NETIF_F_RXCSUM)
5903 priv->hw->rx_csum = priv->plat->rx_coe;
5904 else
5905 priv->hw->rx_csum = 0;
5906 /* No check needed because rx_coe has been set before and it will be
5907 * fixed in case of issue.
5908 */
5909 stmmac_rx_ipc(priv, priv->hw);
5910
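/* Split Header depends on RX checksum offload being active, so re-evaluate
 * it whenever NETIF_F_RXCSUM is toggled.
 */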
5911 if (priv->sph_cap) {
5912 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph;
5913 u32 chan;
5914
5915 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
5916 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
5917 }
5918
5919 if (features & NETIF_F_HW_VLAN_CTAG_RX)
5920 priv->hw->hw_vlan_en = true;
5921 else
5922 priv->hw->hw_vlan_en = false;
5923
5924 stmmac_set_hw_vlan_mode(priv, priv->hw);
5925
5926 return 0;
5927 }
5928
5929 static void stmmac_fpe_event_status(struct stmmac_priv *priv, int status)
5930 {
5931 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
5932 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
5933 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
5934 bool *hs_enable = &fpe_cfg->hs_enable;
5935
5936 if (status == FPE_EVENT_UNKNOWN || !*hs_enable)
5937 return;
5938
5939 /* If LP has sent verify mPacket, LP is FPE capable */
5940 if ((status & FPE_EVENT_RVER) == FPE_EVENT_RVER) {
5941 if (*lp_state < FPE_STATE_CAPABLE)
5942 *lp_state = FPE_STATE_CAPABLE;
5943
5944 /* If the user has requested FPE enable, respond quickly */
5945 if (*hs_enable)
5946 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
5947 fpe_cfg,
5948 MPACKET_RESPONSE);
5949 }
5950
5951 /* If Local has sent verify mPacket, Local is FPE capable */
5952 if ((status & FPE_EVENT_TVER) == FPE_EVENT_TVER) {
5953 if (*lo_state < FPE_STATE_CAPABLE)
5954 *lo_state = FPE_STATE_CAPABLE;
5955 }
5956
5957 /* If LP has sent response mPacket, LP is entering FPE ON */
5958 if ((status & FPE_EVENT_RRSP) == FPE_EVENT_RRSP)
5959 *lp_state = FPE_STATE_ENTERING_ON;
5960
5961 /* If Local has sent response mPacket, Local is entering FPE ON */
5962 if ((status & FPE_EVENT_TRSP) == FPE_EVENT_TRSP)
5963 *lo_state = FPE_STATE_ENTERING_ON;
5964
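/* Kick the FPE workqueue to continue the verification handshake, unless
 * teardown is in progress or the work is already scheduled.
 */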
5965 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) &&
5966 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) &&
5967 priv->fpe_wq) {
5968 queue_work(priv->fpe_wq, &priv->fpe_task);
5969 }
5970 }
5971
5972 static void stmmac_common_interrupt(struct stmmac_priv *priv)
5973 {
5974 u32 rx_cnt = priv->plat->rx_queues_to_use;
5975 u32 tx_cnt = priv->plat->tx_queues_to_use;
5976 u32 queues_count;
5977 u32 queue;
5978 bool xmac;
5979
5980 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac;
5981 queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;
5982
5983 if (priv->irq_wake)
5984 pm_wakeup_event(priv->device, 0);
5985
5986 if (priv->dma_cap.estsel)
5987 stmmac_est_irq_status(priv, priv, priv->dev,
5988 &priv->xstats, tx_cnt);
5989
5990 if (priv->dma_cap.fpesel) {
5991 int status = stmmac_fpe_irq_status(priv, priv->ioaddr,
5992 priv->dev);
5993
5994 stmmac_fpe_event_status(priv, status);
5995 }
5996
5997 /* To handle GMAC own interrupts */
5998 if ((priv->plat->has_gmac) || xmac) {
5999 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats);
6000
6001 if (unlikely(status)) {
6002 /* For LPI we need to save the tx status */
6003 if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
6004 priv->tx_path_in_lpi_mode = true;
6005 if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
6006 priv->tx_path_in_lpi_mode = false;
6007 }
6008
6009 for (queue = 0; queue < queues_count; queue++) {
6010 status = stmmac_host_mtl_irq_status(priv, priv->hw,
6011 queue);
6012 }
6013
6014 /* PCS link status */
6015 if (priv->hw->pcs &&
6016 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) {
6017 if (priv->xstats.pcs_link)
6018 netif_carrier_on(priv->dev);
6019 else
6020 netif_carrier_off(priv->dev);
6021 }
6022
6023 stmmac_timestamp_interrupt(priv, priv);
6024 }
6025 }
6026
6027 /**
6028 * stmmac_interrupt - main ISR
6029 * @irq: interrupt number.
6030 * @dev_id: to pass the net device pointer.
6031 * Description: this is the main driver interrupt service routine.
6032 * It can call:
6033 * o DMA service routine (to manage incoming frame reception and transmission
6034 * status)
6035 * o Core interrupts to manage: remote wake-up, management counter, LPI
6036 * interrupts.
6037 */
6038 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6039 {
6040 struct net_device *dev = (struct net_device *)dev_id;
6041 struct stmmac_priv *priv = netdev_priv(dev);
6042
6043 /* Check if adapter is up */
6044 if (test_bit(STMMAC_DOWN, &priv->state))
6045 return IRQ_HANDLED;
6046
6047 /* Check if a fatal error happened */
6048 if (stmmac_safety_feat_interrupt(priv))
6049 return IRQ_HANDLED;
6050
6051 /* To handle Common interrupts */
6052 stmmac_common_interrupt(priv);
6053
6054 /* To handle DMA interrupts */
6055 stmmac_dma_interrupt(priv);
6056
6057 return IRQ_HANDLED;
6058 }
6059
6060 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6061 {
6062 struct net_device *dev = (struct net_device *)dev_id;
6063 struct stmmac_priv *priv = netdev_priv(dev);
6064
6065 /* Check if adapter is up */
6066 if (test_bit(STMMAC_DOWN, &priv->state))
6067 return IRQ_HANDLED;
6068
6069 /* To handle Common interrupts */
6070 stmmac_common_interrupt(priv);
6071
6072 return IRQ_HANDLED;
6073 }
6074
6075 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6076 {
6077 struct net_device *dev = (struct net_device *)dev_id;
6078 struct stmmac_priv *priv = netdev_priv(dev);
6079
6080 /* Check if adapter is up */
6081 if (test_bit(STMMAC_DOWN, &priv->state))
6082 return IRQ_HANDLED;
6083
6084 /* Check if a fatal error happened */
6085 stmmac_safety_feat_interrupt(priv);
6086
6087 return IRQ_HANDLED;
6088 }
6089
6090 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
6091 {
6092 struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
6093 struct stmmac_dma_conf *dma_conf;
6094 int chan = tx_q->queue_index;
6095 struct stmmac_priv *priv;
6096 int status;
6097
6098 dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
6099 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6100
6101 /* Check if adapter is up */
6102 if (test_bit(STMMAC_DOWN, &priv->state))
6103 return IRQ_HANDLED;
6104
6105 status = stmmac_napi_check(priv, chan, DMA_DIR_TX);
6106
6107 if (unlikely(status & tx_hard_error_bump_tc)) {
6108 /* Try to bump up the dma threshold on this failure */
6109 stmmac_bump_dma_threshold(priv, chan);
6110 } else if (unlikely(status == tx_hard_error)) {
6111 stmmac_tx_err(priv, chan);
6112 }
6113
6114 return IRQ_HANDLED;
6115 }
6116
6117 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6118 {
6119 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6120 struct stmmac_dma_conf *dma_conf;
6121 int chan = rx_q->queue_index;
6122 struct stmmac_priv *priv;
6123
6124 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6125 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6126
6127 /* Check if adapter is up */
6128 if (test_bit(STMMAC_DOWN, &priv->state))
6129 return IRQ_HANDLED;
6130
6131 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6132
6133 return IRQ_HANDLED;
6134 }
6135
6136 /**
6137 * stmmac_ioctl - Entry point for the Ioctl
6138 * @dev: Device pointer.
6139 * @rq: An IOCTL specific structure that can contain a pointer to
6140 * a proprietary structure used to pass information to the driver.
6141 * @cmd: IOCTL command
6142 * Description:
6143 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6144 */
6145 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6146 {
6147 struct stmmac_priv *priv = netdev_priv(dev);
6148 int ret = -EOPNOTSUPP;
6149
6150 if (!netif_running(dev))
6151 return -EINVAL;
6152
6153 switch (cmd) {
6154 case SIOCGMIIPHY:
6155 case SIOCGMIIREG:
6156 case SIOCSMIIREG:
6157 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6158 break;
6159 case SIOCSHWTSTAMP:
6160 ret = stmmac_hwtstamp_set(dev, rq);
6161 break;
6162 case SIOCGHWTSTAMP:
6163 ret = stmmac_hwtstamp_get(dev, rq);
6164 break;
6165 default:
6166 break;
6167 }
6168
6169 return ret;
6170 }
6171
6172 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6173 void *cb_priv)
6174 {
6175 struct stmmac_priv *priv = cb_priv;
6176 int ret = -EOPNOTSUPP;
6177
6178 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6179 return ret;
6180
6181 __stmmac_disable_all_queues(priv);
6182
6183 switch (type) {
6184 case TC_SETUP_CLSU32:
6185 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6186 break;
6187 case TC_SETUP_CLSFLOWER:
6188 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6189 break;
6190 default:
6191 break;
6192 }
6193
6194 stmmac_enable_all_queues(priv);
6195 return ret;
6196 }
6197
6198 static LIST_HEAD(stmmac_block_cb_list);
6199
6200 static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
6201 void *type_data)
6202 {
6203 struct stmmac_priv *priv = netdev_priv(ndev);
6204
6205 switch (type) {
6206 case TC_QUERY_CAPS:
6207 return stmmac_tc_query_caps(priv, priv, type_data);
6208 case TC_SETUP_BLOCK:
6209 return flow_block_cb_setup_simple(type_data,
6210 &stmmac_block_cb_list,
6211 stmmac_setup_tc_block_cb,
6212 priv, priv, true);
6213 case TC_SETUP_QDISC_CBS:
6214 return stmmac_tc_setup_cbs(priv, priv, type_data);
6215 case TC_SETUP_QDISC_TAPRIO:
6216 return stmmac_tc_setup_taprio(priv, priv, type_data);
6217 case TC_SETUP_QDISC_ETF:
6218 return stmmac_tc_setup_etf(priv, priv, type_data);
6219 default:
6220 return -EOPNOTSUPP;
6221 }
6222 }
6223
6224 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6225 struct net_device *sb_dev)
6226 {
6227 int gso = skb_shinfo(skb)->gso_type;
6228
6229 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6230 /*
6231 * There is no way to determine the number of TSO/USO
6232 * capable Queues. Let's always use Queue 0
6233 * because if TSO/USO is supported then at least this
6234 * one will be capable.
6235 */
6236 return 0;
6237 }
6238
6239 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6240 }
6241
6242 static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
6243 {
6244 struct stmmac_priv *priv = netdev_priv(ndev);
6245 int ret = 0;
6246
6247 ret = pm_runtime_resume_and_get(priv->device);
6248 if (ret < 0)
6249 return ret;
6250
6251 ret = eth_mac_addr(ndev, addr);
6252 if (ret)
6253 goto set_mac_error;
6254
6255 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
6256
6257 set_mac_error:
6258 pm_runtime_put(priv->device);
6259
6260 return ret;
6261 }
6262
6263 #ifdef CONFIG_DEBUG_FS
6264 static struct dentry *stmmac_fs_dir;
6265
6266 static void sysfs_display_ring(void *head, int size, int extend_desc,
6267 struct seq_file *seq, dma_addr_t dma_phy_addr)
6268 {
6269 struct dma_extended_desc *ep = (struct dma_extended_desc *)head;
6270 struct dma_desc *p = (struct dma_desc *)head;
6271 unsigned int desc_size;
6272 dma_addr_t dma_addr;
6273 int i;
6274
6275 desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
6276 for (i = 0; i < size; i++) {
6277 dma_addr = dma_phy_addr + i * desc_size;
6278 seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
6279 i, &dma_addr,
6280 le32_to_cpu(p->des0), le32_to_cpu(p->des1),
6281 le32_to_cpu(p->des2), le32_to_cpu(p->des3));
6282 if (extend_desc)
6283 p = &(++ep)->basic;
6284 else
6285 p++;
6286 }
6287 }
6288
6289 static int stmmac_rings_status_show(struct seq_file *seq, void *v)
6290 {
6291 struct net_device *dev = seq->private;
6292 struct stmmac_priv *priv = netdev_priv(dev);
6293 u32 rx_count = priv->plat->rx_queues_to_use;
6294 u32 tx_count = priv->plat->tx_queues_to_use;
6295 u32 queue;
6296
6297 if ((dev->flags & IFF_UP) == 0)
6298 return 0;
6299
6300 for (queue = 0; queue < rx_count; queue++) {
6301 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6302
6303 seq_printf(seq, "RX Queue %d:\n", queue);
6304
6305 if (priv->extend_desc) {
6306 seq_printf(seq, "Extended descriptor ring:\n");
6307 sysfs_display_ring((void *)rx_q->dma_erx,
6308 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
6309 } else {
6310 seq_printf(seq, "Descriptor ring:\n");
6311 sysfs_display_ring((void *)rx_q->dma_rx,
6312 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
6313 }
6314 }
6315
6316 for (queue = 0; queue < tx_count; queue++) {
6317 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6318
6319 seq_printf(seq, "TX Queue %d:\n", queue);
6320
6321 if (priv->extend_desc) {
6322 seq_printf(seq, "Extended descriptor ring:\n");
6323 sysfs_display_ring((void *)tx_q->dma_etx,
6324 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
6325 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
6326 seq_printf(seq, "Descriptor ring:\n");
6327 sysfs_display_ring((void *)tx_q->dma_tx,
6328 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
6329 }
6330 }
6331
6332 return 0;
6333 }
6334 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6335
6336 static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
6337 {
6338 static const char * const dwxgmac_timestamp_source[] = {
6339 "None",
6340 "Internal",
6341 "External",
6342 "Both",
6343 };
6344 static const char * const dwxgmac_safety_feature_desc[] = {
6345 "No",
6346 "All Safety Features with ECC and Parity",
6347 "All Safety Features without ECC or Parity",
6348 "All Safety Features with Parity Only",
6349 "ECC Only",
6350 "UNDEFINED",
6351 "UNDEFINED",
6352 "UNDEFINED",
6353 };
6354 struct net_device *dev = seq->private;
6355 struct stmmac_priv *priv = netdev_priv(dev);
6356
6357 if (!priv->hw_cap_support) {
6358 seq_printf(seq, "DMA HW features not supported\n");
6359 return 0;
6360 }
6361
6362 seq_printf(seq, "==============================\n");
6363 seq_printf(seq, "\tDMA HW features\n");
6364 seq_printf(seq, "==============================\n");
6365
6366 seq_printf(seq, "\t10/100 Mbps: %s\n",
6367 (priv->dma_cap.mbps_10_100) ? "Y" : "N");
6368 seq_printf(seq, "\t1000 Mbps: %s\n",
6369 (priv->dma_cap.mbps_1000) ? "Y" : "N");
6370 seq_printf(seq, "\tHalf duplex: %s\n",
6371 (priv->dma_cap.half_duplex) ? "Y" : "N");
6372 if (priv->plat->has_xgmac) {
6373 seq_printf(seq,
6374 "\tNumber of Additional MAC address registers: %d\n",
6375 priv->dma_cap.multi_addr);
6376 } else {
6377 seq_printf(seq, "\tHash Filter: %s\n",
6378 (priv->dma_cap.hash_filter) ? "Y" : "N");
6379 seq_printf(seq, "\tMultiple MAC address registers: %s\n",
6380 (priv->dma_cap.multi_addr) ? "Y" : "N");
6381 }
6382 seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
6383 (priv->dma_cap.pcs) ? "Y" : "N");
6384 seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
6385 (priv->dma_cap.sma_mdio) ? "Y" : "N");
6386 seq_printf(seq, "\tPMT Remote wake up: %s\n",
6387 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
6388 seq_printf(seq, "\tPMT Magic Frame: %s\n",
6389 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
6390 seq_printf(seq, "\tRMON module: %s\n",
6391 (priv->dma_cap.rmon) ? "Y" : "N");
6392 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
6393 (priv->dma_cap.time_stamp) ? "Y" : "N");
6394 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
6395 (priv->dma_cap.atime_stamp) ? "Y" : "N");
6396 if (priv->plat->has_xgmac)
6397 seq_printf(seq, "\tTimestamp System Time Source: %s\n",
6398 dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
6399 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
6400 (priv->dma_cap.eee) ? "Y" : "N");
6401 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
6402 seq_printf(seq, "\tChecksum Offload in TX: %s\n",
6403 (priv->dma_cap.tx_coe) ? "Y" : "N");
6404 if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
6405 priv->plat->has_xgmac) {
6406 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
6407 (priv->dma_cap.rx_coe) ? "Y" : "N");
6408 } else {
6409 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
6410 (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
6411 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
6412 (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
6413 seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
6414 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
6415 }
6416 seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
6417 priv->dma_cap.number_rx_channel);
6418 seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
6419 priv->dma_cap.number_tx_channel);
6420 seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
6421 priv->dma_cap.number_rx_queues);
6422 seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
6423 priv->dma_cap.number_tx_queues);
6424 seq_printf(seq, "\tEnhanced descriptors: %s\n",
6425 (priv->dma_cap.enh_desc) ? "Y" : "N");
6426 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
6427 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
6428 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
6429 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
6430 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
6431 seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
6432 priv->dma_cap.pps_out_num);
6433 seq_printf(seq, "\tSafety Features: %s\n",
6434 dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
6435 seq_printf(seq, "\tFlexible RX Parser: %s\n",
6436 priv->dma_cap.frpsel ? "Y" : "N");
6437 seq_printf(seq, "\tEnhanced Addressing: %d\n",
6438 priv->dma_cap.host_dma_width);
6439 seq_printf(seq, "\tReceive Side Scaling: %s\n",
6440 priv->dma_cap.rssen ? "Y" : "N");
6441 seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
6442 priv->dma_cap.vlhash ? "Y" : "N");
6443 seq_printf(seq, "\tSplit Header: %s\n",
6444 priv->dma_cap.sphen ? "Y" : "N");
6445 seq_printf(seq, "\tVLAN TX Insertion: %s\n",
6446 priv->dma_cap.vlins ? "Y" : "N");
6447 seq_printf(seq, "\tDouble VLAN: %s\n",
6448 priv->dma_cap.dvlan ? "Y" : "N");
6449 seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
6450 priv->dma_cap.l3l4fnum);
6451 seq_printf(seq, "\tARP Offloading: %s\n",
6452 priv->dma_cap.arpoffsel ? "Y" : "N");
6453 seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
6454 priv->dma_cap.estsel ? "Y" : "N");
6455 seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
6456 priv->dma_cap.fpesel ? "Y" : "N");
6457 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
6458 priv->dma_cap.tbssel ? "Y" : "N");
6459 seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
6460 priv->dma_cap.tbs_ch_num);
6461 seq_printf(seq, "\tPer-Stream Filtering: %s\n",
6462 priv->dma_cap.sgfsel ? "Y" : "N");
6463 seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
6464 BIT(priv->dma_cap.ttsfd) >> 1);
6465 seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
6466 priv->dma_cap.numtc);
6467 seq_printf(seq, "\tDCB Feature: %s\n",
6468 priv->dma_cap.dcben ? "Y" : "N");
6469 seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
6470 priv->dma_cap.advthword ? "Y" : "N");
6471 seq_printf(seq, "\tPTP Offload: %s\n",
6472 priv->dma_cap.ptoen ? "Y" : "N");
6473 seq_printf(seq, "\tOne-Step Timestamping: %s\n",
6474 priv->dma_cap.osten ? "Y" : "N");
6475 seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
6476 priv->dma_cap.pfcen ? "Y" : "N");
6477 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
6478 BIT(priv->dma_cap.frpes) << 6);
6479 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
6480 BIT(priv->dma_cap.frpbs) << 6);
6481 seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
6482 priv->dma_cap.frppipe_num);
6483 seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
6484 priv->dma_cap.nrvf_num ?
6485 (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
6486 seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
6487 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
6488 seq_printf(seq, "\tDepth of GCL: %lu\n",
6489 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
6490 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
6491 priv->dma_cap.cbtisel ? "Y" : "N");
6492 seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
6493 priv->dma_cap.aux_snapshot_n);
6494 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
6495 priv->dma_cap.pou_ost_en ? "Y" : "N");
6496 seq_printf(seq, "\tEnhanced DMA: %s\n",
6497 priv->dma_cap.edma ? "Y" : "N");
6498 seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
6499 priv->dma_cap.ediffc ? "Y" : "N");
6500 seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
6501 priv->dma_cap.vxn ? "Y" : "N");
6502 seq_printf(seq, "\tDebug Memory Interface: %s\n",
6503 priv->dma_cap.dbgmem ? "Y" : "N");
6504 seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
6505 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
6506 return 0;
6507 }
6508 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6509
6510 /* Use network device events to rename debugfs file entries.
6511 */
6512 static int stmmac_device_event(struct notifier_block *unused,
6513 unsigned long event, void *ptr)
6514 {
6515 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6516 struct stmmac_priv *priv = netdev_priv(dev);
6517
6518 if (dev->netdev_ops != &stmmac_netdev_ops)
6519 goto done;
6520
6521 switch (event) {
6522 case NETDEV_CHANGENAME:
6523 if (priv->dbgfs_dir)
6524 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir,
6525 priv->dbgfs_dir,
6526 stmmac_fs_dir,
6527 dev->name);
6528 break;
6529 }
6530 done:
6531 return NOTIFY_DONE;
6532 }
6533
6534 static struct notifier_block stmmac_notifier = {
6535 .notifier_call = stmmac_device_event,
6536 };
6537
6538 static void stmmac_init_fs(struct net_device *dev)
6539 {
6540 struct stmmac_priv *priv = netdev_priv(dev);
6541
6542 rtnl_lock();
6543
6544 /* Create per netdev entries */
6545 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
6546
6547 /* Entry to report DMA RX/TX rings */
6548 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
6549 &stmmac_rings_status_fops);
6550
6551 /* Entry to report the DMA HW features */
6552 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
6553 &stmmac_dma_cap_fops);
6554
6555 rtnl_unlock();
6556 }
6557
6558 static void stmmac_exit_fs(struct net_device *dev)
6559 {
6560 struct stmmac_priv *priv = netdev_priv(dev);
6561
6562 debugfs_remove_recursive(priv->dbgfs_dir);
6563 }
6564 #endif /* CONFIG_DEBUG_FS */
6565
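/* Bit-serial CRC-32 (reflected polynomial 0xEDB88320) over the 12 valid bits
 * of the VLAN ID, used below to index the VLAN hash filter.
 */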
6566 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6567 {
6568 unsigned char *data = (unsigned char *)&vid_le;
6569 unsigned char data_byte = 0;
6570 u32 crc = ~0x0;
6571 u32 temp = 0;
6572 int i, bits;
6573
6574 bits = get_bitmask_order(VLAN_VID_MASK);
6575 for (i = 0; i < bits; i++) {
6576 if ((i % 8) == 0)
6577 data_byte = data[i / 8];
6578
6579 temp = ((crc & 1) ^ data_byte) & 1;
6580 crc >>= 1;
6581 data_byte >>= 1;
6582
6583 if (temp)
6584 crc ^= 0xedb88320;
6585 }
6586
6587 return crc;
6588 }
6589
6590 static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
6591 {
6592 u32 crc, hash = 0;
6593 __le16 pmatch = 0;
6594 int count = 0;
6595 u16 vid = 0;
6596
6597 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
6598 __le16 vid_le = cpu_to_le16(vid);
6599 crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
6600 hash |= (1 << crc);
6601 count++;
6602 }
6603
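/* Without VLAN hash filtering, fall back to perfect filtering, which can
 * only match a single VID (besides VID 0, which always passes).
 */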
6604 if (!priv->dma_cap.vlhash) {
6605 if (count > 2) /* VID = 0 always passes filter */
6606 return -EOPNOTSUPP;
6607
6608 pmatch = cpu_to_le16(vid);
6609 hash = 0;
6610 }
6611
6612 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
6613 }
6614
6615 static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
6616 {
6617 struct stmmac_priv *priv = netdev_priv(ndev);
6618 bool is_double = false;
6619 int ret;
6620
6621 ret = pm_runtime_resume_and_get(priv->device);
6622 if (ret < 0)
6623 return ret;
6624
6625 if (be16_to_cpu(proto) == ETH_P_8021AD)
6626 is_double = true;
6627
6628 set_bit(vid, priv->active_vlans);
6629 ret = stmmac_vlan_update(priv, is_double);
6630 if (ret) {
6631 clear_bit(vid, priv->active_vlans);
6632 goto err_pm_put;
6633 }
6634
6635 if (priv->hw->num_vlan) {
6636 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6637 if (ret)
6638 goto err_pm_put;
6639 }
6640 err_pm_put:
6641 pm_runtime_put(priv->device);
6642
6643 return ret;
6644 }
6645
6646 static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
6647 {
6648 struct stmmac_priv *priv = netdev_priv(ndev);
6649 bool is_double = false;
6650 int ret;
6651
6652 ret = pm_runtime_resume_and_get(priv->device);
6653 if (ret < 0)
6654 return ret;
6655
6656 if (be16_to_cpu(proto) == ETH_P_8021AD)
6657 is_double = true;
6658
6659 clear_bit(vid, priv->active_vlans);
6660
6661 if (priv->hw->num_vlan) {
6662 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
6663 if (ret)
6664 goto del_vlan_error;
6665 }
6666
6667 ret = stmmac_vlan_update(priv, is_double);
6668
6669 del_vlan_error:
6670 pm_runtime_put(priv->device);
6671
6672 return ret;
6673 }
6674
6675 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6676 {
6677 struct stmmac_priv *priv = netdev_priv(dev);
6678
6679 switch (bpf->command) {
6680 case XDP_SETUP_PROG:
6681 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6682 case XDP_SETUP_XSK_POOL:
6683 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6684 bpf->xsk.queue_id);
6685 default:
6686 return -EOPNOTSUPP;
6687 }
6688 }
6689
6690 static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
6691 struct xdp_frame **frames, u32 flags)
6692 {
6693 struct stmmac_priv *priv = netdev_priv(dev);
6694 int cpu = smp_processor_id();
6695 struct netdev_queue *nq;
6696 int i, nxmit = 0;
6697 int queue;
6698
6699 if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
6700 return -ENETDOWN;
6701
6702 if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
6703 return -EINVAL;
6704
6705 queue = stmmac_xdp_get_tx_queue(priv, cpu);
6706 nq = netdev_get_tx_queue(priv->dev, queue);
6707
6708 __netif_tx_lock(nq, cpu);
6709 /* Avoids TX time-out as we are sharing with slow path */
6710 txq_trans_cond_update(nq);
6711
6712 for (i = 0; i < num_frames; i++) {
6713 int res;
6714
6715 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
6716 if (res == STMMAC_XDP_CONSUMED)
6717 break;
6718
6719 nxmit++;
6720 }
6721
6722 if (flags & XDP_XMIT_FLUSH) {
6723 stmmac_flush_tx_descriptors(priv, queue);
6724 stmmac_tx_timer_arm(priv, queue);
6725 }
6726
6727 __netif_tx_unlock(nq);
6728
6729 return nxmit;
6730 }
6731
6732 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
6733 {
6734 struct stmmac_channel *ch = &priv->channel[queue];
6735 unsigned long flags;
6736
6737 spin_lock_irqsave(&ch->lock, flags);
6738 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6739 spin_unlock_irqrestore(&ch->lock, flags);
6740
6741 stmmac_stop_rx_dma(priv, queue);
6742 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6743 }
6744
6745 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
6746 {
6747 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
6748 struct stmmac_channel *ch = &priv->channel[queue];
6749 unsigned long flags;
6750 u32 buf_size;
6751 int ret;
6752
6753 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6754 if (ret) {
6755 netdev_err(priv->dev, "Failed to alloc RX desc.\n");
6756 return;
6757 }
6758
6759 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
6760 if (ret) {
6761 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
6762 netdev_err(priv->dev, "Failed to init RX desc.\n");
6763 return;
6764 }
6765
6766 stmmac_reset_rx_queue(priv, queue);
6767 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);
6768
6769 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6770 rx_q->dma_rx_phy, rx_q->queue_index);
6771
6772 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
6773 sizeof(struct dma_desc));
6774 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6775 rx_q->rx_tail_addr, rx_q->queue_index);
6776
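/* Program the DMA buffer size from the XSK pool frame size when zero-copy
 * is active, otherwise use the default driver buffer size.
 */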
6777 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6778 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6779 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6780 buf_size,
6781 rx_q->queue_index);
6782 } else {
6783 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6784 priv->dma_conf.dma_buf_sz,
6785 rx_q->queue_index);
6786 }
6787
6788 stmmac_start_rx_dma(priv, queue);
6789
6790 spin_lock_irqsave(&ch->lock, flags);
6791 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
6792 spin_unlock_irqrestore(&ch->lock, flags);
6793 }
6794
6795 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
6796 {
6797 struct stmmac_channel *ch = &priv->channel[queue];
6798 unsigned long flags;
6799
6800 spin_lock_irqsave(&ch->lock, flags);
6801 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6802 spin_unlock_irqrestore(&ch->lock, flags);
6803
6804 stmmac_stop_tx_dma(priv, queue);
6805 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6806 }
6807
6808 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
6809 {
6810 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
6811 struct stmmac_channel *ch = &priv->channel[queue];
6812 unsigned long flags;
6813 int ret;
6814
6815 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6816 if (ret) {
6817 netdev_err(priv->dev, "Failed to alloc TX desc.\n");
6818 return;
6819 }
6820
6821 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
6822 if (ret) {
6823 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
6824 netdev_err(priv->dev, "Failed to init TX desc.\n");
6825 return;
6826 }
6827
6828 stmmac_reset_tx_queue(priv, queue);
6829 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);
6830
6831 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6832 tx_q->dma_tx_phy, tx_q->queue_index);
6833
6834 if (tx_q->tbs & STMMAC_TBS_AVAIL)
6835 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);
6836
6837 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6838 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6839 tx_q->tx_tail_addr, tx_q->queue_index);
6840
6841 stmmac_start_tx_dma(priv, queue);
6842
6843 spin_lock_irqsave(&ch->lock, flags);
6844 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
6845 spin_unlock_irqrestore(&ch->lock, flags);
6846 }
6847
6848 void stmmac_xdp_release(struct net_device *dev)
6849 {
6850 struct stmmac_priv *priv = netdev_priv(dev);
6851 u32 chan;
6852
6853 /* Ensure tx function is not running */
6854 netif_tx_disable(dev);
6855
6856 /* Disable NAPI process */
6857 stmmac_disable_all_queues(priv);
6858
6859 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6860 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6861
6862 /* Free the IRQ lines */
6863 stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);
6864
6865 /* Stop TX/RX DMA channels */
6866 stmmac_stop_all_dma(priv);
6867
6868 /* Release and free the Rx/Tx resources */
6869 free_dma_desc_resources(priv, &priv->dma_conf);
6870
6871 /* Disable the MAC Rx/Tx */
6872 stmmac_mac_set(priv, priv->ioaddr, false);
6873
6874 /* set trans_start so we don't get spurious
6875 * watchdogs during reset
6876 */
6877 netif_trans_update(dev);
6878 netif_carrier_off(dev);
6879 }
6880
6881 int stmmac_xdp_open(struct net_device *dev)
6882 {
6883 struct stmmac_priv *priv = netdev_priv(dev);
6884 u32 rx_cnt = priv->plat->rx_queues_to_use;
6885 u32 tx_cnt = priv->plat->tx_queues_to_use;
6886 u32 dma_csr_ch = max(rx_cnt, tx_cnt);
6887 struct stmmac_rx_queue *rx_q;
6888 struct stmmac_tx_queue *tx_q;
6889 u32 buf_size;
6890 bool sph_en;
6891 u32 chan;
6892 int ret;
6893
6894 ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
6895 if (ret < 0) {
6896 netdev_err(dev, "%s: DMA descriptors allocation failed\n",
6897 __func__);
6898 goto dma_desc_error;
6899 }
6900
6901 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
6902 if (ret < 0) {
6903 netdev_err(dev, "%s: DMA descriptors initialization failed\n",
6904 __func__);
6905 goto init_error;
6906 }
6907
6908 stmmac_reset_queues_param(priv);
6909
6910 /* DMA CSR Channel configuration */
6911 for (chan = 0; chan < dma_csr_ch; chan++) {
6912 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
6913 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
6914 }
6915
6916 /* Adjust Split header */
6917 sph_en = (priv->hw->rx_csum > 0) && priv->sph;
6918
6919 /* DMA RX Channel Configuration */
6920 for (chan = 0; chan < rx_cnt; chan++) {
6921 rx_q = &priv->dma_conf.rx_queue[chan];
6922
6923 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6924 rx_q->dma_rx_phy, chan);
6925
6926 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
6927 (rx_q->buf_alloc_num *
6928 sizeof(struct dma_desc));
6929 stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
6930 rx_q->rx_tail_addr, chan);
6931
6932 if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
6933 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
6934 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6935 buf_size,
6936 rx_q->queue_index);
6937 } else {
6938 stmmac_set_dma_bfsize(priv, priv->ioaddr,
6939 priv->dma_conf.dma_buf_sz,
6940 rx_q->queue_index);
6941 }
6942
6943 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
6944 }
6945
6946 /* DMA TX Channel Configuration */
6947 for (chan = 0; chan < tx_cnt; chan++) {
6948 tx_q = &priv->dma_conf.tx_queue[chan];
6949
6950 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
6951 tx_q->dma_tx_phy, chan);
6952
6953 tx_q->tx_tail_addr = tx_q->dma_tx_phy;
6954 stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
6955 tx_q->tx_tail_addr, chan);
6956
6957 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
6958 tx_q->txtimer.function = stmmac_tx_timer;
6959 }
6960
6961 /* Enable the MAC Rx/Tx */
6962 stmmac_mac_set(priv, priv->ioaddr, true);
6963
6964 /* Start Rx & Tx DMA Channels */
6965 stmmac_start_all_dma(priv);
6966
6967 ret = stmmac_request_irq(dev);
6968 if (ret)
6969 goto irq_error;
6970
6971 /* Enable NAPI process */
6972 stmmac_enable_all_queues(priv);
6973 netif_carrier_on(dev);
6974 netif_tx_start_all_queues(dev);
6975 stmmac_enable_all_dma_irq(priv);
6976
6977 return 0;
6978
6979 irq_error:
6980 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
6981 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
6982
6983 stmmac_hw_teardown(dev);
6984 init_error:
6985 free_dma_desc_resources(priv, &priv->dma_conf);
6986 dma_desc_error:
6987 return ret;
6988 }
6989
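/**
 * stmmac_xsk_wakeup - ndo_xsk_wakeup callback for AF_XDP sockets
 * @dev: net device structure
 * @queue: queue index to wake up
 * @flags: XDP_WAKEUP_RX and/or XDP_WAKEUP_TX (unused here)
 * Description: validates that the interface is up, XDP is enabled and the
 * queue has an XSK pool attached, then schedules the rx/tx NAPI instance
 * for that channel unless it is already running.
 */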
6990 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
6991 {
6992 struct stmmac_priv *priv = netdev_priv(dev);
6993 struct stmmac_rx_queue *rx_q;
6994 struct stmmac_tx_queue *tx_q;
6995 struct stmmac_channel *ch;
6996
6997 if (test_bit(STMMAC_DOWN, &priv->state) ||
6998 !netif_carrier_ok(priv->dev))
6999 return -ENETDOWN;
7000
7001 if (!stmmac_xdp_is_enabled(priv))
7002 return -EINVAL;
7003
7004 if (queue >= priv->plat->rx_queues_to_use ||
7005 queue >= priv->plat->tx_queues_to_use)
7006 return -EINVAL;
7007
7008 rx_q = &priv->dma_conf.rx_queue[queue];
7009 tx_q = &priv->dma_conf.tx_queue[queue];
7010 ch = &priv->channel[queue];
7011
7012 if (!rx_q->xsk_pool && !tx_q->xsk_pool)
7013 return -EINVAL;
7014
7015 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
7016 /* EQoS does not have per-DMA channel SW interrupt,
7017 * so we schedule RX Napi straight-away.
7018 */
7019 if (likely(napi_schedule_prep(&ch->rxtx_napi)))
7020 __napi_schedule(&ch->rxtx_napi);
7021 }
7022
7023 return 0;
7024 }
7025
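/**
 * stmmac_get_stats64 - ndo_get_stats64 callback
 * @dev: net device structure
 * @stats: structure to be filled with the aggregated counters
 * Description: sums the per-queue TX/RX packet and byte counters using
 * their u64_stats syncp sequences and copies the remaining error and
 * drop counters from the extended statistics.
 */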
7026 static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
7027 {
7028 struct stmmac_priv *priv = netdev_priv(dev);
7029 u32 tx_cnt = priv->plat->tx_queues_to_use;
7030 u32 rx_cnt = priv->plat->rx_queues_to_use;
7031 unsigned int start;
7032 int q;
7033
7034 for (q = 0; q < tx_cnt; q++) {
7035 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
7036 u64 tx_packets;
7037 u64 tx_bytes;
7038
7039 do {
7040 start = u64_stats_fetch_begin(&txq_stats->q_syncp);
7041 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
7042 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
7043 do {
7044 start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
7045 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
7046 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));
7047
7048 stats->tx_packets += tx_packets;
7049 stats->tx_bytes += tx_bytes;
7050 }
7051
7052 for (q = 0; q < rx_cnt; q++) {
7053 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
7054 u64 rx_packets;
7055 u64 rx_bytes;
7056
7057 do {
7058 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
7059 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
7060 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
7061 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));
7062
7063 stats->rx_packets += rx_packets;
7064 stats->rx_bytes += rx_bytes;
7065 }
7066
7067 stats->rx_dropped = priv->xstats.rx_dropped;
7068 stats->rx_errors = priv->xstats.rx_errors;
7069 stats->tx_dropped = priv->xstats.tx_dropped;
7070 stats->tx_errors = priv->xstats.tx_errors;
7071 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
7072 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
7073 stats->rx_length_errors = priv->xstats.rx_length;
7074 stats->rx_crc_errors = priv->xstats.rx_crc_errors;
7075 stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
7076 stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
7077 }
7078
7079 static const struct net_device_ops stmmac_netdev_ops = {
7080 .ndo_open = stmmac_open,
7081 .ndo_start_xmit = stmmac_xmit,
7082 .ndo_stop = stmmac_release,
7083 .ndo_change_mtu = stmmac_change_mtu,
7084 .ndo_fix_features = stmmac_fix_features,
7085 .ndo_set_features = stmmac_set_features,
7086 .ndo_set_rx_mode = stmmac_set_rx_mode,
7087 .ndo_tx_timeout = stmmac_tx_timeout,
7088 .ndo_eth_ioctl = stmmac_ioctl,
7089 .ndo_get_stats64 = stmmac_get_stats64,
7090 .ndo_setup_tc = stmmac_setup_tc,
7091 .ndo_select_queue = stmmac_select_queue,
7092 .ndo_set_mac_address = stmmac_set_mac_address,
7093 .ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
7094 .ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
7095 .ndo_bpf = stmmac_bpf,
7096 .ndo_xdp_xmit = stmmac_xdp_xmit,
7097 .ndo_xsk_wakeup = stmmac_xsk_wakeup,
7098 };
7099
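/**
 * stmmac_reset_subtask - reset the device if a reset was requested
 * @priv: driver private structure
 * Description: if STMMAC_RESET_REQUESTED is set and the interface is not
 * already down, closes and reopens the device under the RTNL lock to
 * recover the adapter.
 */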
7100 static void stmmac_reset_subtask(struct stmmac_priv *priv)
7101 {
7102 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
7103 return;
7104 if (test_bit(STMMAC_DOWN, &priv->state))
7105 return;
7106
7107 netdev_err(priv->dev, "Reset adapter.\n");
7108
7109 rtnl_lock();
7110 netif_trans_update(priv->dev);
7111 while (test_and_set_bit(STMMAC_RESETING, &priv->state))
7112 usleep_range(1000, 2000);
7113
7114 set_bit(STMMAC_DOWN, &priv->state);
7115 dev_close(priv->dev);
7116 dev_open(priv->dev, NULL);
7117 clear_bit(STMMAC_DOWN, &priv->state);
7118 clear_bit(STMMAC_RESETING, &priv->state);
7119 rtnl_unlock();
7120 }
7121
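/**
 * stmmac_service_task - service work handler
 * @work: work_struct embedded in the driver private structure
 * Description: runs the deferred reset subtask and clears the
 * STMMAC_SERVICE_SCHED bit so the task can be scheduled again.
 */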
7122 static void stmmac_service_task(struct work_struct *work)
7123 {
7124 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7125 service_task);
7126
7127 stmmac_reset_subtask(priv);
7128 clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
7129 }
7130
7131 /**
7132 * stmmac_hw_init - Init the MAC device
7133 * @priv: driver private structure
7134  * Description: this function configures the MAC device according to
7135  * some platform parameters or the HW capability register. It prepares the
7136  * driver to use either ring or chain mode and to set up either enhanced or
7137  * normal descriptors.
7138 */
7139 static int stmmac_hw_init(struct stmmac_priv *priv)
7140 {
7141 int ret;
7142
7143 /* dwmac-sun8i only works in chain mode */
7144 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
7145 chain_mode = 1;
7146 priv->chain_mode = chain_mode;
7147
7148 /* Initialize HW Interface */
7149 ret = stmmac_hwif_init(priv);
7150 if (ret)
7151 return ret;
7152
7153 /* Get the HW capability (on GMAC cores newer than 3.50a) */
7154 priv->hw_cap_support = stmmac_get_hw_features(priv);
7155 if (priv->hw_cap_support) {
7156 dev_info(priv->device, "DMA HW capability register supported\n");
7157
7158 /* We can override some gmac/dma configuration fields that
7159 * are passed through the platform (e.g. enh_desc, tx_coe)
7160 * with the values from the HW capability register
7161 * (if supported).
7162 */
7163 priv->plat->enh_desc = priv->dma_cap.enh_desc;
7164 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
7165 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
7166 priv->hw->pmt = priv->plat->pmt;
7167 if (priv->dma_cap.hash_tb_sz) {
7168 priv->hw->multicast_filter_bins =
7169 (BIT(priv->dma_cap.hash_tb_sz) << 5);
7170 priv->hw->mcast_bits_log2 =
7171 ilog2(priv->hw->multicast_filter_bins);
7172 }
7173
7174 /* TXCOE doesn't work in thresh DMA mode */
7175 if (priv->plat->force_thresh_dma_mode)
7176 priv->plat->tx_coe = 0;
7177 else
7178 priv->plat->tx_coe = priv->dma_cap.tx_coe;
7179
7180 /* In case of GMAC4 rx_coe is from HW cap register. */
7181 priv->plat->rx_coe = priv->dma_cap.rx_coe;
7182
7183 if (priv->dma_cap.rx_coe_type2)
7184 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
7185 else if (priv->dma_cap.rx_coe_type1)
7186 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;
7187
7188 } else {
7189 dev_info(priv->device, "No HW DMA feature register supported\n");
7190 }
7191
7192 if (priv->plat->rx_coe) {
7193 priv->hw->rx_csum = priv->plat->rx_coe;
7194 dev_info(priv->device, "RX Checksum Offload Engine supported\n");
7195 if (priv->synopsys_id < DWMAC_CORE_4_00)
7196 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
7197 }
7198 if (priv->plat->tx_coe)
7199 dev_info(priv->device, "TX Checksum insertion supported\n");
7200
7201 if (priv->plat->pmt) {
7202 dev_info(priv->device, "Wake-Up On LAN supported\n");
7203 device_set_wakeup_capable(priv->device, 1);
7204 }
7205
7206 if (priv->dma_cap.tsoen)
7207 dev_info(priv->device, "TSO supported\n");
7208
7209 priv->hw->vlan_fail_q_en =
7210 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
7211 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;
7212
7213 /* Run HW quirks, if any */
7214 if (priv->hwif_quirks) {
7215 ret = priv->hwif_quirks(priv);
7216 if (ret)
7217 return ret;
7218 }
7219
7220 /* Rx Watchdog is available in the COREs newer than the 3.40.
7221 * In some cases, for example on buggy HW, this feature
7222 * has to be disabled; this can be done by passing the
7223 * riwt_off field from the platform.
7224 */
7225 if (((priv->synopsys_id >= DWMAC_CORE_3_50) ||
7226 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) {
7227 priv->use_riwt = 1;
7228 dev_info(priv->device,
7229 "Enable RX Mitigation via HW Watchdog Timer\n");
7230 }
7231
7232 return 0;
7233 }
7234
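/**
 * stmmac_napi_add - register the NAPI instances for every channel
 * @dev: net device structure
 * Description: for each DMA channel, initializes the channel lock and
 * registers the RX, TX and combined rx/tx NAPI poll handlers that the
 * channel actually needs.
 */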
7235 static void stmmac_napi_add(struct net_device *dev)
7236 {
7237 struct stmmac_priv *priv = netdev_priv(dev);
7238 u32 queue, maxq;
7239
7240 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7241
7242 for (queue = 0; queue < maxq; queue++) {
7243 struct stmmac_channel *ch = &priv->channel[queue];
7244
7245 ch->priv_data = priv;
7246 ch->index = queue;
7247 spin_lock_init(&ch->lock);
7248
7249 if (queue < priv->plat->rx_queues_to_use) {
7250 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7251 }
7252 if (queue < priv->plat->tx_queues_to_use) {
7253 netif_napi_add_tx(dev, &ch->tx_napi,
7254 stmmac_napi_poll_tx);
7255 }
7256 if (queue < priv->plat->rx_queues_to_use &&
7257 queue < priv->plat->tx_queues_to_use) {
7258 netif_napi_add(dev, &ch->rxtx_napi,
7259 stmmac_napi_poll_rxtx);
7260 }
7261 }
7262 }
7263
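/**
 * stmmac_napi_del - unregister the NAPI instances of every channel
 * @dev: net device structure
 * Description: counterpart of stmmac_napi_add(); deletes the RX, TX and
 * combined rx/tx NAPI instances registered for each channel.
 */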
7264 static void stmmac_napi_del(struct net_device *dev)
7265 {
7266 struct stmmac_priv *priv = netdev_priv(dev);
7267 u32 queue, maxq;
7268
7269 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7270
7271 for (queue = 0; queue < maxq; queue++) {
7272 struct stmmac_channel *ch = &priv->channel[queue];
7273
7274 if (queue < priv->plat->rx_queues_to_use)
7275 netif_napi_del(&ch->rx_napi);
7276 if (queue < priv->plat->tx_queues_to_use)
7277 netif_napi_del(&ch->tx_napi);
7278 if (queue < priv->plat->rx_queues_to_use &&
7279 queue < priv->plat->tx_queues_to_use) {
7280 netif_napi_del(&ch->rxtx_napi);
7281 }
7282 }
7283 }
7284
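/**
 * stmmac_reinit_queues - change the number of RX/TX queues in use
 * @dev: net device structure
 * @rx_cnt: new number of RX queues
 * @tx_cnt: new number of TX queues
 * Description: stops the interface if it is running, re-registers the
 * NAPI instances for the new queue counts, refreshes the default RSS
 * table and restarts the interface. Returns 0 or the error from
 * stmmac_open().
 */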
7285 int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
7286 {
7287 struct stmmac_priv *priv = netdev_priv(dev);
7288 int ret = 0, i;
7289
7290 if (netif_running(dev))
7291 stmmac_release(dev);
7292
7293 stmmac_napi_del(dev);
7294
7295 priv->plat->rx_queues_to_use = rx_cnt;
7296 priv->plat->tx_queues_to_use = tx_cnt;
7297 if (!netif_is_rxfh_configured(dev))
7298 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7299 priv->rss.table[i] = ethtool_rxfh_indir_default(i,
7300 rx_cnt);
7301
7302 stmmac_set_half_duplex(priv);
7303 stmmac_napi_add(dev);
7304
7305 if (netif_running(dev))
7306 ret = stmmac_open(dev);
7307
7308 return ret;
7309 }
7310
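/**
 * stmmac_reinit_ringparam - change the RX/TX ring sizes
 * @dev: net device structure
 * @rx_size: new number of RX descriptors per ring
 * @tx_size: new number of TX descriptors per ring
 * Description: stops the interface if it is running, updates the DMA
 * ring sizes and restarts the interface so the new rings are allocated.
 */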
7311 int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
7312 {
7313 struct stmmac_priv *priv = netdev_priv(dev);
7314 int ret = 0;
7315
7316 if (netif_running(dev))
7317 stmmac_release(dev);
7318
7319 priv->dma_conf.dma_rx_size = rx_size;
7320 priv->dma_conf.dma_tx_size = tx_size;
7321
7322 if (netif_running(dev))
7323 ret = stmmac_open(dev);
7324
7325 return ret;
7326 }
7327
7328 #define SEND_VERIFY_MPAKCET_FMT "Send Verify mPacket lo_state=%d lp_state=%d\n"
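/**
 * stmmac_fpe_lp_task - FPE link-partner handshake work handler
 * @work: work_struct embedded in the driver private structure
 * Description: polls the local and link-partner FPE states for up to 20
 * iterations; sends verify mPackets while the handshake is pending and
 * enables FPE in hardware once both stations report ENTERING_ON.
 */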
7329 static void stmmac_fpe_lp_task(struct work_struct *work)
7330 {
7331 struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
7332 fpe_task);
7333 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg;
7334 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state;
7335 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state;
7336 bool *hs_enable = &fpe_cfg->hs_enable;
7337 bool *enable = &fpe_cfg->enable;
7338 int retries = 20;
7339
7340 while (retries-- > 0) {
7341 /* Bail out immediately if FPE handshake is OFF */
7342 if (*lo_state == FPE_STATE_OFF || !*hs_enable)
7343 break;
7344
7345 if (*lo_state == FPE_STATE_ENTERING_ON &&
7346 *lp_state == FPE_STATE_ENTERING_ON) {
7347 stmmac_fpe_configure(priv, priv->ioaddr,
7348 fpe_cfg,
7349 priv->plat->tx_queues_to_use,
7350 priv->plat->rx_queues_to_use,
7351 *enable);
7352
7353 netdev_info(priv->dev, "configured FPE\n");
7354
7355 *lo_state = FPE_STATE_ON;
7356 *lp_state = FPE_STATE_ON;
7357 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n");
7358 break;
7359 }
7360
7361 if ((*lo_state == FPE_STATE_CAPABLE ||
7362 *lo_state == FPE_STATE_ENTERING_ON) &&
7363 *lp_state != FPE_STATE_ON) {
7364 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT,
7365 *lo_state, *lp_state);
7366 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7367 fpe_cfg,
7368 MPACKET_VERIFY);
7369 }
7370 /* Sleep then retry */
7371 msleep(500);
7372 }
7373
7374 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state);
7375 }
7376
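/**
 * stmmac_fpe_handshake - start or stop the FPE verification handshake
 * @priv: driver private structure
 * @enable: true to start the handshake, false to stop it
 * Description: on enable, sends a verify mPacket to the link partner;
 * on disable, resets both local and link-partner FPE states to OFF.
 * The new handshake state is recorded in fpe_cfg->hs_enable.
 */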
7377 void stmmac_fpe_handshake(struct stmmac_priv *priv, bool enable)
7378 {
7379 if (priv->plat->fpe_cfg->hs_enable != enable) {
7380 if (enable) {
7381 stmmac_fpe_send_mpacket(priv, priv->ioaddr,
7382 priv->plat->fpe_cfg,
7383 MPACKET_VERIFY);
7384 } else {
7385 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF;
7386 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF;
7387 }
7388
7389 priv->plat->fpe_cfg->hs_enable = enable;
7390 }
7391 }
7392
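/**
 * stmmac_xdp_rx_timestamp - XDP metadata hook for the RX HW timestamp
 * @_ctx: XDP metadata context (struct stmmac_xdp_buff)
 * @timestamp: where to store the timestamp, in nanoseconds
 * Description: returns the hardware RX timestamp of the current frame,
 * corrected by the platform CDC error adjustment, or -ENODATA when RX
 * timestamping is disabled or no timestamp is available.
 */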
7393 static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
7394 {
7395 const struct stmmac_xdp_buff *ctx = (void *)_ctx;
7396 struct dma_desc *desc_contains_ts = ctx->desc;
7397 struct stmmac_priv *priv = ctx->priv;
7398 struct dma_desc *ndesc = ctx->ndesc;
7399 struct dma_desc *desc = ctx->desc;
7400 u64 ns = 0;
7401
7402 if (!priv->hwts_rx_en)
7403 return -ENODATA;
7404
7405 /* For GMAC4, the valid timestamp is from CTX next desc. */
7406 if (priv->plat->has_gmac4 || priv->plat->has_xgmac)
7407 desc_contains_ts = ndesc;
7408
7409 /* Check if timestamp is available */
7410 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
7411 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
7412 ns -= priv->plat->cdc_error_adj;
7413 *timestamp = ns_to_ktime(ns);
7414 return 0;
7415 }
7416
7417 return -ENODATA;
7418 }
7419
7420 static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
7421 .xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
7422 };
7423
7424 /**
7425 * stmmac_dvr_probe
7426 * @device: device pointer
7427 * @plat_dat: platform data pointer
7428 * @res: stmmac resource pointer
7429 * Description: this is the main probe function used to
7430 * call alloc_etherdev and allocate the priv structure.
7431 * Return:
7432 * 0 on success, otherwise a negative errno.
7433 */
7434 int stmmac_dvr_probe(struct device *device,
7435 struct plat_stmmacenet_data *plat_dat,
7436 struct stmmac_resources *res)
7437 {
7438 struct net_device *ndev = NULL;
7439 struct stmmac_priv *priv;
7440 u32 rxq;
7441 int i, ret = 0;
7442
7443 ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
7444 MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
7445 if (!ndev)
7446 return -ENOMEM;
7447
7448 SET_NETDEV_DEV(ndev, device);
7449
7450 priv = netdev_priv(ndev);
7451 priv->device = device;
7452 priv->dev = ndev;
7453
7454 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7455 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
7456 for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
7457 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
7458 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
7459 }
7460
7461 priv->xstats.pcpu_stats =
7462 devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
7463 if (!priv->xstats.pcpu_stats)
7464 return -ENOMEM;
7465
7466 stmmac_set_ethtool_ops(ndev);
7467 priv->pause = pause;
7468 priv->plat = plat_dat;
7469 priv->ioaddr = res->addr;
7470 priv->dev->base_addr = (unsigned long)res->addr;
7471 priv->plat->dma_cfg->multi_msi_en =
7472 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);
7473
7474 priv->dev->irq = res->irq;
7475 priv->wol_irq = res->wol_irq;
7476 priv->lpi_irq = res->lpi_irq;
7477 priv->sfty_ce_irq = res->sfty_ce_irq;
7478 priv->sfty_ue_irq = res->sfty_ue_irq;
7479 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
7480 priv->rx_irq[i] = res->rx_irq[i];
7481 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
7482 priv->tx_irq[i] = res->tx_irq[i];
7483
7484 if (!is_zero_ether_addr(res->mac))
7485 eth_hw_addr_set(priv->dev, res->mac);
7486
7487 dev_set_drvdata(device, priv->dev);
7488
7489 /* Verify driver arguments */
7490 stmmac_verify_args();
7491
7492 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
7493 if (!priv->af_xdp_zc_qps)
7494 return -ENOMEM;
7495
7496 /* Allocate workqueue */
7497 priv->wq = create_singlethread_workqueue("stmmac_wq");
7498 if (!priv->wq) {
7499 dev_err(priv->device, "failed to create workqueue\n");
7500 ret = -ENOMEM;
7501 goto error_wq_init;
7502 }
7503
7504 INIT_WORK(&priv->service_task, stmmac_service_task);
7505
7506 /* Initialize Link Partner FPE workqueue */
7507 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task);
7508
7509 /* Override with kernel parameters if supplied XXX CRS XXX
7510 * this needs to have multiple instances
7511 */
7512 if ((phyaddr >= 0) && (phyaddr <= 31))
7513 priv->plat->phy_addr = phyaddr;
7514
7515 if (priv->plat->stmmac_rst) {
7516 ret = reset_control_assert(priv->plat->stmmac_rst);
7517 reset_control_deassert(priv->plat->stmmac_rst);
7518 /* Some reset controllers have only a reset callback instead of
7519 * an assert + deassert callback pair.
7520 */
7521 if (ret == -ENOTSUPP)
7522 reset_control_reset(priv->plat->stmmac_rst);
7523 }
7524
7525 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
7526 if (ret == -ENOTSUPP)
7527 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
7528 ERR_PTR(ret));
7529
7530 /* Wait a bit for the reset to take effect */
7531 udelay(10);
7532
7533 /* Init MAC and get the capabilities */
7534 ret = stmmac_hw_init(priv);
7535 if (ret)
7536 goto error_hw_init;
7537
7538 /* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
7539 */
7540 if (priv->synopsys_id < DWMAC_CORE_5_20)
7541 priv->plat->dma_cfg->dche = false;
7542
7543 stmmac_check_ether_addr(priv);
7544
7545 ndev->netdev_ops = &stmmac_netdev_ops;
7546
7547 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
7548 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;
7549
7550 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
7551 NETIF_F_RXCSUM;
7552 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
7553 NETDEV_XDP_ACT_XSK_ZEROCOPY;
7554
7555 ret = stmmac_tc_init(priv, priv);
7556 if (!ret)
7557 ndev->hw_features |= NETIF_F_HW_TC;
7559
7560 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
7561 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
7562 if (priv->plat->has_gmac4)
7563 ndev->hw_features |= NETIF_F_GSO_UDP_L4;
7564 priv->tso = true;
7565 dev_info(priv->device, "TSO feature enabled\n");
7566 }
7567
7568 if (priv->dma_cap.sphen &&
7569 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
7570 ndev->hw_features |= NETIF_F_GRO;
7571 priv->sph_cap = true;
7572 priv->sph = priv->sph_cap;
7573 dev_info(priv->device, "SPH feature enabled\n");
7574 }
7575
7576 /* Ideally our host DMA address width is the same as for the
7577 * device. However, it may differ and then we have to use our
7578 * host DMA width for allocation and the device DMA width for
7579 * register handling.
7580 */
7581 if (priv->plat->host_dma_width)
7582 priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
7583 else
7584 priv->dma_cap.host_dma_width = priv->dma_cap.addr64;
7585
7586 if (priv->dma_cap.host_dma_width) {
7587 ret = dma_set_mask_and_coherent(device,
7588 DMA_BIT_MASK(priv->dma_cap.host_dma_width));
7589 if (!ret) {
7590 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
7591 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);
7592
7593 /*
7594 * If more than 32 bits can be addressed, make sure to
7595 * enable enhanced addressing mode.
7596 */
7597 if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
7598 priv->plat->dma_cfg->eame = true;
7599 } else {
7600 ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
7601 if (ret) {
7602 dev_err(priv->device, "Failed to set DMA Mask\n");
7603 goto error_hw_init;
7604 }
7605
7606 priv->dma_cap.host_dma_width = 32;
7607 }
7608 }
7609
7610 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
7611 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
7612 #ifdef STMMAC_VLAN_TAG_USED
7613 /* Both mac100 and gmac support receive VLAN tag detection */
7614 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
7615 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
7616 priv->hw->hw_vlan_en = true;
7617
7618 if (priv->dma_cap.vlhash) {
7619 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
7620 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
7621 }
7622 if (priv->dma_cap.vlins) {
7623 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
7624 if (priv->dma_cap.dvlan)
7625 ndev->features |= NETIF_F_HW_VLAN_STAG_TX;
7626 }
7627 #endif
7628 priv->msg_enable = netif_msg_init(debug, default_msg_level);
7629
7630 priv->xstats.threshold = tc;
7631
7632 /* Initialize RSS */
7633 rxq = priv->plat->rx_queues_to_use;
7634 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
7635 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
7636 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);
7637
7638 if (priv->dma_cap.rssen && priv->plat->rss_en)
7639 ndev->features |= NETIF_F_RXHASH;
7640
7641 ndev->vlan_features |= ndev->features;
7642 /* TSO doesn't work on VLANs yet */
7643 ndev->vlan_features &= ~NETIF_F_TSO;
7644
7645 /* MTU range: 46 - hw-specific max */
7646 ndev->min_mtu = ETH_ZLEN - ETH_HLEN;
7647 if (priv->plat->has_xgmac)
7648 ndev->max_mtu = XGMAC_JUMBO_LEN;
7649 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00))
7650 ndev->max_mtu = JUMBO_LEN;
7651 else
7652 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);
7653 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu,
7654 * nor if plat->maxmtu < ndev->min_mtu, which is an invalid range.
7655 */
7656 if ((priv->plat->maxmtu < ndev->max_mtu) &&
7657 (priv->plat->maxmtu >= ndev->min_mtu))
7658 ndev->max_mtu = priv->plat->maxmtu;
7659 else if (priv->plat->maxmtu < ndev->min_mtu)
7660 dev_warn(priv->device,
7661 "%s: warning: maxmtu having invalid value (%d)\n",
7662 __func__, priv->plat->maxmtu);
7663
7664 if (flow_ctrl)
7665 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */
7666
7667 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
7668
7669 /* Setup channels NAPI */
7670 stmmac_napi_add(ndev);
7671
7672 mutex_init(&priv->lock);
7673
7674 /* If a specific clk_csr value is passed from the platform
7675 * this means that the CSR Clock Range selection cannot be
7676 * changed at run-time and is fixed. Otherwise the driver will try to
7677 * set the MDC clock dynamically according to the actual csr
7678 * clock input.
7679 */
7680 if (priv->plat->clk_csr >= 0)
7681 priv->clk_csr = priv->plat->clk_csr;
7682 else
7683 stmmac_clk_csr_set(priv);
7684
7685 stmmac_check_pcs_mode(priv);
7686
7687 pm_runtime_get_noresume(device);
7688 pm_runtime_set_active(device);
7689 if (!pm_runtime_enabled(device))
7690 pm_runtime_enable(device);
7691
7692 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7693 priv->hw->pcs != STMMAC_PCS_RTBI) {
7694 /* MDIO bus Registration */
7695 ret = stmmac_mdio_register(ndev);
7696 if (ret < 0) {
7697 dev_err_probe(priv->device, ret,
7698 "%s: MDIO bus (id: %d) registration failed\n",
7699 __func__, priv->plat->bus_id);
7700 goto error_mdio_register;
7701 }
7702 }
7703
7704 if (priv->plat->speed_mode_2500)
7705 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv);
7706
7707 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) {
7708 ret = stmmac_xpcs_setup(priv->mii);
7709 if (ret)
7710 goto error_xpcs_setup;
7711 }
7712
7713 ret = stmmac_phy_setup(priv);
7714 if (ret) {
7715 netdev_err(ndev, "failed to setup phy (%d)\n", ret);
7716 goto error_phy_setup;
7717 }
7718
7719 ret = register_netdev(ndev);
7720 if (ret) {
7721 dev_err(priv->device, "%s: ERROR %i registering the device\n",
7722 __func__, ret);
7723 goto error_netdev_register;
7724 }
7725
7726 #ifdef CONFIG_DEBUG_FS
7727 stmmac_init_fs(ndev);
7728 #endif
7729
7730 if (priv->plat->dump_debug_regs)
7731 priv->plat->dump_debug_regs(priv->plat->bsp_priv);
7732
7733 /* Let pm_runtime_put() disable the clocks.
7734 * If CONFIG_PM is not enabled, the clocks will stay powered.
7735 */
7736 pm_runtime_put(device);
7737
7738 return ret;
7739
7740 error_netdev_register:
7741 phylink_destroy(priv->phylink);
7742 error_xpcs_setup:
7743 error_phy_setup:
7744 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7745 priv->hw->pcs != STMMAC_PCS_RTBI)
7746 stmmac_mdio_unregister(ndev);
7747 error_mdio_register:
7748 stmmac_napi_del(ndev);
7749 error_hw_init:
7750 destroy_workqueue(priv->wq);
7751 error_wq_init:
7752 bitmap_free(priv->af_xdp_zc_qps);
7753
7754 return ret;
7755 }
7756 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
7757
7758 /**
7759 * stmmac_dvr_remove
7760 * @dev: device pointer
7761 * Description: this function resets the TX/RX processes, disables the MAC RX/TX,
7762 * changes the link status and releases the DMA descriptor rings.
7763 */
7764 void stmmac_dvr_remove(struct device *dev)
7765 {
7766 struct net_device *ndev = dev_get_drvdata(dev);
7767 struct stmmac_priv *priv = netdev_priv(ndev);
7768
7769 netdev_info(priv->dev, "%s: removing driver", __func__);
7770
7771 pm_runtime_get_sync(dev);
7772
7773 stmmac_stop_all_dma(priv);
7774 stmmac_mac_set(priv, priv->ioaddr, false);
7775 netif_carrier_off(ndev);
7776 unregister_netdev(ndev);
7777
7778 #ifdef CONFIG_DEBUG_FS
7779 stmmac_exit_fs(ndev);
7780 #endif
7781 phylink_destroy(priv->phylink);
7782 if (priv->plat->stmmac_rst)
7783 reset_control_assert(priv->plat->stmmac_rst);
7784 reset_control_assert(priv->plat->stmmac_ahb_rst);
7785 if (priv->hw->pcs != STMMAC_PCS_TBI &&
7786 priv->hw->pcs != STMMAC_PCS_RTBI)
7787 stmmac_mdio_unregister(ndev);
7788 destroy_workqueue(priv->wq);
7789 mutex_destroy(&priv->lock);
7790 bitmap_free(priv->af_xdp_zc_qps);
7791
7792 pm_runtime_disable(dev);
7793 pm_runtime_put_noidle(dev);
7794 }
7795 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
7796
7797 /**
7798 * stmmac_suspend - suspend callback
7799 * @dev: device pointer
7800 * Description: this is the function to suspend the device and it is called
7801 * by the platform driver to stop the network queue, release the resources,
7802 * program the PMT register (for WoL), clean and release driver resources.
7803 */
7804 int stmmac_suspend(struct device *dev)
7805 {
7806 struct net_device *ndev = dev_get_drvdata(dev);
7807 struct stmmac_priv *priv = netdev_priv(ndev);
7808 u32 chan;
7809
7810 if (!ndev || !netif_running(ndev))
7811 return 0;
7812
7813 mutex_lock(&priv->lock);
7814
7815 netif_device_detach(ndev);
7816
7817 stmmac_disable_all_queues(priv);
7818
7819 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
7820 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
7821
7822 if (priv->eee_enabled) {
7823 priv->tx_path_in_lpi_mode = false;
7824 del_timer_sync(&priv->eee_ctrl_timer);
7825 }
7826
7827 /* Stop TX/RX DMA */
7828 stmmac_stop_all_dma(priv);
7829
7830 if (priv->plat->serdes_powerdown)
7831 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv);
7832
7833 /* Enable Power down mode by programming the PMT regs */
7834 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7835 stmmac_pmt(priv, priv->hw, priv->wolopts);
7836 priv->irq_wake = 1;
7837 } else {
7838 stmmac_mac_set(priv, priv->ioaddr, false);
7839 pinctrl_pm_select_sleep_state(priv->device);
7840 }
7841
7842 mutex_unlock(&priv->lock);
7843
7844 rtnl_lock();
7845 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7846 phylink_suspend(priv->phylink, true);
7847 } else {
7848 if (device_may_wakeup(priv->device))
7849 phylink_speed_down(priv->phylink, false);
7850 phylink_suspend(priv->phylink, false);
7851 }
7852 rtnl_unlock();
7853
7854 if (priv->dma_cap.fpesel) {
7855 /* Disable FPE */
7856 stmmac_fpe_configure(priv, priv->ioaddr,
7857 priv->plat->fpe_cfg,
7858 priv->plat->tx_queues_to_use,
7859 priv->plat->rx_queues_to_use, false);
7860
7861 stmmac_fpe_handshake(priv, false);
7862 stmmac_fpe_stop_wq(priv);
7863 }
7864
7865 priv->speed = SPEED_UNKNOWN;
7866 return 0;
7867 }
7868 EXPORT_SYMBOL_GPL(stmmac_suspend);
7869
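/**
 * stmmac_reset_rx_queue - reset the SW state of an RX queue
 * @priv: driver private structure
 * @queue: RX queue index
 */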
7870 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
7871 {
7872 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
7873
7874 rx_q->cur_rx = 0;
7875 rx_q->dirty_rx = 0;
7876 }
7877
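/**
 * stmmac_reset_tx_queue - reset the SW state of a TX queue
 * @priv: driver private structure
 * @queue: TX queue index
 */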
7878 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
7879 {
7880 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
7881
7882 tx_q->cur_tx = 0;
7883 tx_q->dirty_tx = 0;
7884 tx_q->mss = 0;
7885
7886 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
7887 }
7888
7889 /**
7890 * stmmac_reset_queues_param - reset queue parameters
7891 * @priv: device pointer
7892 */
7893 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
7894 {
7895 u32 rx_cnt = priv->plat->rx_queues_to_use;
7896 u32 tx_cnt = priv->plat->tx_queues_to_use;
7897 u32 queue;
7898
7899 for (queue = 0; queue < rx_cnt; queue++)
7900 stmmac_reset_rx_queue(priv, queue);
7901
7902 for (queue = 0; queue < tx_cnt; queue++)
7903 stmmac_reset_tx_queue(priv, queue);
7904 }
7905
7906 /**
7907 * stmmac_resume - resume callback
7908 * @dev: device pointer
7909 * Description: on resume this function is invoked to set up the DMA and CORE
7910 * in a usable state.
7911 */
7912 int stmmac_resume(struct device *dev)
7913 {
7914 struct net_device *ndev = dev_get_drvdata(dev);
7915 struct stmmac_priv *priv = netdev_priv(ndev);
7916 int ret;
7917
7918 if (!netif_running(ndev))
7919 return 0;
7920
7921 /* The Power Down bit in the PM register is cleared
7922 * automatically as soon as a magic packet or a Wake-up frame
7923 * is received. Anyway, it's better to clear this bit manually
7924 * because it can generate problems while resuming
7925 * from other devices (e.g. a serial console).
7926 */
7927 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7928 mutex_lock(&priv->lock);
7929 stmmac_pmt(priv, priv->hw, 0);
7930 mutex_unlock(&priv->lock);
7931 priv->irq_wake = 0;
7932 } else {
7933 pinctrl_pm_select_default_state(priv->device);
7934 /* reset the phy so that it's ready */
7935 if (priv->mii)
7936 stmmac_mdio_reset(priv->mii);
7937 }
7938
7939 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) &&
7940 priv->plat->serdes_powerup) {
7941 ret = priv->plat->serdes_powerup(ndev,
7942 priv->plat->bsp_priv);
7943
7944 if (ret < 0)
7945 return ret;
7946 }
7947
7948 rtnl_lock();
7949 if (device_may_wakeup(priv->device) && priv->plat->pmt) {
7950 phylink_resume(priv->phylink);
7951 } else {
7952 phylink_resume(priv->phylink);
7953 if (device_may_wakeup(priv->device))
7954 phylink_speed_up(priv->phylink);
7955 }
7956 rtnl_unlock();
7957
7958 rtnl_lock();
7959 mutex_lock(&priv->lock);
7960
7961 stmmac_reset_queues_param(priv);
7962
7963 stmmac_free_tx_skbufs(priv);
7964 stmmac_clear_descriptors(priv, &priv->dma_conf);
7965
7966 stmmac_hw_setup(ndev, false);
7967 stmmac_init_coalesce(priv);
7968 stmmac_set_rx_mode(ndev);
7969
7970 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw);
7971
7972 stmmac_enable_all_queues(priv);
7973 stmmac_enable_all_dma_irq(priv);
7974
7975 mutex_unlock(&priv->lock);
7976 rtnl_unlock();
7977
7978 netif_device_attach(ndev);
7979
7980 return 0;
7981 }
7982 EXPORT_SYMBOL_GPL(stmmac_resume);
7983
7984 #ifndef MODULE
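/**
 * stmmac_cmdline_opt - parse the built-in "stmmaceth=" boot options
 * @str: comma-separated list of opt:value pairs
 * Description: overrides the module parameters when the driver is
 * built-in; for example (hypothetical values) a command line such as
 * "stmmaceth=debug:16,watchdog:4000" sets the message level and the
 * transmit timeout.
 */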
7985 static int __init stmmac_cmdline_opt(char *str)
7986 {
7987 char *opt;
7988
7989 if (!str || !*str)
7990 return 1;
7991 while ((opt = strsep(&str, ",")) != NULL) {
7992 if (!strncmp(opt, "debug:", 6)) {
7993 if (kstrtoint(opt + 6, 0, &debug))
7994 goto err;
7995 } else if (!strncmp(opt, "phyaddr:", 8)) {
7996 if (kstrtoint(opt + 8, 0, &phyaddr))
7997 goto err;
7998 } else if (!strncmp(opt, "buf_sz:", 7)) {
7999 if (kstrtoint(opt + 7, 0, &buf_sz))
8000 goto err;
8001 } else if (!strncmp(opt, "tc:", 3)) {
8002 if (kstrtoint(opt + 3, 0, &tc))
8003 goto err;
8004 } else if (!strncmp(opt, "watchdog:", 9)) {
8005 if (kstrtoint(opt + 9, 0, &watchdog))
8006 goto err;
8007 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8008 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8009 goto err;
8010 } else if (!strncmp(opt, "pause:", 6)) {
8011 if (kstrtoint(opt + 6, 0, &pause))
8012 goto err;
8013 } else if (!strncmp(opt, "eee_timer:", 10)) {
8014 if (kstrtoint(opt + 10, 0, &eee_timer))
8015 goto err;
8016 } else if (!strncmp(opt, "chain_mode:", 11)) {
8017 if (kstrtoint(opt + 11, 0, &chain_mode))
8018 goto err;
8019 }
8020 }
8021 return 1;
8022
8023 err:
8024 pr_err("%s: ERROR broken module parameter conversion\n", __func__);
8025 return 1;
8026 }
8027
8028 __setup("stmmaceth=", stmmac_cmdline_opt);
8029 #endif /* MODULE */
8030
8031 static int __init stmmac_init(void)
8032 {
8033 #ifdef CONFIG_DEBUG_FS
8034 /* Create debugfs main directory if it doesn't exist yet */
8035 if (!stmmac_fs_dir)
8036 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8037 register_netdevice_notifier(&stmmac_notifier);
8038 #endif
8039
8040 return 0;
8041 }
8042
8043 static void __exit stmmac_exit(void)
8044 {
8045 #ifdef CONFIG_DEBUG_FS
8046 unregister_netdevice_notifier(&stmmac_notifier);
8047 debugfs_remove_recursive(stmmac_fs_dir);
8048 #endif
8049 }
8050
8051 module_init(stmmac_init)
8052 module_exit(stmmac_exit)
8053
8054 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8055 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8056 MODULE_LICENSE("GPL");
8057