1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/clk.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/ip.h>
21 #include <linux/tcp.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/if_ether.h>
25 #include <linux/crc32.h>
26 #include <linux/mii.h>
27 #include <linux/if.h>
28 #include <linux/if_vlan.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/slab.h>
31 #include <linux/pm_runtime.h>
32 #include <linux/pm_wakeirq.h>
33 #include <linux/prefetch.h>
34 #include <linux/pinctrl/consumer.h>
35 #ifdef CONFIG_DEBUG_FS
36 #include <linux/debugfs.h>
37 #include <linux/seq_file.h>
38 #endif /* CONFIG_DEBUG_FS */
39 #include <linux/net_tstamp.h>
40 #include <linux/phylink.h>
41 #include <linux/udp.h>
42 #include <linux/bpf_trace.h>
43 #include <net/devlink.h>
44 #include <net/page_pool/helpers.h>
45 #include <net/pkt_cls.h>
46 #include <net/xdp_sock_drv.h>
47 #include "stmmac_ptp.h"
48 #include "stmmac_fpe.h"
49 #include "stmmac.h"
50 #include "stmmac_pcs.h"
51 #include "stmmac_xdp.h"
52 #include <linux/reset.h>
53 #include <linux/of_mdio.h>
54 #include "dwmac1000.h"
55 #include "dwxgmac2.h"
56 #include "hwif.h"
57
58 /* As long as the interface is active, we keep the timestamping counter enabled
59 * with fine resolution and binary rollover. This avoid non-monotonic behavior
60 * (clock jumps) when changing timestamping settings at runtime.
61 */
62 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
63
64 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
65 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
66
67 /* Module parameters */
68 #define TX_TIMEO 5000
69 static int watchdog = TX_TIMEO;
70 module_param(watchdog, int, 0644);
71 MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");
72
73 static int debug = -1;
74 module_param(debug, int, 0644);
75 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
76
77 static int phyaddr = -1;
78 module_param(phyaddr, int, 0444);
79 MODULE_PARM_DESC(phyaddr, "Physical device address");
80
81 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
82
83 /* Limit to make sure XDP TX and slow path can coexist */
84 #define STMMAC_XSK_TX_BUDGET_MAX 256
85 #define STMMAC_TX_XSK_AVAIL 16
86 #define STMMAC_RX_FILL_BATCH 16
87
88 #define STMMAC_XDP_PASS 0
89 #define STMMAC_XDP_CONSUMED BIT(0)
90 #define STMMAC_XDP_TX BIT(1)
91 #define STMMAC_XDP_REDIRECT BIT(2)
92 #define STMMAC_XSK_CONSUMED BIT(3)
93
94 static int flow_ctrl = 0xdead;
95 module_param(flow_ctrl, int, 0644);
96 MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");
97
98 static int pause = PAUSE_TIME;
99 module_param(pause, int, 0644);
100 MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");
101
102 #define TC_DEFAULT 64
103 static int tc = TC_DEFAULT;
104 module_param(tc, int, 0644);
105 MODULE_PARM_DESC(tc, "DMA threshold control value");
106
107 /* This is unused */
108 #define DEFAULT_BUFSIZE 1536
109 static int buf_sz = DEFAULT_BUFSIZE;
110 module_param(buf_sz, int, 0644);
111 MODULE_PARM_DESC(buf_sz, "DMA buffer size");
112
113 static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
114 NETIF_MSG_LINK | NETIF_MSG_IFUP |
115 NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);
116
117 #define STMMAC_DEFAULT_LPI_TIMER 1000
118 static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
119 module_param(eee_timer, uint, 0644);
120 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
121 #define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))
122
123 /* By default the driver will use the ring mode to manage tx and rx descriptors,
124 * but allow user to force to use the chain instead of the ring
125 */
126 static unsigned int chain_mode;
127 module_param(chain_mode, int, 0444);
128 MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
129
/* Human-readable names for the dwmac "actual PHY interface" selection
 * field, indexed by the PHY_INTF_SEL_* register value (3-bit field).
 */
static const char *stmmac_dwmac_actphyif[8] = {
	[PHY_INTF_SEL_GMII_MII] = "GMII/MII",
	[PHY_INTF_SEL_RGMII] = "RGMII",
	[PHY_INTF_SEL_SGMII] = "SGMII",
	[PHY_INTF_SEL_TBI] = "TBI",
	[PHY_INTF_SEL_RMII] = "RMII",
	[PHY_INTF_SEL_RTBI] = "RTBI",
	[PHY_INTF_SEL_SMII] = "SMII",
	[PHY_INTF_SEL_REVMII] = "REVMII",
};

/* Human-readable names for the dwxgmac PHY interface field, indexed by the
 * PHY_INTF_* register value. Unlisted indices are implicitly NULL.
 */
static const char *stmmac_dwxgmac_phyif[4] = {
	[PHY_INTF_GMII] = "GMII",
	[PHY_INTF_RGMII] = "RGMII",
};
145
146 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
147 /* For MSI interrupts handling */
148 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
149 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
150 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
151 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
152 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
153 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
154 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
155 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
156 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
157 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
158 u32 rxmode, u32 chan);
159 static void stmmac_vlan_restore(struct stmmac_priv *priv);
160
161 #ifdef CONFIG_DEBUG_FS
162 static const struct net_device_ops stmmac_netdev_ops;
163 static void stmmac_init_fs(struct net_device *dev);
164 static void stmmac_exit_fs(struct net_device *dev);
165 #endif
166
167 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
168
/* Private data attached to the devlink instance: only a back-pointer to the
 * driver's main private structure.
 */
struct stmmac_devlink_priv {
	struct stmmac_priv *stmmac_priv;
};

/* Driver-specific devlink parameter IDs, allocated just above the generic
 * devlink ID range to avoid collisions.
 */
enum stmmac_dl_param_id {
	STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
};
177
178 /**
179 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
180 * @bsp_priv: BSP private data structure (unused)
181 * @clk_tx_i: the transmit clock
182 * @interface: the selected interface mode
183 * @speed: the speed that the MAC will be operating at
184 *
185 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
186 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
187 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
188 * the plat_data->set_clk_tx_rate method directly, call it via their own
189 * implementation, or implement their own method should they have more
190 * complex requirements. It is intended to only be used in this method.
191 *
192 * plat_data->clk_tx_i must be filled in.
193 */
stmmac_set_clk_tx_rate(void * bsp_priv,struct clk * clk_tx_i,phy_interface_t interface,int speed)194 int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
195 phy_interface_t interface, int speed)
196 {
197 long rate = rgmii_clock(speed);
198
199 /* Silently ignore unsupported speeds as rgmii_clock() only
200 * supports 10, 100 and 1000Mbps. We do not want to spit
201 * errors for 2500 and higher speeds here.
202 */
203 if (rate < 0)
204 return 0;
205
206 return clk_set_rate(clk_tx_i, rate);
207 }
208 EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
209
210 /**
211 * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
212 * @regval: pointer to a u32 for the resulting register value
213 * @blen: pointer to an array of u32 containing the burst length values in bytes
214 * @len: the number of entries in the @blen array
215 */
stmmac_axi_blen_to_mask(u32 * regval,const u32 * blen,size_t len)216 void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
217 {
218 size_t i;
219 u32 val;
220
221 for (val = i = 0; i < len; i++) {
222 u32 burst = blen[i];
223
224 /* Burst values of zero must be skipped. */
225 if (!burst)
226 continue;
227
228 /* The valid range for the burst length is 4 to 256 inclusive,
229 * and it must be a power of two.
230 */
231 if (burst < 4 || burst > 256 || !is_power_of_2(burst)) {
232 pr_err("stmmac: invalid burst length %u at index %zu\n",
233 burst, i);
234 continue;
235 }
236
237 /* Since burst is a power of two, and the register field starts
238 * with burst = 4, shift right by two bits so bit 0 of the field
239 * corresponds with the minimum value.
240 */
241 val |= burst >> 2;
242 }
243
244 *regval = FIELD_PREP(DMA_AXI_BLEN_MASK, val);
245 }
246 EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
247
248 /**
249 * stmmac_verify_args - verify the driver parameters.
250 * Description: it checks the driver parameters and set a default in case of
251 * errors.
252 */
stmmac_verify_args(void)253 static void stmmac_verify_args(void)
254 {
255 if (unlikely(watchdog < 0))
256 watchdog = TX_TIMEO;
257 if (unlikely((pause < 0) || (pause > 0xffff)))
258 pause = PAUSE_TIME;
259
260 if (flow_ctrl != 0xdead)
261 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
262 }
263
__stmmac_disable_all_queues(struct stmmac_priv * priv)264 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
265 {
266 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
267 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
268 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
269 u32 queue;
270
271 for (queue = 0; queue < maxq; queue++) {
272 struct stmmac_channel *ch = &priv->channel[queue];
273
274 if (stmmac_xdp_is_enabled(priv) &&
275 test_bit(queue, priv->af_xdp_zc_qps)) {
276 napi_disable(&ch->rxtx_napi);
277 continue;
278 }
279
280 if (queue < rx_queues_cnt)
281 napi_disable(&ch->rx_napi);
282 if (queue < tx_queues_cnt)
283 napi_disable(&ch->tx_napi);
284 }
285 }
286
287 /**
288 * stmmac_disable_all_queues - Disable all queues
289 * @priv: driver private structure
290 */
stmmac_disable_all_queues(struct stmmac_priv * priv)291 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
292 {
293 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
294 struct stmmac_rx_queue *rx_q;
295 u32 queue;
296
297 /* synchronize_rcu() needed for pending XDP buffers to drain */
298 for (queue = 0; queue < rx_queues_cnt; queue++) {
299 rx_q = &priv->dma_conf.rx_queue[queue];
300 if (rx_q->xsk_pool) {
301 synchronize_rcu();
302 break;
303 }
304 }
305
306 __stmmac_disable_all_queues(priv);
307 }
308
309 /**
310 * stmmac_enable_all_queues - Enable all queues
311 * @priv: driver private structure
312 */
stmmac_enable_all_queues(struct stmmac_priv * priv)313 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
314 {
315 u32 rx_queues_cnt = priv->plat->rx_queues_to_use;
316 u32 tx_queues_cnt = priv->plat->tx_queues_to_use;
317 u32 maxq = max(rx_queues_cnt, tx_queues_cnt);
318 u32 queue;
319
320 for (queue = 0; queue < maxq; queue++) {
321 struct stmmac_channel *ch = &priv->channel[queue];
322
323 if (stmmac_xdp_is_enabled(priv) &&
324 test_bit(queue, priv->af_xdp_zc_qps)) {
325 napi_enable(&ch->rxtx_napi);
326 continue;
327 }
328
329 if (queue < rx_queues_cnt)
330 napi_enable(&ch->rx_napi);
331 if (queue < tx_queues_cnt)
332 napi_enable(&ch->tx_napi);
333 }
334 }
335
stmmac_service_event_schedule(struct stmmac_priv * priv)336 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
337 {
338 if (!test_bit(STMMAC_DOWN, &priv->state) &&
339 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
340 queue_work(priv->wq, &priv->service_task);
341 }
342
/* Fatal-error path: take the link down, request a full reset and let the
 * service task perform the actual recovery.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
349
/* Hex-dump @len bytes of packet data at @buf to the debug log. */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
355
/* Number of free TX descriptors in @queue. One slot is always kept unused
 * so that cur_tx == dirty_tx unambiguously means "ring empty".
 */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	if (tx_q->dirty_tx > tx_q->cur_tx)
		return tx_q->dirty_tx - tx_q->cur_tx - 1;

	/* cur_tx is at or ahead of dirty_tx: account for ring wrap */
	return priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1;
}
368
369 /**
370 * stmmac_rx_dirty - Get RX queue dirty
371 * @priv: driver private structure
372 * @queue: RX queue index
373 */
stmmac_rx_dirty(struct stmmac_priv * priv,u32 queue)374 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
375 {
376 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
377 u32 dirty;
378
379 if (rx_q->dirty_rx <= rx_q->cur_rx)
380 dirty = rx_q->cur_rx - rx_q->dirty_rx;
381 else
382 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx;
383
384 return dirty;
385 }
386
stmmac_eee_tx_busy(struct stmmac_priv * priv)387 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
388 {
389 u32 tx_cnt = priv->plat->tx_queues_to_use;
390 u32 queue;
391
392 /* check if all TX queues have the work finished */
393 for (queue = 0; queue < tx_cnt; queue++) {
394 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
395
396 if (tx_q->dirty_tx != tx_q->cur_tx)
397 return true; /* still unfinished work */
398 }
399
400 return false;
401 }
402
/* Re-arm the software LPI-entry timer for another tx_lpi_timer period. */
static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
{
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
407
408 /**
409 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
410 * @priv: driver private structure
411 * Description: this function is to verify and enter in LPI mode in case of
412 * EEE.
413 */
stmmac_try_to_start_sw_lpi(struct stmmac_priv * priv)414 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
415 {
416 if (stmmac_eee_tx_busy(priv)) {
417 stmmac_restart_sw_lpi_timer(priv);
418 return;
419 }
420
421 /* Check and enter in LPI mode */
422 if (!priv->tx_path_in_lpi_mode)
423 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
424 priv->tx_lpi_clk_stop, 0);
425 }
426
/**
 * stmmac_stop_sw_lpi - stop transmitting LPI
 * @priv: driver private structure
 * Description: When using software-controlled LPI, stop transmitting LPI state.
 */
static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
	/* Cancel the LPI-entry timer first so it cannot re-enter LPI
	 * behind our back, then disable LPI in the hardware.
	 */
	timer_delete_sync(&priv->eee_ctrl_timer);
	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
	priv->tx_path_in_lpi_mode = false;
}
438
/**
 * stmmac_eee_ctrl_timer - EEE TX SW timer.
 * @t: timer_list struct containing private info
 * Description:
 * if there is no data transfer and if we are not in LPI state,
 * then MAC Transmitter can be moved to LPI state.
 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	/* Recover the private structure embedding this timer */
	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);

	stmmac_try_to_start_sw_lpi(priv);
}
452
453 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
454 * @priv: driver private structure
455 * @p : descriptor pointer
456 * @skb : the socket buffer
457 * Description :
458 * This function will read timestamp from the descriptor & pass it to stack.
459 * and also perform some sanity checks.
460 */
stmmac_get_tx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct sk_buff * skb)461 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
462 struct dma_desc *p, struct sk_buff *skb)
463 {
464 struct skb_shared_hwtstamps shhwtstamp;
465 bool found = false;
466 u64 ns = 0;
467
468 if (!priv->hwts_tx_en)
469 return;
470
471 /* exit if skb doesn't support hw tstamp */
472 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
473 return;
474
475 /* check tx tstamp status */
476 if (stmmac_get_tx_timestamp_status(priv, p)) {
477 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
478 found = true;
479 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
480 found = true;
481 }
482
483 if (found) {
484 ns -= priv->plat->cdc_error_adj;
485
486 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
487 shhwtstamp.hwtstamp = ns_to_ktime(ns);
488
489 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
490 /* pass tstamp to stack */
491 skb_tstamp_tx(skb, &shhwtstamp);
492 }
493 }
494
495 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
496 * @priv: driver private structure
497 * @p : descriptor pointer
498 * @np : next descriptor pointer
499 * @skb : the socket buffer
500 * Description :
501 * This function will read received packet's timestamp from the descriptor
502 * and pass it to stack. It also perform some sanity checks.
503 */
stmmac_get_rx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct dma_desc * np,struct sk_buff * skb)504 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
505 struct dma_desc *np, struct sk_buff *skb)
506 {
507 struct skb_shared_hwtstamps *shhwtstamp = NULL;
508 struct dma_desc *desc = p;
509 u64 ns = 0;
510
511 if (!priv->hwts_rx_en)
512 return;
513 /* For GMAC4, the valid timestamp is from CTX next desc. */
514 if (dwmac_is_xmac(priv->plat->core_type))
515 desc = np;
516
517 /* Check if timestamp is available */
518 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
519 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
520
521 ns -= priv->plat->cdc_error_adj;
522
523 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
524 shhwtstamp = skb_hwtstamps(skb);
525 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
526 shhwtstamp->hwtstamp = ns_to_ktime(ns);
527 } else {
528 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
529 }
530 }
531
/* Program the sub-second increment and addend registers from the current
 * PTP clock rate, and cache sub_second_inc/default_addend for later use.
 */
static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
{
	bool xmac = dwmac_is_xmac(priv->plat->core_type);
	u32 sec_inc = 0;
	u64 temp = 0;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	/* frequency divide ratio: 1e9 ns divided by the increment step */
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
}
558
/**
 * stmmac_hwtstamp_set - control hardware timestamping.
 * @dev: device pointer.
 * @config: the timestamping configuration.
 * @extack: netlink extended ack structure for error reporting.
 * Description:
 * This function configures the MAC to enable/disable both outgoing(TX)
 * and incoming(RX) packets time stamping based on user input.
 * Return Value:
 * 0 on success and an appropriate -ve integer on failure.
 */
static int stmmac_hwtstamp_set(struct net_device *dev,
			       struct kernel_hwtstamp_config *config,
			       struct netlink_ext_ack *extack)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	/* Neither basic nor advanced timestamping: nothing to configure */
	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (!netif_running(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot change timestamping configuration while down");
		return -ENODEV;
	}

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config->flags, config->tx_type, config->rx_filter);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	/* With the advanced timestamping engine the requested RX filter is
	 * mapped onto the PTP_TCR_* control bits; each case may also widen
	 * config->rx_filter to what the hardware actually implements.
	 */
	if (priv->adv_ts) {
		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config->rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			/* Older cores additionally need the event enable bit */
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config->rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic timestamping cannot filter: anything other than
		 * "none" is widened to PTP v1 UDP event packets.
		 */
		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config->rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;

	/* Always keep the counter running (STMMAC_HWTS_ACTIVE); use fine
	 * correction unless coarse update mode was requested.
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;
	if (!priv->tsfupdt_coarse)
		priv->systime_flags |= PTP_TCR_TSCFUPDT;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the accepted configuration for stmmac_hwtstamp_get() */
	priv->tstamp_config = *config;

	return 0;
}
759
760 /**
761 * stmmac_hwtstamp_get - read hardware timestamping.
762 * @dev: device pointer.
763 * @config: the timestamping configuration.
764 * Description:
765 * This function obtain the current hardware timestamping settings
766 * as requested.
767 */
stmmac_hwtstamp_get(struct net_device * dev,struct kernel_hwtstamp_config * config)768 static int stmmac_hwtstamp_get(struct net_device *dev,
769 struct kernel_hwtstamp_config *config)
770 {
771 struct stmmac_priv *priv = netdev_priv(dev);
772
773 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
774 return -EOPNOTSUPP;
775
776 *config = priv->tstamp_config;
777
778 return 0;
779 }
780
/**
 * stmmac_init_tstamp_counter - init hardware timestamping counter
 * @priv: driver private structure
 * @systime_flags: timestamping flags
 * Description:
 * Initialize hardware counter for packet timestamping.
 * This is valid as long as the interface is open and not suspended.
 * Will be rerun after resuming from suspend, case in which the timestamping
 * flags updated by stmmac_hwtstamp_set() also need to be restored.
 */
static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
				      u32 systime_flags)
{
	struct timespec64 now;

	/* The sub-second increment computation divides by this rate */
	if (!priv->plat->clk_ptp_rate) {
		netdev_err(priv->dev, "Invalid PTP clock rate");
		return -EINVAL;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	stmmac_update_subsecond_increment(priv);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
814
/**
 * stmmac_init_timestamping - initialise timestamping
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
 * This is done by looking at the HW cap. register.
 * This function also registers the ptp driver.
 */
static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
	bool xmac = dwmac_is_xmac(priv->plat->core_type);
	int ret;

	/* Let the platform fix up the PTP clock frequency first, if needed */
	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
		netdev_info(priv->dev, "PTP not supported by HW\n");
		return -EOPNOTSUPP;
	}

	/* Start the counter with fine correction enabled by default */
	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
					 PTP_TCR_TSCFUPDT);
	if (ret) {
		netdev_warn(priv->dev, "PTP init failed\n");
		return ret;
	}

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	/* Timestamping starts disabled until userspace requests it */
	memset(&priv->tstamp_config, 0, sizeof(priv->tstamp_config));
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}
866
stmmac_setup_ptp(struct stmmac_priv * priv)867 static void stmmac_setup_ptp(struct stmmac_priv *priv)
868 {
869 int ret;
870
871 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
872 if (ret < 0)
873 netdev_warn(priv->dev,
874 "failed to enable PTP reference clock: %pe\n",
875 ERR_PTR(ret));
876
877 if (stmmac_init_timestamping(priv) == 0)
878 stmmac_ptp_register(priv);
879 }
880
/* Tear down PTP: unregister the clock device, then gate its reference clock */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
886
stmmac_legacy_serdes_power_down(struct stmmac_priv * priv)887 static void stmmac_legacy_serdes_power_down(struct stmmac_priv *priv)
888 {
889 if (priv->plat->serdes_powerdown && priv->legacy_serdes_is_powered)
890 priv->plat->serdes_powerdown(priv->dev, priv->plat->bsp_priv);
891
892 priv->legacy_serdes_is_powered = false;
893 }
894
stmmac_legacy_serdes_power_up(struct stmmac_priv * priv)895 static int stmmac_legacy_serdes_power_up(struct stmmac_priv *priv)
896 {
897 int ret;
898
899 if (!priv->plat->serdes_powerup)
900 return 0;
901
902 ret = priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
903 if (ret < 0)
904 netdev_err(priv->dev, "SerDes powerup failed\n");
905 else
906 priv->legacy_serdes_is_powered = true;
907
908 return ret;
909 }
910
/**
 * stmmac_mac_flow_ctrl - Configure flow control in all queues
 * @priv: driver private structure
 * @duplex: duplex passed to the next function
 * @flow_ctrl: desired flow control modes
 * Description: It is used for configuring the flow control in all queues
 */
static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
				 unsigned int flow_ctrl)
{
	u32 tx_cnt = priv->plat->tx_queues_to_use;

	/* Thin wrapper: forwards the driver's configured pause time and
	 * active TX queue count to the core flow-control op.
	 */
	stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
			 tx_cnt);
}
926
/* phylink .mac_get_caps: report the MAC capabilities for @interface,
 * refreshed from the hardware and clamped to the platform's max speed.
 */
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
					 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Refresh the MAC-specific capabilities */
	stmmac_mac_update_caps(priv);

	/* Strip half-duplex modes the HW capability register says are absent */
	if (priv->hw_cap_support && !priv->dma_cap.half_duplex)
		priv->hw->link.caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);

	config->mac_capabilities = priv->hw->link.caps;

	/* Speed limiting must run after mac_capabilities is populated */
	if (priv->plat->max_speed)
		phylink_limit_mac_speed(config, priv->plat->max_speed);

	return config->mac_capabilities;
}
945
/* phylink .mac_select_pcs: pick the PCS instance for @interface. The
 * platform callback gets first choice; otherwise fall back to the
 * integrated SGMII PCS when available, or no PCS at all.
 */
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	struct phylink_pcs *pcs;

	if (priv->plat->select_pcs) {
		pcs = priv->plat->select_pcs(priv, interface);
		if (!IS_ERR(pcs))
			return pcs;
	}

	/* The PCS control register is only relevant for SGMII, TBI and RTBI
	 * modes. We no longer support TBI or RTBI, so only configure this
	 * register when operating in SGMII mode with the integrated PCS.
	 */
	if (priv->hw->pcs & STMMAC_PCS_SGMII && priv->integrated_pcs)
		return &priv->integrated_pcs->pcs;

	return NULL;
}
967
/* phylink .mac_config: intentionally empty — configuration is done by
 * the PCS (see the mac_select_pcs/xpcs paths).
 */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
973
/* phylink .mac_finish: let the platform glue finalize the MAC setup for
 * @mode/@interface, when it provides a hook. Always succeeds.
 */
static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!priv->plat->mac_finish)
		return 0;

	priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);

	return 0;
}
985
/* phylink .mac_link_down: disable the MAC and, where supported, EEE PLS
 * signalling and the FPE verification state machine.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	if (priv->dma_cap.eee)
		stmmac_set_eee_pls(priv, priv->hw, false);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
998
/* phylink .mac_link_up: program MAC_CTRL_REG for the negotiated speed
 * and duplex, configure flow control and the TX clock, then re-enable
 * the MAC and ancillary features (EEE PLS, FPE, timestamp latency).
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	unsigned int flow_ctrl;
	u32 old_ctrl, ctrl;
	int ret;

	/* Some platforms must power the SerDes only after the PHY has
	 * reported link up.
	 */
	if (priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)
		stmmac_legacy_serdes_power_up(priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	/* Translate the negotiated speed into the interface-specific
	 * speed bits; an unsupported speed aborts the link-up sequence.
	 */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	/* Platform-specific fixups (e.g. glue-layer clock muxing) */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		flow_ctrl = FLOW_TX;
	else
		flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);

	/* Only touch the register when the value actually changed */
	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	/* Retarget the transmit clock to the negotiated speed; failure is
	 * reported but does not abort the link-up.
	 */
	if (priv->plat->set_clk_tx_rate) {
		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
						  priv->plat->clk_tx_i,
						  interface, speed);
		if (ret < 0)
			netdev_err(priv->dev,
				   "failed to configure %s transmit clock for %dMbps: %pe\n",
				   phy_modes(interface), speed, ERR_PTR(ret));
	}

	stmmac_mac_set(priv, priv->ioaddr, true);
	if (priv->dma_cap.eee)
		stmmac_set_eee_pls(priv, priv->hw, true);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}
1118
/* phylink .mac_disable_tx_lpi: tear down EEE — stop the software LPI
 * timer, force the hardware out of LPI mode and zero the LPI timer.
 */
static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Cleared outside the lock so concurrent paths see EEE going away
	 * as early as possible.
	 */
	priv->eee_active = false;

	mutex_lock(&priv->lock);

	priv->eee_enabled = false;

	netdev_dbg(priv->dev, "disable EEE\n");
	priv->eee_sw_timer_en = false;
	timer_delete_sync(&priv->eee_ctrl_timer);
	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
	priv->tx_path_in_lpi_mode = false;

	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
	mutex_unlock(&priv->lock);
}
1138
/* phylink .mac_enable_tx_lpi: enable EEE TX LPI with @timer, preferring
 * the hardware LPI timer and falling back to the software timer when the
 * hardware rejects the configuration. Always returns 0.
 */
static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				    bool tx_clk_stop)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	int ret;

	priv->tx_lpi_timer = timer;
	priv->eee_active = true;

	mutex_lock(&priv->lock);

	priv->eee_enabled = true;

	/* Update the transmit clock stop according to PHY capability if
	 * the platform allows
	 */
	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
		priv->tx_lpi_clk_stop = tx_clk_stop;

	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
			     STMMAC_DEFAULT_TWT_LS);

	/* Try to configure the hardware timer. */
	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);

	if (ret) {
		/* Hardware timer mode not supported, or value out of range.
		 * Fall back to using software LPI mode
		 */
		priv->eee_sw_timer_en = true;
		stmmac_restart_sw_lpi_timer(priv);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");

	return 0;
}
1178
/* phylink .mac_wol_set: record the requested Wake-on-LAN options and
 * arm or disarm the device as a wakeup source accordingly.
 */
static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
			      const u8 *sopass)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	device_set_wakeup_enable(priv->device, wolopts != 0);

	mutex_lock(&priv->lock);
	priv->wolopts = wolopts;
	mutex_unlock(&priv->lock);

	return 0;
}
1192
/* MAC-side phylink operations implemented by this driver */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_get_caps = stmmac_mac_get_caps,
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_finish = stmmac_mac_finish,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
	.mac_wol_set = stmmac_mac_wol_set,
};
1204
/**
 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
 * @priv: driver private structure
 * Description: this is to verify if the HW supports the PCS.
 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
 * configured for the TBI, RTBI, or SGMII PHY interface.
 */
static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
{
	int interface = priv->plat->phy_interface;
	int speed = priv->plat->mac_port_sel_speed;

	if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
		netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
		priv->hw->pcs = STMMAC_PCS_SGMII;

		/* A fixed platform port speed selects reverse-SGMII mode;
		 * 0 means unset, anything else is invalid (warn, then
		 * deliberately fall through to disable reverse SGMII).
		 */
		switch (speed) {
		case SPEED_10:
		case SPEED_100:
		case SPEED_1000:
			priv->hw->reverse_sgmii_enable = true;
			break;

		default:
			dev_warn(priv->device, "invalid port speed\n");
			fallthrough;
		case 0:
			priv->hw->reverse_sgmii_enable = false;
			break;
		}
	}
}
1237
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success, negative errno when no PHY can be found or attached.
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	struct ethtool_keee eee;
	u32 dev_flags = 0;
	int ret;

	/* Nothing to do when phylink does not expect a PHY (e.g. fixed
	 * link), or when the xpcs handles the link via C73 autoneg.
	 */
	if (!phylink_expects_phy(priv->phylink))
		return 0;

	if (priv->hw->xpcs &&
	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
		return 0;

	/* Prefer the port firmware node, fall back to the device's own */
	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	if (priv->plat->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
		dev_flags |= PHY_F_KEEP_PREAMBLE_BEFORE_SFD;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		phydev->dev_flags |= dev_flags;

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		/* Drop the reference taken by fwnode_get_phy_node();
		 * phylink re-resolves the PHY from @fwnode itself.
		 */
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, dev_flags);
	}

	if (ret) {
		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
			   ERR_PTR(ret));
		return ret;
	}

	/* Configure phylib's copy of the LPI timer. Normally,
	 * phylink_config.lpi_timer_default would do this, but there is a
	 * chance that userspace could change the eee_timer setting via sysfs
	 * before the first open. Thus, preserve existing behaviour.
	 */
	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
		eee.tx_lpi_timer = priv->tx_lpi_timer;
		phylink_ethtool_set_eee(priv->phylink, &eee);
	}

	return 0;
}
1319
/* Populate priv->phylink_config from hardware/platform capabilities and
 * create the phylink instance. Returns 0 or the phylink_create() error.
 */
static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
	struct stmmac_mdio_bus_data *mdio_bus_data;
	struct phylink_config *config;
	struct fwnode_handle *fwnode;
	struct phylink_pcs *pcs;
	struct phylink *phylink;

	config = &priv->phylink_config;

	config->dev = &priv->dev->dev;
	config->type = PHYLINK_NETDEV;
	config->mac_managed_pm = true;

	/* Stmmac always requires an RX clock for hardware initialization */
	config->mac_requires_rxc = true;

	/* Disable EEE RX clock stop to ensure VLAN register access works
	 * correctly.
	 */
	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
	    !(priv->dev->features & NETIF_F_VLAN_FEATURES))
		config->eee_rx_clk_stop_enable = true;

	/* Set the default transmit clock stop bit based on the platform glue */
	priv->tx_lpi_clk_stop = priv->plat->flags &
				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;

	mdio_bus_data = priv->plat->mdio_bus_data;
	if (mdio_bus_data)
		config->default_an_inband = mdio_bus_data->default_an_inband;

	/* Get the PHY interface modes (at the PHY end of the link) that
	 * are supported by the platform.
	 */
	if (priv->plat->get_interfaces)
		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
					   config->supported_interfaces);

	/* Set the platform/firmware specified interface mode if the
	 * supported interfaces have not already been provided using
	 * phy_interface as a last resort.
	 */
	if (phy_interface_empty(config->supported_interfaces))
		__set_bit(priv->plat->phy_interface,
			  config->supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
	else
		pcs = priv->hw->phylink_pcs;

	if (pcs)
		phy_interface_or(config->supported_interfaces,
				 config->supported_interfaces,
				 pcs->supported_interfaces);

	if (priv->dma_cap.eee) {
		/* Assume all supported interfaces also support LPI */
		memcpy(config->lpi_interfaces, config->supported_interfaces,
		       sizeof(config->lpi_interfaces));

		/* All full duplex speeds above 100Mbps are supported */
		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
		config->lpi_timer_default = eee_timer * 1000;
		config->eee_enabled_default = true;
	}

	/* Wake-on-LAN: either delegate entirely to the PHY (legacy flag)
	 * or advertise what the MAC's PMT block can do.
	 */
	config->wol_phy_speed_ctrl = true;
	if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
		config->wol_phy_legacy = true;
	} else {
		if (priv->dma_cap.pmt_remote_wake_up)
			config->wol_mac_support |= WAKE_UCAST;
		if (priv->dma_cap.pmt_magic_frame)
			config->wol_mac_support |= WAKE_MAGIC;
	}

	fwnode = priv->plat->port_node;
	if (!fwnode)
		fwnode = dev_fwnode(priv->device);

	phylink = phylink_create(config, fwnode, priv->plat->phy_interface,
				 &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
1411
/* Dump every RX queue's descriptor ring, selecting the descriptor layout
 * (extended or basic) currently in use.
 */
static void stmmac_display_rx_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int desc_size;
	void *head_rx;
	u32 queue;

	/* Display RX rings */
	for (queue = 0; queue < rx_cnt; queue++) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		pr_info("\tRX Queue %u rings\n", queue);

		if (priv->extend_desc) {
			head_rx = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			head_rx = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		/* Display RX ring */
		stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
}
1439
stmmac_display_tx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1440 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1441 struct stmmac_dma_conf *dma_conf)
1442 {
1443 u32 tx_cnt = priv->plat->tx_queues_to_use;
1444 unsigned int desc_size;
1445 void *head_tx;
1446 u32 queue;
1447
1448 /* Display TX rings */
1449 for (queue = 0; queue < tx_cnt; queue++) {
1450 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1451
1452 pr_info("\tTX Queue %d rings\n", queue);
1453
1454 if (priv->extend_desc) {
1455 head_tx = (void *)tx_q->dma_etx;
1456 desc_size = sizeof(struct dma_extended_desc);
1457 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
1458 head_tx = (void *)tx_q->dma_entx;
1459 desc_size = sizeof(struct dma_edesc);
1460 } else {
1461 head_tx = (void *)tx_q->dma_tx;
1462 desc_size = sizeof(struct dma_desc);
1463 }
1464
1465 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1466 tx_q->dma_tx_phy, desc_size);
1467 }
1468 }
1469
/* Dump all RX and TX descriptor rings for this device. */
static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	stmmac_display_rx_rings(priv, dma_conf);
	stmmac_display_tx_rings(priv, dma_conf);
}
1479
stmmac_rx_offset(struct stmmac_priv * priv)1480 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1481 {
1482 if (stmmac_xdp_is_enabled(priv))
1483 return XDP_PACKET_HEADROOM;
1484
1485 return NET_SKB_PAD;
1486 }
1487
stmmac_set_bfsize(int mtu)1488 static int stmmac_set_bfsize(int mtu)
1489 {
1490 int ret;
1491
1492 if (mtu >= BUF_SIZE_8KiB)
1493 ret = BUF_SIZE_16KiB;
1494 else if (mtu >= BUF_SIZE_4KiB)
1495 ret = BUF_SIZE_8KiB;
1496 else if (mtu >= BUF_SIZE_2KiB)
1497 ret = BUF_SIZE_4KiB;
1498 else if (mtu > DEFAULT_BUFSIZE)
1499 ret = BUF_SIZE_2KiB;
1500 else
1501 ret = DEFAULT_BUFSIZE;
1502
1503 return ret;
1504 }
1505
/**
 * stmmac_clear_rx_descriptors - clear RX descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: this function is called to clear the RX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* Clear the RX descriptors; the last descriptor of the ring is
	 * flagged so the DMA wraps around correctly.
	 */
	for (i = 0; i < dma_conf->dma_rx_size; i++)
		if (priv->extend_desc)
			stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic,
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
		else
			stmmac_init_rx_desc(priv, &rx_q->dma_rx[i],
					    priv->use_riwt, priv->mode,
					    (i == dma_conf->dma_rx_size - 1),
					    dma_conf->dma_buf_sz);
}
1534
/**
 * stmmac_clear_tx_descriptors - clear tx descriptors
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index.
 * Description: this function is called to clear the TX descriptors
 * in case of both basic and extended descriptors are used.
 */
static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
					struct stmmac_dma_conf *dma_conf,
					u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* Clear the TX descriptors */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		int last = (i == (dma_conf->dma_tx_size - 1));
		struct dma_desc *p;

		/* Pick the descriptor layout in use: extended, enhanced
		 * (TBS-capable) or basic.
		 */
		if (priv->extend_desc)
			p = &tx_q->dma_etx[i].basic;
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &tx_q->dma_entx[i].basic;
		else
			p = &tx_q->dma_tx[i];

		stmmac_init_tx_desc(priv, p, priv->mode, last);
	}
}
1565
1566 /**
1567 * stmmac_clear_descriptors - clear descriptors
1568 * @priv: driver private structure
1569 * @dma_conf: structure to take the dma data
1570 * Description: this function is called to clear the TX and RX descriptors
1571 * in case of both basic and extended descriptors are used.
1572 */
stmmac_clear_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1573 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1574 struct stmmac_dma_conf *dma_conf)
1575 {
1576 u32 rx_queue_cnt = priv->plat->rx_queues_to_use;
1577 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
1578 u32 queue;
1579
1580 /* Clear the RX descriptors */
1581 for (queue = 0; queue < rx_queue_cnt; queue++)
1582 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1583
1584 /* Clear the TX descriptors */
1585 for (queue = 0; queue < tx_queue_cnt; queue++)
1586 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1587 }
1588
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor. Returns 0 or -ENOMEM on
 * page-pool allocation failure.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Narrow DMA masks need pages below the 4GiB boundary */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* With split-header active, a second page receives the payload */
	if (priv->sph_active && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1639
1640 /**
1641 * stmmac_free_rx_buffer - free RX dma buffers
1642 * @priv: private structure
1643 * @rx_q: RX queue
1644 * @i: buffer index.
1645 */
stmmac_free_rx_buffer(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,int i)1646 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1647 struct stmmac_rx_queue *rx_q,
1648 int i)
1649 {
1650 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1651
1652 if (buf->page)
1653 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1654 buf->page = NULL;
1655
1656 if (buf->sec_page)
1657 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1658 buf->sec_page = NULL;
1659 }
1660
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	/* Unmap any DMA buffer we mapped ourselves (XDP_TX buffers are
	 * owned by the RX page pool and must not be unmapped here).
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Release any XDP frame attached to this slot */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* XSK frames are completed in bulk elsewhere; just count them */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
1707
1708 /**
1709 * dma_free_rx_skbufs - free RX dma buffers
1710 * @priv: private structure
1711 * @dma_conf: structure to take the dma data
1712 * @queue: RX queue index
1713 */
dma_free_rx_skbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1714 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1715 struct stmmac_dma_conf *dma_conf,
1716 u32 queue)
1717 {
1718 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1719 int i;
1720
1721 for (i = 0; i < dma_conf->dma_rx_size; i++)
1722 stmmac_free_rx_buffer(priv, rx_q, i);
1723 }
1724
/* Populate every descriptor of RX @queue with a freshly allocated
 * page-pool buffer. Returns 0, or a negative errno on allocation
 * failure (buffers already attached are left in place).
 */
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *p;
		int ret;

		if (priv->extend_desc)
			p = &((rx_q->dma_erx + i)->basic);
		else
			p = rx_q->dma_rx + i;

		ret = stmmac_init_rx_buffers(priv, dma_conf, p, i, flags,
					     queue);
		if (ret)
			return ret;

		rx_q->buf_alloc_num++;
	}

	return 0;
}
1751
1752 /**
1753 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1754 * @priv: private structure
1755 * @dma_conf: structure to take the dma data
1756 * @queue: RX queue index
1757 */
dma_free_rx_xskbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1758 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1759 struct stmmac_dma_conf *dma_conf,
1760 u32 queue)
1761 {
1762 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1763 int i;
1764
1765 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1766 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1767
1768 if (!buf->xdp)
1769 continue;
1770
1771 xsk_buff_free(buf->xdp);
1772 buf->xdp = NULL;
1773 }
1774 }
1775
/* Fill RX @queue with zero-copy buffers from its XSK pool. Returns 0,
 * or -ENOMEM when the pool runs out (already-filled slots are kept).
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf;
		dma_addr_t dma_addr;
		struct dma_desc *p;

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + i);
		else
			p = rx_q->dma_rx + i;

		buf = &rx_q->buf_pool[i];

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		/* Program the descriptor with the pool buffer's DMA address */
		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, p, dma_addr);
		rx_q->buf_alloc_num++;
	}

	return 0;
}
1812
/* Return the XSK buffer pool bound to @queue when XDP is enabled and the
 * queue runs in AF_XDP zero-copy mode; NULL otherwise.
 */
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (stmmac_xdp_is_enabled(priv) && test_bit(queue, priv->af_xdp_zc_qps))
		return xsk_get_pool_from_qid(priv->dev, queue);

	return NULL;
}
1820
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes. Returns 0 or -ENOMEM on buffer allocation failure.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Drop any previous memory model before registering a new one */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Register the memory model matching the buffer source: the XSK
	 * pool for zero-copy queues, the page pool otherwise.
	 */
	if (rx_q->xsk_pool) {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    rx_q->queue_index);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    rx_q->queue_index);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, rx_q->dma_erx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 1);
		else
			stmmac_mode_init(priv, rx_q->dma_rx,
					 rx_q->dma_rx_phy,
					 dma_conf->dma_rx_size, 0);
	}

	return 0;
}
1890
/* Initialize the RX descriptor rings of every queue; on failure, roll
 * back the buffers of all queues initialized so far (including the one
 * that failed) and return the error.
 */
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	/* Unwind from the failing queue down to queue 0 */
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}
1929
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes. Always returns 0.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Clear every descriptor and reset the per-slot bookkeeping */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		if (priv->extend_desc)
			p = &((tx_q->dma_etx + i)->basic);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			p = &((tx_q->dma_entx + i)->basic);
		else
			p = tx_q->dma_tx + i;

		stmmac_clear_desc(priv, p);

		tx_q->tx_skbuff_dma[i].buf = 0;
		tx_q->tx_skbuff_dma[i].map_as_page = false;
		tx_q->tx_skbuff_dma[i].len = 0;
		tx_q->tx_skbuff_dma[i].last_segment = false;
		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}
1985
init_dma_tx_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf)1986 static int init_dma_tx_desc_rings(struct net_device *dev,
1987 struct stmmac_dma_conf *dma_conf)
1988 {
1989 struct stmmac_priv *priv = netdev_priv(dev);
1990 u32 tx_queue_cnt;
1991 u32 queue;
1992
1993 tx_queue_cnt = priv->plat->tx_queues_to_use;
1994
1995 for (queue = 0; queue < tx_queue_cnt; queue++)
1996 __init_dma_tx_desc_rings(priv, dma_conf, queue);
1997
1998 return 0;
1999 }
2000
2001 /**
2002 * init_dma_desc_rings - init the RX/TX descriptor rings
2003 * @dev: net device structure
2004 * @dma_conf: structure to take the dma data
2005 * @flags: gfp flag.
2006 * Description: this function initializes the DMA RX/TX descriptors
2007 * and allocates the socket buffers. It supports the chained and ring
2008 * modes.
2009 */
init_dma_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf,gfp_t flags)2010 static int init_dma_desc_rings(struct net_device *dev,
2011 struct stmmac_dma_conf *dma_conf,
2012 gfp_t flags)
2013 {
2014 struct stmmac_priv *priv = netdev_priv(dev);
2015 int ret;
2016
2017 ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
2018 if (ret)
2019 return ret;
2020
2021 ret = init_dma_tx_desc_rings(dev, dma_conf);
2022
2023 stmmac_clear_descriptors(priv, dma_conf);
2024
2025 if (netif_msg_hw(priv))
2026 stmmac_display_rings(priv, dma_conf);
2027
2028 return ret;
2029 }
2030
2031 /**
2032 * dma_free_tx_skbufs - free TX dma buffers
2033 * @priv: private structure
2034 * @dma_conf: structure to take the dma data
2035 * @queue: TX queue index
2036 */
dma_free_tx_skbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2037 static void dma_free_tx_skbufs(struct stmmac_priv *priv,
2038 struct stmmac_dma_conf *dma_conf,
2039 u32 queue)
2040 {
2041 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2042 int i;
2043
2044 tx_q->xsk_frames_done = 0;
2045
2046 for (i = 0; i < dma_conf->dma_tx_size; i++)
2047 stmmac_free_tx_buffer(priv, dma_conf, queue, i);
2048
2049 if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
2050 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2051 tx_q->xsk_frames_done = 0;
2052 tx_q->xsk_pool = NULL;
2053 }
2054 }
2055
2056 /**
2057 * stmmac_free_tx_skbufs - free TX skb buffers
2058 * @priv: private structure
2059 */
stmmac_free_tx_skbufs(struct stmmac_priv * priv)2060 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
2061 {
2062 u32 tx_queue_cnt = priv->plat->tx_queues_to_use;
2063 u32 queue;
2064
2065 for (queue = 0; queue < tx_queue_cnt; queue++)
2066 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2067 }
2068
2069 /**
2070 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
2071 * @priv: private structure
2072 * @dma_conf: structure to take the dma data
2073 * @queue: RX queue index
2074 */
__free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2075 static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
2076 struct stmmac_dma_conf *dma_conf,
2077 u32 queue)
2078 {
2079 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2080
2081 /* Release the DMA RX socket buffers */
2082 if (rx_q->xsk_pool)
2083 dma_free_rx_xskbufs(priv, dma_conf, queue);
2084 else
2085 dma_free_rx_skbufs(priv, dma_conf, queue);
2086
2087 rx_q->buf_alloc_num = 0;
2088 rx_q->xsk_pool = NULL;
2089
2090 /* Free DMA regions of consistent memory previously allocated */
2091 if (!priv->extend_desc)
2092 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2093 sizeof(struct dma_desc),
2094 rx_q->dma_rx, rx_q->dma_rx_phy);
2095 else
2096 dma_free_coherent(priv->device, dma_conf->dma_rx_size *
2097 sizeof(struct dma_extended_desc),
2098 rx_q->dma_erx, rx_q->dma_rx_phy);
2099
2100 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
2101 xdp_rxq_info_unreg(&rx_q->xdp_rxq);
2102
2103 kfree(rx_q->buf_pool);
2104 if (rx_q->page_pool)
2105 page_pool_destroy(rx_q->page_pool);
2106 }
2107
free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2108 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2109 struct stmmac_dma_conf *dma_conf)
2110 {
2111 u32 rx_count = priv->plat->rx_queues_to_use;
2112 u32 queue;
2113
2114 /* Free RX queue resources */
2115 for (queue = 0; queue < rx_count; queue++)
2116 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2117 }
2118
2119 /**
2120 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2121 * @priv: private structure
2122 * @dma_conf: structure to take the dma data
2123 * @queue: TX queue index
2124 */
__free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2125 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2126 struct stmmac_dma_conf *dma_conf,
2127 u32 queue)
2128 {
2129 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2130 size_t size;
2131 void *addr;
2132
2133 /* Release the DMA TX socket buffers */
2134 dma_free_tx_skbufs(priv, dma_conf, queue);
2135
2136 if (priv->extend_desc) {
2137 size = sizeof(struct dma_extended_desc);
2138 addr = tx_q->dma_etx;
2139 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2140 size = sizeof(struct dma_edesc);
2141 addr = tx_q->dma_entx;
2142 } else {
2143 size = sizeof(struct dma_desc);
2144 addr = tx_q->dma_tx;
2145 }
2146
2147 size *= dma_conf->dma_tx_size;
2148
2149 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2150
2151 kfree(tx_q->tx_skbuff_dma);
2152 kfree(tx_q->tx_skbuff);
2153 }
2154
free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2155 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2156 struct stmmac_dma_conf *dma_conf)
2157 {
2158 u32 tx_count = priv->plat->tx_queues_to_use;
2159 u32 queue;
2160
2161 /* Free TX queue resources */
2162 for (queue = 0; queue < tx_count; queue++)
2163 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2164 }
2165
2166 /**
2167 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2168 * @priv: private structure
2169 * @dma_conf: structure to take the dma data
2170 * @queue: RX queue index
2171 * Description: according to which descriptor can be used (extend or basic)
2172 * this function allocates the resources for TX and RX paths. In case of
2173 * reception, for example, it pre-allocated the RX socket buffer in order to
2174 * allow zero-copy mechanism.
2175 */
__alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2176 static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2177 struct stmmac_dma_conf *dma_conf,
2178 u32 queue)
2179 {
2180 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
2181 struct stmmac_channel *ch = &priv->channel[queue];
2182 bool xdp_prog = stmmac_xdp_is_enabled(priv);
2183 struct page_pool_params pp_params = { 0 };
2184 unsigned int dma_buf_sz_pad, num_pages;
2185 unsigned int napi_id;
2186 int ret;
2187
2188 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
2189 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
2190 num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);
2191
2192 rx_q->queue_index = queue;
2193 rx_q->priv_data = priv;
2194 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;
2195
2196 pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
2197 pp_params.pool_size = dma_conf->dma_rx_size;
2198 pp_params.order = order_base_2(num_pages);
2199 pp_params.nid = dev_to_node(priv->device);
2200 pp_params.dev = priv->device;
2201 pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
2202 pp_params.offset = stmmac_rx_offset(priv);
2203 pp_params.max_len = dma_conf->dma_buf_sz;
2204
2205 if (priv->sph_active) {
2206 pp_params.offset = 0;
2207 pp_params.max_len += stmmac_rx_offset(priv);
2208 }
2209
2210 rx_q->page_pool = page_pool_create(&pp_params);
2211 if (IS_ERR(rx_q->page_pool)) {
2212 ret = PTR_ERR(rx_q->page_pool);
2213 rx_q->page_pool = NULL;
2214 return ret;
2215 }
2216
2217 rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size);
2218 if (!rx_q->buf_pool)
2219 return -ENOMEM;
2220
2221 if (priv->extend_desc) {
2222 rx_q->dma_erx = dma_alloc_coherent(priv->device,
2223 dma_conf->dma_rx_size *
2224 sizeof(struct dma_extended_desc),
2225 &rx_q->dma_rx_phy,
2226 GFP_KERNEL);
2227 if (!rx_q->dma_erx)
2228 return -ENOMEM;
2229
2230 } else {
2231 rx_q->dma_rx = dma_alloc_coherent(priv->device,
2232 dma_conf->dma_rx_size *
2233 sizeof(struct dma_desc),
2234 &rx_q->dma_rx_phy,
2235 GFP_KERNEL);
2236 if (!rx_q->dma_rx)
2237 return -ENOMEM;
2238 }
2239
2240 if (stmmac_xdp_is_enabled(priv) &&
2241 test_bit(queue, priv->af_xdp_zc_qps))
2242 napi_id = ch->rxtx_napi.napi_id;
2243 else
2244 napi_id = ch->rx_napi.napi_id;
2245
2246 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev,
2247 rx_q->queue_index,
2248 napi_id);
2249 if (ret) {
2250 netdev_err(priv->dev, "Failed to register xdp rxq info\n");
2251 return -EINVAL;
2252 }
2253
2254 return 0;
2255 }
2256
alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2257 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2258 struct stmmac_dma_conf *dma_conf)
2259 {
2260 u32 rx_count = priv->plat->rx_queues_to_use;
2261 u32 queue;
2262 int ret;
2263
2264 /* RX queues buffers and DMA */
2265 for (queue = 0; queue < rx_count; queue++) {
2266 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2267 if (ret)
2268 goto err_dma;
2269 }
2270
2271 return 0;
2272
2273 err_dma:
2274 free_dma_rx_desc_resources(priv, dma_conf);
2275
2276 return ret;
2277 }
2278
2279 /**
2280 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2281 * @priv: private structure
2282 * @dma_conf: structure to take the dma data
2283 * @queue: TX queue index
2284 * Description: according to which descriptor can be used (extend or basic)
2285 * this function allocates the resources for TX and RX paths. In case of
2286 * reception, for example, it pre-allocated the RX socket buffer in order to
2287 * allow zero-copy mechanism.
2288 */
__alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2289 static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2290 struct stmmac_dma_conf *dma_conf,
2291 u32 queue)
2292 {
2293 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2294 size_t size;
2295 void *addr;
2296
2297 tx_q->queue_index = queue;
2298 tx_q->priv_data = priv;
2299
2300 tx_q->tx_skbuff_dma = kzalloc_objs(*tx_q->tx_skbuff_dma,
2301 dma_conf->dma_tx_size);
2302 if (!tx_q->tx_skbuff_dma)
2303 return -ENOMEM;
2304
2305 tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size);
2306 if (!tx_q->tx_skbuff)
2307 return -ENOMEM;
2308
2309 if (priv->extend_desc)
2310 size = sizeof(struct dma_extended_desc);
2311 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2312 size = sizeof(struct dma_edesc);
2313 else
2314 size = sizeof(struct dma_desc);
2315
2316 size *= dma_conf->dma_tx_size;
2317
2318 addr = dma_alloc_coherent(priv->device, size,
2319 &tx_q->dma_tx_phy, GFP_KERNEL);
2320 if (!addr)
2321 return -ENOMEM;
2322
2323 if (priv->extend_desc)
2324 tx_q->dma_etx = addr;
2325 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2326 tx_q->dma_entx = addr;
2327 else
2328 tx_q->dma_tx = addr;
2329
2330 return 0;
2331 }
2332
alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2333 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2334 struct stmmac_dma_conf *dma_conf)
2335 {
2336 u32 tx_count = priv->plat->tx_queues_to_use;
2337 u32 queue;
2338 int ret;
2339
2340 /* TX queues buffers and DMA */
2341 for (queue = 0; queue < tx_count; queue++) {
2342 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2343 if (ret)
2344 goto err_dma;
2345 }
2346
2347 return 0;
2348
2349 err_dma:
2350 free_dma_tx_desc_resources(priv, dma_conf);
2351 return ret;
2352 }
2353
2354 /**
2355 * alloc_dma_desc_resources - alloc TX/RX resources.
2356 * @priv: private structure
2357 * @dma_conf: structure to take the dma data
2358 * Description: according to which descriptor can be used (extend or basic)
2359 * this function allocates the resources for TX and RX paths. In case of
2360 * reception, for example, it pre-allocated the RX socket buffer in order to
2361 * allow zero-copy mechanism.
2362 */
alloc_dma_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2363 static int alloc_dma_desc_resources(struct stmmac_priv *priv,
2364 struct stmmac_dma_conf *dma_conf)
2365 {
2366 /* RX Allocation */
2367 int ret = alloc_dma_rx_desc_resources(priv, dma_conf);
2368
2369 if (ret)
2370 return ret;
2371
2372 ret = alloc_dma_tx_desc_resources(priv, dma_conf);
2373
2374 return ret;
2375 }
2376
2377 /**
2378 * free_dma_desc_resources - free dma desc resources
2379 * @priv: private structure
2380 * @dma_conf: structure to take the dma data
2381 */
free_dma_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2382 static void free_dma_desc_resources(struct stmmac_priv *priv,
2383 struct stmmac_dma_conf *dma_conf)
2384 {
2385 /* Release the DMA TX socket buffers */
2386 free_dma_tx_desc_resources(priv, dma_conf);
2387
2388 /* Release the DMA RX socket buffers later
2389 * to ensure all pending XDP_TX buffers are returned.
2390 */
2391 free_dma_rx_desc_resources(priv, dma_conf);
2392 }
2393
2394 /**
2395 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2396 * @priv: driver private structure
2397 * Description: It is used for enabling the rx queues in the MAC
2398 */
stmmac_mac_enable_rx_queues(struct stmmac_priv * priv)2399 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2400 {
2401 u32 rx_queues_count = priv->plat->rx_queues_to_use;
2402 int queue;
2403 u8 mode;
2404
2405 for (queue = 0; queue < rx_queues_count; queue++) {
2406 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2407 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2408 }
2409 }
2410
2411 /**
2412 * stmmac_start_rx_dma - start RX DMA channel
2413 * @priv: driver private structure
2414 * @chan: RX channel index
2415 * Description:
2416 * This starts a RX DMA channel
2417 */
stmmac_start_rx_dma(struct stmmac_priv * priv,u32 chan)2418 static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
2419 {
2420 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
2421 stmmac_start_rx(priv, priv->ioaddr, chan);
2422 }
2423
2424 /**
2425 * stmmac_start_tx_dma - start TX DMA channel
2426 * @priv: driver private structure
2427 * @chan: TX channel index
2428 * Description:
2429 * This starts a TX DMA channel
2430 */
stmmac_start_tx_dma(struct stmmac_priv * priv,u32 chan)2431 static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
2432 {
2433 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
2434 stmmac_start_tx(priv, priv->ioaddr, chan);
2435 }
2436
2437 /**
2438 * stmmac_stop_rx_dma - stop RX DMA channel
2439 * @priv: driver private structure
2440 * @chan: RX channel index
2441 * Description:
2442 * This stops a RX DMA channel
2443 */
stmmac_stop_rx_dma(struct stmmac_priv * priv,u32 chan)2444 static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
2445 {
2446 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
2447 stmmac_stop_rx(priv, priv->ioaddr, chan);
2448 }
2449
2450 /**
2451 * stmmac_stop_tx_dma - stop TX DMA channel
2452 * @priv: driver private structure
2453 * @chan: TX channel index
2454 * Description:
2455 * This stops a TX DMA channel
2456 */
stmmac_stop_tx_dma(struct stmmac_priv * priv,u32 chan)2457 static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
2458 {
2459 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
2460 stmmac_stop_tx(priv, priv->ioaddr, chan);
2461 }
2462
stmmac_enable_all_dma_irq(struct stmmac_priv * priv)2463 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2464 {
2465 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2466 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2467 u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2468 u32 chan;
2469
2470 for (chan = 0; chan < dma_csr_ch; chan++) {
2471 struct stmmac_channel *ch = &priv->channel[chan];
2472 unsigned long flags;
2473
2474 spin_lock_irqsave(&ch->lock, flags);
2475 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2476 spin_unlock_irqrestore(&ch->lock, flags);
2477 }
2478 }
2479
2480 /**
2481 * stmmac_start_all_dma - start all RX and TX DMA channels
2482 * @priv: driver private structure
2483 * Description:
2484 * This starts all the RX and TX DMA channels
2485 */
stmmac_start_all_dma(struct stmmac_priv * priv)2486 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2487 {
2488 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2489 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2490 u32 chan = 0;
2491
2492 for (chan = 0; chan < rx_channels_count; chan++)
2493 stmmac_start_rx_dma(priv, chan);
2494
2495 for (chan = 0; chan < tx_channels_count; chan++)
2496 stmmac_start_tx_dma(priv, chan);
2497 }
2498
2499 /**
2500 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2501 * @priv: driver private structure
2502 * Description:
2503 * This stops the RX and TX DMA channels
2504 */
stmmac_stop_all_dma(struct stmmac_priv * priv)2505 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2506 {
2507 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2508 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2509 u32 chan = 0;
2510
2511 for (chan = 0; chan < rx_channels_count; chan++)
2512 stmmac_stop_rx_dma(priv, chan);
2513
2514 for (chan = 0; chan < tx_channels_count; chan++)
2515 stmmac_stop_tx_dma(priv, chan);
2516 }
2517
2518 /**
2519 * stmmac_dma_operation_mode - HW DMA operation mode
2520 * @priv: driver private structure
2521 * Description: it is used for configuring the DMA operation mode register in
2522 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2523 */
stmmac_dma_operation_mode(struct stmmac_priv * priv)2524 static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
2525 {
2526 u32 rx_channels_count = priv->plat->rx_queues_to_use;
2527 u32 tx_channels_count = priv->plat->tx_queues_to_use;
2528 int rxfifosz = priv->plat->rx_fifo_size;
2529 int txfifosz = priv->plat->tx_fifo_size;
2530 u32 txmode = 0;
2531 u32 rxmode = 0;
2532 u32 chan = 0;
2533 u8 qmode = 0;
2534
2535 if (rxfifosz == 0)
2536 rxfifosz = priv->dma_cap.rx_fifo_size;
2537 if (txfifosz == 0)
2538 txfifosz = priv->dma_cap.tx_fifo_size;
2539
2540 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
2541 if (dwmac_is_xmac(priv->plat->core_type)) {
2542 rxfifosz /= rx_channels_count;
2543 txfifosz /= tx_channels_count;
2544 }
2545
2546 if (priv->plat->force_thresh_dma_mode) {
2547 txmode = tc;
2548 rxmode = tc;
2549 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
2550 /*
2551 * In case of GMAC, SF mode can be enabled
2552 * to perform the TX COE in HW. This depends on:
2553 * 1) TX COE if actually supported
2554 * 2) There is no bugged Jumbo frame support
2555 * that needs to not insert csum in the TDES.
2556 */
2557 txmode = SF_DMA_MODE;
2558 rxmode = SF_DMA_MODE;
2559 priv->xstats.threshold = SF_DMA_MODE;
2560 } else {
2561 txmode = tc;
2562 rxmode = SF_DMA_MODE;
2563 }
2564
2565 /* configure all channels */
2566 for (chan = 0; chan < rx_channels_count; chan++) {
2567 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
2568 u32 buf_size;
2569
2570 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
2571
2572 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
2573 rxfifosz, qmode);
2574
2575 if (rx_q->xsk_pool) {
2576 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
2577 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2578 buf_size,
2579 chan);
2580 } else {
2581 stmmac_set_dma_bfsize(priv, priv->ioaddr,
2582 priv->dma_conf.dma_buf_sz,
2583 chan);
2584 }
2585 }
2586
2587 for (chan = 0; chan < tx_channels_count; chan++) {
2588 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
2589
2590 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
2591 txfifosz, qmode);
2592 }
2593 }
2594
stmmac_xsk_request_timestamp(void * _priv)2595 static void stmmac_xsk_request_timestamp(void *_priv)
2596 {
2597 struct stmmac_metadata_request *meta_req = _priv;
2598
2599 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2600 *meta_req->set_ic = true;
2601 }
2602
stmmac_xsk_fill_timestamp(void * _priv)2603 static u64 stmmac_xsk_fill_timestamp(void *_priv)
2604 {
2605 struct stmmac_xsk_tx_complete *tx_compl = _priv;
2606 struct stmmac_priv *priv = tx_compl->priv;
2607 struct dma_desc *desc = tx_compl->desc;
2608 bool found = false;
2609 u64 ns = 0;
2610
2611 if (!priv->hwts_tx_en)
2612 return 0;
2613
2614 /* check tx tstamp status */
2615 if (stmmac_get_tx_timestamp_status(priv, desc)) {
2616 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
2617 found = true;
2618 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
2619 found = true;
2620 }
2621
2622 if (found) {
2623 ns -= priv->plat->cdc_error_adj;
2624 return ns_to_ktime(ns);
2625 }
2626
2627 return 0;
2628 }
2629
/* AF_XDP metadata hook: program the descriptor's TBS launch time when
 * time-based scheduling is enabled on this queue.
 */
static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
{
	struct stmmac_metadata_request *req = _priv;
	struct timespec64 ts = ns_to_timespec64(launch_time);

	if (!(req->tbs & STMMAC_TBS_EN))
		return;

	stmmac_set_desc_tbs(req->priv, req->edesc, ts.tv_sec, ts.tv_nsec);
}
2639
/* AF_XDP TX metadata callbacks: HW timestamp request/readback and
 * per-packet launch-time (TBS) programming.
 */
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
	.tmo_request_timestamp = stmmac_xsk_request_timestamp,
	.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
	.tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
2645
/**
 * stmmac_xdp_xmit_zc - transmit AF_XDP zero-copy frames on a TX queue
 * @priv: driver private structure
 * @queue: TX queue index (must be bound to an XSK pool)
 * @budget: maximum number of descriptors to submit
 * Description: peeks descriptors from the queue's XSK pool and places them
 * on the TX ring, sharing the ring with the slow (skb) path.
 * Return: true when the budget was not exhausted AND the pool ran dry
 * (nothing left to send); false when more work remains.
 */
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc = NULL;
	struct xdp_desc xdp_desc;
	bool work_done = true;
	u32 tx_set_ic_bit = 0;

	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	budget = min(budget, stmmac_tx_avail(priv, queue));

	for (; budget > 0; budget--) {
		struct stmmac_metadata_request meta_req;
		struct xsk_tx_metadata *meta = NULL;
		dma_addr_t dma_addr;
		bool set_ic;

		/* We are sharing with slow path and stop XSK TX desc submission when
		 * available TX ring is less than threshold.
		 */
		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
		    !netif_carrier_ok(priv->dev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		/* Enforce the EST per-queue max SDU, dropping oversized frames */
		if (priv->est && priv->est->enable &&
		    priv->est->max_sdu[queue] &&
		    xdp_desc.len > priv->est->max_sdu[queue]) {
			priv->xstats.max_sdu_txq_drop[queue]++;
			continue;
		}

		/* Pick the descriptor matching this queue's ring layout */
		if (likely(priv->extend_desc))
			tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
		else if (tx_q->tbs & STMMAC_TBS_AVAIL)
			tx_desc = &tx_q->dma_entx[entry].basic;
		else
			tx_desc = tx_q->dma_tx + entry;

		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX;

		/* To return XDP buffer to XSK pool, we simple call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
		tx_q->tx_skbuff_dma[entry].buf = 0;
		tx_q->xdpf[entry] = NULL;

		tx_q->tx_skbuff_dma[entry].map_as_page = false;
		tx_q->tx_skbuff_dma[entry].len = xdp_desc.len;
		tx_q->tx_skbuff_dma[entry].last_segment = true;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		stmmac_set_desc_addr(priv, tx_desc, dma_addr);

		tx_q->tx_count_frames++;

		/* Interrupt coalescing: raise IC only every tx_coal_frames
		 * packets (metadata request below may also force it).
		 */
		if (!priv->tx_coal_frames[queue])
			set_ic = false;
		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
			set_ic = true;
		else
			set_ic = false;

		meta_req.priv = priv;
		meta_req.tx_desc = tx_desc;
		meta_req.set_ic = &set_ic;
		meta_req.tbs = tx_q->tbs;
		meta_req.edesc = &tx_q->dma_entx[entry];
		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
					&meta_req);
		if (set_ic) {
			tx_q->tx_count_frames = 0;
			stmmac_set_tx_ic(priv, tx_desc);
			tx_set_ic_bit++;
		}

		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
				       csum, priv->mode, true, true,
				       xdp_desc.len);

		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

		xsk_tx_metadata_to_compl(meta,
					 &tx_q->tx_skbuff_dma[entry].xsk_meta);

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
		entry = tx_q->cur_tx;
	}
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->napi_syncp);

	/* tx_desc != NULL means at least one descriptor was queued */
	if (tx_desc) {
		stmmac_flush_tx_descriptors(priv, queue);
		xsk_tx_release(pool);
	}

	/* Return true only if both conditions are met:
	 * a) TX budget is still available, and
	 * b) work_done == true, i.e. the XSK TX desc peek ran empty
	 *    (no more pending XSK TX for transmission).
	 */
	return !!budget && work_done;
}
2766
/* Raise the module-wide DMA threshold 'tc' by 64 (capped at 256) after a
 * threshold-related TX error, and reprogram the channel's operation mode.
 * No-op while running in Store-and-Forward mode.
 */
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	if (likely(priv->xstats.threshold == SF_DMA_MODE) || tc > 256)
		return;

	tc += 64;

	if (priv->plat->force_thresh_dma_mode)
		stmmac_set_dma_operation_mode(priv, tc, tc, chan);
	else
		stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
					      chan);

	priv->xstats.threshold = tc;
}
2781
2782 /**
2783 * stmmac_tx_clean - to manage the transmission completion
2784 * @priv: driver private structure
2785 * @budget: napi budget limiting this functions packet handling
2786 * @queue: TX queue index
2787 * @pending_packets: signal to arm the TX coal timer
2788 * Description: it reclaims the transmit resources after transmission completes.
2789 * If some packets still needs to be handled, due to TX coalesce, set
2790 * pending_packets to true to make NAPI arm the TX coal timer.
2791 */
stmmac_tx_clean(struct stmmac_priv * priv,int budget,u32 queue,bool * pending_packets)2792 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
2793 bool *pending_packets)
2794 {
2795 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
2796 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
2797 unsigned int bytes_compl = 0, pkts_compl = 0;
2798 unsigned int entry, xmits = 0, count = 0;
2799 u32 tx_packets = 0, tx_errors = 0;
2800
2801 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));
2802
2803 tx_q->xsk_frames_done = 0;
2804
2805 entry = tx_q->dirty_tx;
2806
2807 /* Try to clean all TX complete frame in 1 shot */
2808 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
2809 struct xdp_frame *xdpf;
2810 struct sk_buff *skb;
2811 struct dma_desc *p;
2812 int status;
2813
2814 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
2815 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2816 xdpf = tx_q->xdpf[entry];
2817 skb = NULL;
2818 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2819 xdpf = NULL;
2820 skb = tx_q->tx_skbuff[entry];
2821 } else {
2822 xdpf = NULL;
2823 skb = NULL;
2824 }
2825
2826 if (priv->extend_desc)
2827 p = (struct dma_desc *)(tx_q->dma_etx + entry);
2828 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
2829 p = &tx_q->dma_entx[entry].basic;
2830 else
2831 p = tx_q->dma_tx + entry;
2832
2833 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
2834 /* Check if the descriptor is owned by the DMA */
2835 if (unlikely(status & tx_dma_own))
2836 break;
2837
2838 count++;
2839
2840 /* Make sure descriptor fields are read after reading
2841 * the own bit.
2842 */
2843 dma_rmb();
2844
2845 /* Just consider the last segment and ...*/
2846 if (likely(!(status & tx_not_ls))) {
2847 /* ... verify the status error condition */
2848 if (unlikely(status & tx_err)) {
2849 tx_errors++;
2850 if (unlikely(status & tx_err_bump_tc))
2851 stmmac_bump_dma_threshold(priv, queue);
2852 } else {
2853 tx_packets++;
2854 }
2855 if (skb) {
2856 stmmac_get_tx_hwtstamp(priv, p, skb);
2857 } else if (tx_q->xsk_pool &&
2858 xp_tx_metadata_enabled(tx_q->xsk_pool)) {
2859 struct stmmac_xsk_tx_complete tx_compl = {
2860 .priv = priv,
2861 .desc = p,
2862 };
2863
2864 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
2865 &stmmac_xsk_tx_metadata_ops,
2866 &tx_compl);
2867 }
2868 }
2869
2870 if (likely(tx_q->tx_skbuff_dma[entry].buf &&
2871 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
2872 if (tx_q->tx_skbuff_dma[entry].map_as_page)
2873 dma_unmap_page(priv->device,
2874 tx_q->tx_skbuff_dma[entry].buf,
2875 tx_q->tx_skbuff_dma[entry].len,
2876 DMA_TO_DEVICE);
2877 else
2878 dma_unmap_single(priv->device,
2879 tx_q->tx_skbuff_dma[entry].buf,
2880 tx_q->tx_skbuff_dma[entry].len,
2881 DMA_TO_DEVICE);
2882 tx_q->tx_skbuff_dma[entry].buf = 0;
2883 tx_q->tx_skbuff_dma[entry].len = 0;
2884 tx_q->tx_skbuff_dma[entry].map_as_page = false;
2885 }
2886
2887 stmmac_clean_desc3(priv, tx_q, p);
2888
2889 tx_q->tx_skbuff_dma[entry].last_segment = false;
2890 tx_q->tx_skbuff_dma[entry].is_jumbo = false;
2891
2892 if (xdpf &&
2893 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
2894 xdp_return_frame_rx_napi(xdpf);
2895 tx_q->xdpf[entry] = NULL;
2896 }
2897
2898 if (xdpf &&
2899 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
2900 xdp_return_frame(xdpf);
2901 tx_q->xdpf[entry] = NULL;
2902 }
2903
2904 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
2905 tx_q->xsk_frames_done++;
2906
2907 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
2908 if (likely(skb)) {
2909 pkts_compl++;
2910 bytes_compl += skb->len;
2911 dev_consume_skb_any(skb);
2912 tx_q->tx_skbuff[entry] = NULL;
2913 }
2914 }
2915
2916 stmmac_release_tx_desc(priv, p, priv->mode);
2917
2918 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
2919 }
2920 tx_q->dirty_tx = entry;
2921
2922 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
2923 pkts_compl, bytes_compl);
2924
2925 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
2926 queue))) &&
2927 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
2928
2929 netif_dbg(priv, tx_done, priv->dev,
2930 "%s: restart transmit\n", __func__);
2931 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
2932 }
2933
2934 if (tx_q->xsk_pool) {
2935 bool work_done;
2936
2937 if (tx_q->xsk_frames_done)
2938 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
2939
2940 if (xsk_uses_need_wakeup(tx_q->xsk_pool))
2941 xsk_set_tx_need_wakeup(tx_q->xsk_pool);
2942
2943 /* For XSK TX, we try to send as many as possible.
2944 * If XSK work done (XSK TX desc empty and budget still
2945 * available), return "budget - 1" to reenable TX IRQ.
2946 * Else, return "budget" to make NAPI continue polling.
2947 */
2948 work_done = stmmac_xdp_xmit_zc(priv, queue,
2949 STMMAC_XSK_TX_BUDGET_MAX);
2950 if (work_done)
2951 xmits = budget - 1;
2952 else
2953 xmits = budget;
2954 }
2955
2956 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
2957 stmmac_restart_sw_lpi_timer(priv);
2958
2959 /* We still have pending packets, let's call for a new scheduling */
2960 if (tx_q->dirty_tx != tx_q->cur_tx)
2961 *pending_packets = true;
2962
2963 u64_stats_update_begin(&txq_stats->napi_syncp);
2964 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
2965 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
2966 u64_stats_inc(&txq_stats->napi.tx_clean);
2967 u64_stats_update_end(&txq_stats->napi_syncp);
2968
2969 priv->xstats.tx_errors += tx_errors;
2970
2971 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));
2972
2973 /* Combine decisions from TX clean and XSK TX */
2974 return max(count, xmits);
2975 }
2976
2977 /**
2978 * stmmac_tx_err - to manage the tx error
2979 * @priv: driver private structure
2980 * @chan: channel index
2981 * Description: it cleans the descriptors and restarts the transmission
2982 * in case of transmission errors.
2983 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

	/* Quiesce the queue first so no new frames are submitted while the
	 * DMA channel is being torn down.
	 */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	stmmac_stop_tx_dma(priv, chan);
	/* Drop all in-flight buffers and re-initialize the descriptor ring */
	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
	stmmac_reset_tx_queue(priv, chan);
	/* Re-program the channel with the ring base address and restart it */
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->xstats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
3001
3002 /**
3003 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
3004 * @priv: driver private structure
3005 * @txmode: TX operating mode
3006 * @rxmode: RX operating mode
3007 * @chan: channel index
3008 * Description: it is used for configuring of the DMA operation mode in
3009 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
3010 * mode.
3011 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;

	/* When the platform does not provide FIFO sizes, fall back to the
	 * sizes reported by the HW capability register.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Adjust for real per queue fifo size */
	rxfifosz /= rx_channels_count;
	txfifosz /= tx_channels_count;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode);
}
3034
stmmac_safety_feat_interrupt(struct stmmac_priv * priv)3035 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
3036 {
3037 int ret;
3038
3039 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
3040 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
3041 if (ret && (ret != -EINVAL)) {
3042 stmmac_global_err(priv);
3043 return true;
3044 }
3045
3046 return false;
3047 }
3048
/* Read the DMA interrupt status for @chan and schedule the matching napi
 * context(s). The per-channel DMA irq is masked under ch->lock before
 * scheduling; presumably it is re-enabled by the poll routine — verify
 * against the napi handlers.
 */
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan, dir);
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;

	/* XSK-enabled queues are serviced by the combined rxtx napi */
	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(rx_napi)) {
			/* Mask only the RX DMA irq for this channel */
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(tx_napi)) {
			/* Mask only the TX DMA irq for this channel */
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(tx_napi);
		}
	}

	return status;
}
3083
3084 /**
3085 * stmmac_dma_interrupt - DMA ISR
3086 * @priv: driver private structure
3087 * Description: this is the DMA ISR. It is called by the main ISR.
3088 * It calls the dwmac dma routine and schedule poll method in case of some
3089 * work can be done.
3090 */
static void stmmac_dma_interrupt(struct stmmac_priv *priv)
{
	u32 tx_channel_count = priv->plat->tx_queues_to_use;
	u32 rx_channel_count = priv->plat->rx_queues_to_use;
	u32 channels_to_check = tx_channel_count > rx_channel_count ?
				tx_channel_count : rx_channel_count;
	u32 chan;
	int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];

	/* Make sure we never check beyond our status buffer. */
	if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
		channels_to_check = ARRAY_SIZE(status);

	/* Collect the per-channel status and schedule napi where needed */
	for (chan = 0; chan < channels_to_check; chan++)
		status[chan] = stmmac_napi_check(priv, chan,
						 DMA_DIR_RXTX);

	/* Handle TX error conditions outside of the napi path.
	 * Note: the second test deliberately uses '==' — the recovery path
	 * only runs when tx_hard_error is the sole status bit set.
	 */
	for (chan = 0; chan < tx_channel_count; chan++) {
		if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
			/* Try to bump up the dma threshold on this failure */
			stmmac_bump_dma_threshold(priv, chan);
		} else if (unlikely(status[chan] == tx_hard_error)) {
			stmmac_tx_err(priv, chan);
		}
	}
}
3117
3118 /**
 * stmmac_mmc_setup - setup the Mac Management Counters (MMC)
3120 * @priv: driver private structure
3121 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3122 */
stmmac_mmc_setup(struct stmmac_priv * priv)3123 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3124 {
3125 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3126 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3127
3128 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3129
3130 if (priv->dma_cap.rmon) {
3131 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3132 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3133 } else
3134 netdev_info(priv->dev, "No MAC Management Counters available\n");
3135 }
3136
3137 /**
3138 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3139 * @priv: driver private structure
3140 * Description:
3141 * new GMAC chip generations have a new register to indicate the
3142 * presence of the optional feature/functions.
3143 * This can be also used to override the value passed through the
3144 * platform and necessary for old MAC10/100 and GMAC chips.
3145 */
stmmac_get_hw_features(struct stmmac_priv * priv)3146 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3147 {
3148 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3149 }
3150
3151 /**
3152 * stmmac_check_ether_addr - check if the MAC addr is valid
3153 * @priv: driver private structure
3154 * Description:
3155 * it is to verify if the MAC address is valid, in case of failures it
3156 * generates a random MAC address
3157 */
stmmac_check_ether_addr(struct stmmac_priv * priv)3158 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3159 {
3160 u8 addr[ETH_ALEN];
3161
3162 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3163 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3164 if (is_valid_ether_addr(addr))
3165 eth_hw_addr_set(priv->dev, addr);
3166 else
3167 eth_hw_addr_random(priv->dev);
3168 dev_info(priv->device, "device MAC address %pM\n",
3169 priv->dev->dev_addr);
3170 }
3171 }
3172
/* Map a phylink interface mode onto the PHY_INTF_SEL register encoding.
 * Returns the selector value, or -EINVAL for unsupported modes.
 */
int stmmac_get_phy_intf_sel(phy_interface_t interface)
{
	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		return PHY_INTF_SEL_GMII_MII;
	case PHY_INTERFACE_MODE_RMII:
		return PHY_INTF_SEL_RMII;
	case PHY_INTERFACE_MODE_REVMII:
		return PHY_INTF_SEL_REVMII;
	default:
		/* All RGMII variants (with/without internal delays) */
		if (phy_interface_mode_is_rgmii(interface))
			return PHY_INTF_SEL_RGMII;
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
3190
/* Program the platform's PHY interface selection (phy_intf_sel) before the
 * DMA reset. When the configured mode is handled by the integrated PCS, the
 * selector is requested from the PCS instead of the generic mapping.
 */
static int stmmac_prereset_configure(struct stmmac_priv *priv)
{
	struct plat_stmmacenet_data *plat_dat = priv->plat;
	phy_interface_t interface;
	struct phylink_pcs *pcs;
	int phy_intf_sel, ret;

	/* Nothing to do when the platform cannot program the selection */
	if (!plat_dat->set_phy_intf_sel)
		return 0;

	interface = plat_dat->phy_interface;

	/* Check whether this mode uses a PCS */
	pcs = stmmac_mac_select_pcs(&priv->phylink_config, interface);
	if (priv->integrated_pcs && pcs == &priv->integrated_pcs->pcs) {
		/* Request the phy_intf_sel from the integrated PCS */
		phy_intf_sel = stmmac_integrated_pcs_get_phy_intf_sel(pcs,
								      interface);
	} else {
		phy_intf_sel = stmmac_get_phy_intf_sel(interface);
	}

	if (phy_intf_sel < 0) {
		netdev_err(priv->dev,
			   "failed to get phy_intf_sel for %s: %pe\n",
			   phy_modes(interface), ERR_PTR(phy_intf_sel));
		return phy_intf_sel;
	}

	ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
	/* -EINVAL from the platform means the mode itself is unsupported;
	 * any other negative value is a programming failure.
	 */
	if (ret == -EINVAL)
		netdev_err(priv->dev, "platform does not support %s\n",
			   phy_modes(interface));
	else if (ret < 0)
		netdev_err(priv->dev,
			   "platform failed to set interface %s: %pe\n",
			   phy_modes(interface), ERR_PTR(ret));

	return ret;
}
3231
3232 /**
3233 * stmmac_init_dma_engine - DMA init.
3234 * @priv: driver private structure
3235 * Description:
3236 * It inits the DMA invoking the specific MAC/GMAC callback.
3237 * Some DMA parameters can be passed from the platform;
3238 * in case of these are not passed a default is kept for the MAC or GMAC.
3239 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u32 rx_channels_count = priv->plat->rx_queues_to_use;
	u32 tx_channels_count = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 chan = 0;
	int ret = 0;

	if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) {
		netdev_err(priv->dev, "Invalid DMA configuration\n");
		return -EINVAL;
	}

	/* Extended descriptors in ring mode require ATDS to be set */
	if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE))
		priv->plat->dma_cfg->atds = 1;

	/* Program the PHY interface selection before the SW reset */
	ret = stmmac_prereset_configure(priv);
	if (ret)
		return ret;

	ret = stmmac_reset(priv);
	if (ret) {
		netdev_err(priv->dev, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration; per-channel irqs stay masked here */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer sits just past the pre-allocated buffers */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		/* TX ring starts empty: tail pointer equals the ring base */
		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);
	}

	return ret;
}
3308
/* (Re)arm the TX coalescing timer for @queue, unless napi is already
 * scheduled — in that case the timer is cancelled and will be re-armed by
 * the next scheduled napi run.
 */
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 coal_timer = priv->tx_coal_timer[queue];
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	/* A zero timeout means timer-based coalescing is disabled */
	if (!coal_timer)
		return;

	ch = &priv->channel[tx_q->queue_index];
	if (tx_q->xsk_pool)
		napi = &ch->rxtx_napi;
	else
		napi = &ch->tx_napi;

	if (napi_is_scheduled(napi))
		hrtimer_try_to_cancel(&tx_q->txtimer);
	else
		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(coal_timer),
			      HRTIMER_MODE_REL);
}
3333
3334 /**
3335 * stmmac_tx_timer - mitigation sw timer for tx.
 * @t: pointer to the expired hrtimer (embedded in the TX queue)
3337 * Description:
3338 * This is the timer handler to directly invoke the stmmac_tx_clean.
3339 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	/* XSK-enabled queues are serviced by the combined rxtx napi */
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		/* Mask the TX DMA irq for this channel before handing the
		 * work over to napi.
		 */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	/* One-shot timer: re-armed via stmmac_tx_timer_arm() as needed */
	return HRTIMER_NORESTART;
}
3361
3362 /**
3363 * stmmac_init_coalesce - init mitigation options.
3364 * @priv: driver private structure
3365 * Description:
3366 * This inits the coalesce parameters: i.e. timer rate,
3367 * timer handler and default threshold used for enabling the
3368 * interrupt on completion bit.
3369 */
stmmac_init_coalesce(struct stmmac_priv * priv)3370 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3371 {
3372 u32 tx_channel_count = priv->plat->tx_queues_to_use;
3373 u32 rx_channel_count = priv->plat->rx_queues_to_use;
3374 u32 chan;
3375
3376 for (chan = 0; chan < tx_channel_count; chan++) {
3377 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3378
3379 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3380 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3381
3382 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3383 }
3384
3385 for (chan = 0; chan < rx_channel_count; chan++)
3386 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3387 }
3388
stmmac_set_rings_length(struct stmmac_priv * priv)3389 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3390 {
3391 u32 rx_channels_count = priv->plat->rx_queues_to_use;
3392 u32 tx_channels_count = priv->plat->tx_queues_to_use;
3393 u32 chan;
3394
3395 /* set TX ring length */
3396 for (chan = 0; chan < tx_channels_count; chan++)
3397 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3398 (priv->dma_conf.dma_tx_size - 1), chan);
3399
3400 /* set RX ring length */
3401 for (chan = 0; chan < rx_channels_count; chan++)
3402 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3403 (priv->dma_conf.dma_rx_size - 1), chan);
3404 }
3405
3406 /**
3407 * stmmac_set_tx_queue_weight - Set TX queue weight
3408 * @priv: driver private structure
3409 * Description: It is used for setting TX queues weight
3410 */
stmmac_set_tx_queue_weight(struct stmmac_priv * priv)3411 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3412 {
3413 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3414 u32 weight;
3415 u32 queue;
3416
3417 for (queue = 0; queue < tx_queues_count; queue++) {
3418 weight = priv->plat->tx_queues_cfg[queue].weight;
3419 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3420 }
3421 }
3422
3423 /**
3424 * stmmac_configure_cbs - Configure CBS in TX queue
3425 * @priv: driver private structure
3426 * Description: It is used for configuring CBS in AVB TX queues
3427 */
stmmac_configure_cbs(struct stmmac_priv * priv)3428 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3429 {
3430 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3431 u32 mode_to_use;
3432 u32 queue;
3433
3434 /* queue 0 is reserved for legacy traffic */
3435 for (queue = 1; queue < tx_queues_count; queue++) {
3436 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3437 if (mode_to_use == MTL_QUEUE_DCB)
3438 continue;
3439
3440 stmmac_config_cbs(priv, priv->hw,
3441 priv->plat->tx_queues_cfg[queue].send_slope,
3442 priv->plat->tx_queues_cfg[queue].idle_slope,
3443 priv->plat->tx_queues_cfg[queue].high_credit,
3444 priv->plat->tx_queues_cfg[queue].low_credit,
3445 queue);
3446 }
3447 }
3448
3449 /**
3450 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3451 * @priv: driver private structure
3452 * Description: It is used for mapping RX queues to RX dma channels
3453 */
stmmac_rx_queue_dma_chan_map(struct stmmac_priv * priv)3454 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3455 {
3456 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3457 u32 queue;
3458 u32 chan;
3459
3460 for (queue = 0; queue < rx_queues_count; queue++) {
3461 chan = priv->plat->rx_queues_cfg[queue].chan;
3462 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3463 }
3464 }
3465
3466 /**
3467 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3468 * @priv: driver private structure
3469 * Description: It is used for configuring the RX Queue Priority
3470 */
stmmac_mac_config_rx_queues_prio(struct stmmac_priv * priv)3471 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3472 {
3473 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3474 u32 queue;
3475 u32 prio;
3476
3477 for (queue = 0; queue < rx_queues_count; queue++) {
3478 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3479 continue;
3480
3481 prio = priv->plat->rx_queues_cfg[queue].prio;
3482 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3483 }
3484 }
3485
3486 /**
3487 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3488 * @priv: driver private structure
3489 * Description: It is used for configuring the TX Queue Priority
3490 */
stmmac_mac_config_tx_queues_prio(struct stmmac_priv * priv)3491 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3492 {
3493 u32 tx_queues_count = priv->plat->tx_queues_to_use;
3494 u32 queue;
3495 u32 prio;
3496
3497 for (queue = 0; queue < tx_queues_count; queue++) {
3498 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3499 continue;
3500
3501 prio = priv->plat->tx_queues_cfg[queue].prio;
3502 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3503 }
3504 }
3505
3506 /**
3507 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3508 * @priv: driver private structure
3509 * Description: It is used for configuring the RX queue routing
3510 */
stmmac_mac_config_rx_queues_routing(struct stmmac_priv * priv)3511 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3512 {
3513 u32 rx_queues_count = priv->plat->rx_queues_to_use;
3514 u32 queue;
3515 u8 packet;
3516
3517 for (queue = 0; queue < rx_queues_count; queue++) {
3518 /* no specific packet type routing specified for the queue */
3519 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3520 continue;
3521
3522 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3523 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3524 }
3525 }
3526
stmmac_mac_config_rss(struct stmmac_priv * priv)3527 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3528 {
3529 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3530 priv->rss.enable = false;
3531 return;
3532 }
3533
3534 if (priv->dev->features & NETIF_F_RXHASH)
3535 priv->rss.enable = true;
3536 else
3537 priv->rss.enable = false;
3538
3539 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3540 priv->plat->rx_queues_to_use);
3541 }
3542
3543 /**
3544 * stmmac_mtl_configuration - Configure MTL
3545 * @priv: driver private structure
3546 * Description: It is used for configuring MTL
3547 */
static void stmmac_mtl_configuration(struct stmmac_priv *priv)
{
	u32 rx_queues_count = priv->plat->rx_queues_to_use;
	u32 tx_queues_count = priv->plat->tx_queues_to_use;

	/* Most steps below only make sense with more than one queue; a
	 * single-queue setup just maps the queue and enables RX.
	 */
	if (tx_queues_count > 1)
		stmmac_set_tx_queue_weight(priv);

	/* Configure MTL RX algorithms */
	if (rx_queues_count > 1)
		stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
					      priv->plat->rx_sched_algorithm);

	/* Configure MTL TX algorithms */
	if (tx_queues_count > 1)
		stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
					      priv->plat->tx_sched_algorithm);

	/* Configure CBS in AVB TX queues */
	if (tx_queues_count > 1)
		stmmac_configure_cbs(priv);

	/* Map RX MTL to DMA channels */
	stmmac_rx_queue_dma_chan_map(priv);

	/* Enable MAC RX Queues */
	stmmac_mac_enable_rx_queues(priv);

	/* Set RX priorities */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_prio(priv);

	/* Set TX priorities */
	if (tx_queues_count > 1)
		stmmac_mac_config_tx_queues_prio(priv);

	/* Set RX routing */
	if (rx_queues_count > 1)
		stmmac_mac_config_rx_queues_routing(priv);

	/* Receive Side Scaling */
	if (rx_queues_count > 1)
		stmmac_mac_config_rss(priv);
}
3592
stmmac_safety_feat_configuration(struct stmmac_priv * priv)3593 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3594 {
3595 if (priv->dma_cap.asp) {
3596 netdev_info(priv->dev, "Enabling Safety Features\n");
3597 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3598 priv->plat->safety_feat_cfg);
3599 } else {
3600 netdev_info(priv->dev, "No Safety Features support found\n");
3601 }
3602 }
3603
3604 /**
3605 * stmmac_hw_setup - setup mac in a usable state.
3606 * @dev : pointer to the device structure.
3607 * Description:
3608 * this is the main function to setup the HW in a usable state because the
3609 * dma engine is reset, the core registers are configured (e.g. AXI,
3610 * Checksum features, timers). The DMA is ready to start receiving and
3611 * transmitting.
3612 * Return value:
3613 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3614 * file on failure.
3615 */
static int stmmac_hw_setup(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u32 chan;
	int ret;

	/* Make sure RX clock is enabled */
	if (priv->hw->phylink_pcs)
		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);

	/* Note that clk_rx_i must be running for reset to complete. This
	 * clock may also be required when setting the MAC address.
	 *
	 * Block the receive clock stop for LPI mode at the PHY in case
	 * the link is established with EEE mode active.
	 */
	phylink_rx_clk_stop_block(priv->phylink);

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		phylink_rx_clk_stop_unblock(priv->phylink);
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
	phylink_rx_clk_stop_unblock(priv->phylink);

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* Fall back to no RX checksum offload when the core rejects it */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* Program the RX interrupt watchdog (irq coalescing) per queue */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO */
	if (priv->tso) {
		for (chan = 0; chan < tx_cnt; chan++) {
			struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

			/* TSO and TBS cannot co-exist */
			if (tx_q->tbs & STMMAC_TBS_AVAIL)
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header (requires RX checksum offload to be active) */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);


	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	/* RX clock must be running while programming the VLAN mode */
	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_hw_vlan_mode(priv, priv->hw);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}
3733
/* Free the IRQs requested by stmmac_request_irq_multi_msi(). The case
 * label names the request that FAILED (or REQ_IRQ_ERR_ALL for a full
 * teardown); each case releases one of the previously requested IRQs and
 * falls through until everything successfully requested has been freed.
 * Dedicated lines are freed only when distinct from the main MAC irq.
 */
static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int j;

	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		/* Free TX queue IRQs [0, irq_idx) in reverse order */
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->tx_irq[j] > 0) {
				irq_set_affinity_hint(priv->tx_irq[j], NULL);
				free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		/* Free RX queue IRQs [0, irq_idx) in reverse order */
		for (j = irq_idx - 1; j >= 0; j--) {
			if (priv->rx_irq[j] > 0) {
				irq_set_affinity_hint(priv->rx_irq[j], NULL);
				free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]);
			}
		}

		if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq)
			free_irq(priv->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq)
			free_irq(priv->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY:
		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
			free_irq(priv->sfty_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}
3785
stmmac_request_irq_multi_msi(struct net_device * dev)3786 static int stmmac_request_irq_multi_msi(struct net_device *dev)
3787 {
3788 struct stmmac_priv *priv = netdev_priv(dev);
3789 enum request_irq_err irq_err;
3790 int irq_idx = 0;
3791 char *int_name;
3792 int ret;
3793 int i;
3794
3795 /* For common interrupt */
3796 int_name = priv->int_name_mac;
3797 sprintf(int_name, "%s:%s", dev->name, "mac");
3798 ret = request_irq(dev->irq, stmmac_mac_interrupt,
3799 0, int_name, dev);
3800 if (unlikely(ret < 0)) {
3801 netdev_err(priv->dev,
3802 "%s: alloc mac MSI %d (error: %d)\n",
3803 __func__, dev->irq, ret);
3804 irq_err = REQ_IRQ_ERR_MAC;
3805 goto irq_error;
3806 }
3807
3808 /* Request the Wake IRQ in case of another line
3809 * is used for WoL
3810 */
3811 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3812 int_name = priv->int_name_wol;
3813 sprintf(int_name, "%s:%s", dev->name, "wol");
3814 ret = request_irq(priv->wol_irq,
3815 stmmac_mac_interrupt,
3816 0, int_name, dev);
3817 if (unlikely(ret < 0)) {
3818 netdev_err(priv->dev,
3819 "%s: alloc wol MSI %d (error: %d)\n",
3820 __func__, priv->wol_irq, ret);
3821 irq_err = REQ_IRQ_ERR_WOL;
3822 goto irq_error;
3823 }
3824 }
3825
3826 /* Request the common Safety Feature Correctible/Uncorrectible
3827 * Error line in case of another line is used
3828 */
3829 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3830 int_name = priv->int_name_sfty;
3831 sprintf(int_name, "%s:%s", dev->name, "safety");
3832 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3833 0, int_name, dev);
3834 if (unlikely(ret < 0)) {
3835 netdev_err(priv->dev,
3836 "%s: alloc sfty MSI %d (error: %d)\n",
3837 __func__, priv->sfty_irq, ret);
3838 irq_err = REQ_IRQ_ERR_SFTY;
3839 goto irq_error;
3840 }
3841 }
3842
3843 /* Request the Safety Feature Correctible Error line in
3844 * case of another line is used
3845 */
3846 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) {
3847 int_name = priv->int_name_sfty_ce;
3848 sprintf(int_name, "%s:%s", dev->name, "safety-ce");
3849 ret = request_irq(priv->sfty_ce_irq,
3850 stmmac_safety_interrupt,
3851 0, int_name, dev);
3852 if (unlikely(ret < 0)) {
3853 netdev_err(priv->dev,
3854 "%s: alloc sfty ce MSI %d (error: %d)\n",
3855 __func__, priv->sfty_ce_irq, ret);
3856 irq_err = REQ_IRQ_ERR_SFTY_CE;
3857 goto irq_error;
3858 }
3859 }
3860
3861 /* Request the Safety Feature Uncorrectible Error line in
3862 * case of another line is used
3863 */
3864 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) {
3865 int_name = priv->int_name_sfty_ue;
3866 sprintf(int_name, "%s:%s", dev->name, "safety-ue");
3867 ret = request_irq(priv->sfty_ue_irq,
3868 stmmac_safety_interrupt,
3869 0, int_name, dev);
3870 if (unlikely(ret < 0)) {
3871 netdev_err(priv->dev,
3872 "%s: alloc sfty ue MSI %d (error: %d)\n",
3873 __func__, priv->sfty_ue_irq, ret);
3874 irq_err = REQ_IRQ_ERR_SFTY_UE;
3875 goto irq_error;
3876 }
3877 }
3878
3879 /* Request Rx MSI irq */
3880 for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
3881 if (i >= MTL_MAX_RX_QUEUES)
3882 break;
3883 if (priv->rx_irq[i] == 0)
3884 continue;
3885
3886 int_name = priv->int_name_rx_irq[i];
3887 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
3888 ret = request_irq(priv->rx_irq[i],
3889 stmmac_msi_intr_rx,
3890 0, int_name, &priv->dma_conf.rx_queue[i]);
3891 if (unlikely(ret < 0)) {
3892 netdev_err(priv->dev,
3893 "%s: alloc rx-%d MSI %d (error: %d)\n",
3894 __func__, i, priv->rx_irq[i], ret);
3895 irq_err = REQ_IRQ_ERR_RX;
3896 irq_idx = i;
3897 goto irq_error;
3898 }
3899 irq_set_affinity_hint(priv->rx_irq[i],
3900 cpumask_of(i % num_online_cpus()));
3901 }
3902
3903 /* Request Tx MSI irq */
3904 for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
3905 if (i >= MTL_MAX_TX_QUEUES)
3906 break;
3907 if (priv->tx_irq[i] == 0)
3908 continue;
3909
3910 int_name = priv->int_name_tx_irq[i];
3911 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
3912 ret = request_irq(priv->tx_irq[i],
3913 stmmac_msi_intr_tx,
3914 0, int_name, &priv->dma_conf.tx_queue[i]);
3915 if (unlikely(ret < 0)) {
3916 netdev_err(priv->dev,
3917 "%s: alloc tx-%d MSI %d (error: %d)\n",
3918 __func__, i, priv->tx_irq[i], ret);
3919 irq_err = REQ_IRQ_ERR_TX;
3920 irq_idx = i;
3921 goto irq_error;
3922 }
3923 irq_set_affinity_hint(priv->tx_irq[i],
3924 cpumask_of(i % num_online_cpus()));
3925 }
3926
3927 return 0;
3928
3929 irq_error:
3930 stmmac_free_irq(dev, irq_err, irq_idx);
3931 return ret;
3932 }
3933
stmmac_request_irq_single(struct net_device * dev)3934 static int stmmac_request_irq_single(struct net_device *dev)
3935 {
3936 struct stmmac_priv *priv = netdev_priv(dev);
3937 enum request_irq_err irq_err;
3938 int ret;
3939
3940 ret = request_irq(dev->irq, stmmac_interrupt,
3941 IRQF_SHARED, dev->name, dev);
3942 if (unlikely(ret < 0)) {
3943 netdev_err(priv->dev,
3944 "%s: ERROR: allocating the IRQ %d (error: %d)\n",
3945 __func__, dev->irq, ret);
3946 irq_err = REQ_IRQ_ERR_MAC;
3947 goto irq_error;
3948 }
3949
3950 /* Request the Wake IRQ in case of another line
3951 * is used for WoL
3952 */
3953 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
3954 ret = request_irq(priv->wol_irq, stmmac_interrupt,
3955 IRQF_SHARED, dev->name, dev);
3956 if (unlikely(ret < 0)) {
3957 netdev_err(priv->dev,
3958 "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
3959 __func__, priv->wol_irq, ret);
3960 irq_err = REQ_IRQ_ERR_WOL;
3961 goto irq_error;
3962 }
3963 }
3964
3965 /* Request the common Safety Feature Correctible/Uncorrectible
3966 * Error line in case of another line is used
3967 */
3968 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
3969 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
3970 IRQF_SHARED, dev->name, dev);
3971 if (unlikely(ret < 0)) {
3972 netdev_err(priv->dev,
3973 "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
3974 __func__, priv->sfty_irq, ret);
3975 irq_err = REQ_IRQ_ERR_SFTY;
3976 goto irq_error;
3977 }
3978 }
3979
3980 return 0;
3981
3982 irq_error:
3983 stmmac_free_irq(dev, irq_err, 0);
3984 return ret;
3985 }
3986
stmmac_request_irq(struct net_device * dev)3987 static int stmmac_request_irq(struct net_device *dev)
3988 {
3989 struct stmmac_priv *priv = netdev_priv(dev);
3990 int ret;
3991
3992 /* Request the IRQ lines */
3993 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
3994 ret = stmmac_request_irq_multi_msi(dev);
3995 else
3996 ret = stmmac_request_irq_single(dev);
3997
3998 return ret;
3999 }
4000
4001 /**
4002 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
4003 * @priv: driver private structure
4004 * @mtu: MTU to setup the dma queue and buf with
4005 * Description: Allocate and generate a dma_conf based on the provided MTU.
4006 * Allocate the Tx/Rx DMA queue and init them.
4007 * Return value:
4008 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
4009 */
4010 static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv * priv,unsigned int mtu)4011 stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
4012 {
4013 struct stmmac_dma_conf *dma_conf;
4014 int chan, bfsize, ret;
4015
4016 dma_conf = kzalloc_obj(*dma_conf);
4017 if (!dma_conf) {
4018 netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
4019 __func__);
4020 return ERR_PTR(-ENOMEM);
4021 }
4022
4023 /* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
4024 bfsize = stmmac_set_16kib_bfsize(priv, mtu);
4025 if (bfsize < 0)
4026 bfsize = 0;
4027
4028 if (bfsize < BUF_SIZE_16KiB)
4029 bfsize = stmmac_set_bfsize(mtu);
4030
4031 dma_conf->dma_buf_sz = bfsize;
4032 /* Chose the tx/rx size from the already defined one in the
4033 * priv struct. (if defined)
4034 */
4035 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
4036 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;
4037
4038 if (!dma_conf->dma_tx_size)
4039 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
4040 if (!dma_conf->dma_rx_size)
4041 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;
4042
4043 /* Earlier check for TBS */
4044 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
4045 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
4046 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;
4047
4048 /* Setup per-TXQ tbs flag before TX descriptor alloc */
4049 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
4050 }
4051
4052 ret = alloc_dma_desc_resources(priv, dma_conf);
4053 if (ret < 0) {
4054 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
4055 __func__);
4056 goto alloc_error;
4057 }
4058
4059 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
4060 if (ret < 0) {
4061 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
4062 __func__);
4063 goto init_error;
4064 }
4065
4066 return dma_conf;
4067
4068 init_error:
4069 free_dma_desc_resources(priv, dma_conf);
4070 alloc_error:
4071 kfree(dma_conf);
4072 return ERR_PTR(ret);
4073 }
4074
4075 /**
4076 * __stmmac_open - open entry point of the driver
4077 * @dev : pointer to the device structure.
4078 * @dma_conf : structure to take the dma data
4079 * Description:
4080 * This function is the open entry point of the driver.
4081 * Return value:
4082 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4083 * file on failure.
4084 */
static int __stmmac_open(struct net_device *dev,
			 struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;
	int ret;

	/* Carry over any TBS-enable state already latched in the live
	 * configuration before it is overwritten by the new dma_conf.
	 */
	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
	/* Install the freshly built DMA configuration as the live one */
	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

	stmmac_reset_queues_param(priv);

	ret = stmmac_hw_setup(dev);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_setup_ptp(priv);

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);

	stmmac_vlan_restore(priv);

	/* IRQs are requested only after the MAC/PHY are set up so a spurious
	 * interrupt cannot observe half-initialized state.
	 */
	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	/* Unwind in reverse order: stop phylink, cancel the per-queue TX
	 * coalescing timers, then tear PTP down again.
	 */
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_release_ptp(priv);
init_error:
	return ret;
}
4133
/* ndo_open: allocate DMA resources, resume the device, connect the PHY,
 * power the SerDes (when needed) and bring the interface up.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	/* Initialise the tx lpi timer, converting from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		goto err_dma_resources;

	ret = stmmac_init_phy(dev);
	if (ret)
		goto err_runtime_pm;

	/* SerDes is powered up now unless the platform defers it until
	 * after PHY link-up.
	 */
	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
		ret = stmmac_legacy_serdes_power_up(priv);
		if (ret < 0)
			goto err_disconnect_phy;
	}

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		goto err_serdes;

	/* __stmmac_open() copied *dma_conf into priv->dma_conf, so only the
	 * container is freed here; the descriptor resources stay live.
	 */
	kfree(dma_conf);

	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	return ret;

err_serdes:
	/* NOTE(review): called even when SERDES_UP_AFTER_PHY_LINKUP skipped
	 * the power-up above; assumed to be a safe no-op then — confirm.
	 */
	stmmac_legacy_serdes_power_down(priv);
err_disconnect_phy:
	phylink_disconnect_phy(priv->phylink);
err_runtime_pm:
	pm_runtime_put(priv->device);
err_dma_resources:
	/* Descriptors were allocated by stmmac_setup_dma_desc() above */
	free_dma_desc_resources(priv, dma_conf);
	kfree(dma_conf);
	return ret;
}
4184
/* Tear down the data path: stop phylink, quiesce queues and timers,
 * release IRQs, halt DMA and free all descriptor resources.
 */
static void __stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-queue TX coalescing timers before stopping DMA */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	netif_tx_disable(dev);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	stmmac_release_ptp(priv);

	/* Stop the MM-state verification machinery when FPE is in use */
	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
}
4214
4215 /**
4216 * stmmac_release - close entry point of the driver
4217 * @dev : device pointer.
4218 * Description:
4219 * This is the stop entry point of the driver.
4220 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* If the PHY or MAC has WoL enabled, then the PHY will not be
	 * suspended when phylink_stop() is called below. Set the PHY
	 * to its slowest speed to save power.
	 */
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);

	/* Quiesce queues, free IRQs, stop DMA, free descriptors */
	__stmmac_release(dev);

	stmmac_legacy_serdes_power_down(priv);
	phylink_disconnect_phy(priv->phylink);

	/* Balances pm_runtime_resume_and_get() taken in stmmac_open() */
	pm_runtime_put(priv->device);

	return 0;
}
4240
stmmac_vlan_insert(struct stmmac_priv * priv,struct sk_buff * skb,struct stmmac_tx_queue * tx_q)4241 static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
4242 struct stmmac_tx_queue *tx_q)
4243 {
4244 struct dma_desc *p;
4245 u16 tag = 0x0;
4246
4247 if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
4248 return false;
4249
4250 tag = skb_vlan_tag_get(skb);
4251
4252 if (tx_q->tbs & STMMAC_TBS_AVAIL)
4253 p = &tx_q->dma_entx[tx_q->cur_tx].basic;
4254 else
4255 p = &tx_q->dma_tx[tx_q->cur_tx];
4256
4257 if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
4258 return false;
4259
4260 stmmac_set_tx_owner(priv, p);
4261 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
4262 return true;
4263 }
4264
4265 /**
 * stmmac_tso_allocator - allocate TX descriptors for a TSO payload chunk
4267 * @priv: driver private structure
4268 * @des: buffer start address
4269 * @total_len: total length to fill in descriptors
4270 * @last_segment: condition for the last descriptor
4271 * @queue: TX queue index
4272 * Description:
4273 * This function fills descriptor and request new descriptors according to
4274 * buffer length to fill
4275 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	int offset = 0;

	/* Split [des, des + total_len) into TSO_MAX_BUFF_SIZE chunks, one
	 * descriptor per chunk; the LS bit is set only on the final chunk
	 * of the last segment.
	 */
	while (offset < total_len) {
		int remain = total_len - offset;
		u32 buff_size = min_t(u32, remain, TSO_MAX_BUFF_SIZE);
		struct dma_desc *desc;

		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_desc_addr(priv, desc, des + offset);
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size, 0, 1,
					   last_segment &&
					   remain <= TSO_MAX_BUFF_SIZE,
					   0, 0);

		offset += buff_size;
	}
}
4311
stmmac_flush_tx_descriptors(struct stmmac_priv * priv,int queue)4312 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
4313 {
4314 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
4315 int desc_size;
4316
4317 if (likely(priv->extend_desc))
4318 desc_size = sizeof(struct dma_extended_desc);
4319 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4320 desc_size = sizeof(struct dma_edesc);
4321 else
4322 desc_size = sizeof(struct dma_desc);
4323
4324 /* The own bit must be the latest setting done when prepare the
4325 * descriptor and then barrier is needed to make sure that
4326 * all is coherent before granting the DMA engine.
4327 */
4328 wmb();
4329
4330 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size);
4331 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue);
4332 }
4333
4334 /**
4335 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4336 * @skb : the socket buffer
4337 * @dev : device pointer
4338 * Description: this is the transmit function that is called on TSO frames
4339 * (support available on GMAC4 and newer chips).
4340 * Diagram below show the ring programming in case of TSO frames:
4341 *
4342 * First Descriptor
4343 * --------
4344 * | DES0 |---> buffer1 = L2/L3/L4 header
4345 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4346 * | | width is 32-bit, but we never use it.
4347 * | | Also can be used as the most-significant 8-bits or 16-bits of
4348 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4349 * | | or 48-bit, and we always use it.
4350 * | DES2 |---> buffer1 len
4351 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4352 * --------
4353 * --------
4354 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4355 * | DES1 |---> same as the First Descriptor
4356 * | DES2 |---> buffer1 len
4357 * | DES3 |
4358 * --------
4359 * |
4360 * ...
4361 * |
4362 * --------
4363 * | DES0 |---> buffer1 = Split TCP Payload
4364 * | DES1 |---> same as the First Descriptor
4365 * | DES2 |---> buffer1 len
4366 * | DES3 |
4367 * --------
4368 *
4369 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4370 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int first_entry, tx_packets;
	struct stmmac_txq_stats *txq_stats;
	struct stmmac_tx_queue *tx_q;
	bool set_ic, is_last_segment;
	u32 pay_len, mss, queue;
	int i, first_tx, nfrags;
	u8 proto_hdr_len, hdr;
	dma_addr_t des;

	/* Always insert VLAN tag to SKB payload for TSO frames.
	 *
	 * Never insert VLAN tag by HW, since segments split by
	 * TSO engine will be un-tagged by mistake.
	 */
	if (skb_vlan_tag_present(skb)) {
		skb = __vlan_hwaccel_push_inside(skb);
		if (unlikely(!skb)) {
			priv->xstats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	nfrags = skb_shinfo(skb)->nr_frags;
	queue = skb_get_queue_mapping(skb);

	tx_q = &priv->dma_conf.tx_queue[queue];
	txq_stats = &priv->xstats.txq_stats[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths (UDP GSO vs TCP segmentation) */
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) {
		proto_hdr_len = skb_transport_offset(skb) + sizeof(struct udphdr);
		hdr = sizeof(struct udphdr);
	} else {
		proto_hdr_len = skb_tcp_all_headers(skb);
		hdr = tcp_hdrlen(skb);
	}

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: a context descriptor is consumed
	 * before the first data descriptor.
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx,
						priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	stmmac_set_desc_addr(priv, first, des);
	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
			     (nfrags == 0), queue);

	/* In case two or more DMA transmit descriptors are allocated for this
	 * non-paged SKB data, the DMA buffer address should be saved to
	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
	 * since the tail areas of the DMA buffer can be accessed by DMA engine
	 * sooner or later.
	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
	 * this DMA buffer right after the DMA engine completely finishes the
	 * full buffer transmission.
	 */
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb);
	tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag);
		tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true;
		tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;
	}

	tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true;

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;
	tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB;

	/* Manage tx mitigation: decide whether this packet gets an
	 * interrupt-on-completion bit based on timestamping and the
	 * coalescing frame counters.
	 */
	tx_packets = (tx_q->cur_tx + 1) - first_tx;
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	u64_stats_inc(&txq_stats->q.tx_tso_frames);
	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* If we only have one entry used, then the first entry is the last
	 * segment.
	 */
	is_last_segment = ((tx_q->cur_tx - first_entry) &
			   (priv->dma_conf.dma_tx_size - 1)) == 1;

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
				   is_last_segment, hdr / 4,
				   skb->len - proto_hdr_len);

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
	skb_tx_timestamp(skb);

	/* Barrier + tail-pointer update hands everything to the DMA */
	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->xstats.tx_dropped++;
	return NETDEV_TX_OK;
}
4614
4615 /**
4616 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4617 * @skb: socket buffer to check
4618 *
4619 * Check if a packet has an ethertype that will trigger the IP header checks
4620 * and IP/TCP checksum engine of the stmmac core.
4621 *
4622 * Return: true if the ethertype can trigger the checksum engine, false
4623 * otherwise
4624 */
stmmac_has_ip_ethertype(struct sk_buff * skb)4625 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4626 {
4627 int depth = 0;
4628 __be16 proto;
4629
4630 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4631 &depth);
4632
4633 return (depth <= ETH_HLEN) &&
4634 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4635 }
4636
4637 /**
4638 * stmmac_xmit - Tx entry point of the driver
4639 * @skb : the socket buffer
4640 * @dev : device pointer
4641 * Description : this is the tx entry point of the driver.
4642 * It programs the chain or the ring and supports oversized frames
4643 * and SG feature.
4644 */
stmmac_xmit(struct sk_buff * skb,struct net_device * dev)4645 static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
4646 {
4647 bool enh_desc, has_vlan, set_ic, is_jumbo = false;
4648 struct stmmac_priv *priv = netdev_priv(dev);
4649 unsigned int nopaged_len = skb_headlen(skb);
4650 u32 queue = skb_get_queue_mapping(skb);
4651 int nfrags = skb_shinfo(skb)->nr_frags;
4652 unsigned int first_entry, tx_packets;
4653 int gso = skb_shinfo(skb)->gso_type;
4654 struct stmmac_txq_stats *txq_stats;
4655 struct dma_edesc *tbs_desc = NULL;
4656 struct dma_desc *desc, *first;
4657 struct stmmac_tx_queue *tx_q;
4658 int i, csum_insertion = 0;
4659 int entry, first_tx;
4660 dma_addr_t des;
4661 u32 sdu_len;
4662
4663 tx_q = &priv->dma_conf.tx_queue[queue];
4664 txq_stats = &priv->xstats.txq_stats[queue];
4665 first_tx = tx_q->cur_tx;
4666
4667 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
4668 stmmac_stop_sw_lpi(priv);
4669
4670 /* Manage oversized TCP frames for GMAC4 device */
4671 if (skb_is_gso(skb) && priv->tso) {
4672 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
4673 return stmmac_tso_xmit(skb, dev);
4674 if (priv->plat->core_type == DWMAC_CORE_GMAC4 &&
4675 (gso & SKB_GSO_UDP_L4))
4676 return stmmac_tso_xmit(skb, dev);
4677 }
4678
4679 if (priv->est && priv->est->enable &&
4680 priv->est->max_sdu[queue]) {
4681 sdu_len = skb->len;
4682 /* Add VLAN tag length if VLAN tag insertion offload is requested */
4683 if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
4684 sdu_len += VLAN_HLEN;
4685 if (sdu_len > priv->est->max_sdu[queue]) {
4686 priv->xstats.max_sdu_txq_drop[queue]++;
4687 goto max_sdu_err;
4688 }
4689 }
4690
4691 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
4692 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
4693 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
4694 queue));
4695 /* This is a hard error, log it. */
4696 netdev_err(priv->dev,
4697 "%s: Tx Ring full when queue awake\n",
4698 __func__);
4699 }
4700 return NETDEV_TX_BUSY;
4701 }
4702
4703 /* Check if VLAN can be inserted by HW */
4704 has_vlan = stmmac_vlan_insert(priv, skb, tx_q);
4705
4706 entry = tx_q->cur_tx;
4707 first_entry = entry;
4708 WARN_ON(tx_q->tx_skbuff[first_entry]);
4709
4710 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL);
4711 /* DWMAC IPs can be synthesized to support tx coe only for a few tx
4712 * queues. In that case, checksum offloading for those queues that don't
4713 * support tx coe needs to fallback to software checksum calculation.
4714 *
4715 * Packets that won't trigger the COE e.g. most DSA-tagged packets will
4716 * also have to be checksummed in software.
4717 */
4718 if (csum_insertion &&
4719 (priv->plat->tx_queues_cfg[queue].coe_unsupported ||
4720 !stmmac_has_ip_ethertype(skb))) {
4721 if (unlikely(skb_checksum_help(skb)))
4722 goto dma_map_err;
4723 csum_insertion = !csum_insertion;
4724 }
4725
4726 if (likely(priv->extend_desc))
4727 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4728 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4729 desc = &tx_q->dma_entx[entry].basic;
4730 else
4731 desc = tx_q->dma_tx + entry;
4732
4733 first = desc;
4734
4735 if (has_vlan)
4736 stmmac_set_desc_vlan(priv, first, STMMAC_VLAN_INSERT);
4737
4738 enh_desc = priv->plat->enh_desc;
4739 /* To program the descriptors according to the size of the frame */
4740 if (enh_desc)
4741 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);
4742
4743 if (unlikely(is_jumbo)) {
4744 entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
4745 if (unlikely(entry < 0) && (entry != -EINVAL))
4746 goto dma_map_err;
4747 }
4748
4749 for (i = 0; i < nfrags; i++) {
4750 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4751 int len = skb_frag_size(frag);
4752 bool last_segment = (i == (nfrags - 1));
4753
4754 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4755 WARN_ON(tx_q->tx_skbuff[entry]);
4756
4757 if (likely(priv->extend_desc))
4758 desc = (struct dma_desc *)(tx_q->dma_etx + entry);
4759 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4760 desc = &tx_q->dma_entx[entry].basic;
4761 else
4762 desc = tx_q->dma_tx + entry;
4763
4764 des = skb_frag_dma_map(priv->device, frag, 0, len,
4765 DMA_TO_DEVICE);
4766 if (dma_mapping_error(priv->device, des))
4767 goto dma_map_err; /* should reuse desc w/o issues */
4768
4769 tx_q->tx_skbuff_dma[entry].buf = des;
4770
4771 stmmac_set_desc_addr(priv, desc, des);
4772
4773 tx_q->tx_skbuff_dma[entry].map_as_page = true;
4774 tx_q->tx_skbuff_dma[entry].len = len;
4775 tx_q->tx_skbuff_dma[entry].last_segment = last_segment;
4776 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4777
4778 /* Prepare the descriptor and set the own bit too */
4779 stmmac_prepare_tx_desc(priv, desc, 0, len, csum_insertion,
4780 priv->mode, 1, last_segment, skb->len);
4781 }
4782
4783 /* Only the last descriptor gets to point to the skb. */
4784 tx_q->tx_skbuff[entry] = skb;
4785 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB;
4786
4787 /* According to the coalesce parameter the IC bit for the latest
4788 * segment is reset and the timer re-started to clean the tx status.
4789 * This approach takes care about the fragments: desc is the first
4790 * element in case of no SG.
4791 */
4792 tx_packets = (entry + 1) - first_tx;
4793 tx_q->tx_count_frames += tx_packets;
4794
4795 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
4796 set_ic = true;
4797 else if (!priv->tx_coal_frames[queue])
4798 set_ic = false;
4799 else if (tx_packets > priv->tx_coal_frames[queue])
4800 set_ic = true;
4801 else if ((tx_q->tx_count_frames %
4802 priv->tx_coal_frames[queue]) < tx_packets)
4803 set_ic = true;
4804 else
4805 set_ic = false;
4806
4807 if (set_ic) {
4808 if (likely(priv->extend_desc))
4809 desc = &tx_q->dma_etx[entry].basic;
4810 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
4811 desc = &tx_q->dma_entx[entry].basic;
4812 else
4813 desc = &tx_q->dma_tx[entry];
4814
4815 tx_q->tx_count_frames = 0;
4816 stmmac_set_tx_ic(priv, desc);
4817 }
4818
4819 /* We've used all descriptors we need for this skb, however,
4820 * advance cur_tx so that it references a fresh descriptor.
4821 * ndo_start_xmit will fill this descriptor the next time it's
4822 * called and stmmac_tx_clean may clean up to this descriptor.
4823 */
4824 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
4825 tx_q->cur_tx = entry;
4826
4827 if (netif_msg_pktdata(priv)) {
4828 netdev_dbg(priv->dev,
4829 "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
4830 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
4831 entry, first, nfrags);
4832
4833 netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
4834 print_pkt(skb->data, skb->len);
4835 }
4836
4837 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
4838 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
4839 __func__);
4840 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
4841 }
4842
4843 u64_stats_update_begin(&txq_stats->q_syncp);
4844 u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
4845 if (set_ic)
4846 u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
4847 u64_stats_update_end(&txq_stats->q_syncp);
4848
4849 if (priv->sarc_type)
4850 stmmac_set_desc_sarc(priv, first, priv->sarc_type);
4851
4852 /* Ready to fill the first descriptor and set the OWN bit w/o any
4853 * problems because all the descriptors are actually ready to be
4854 * passed to the DMA engine.
4855 */
4856 if (likely(!is_jumbo)) {
4857 bool last_segment = (nfrags == 0);
4858
4859 des = dma_map_single(priv->device, skb->data,
4860 nopaged_len, DMA_TO_DEVICE);
4861 if (dma_mapping_error(priv->device, des))
4862 goto dma_map_err;
4863
4864 tx_q->tx_skbuff_dma[first_entry].buf = des;
4865 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB;
4866 tx_q->tx_skbuff_dma[first_entry].map_as_page = false;
4867
4868 stmmac_set_desc_addr(priv, first, des);
4869
4870 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len;
4871 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment;
4872
4873 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
4874 priv->hwts_tx_en)) {
4875 /* declare that device is doing timestamping */
4876 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
4877 stmmac_enable_tx_timestamp(priv, first);
4878 }
4879
4880 /* Prepare the first descriptor setting the OWN bit too */
4881 stmmac_prepare_tx_desc(priv, first, 1, nopaged_len,
4882 csum_insertion, priv->mode, 0, last_segment,
4883 skb->len);
4884 }
4885
4886 if (tx_q->tbs & STMMAC_TBS_EN) {
4887 struct timespec64 ts = ns_to_timespec64(skb->tstamp);
4888
4889 tbs_desc = &tx_q->dma_entx[first_entry];
4890 stmmac_set_desc_tbs(priv, tbs_desc, ts.tv_sec, ts.tv_nsec);
4891 }
4892
4893 stmmac_set_tx_owner(priv, first);
4894
4895 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
4896
4897 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
4898 skb_tx_timestamp(skb);
4899 stmmac_flush_tx_descriptors(priv, queue);
4900 stmmac_tx_timer_arm(priv, queue);
4901
4902 return NETDEV_TX_OK;
4903
4904 dma_map_err:
4905 netdev_err(priv->dev, "Tx DMA map failed\n");
4906 max_sdu_err:
4907 dev_kfree_skb(skb);
4908 priv->xstats.tx_dropped++;
4909 return NETDEV_TX_OK;
4910 }
4911
stmmac_rx_vlan(struct net_device * dev,struct sk_buff * skb)4912 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
4913 {
4914 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
4915 __be16 vlan_proto = veth->h_vlan_proto;
4916 u16 vlanid;
4917
4918 if ((vlan_proto == htons(ETH_P_8021Q) &&
4919 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
4920 (vlan_proto == htons(ETH_P_8021AD) &&
4921 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
4922 /* pop the vlan tag */
4923 vlanid = ntohs(veth->h_vlan_TCI);
4924 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
4925 skb_pull(skb, VLAN_HLEN);
4926 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
4927 }
4928 }
4929
4930 /**
4931 * stmmac_rx_refill - refill used skb preallocated buffers
4932 * @priv: driver private structure
4933 * @queue: RX queue index
4934 * Description : this is to reallocate the skb for the reception process
4935 * that is based on zero-copy.
4936 */
stmmac_rx_refill(struct stmmac_priv * priv,u32 queue)4937 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
4938 {
4939 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
4940 int dirty = stmmac_rx_dirty(priv, queue);
4941 unsigned int entry = rx_q->dirty_rx;
4942 gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);
4943
4944 if (priv->dma_cap.host_dma_width <= 32)
4945 gfp |= GFP_DMA32;
4946
4947 while (dirty-- > 0) {
4948 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
4949 struct dma_desc *p;
4950 bool use_rx_wd;
4951
4952 if (priv->extend_desc)
4953 p = (struct dma_desc *)(rx_q->dma_erx + entry);
4954 else
4955 p = rx_q->dma_rx + entry;
4956
4957 if (!buf->page) {
4958 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4959 if (!buf->page)
4960 break;
4961 }
4962
4963 if (priv->sph_active && !buf->sec_page) {
4964 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
4965 if (!buf->sec_page)
4966 break;
4967
4968 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
4969 }
4970
4971 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;
4972
4973 stmmac_set_desc_addr(priv, p, buf->addr);
4974 if (priv->sph_active)
4975 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
4976 else
4977 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
4978 stmmac_refill_desc3(priv, rx_q, p);
4979
4980 rx_q->rx_count_frames++;
4981 rx_q->rx_count_frames += priv->rx_coal_frames[queue];
4982 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
4983 rx_q->rx_count_frames = 0;
4984
4985 use_rx_wd = !priv->rx_coal_frames[queue];
4986 use_rx_wd |= rx_q->rx_count_frames > 0;
4987 if (!priv->use_riwt)
4988 use_rx_wd = false;
4989
4990 dma_wmb();
4991 stmmac_set_rx_owner(priv, p, use_rx_wd);
4992
4993 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
4994 }
4995 rx_q->dirty_rx = entry;
4996 rx_q->rx_tail_addr = rx_q->dma_rx_phy +
4997 (rx_q->dirty_rx * sizeof(struct dma_desc));
4998 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
4999 /* Wake up Rx DMA from the suspend state if required */
5000 stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
5001 }
5002
stmmac_rx_buf1_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5003 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
5004 struct dma_desc *p,
5005 int status, unsigned int len)
5006 {
5007 unsigned int plen = 0, hlen = 0;
5008 int coe = priv->hw->rx_csum;
5009
5010 /* Not first descriptor, buffer is always zero */
5011 if (priv->sph_active && len)
5012 return 0;
5013
5014 /* First descriptor, get split header length */
5015 stmmac_get_rx_header_len(priv, p, &hlen);
5016 if (priv->sph_active && hlen) {
5017 priv->xstats.rx_split_hdr_pkt_n++;
5018 return hlen;
5019 }
5020
5021 /* First descriptor, not last descriptor and not split header */
5022 if (status & rx_not_ls)
5023 return priv->dma_conf.dma_buf_sz;
5024
5025 plen = stmmac_get_rx_frame_len(priv, p, coe);
5026
5027 /* First descriptor and last descriptor and not split header */
5028 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
5029 }
5030
stmmac_rx_buf2_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5031 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
5032 struct dma_desc *p,
5033 int status, unsigned int len)
5034 {
5035 int coe = priv->hw->rx_csum;
5036 unsigned int plen = 0;
5037
5038 /* Not split header, buffer is not available */
5039 if (!priv->sph_active)
5040 return 0;
5041
5042 /* For GMAC4, when split header is enabled, in some rare cases, the
5043 * hardware does not fill buf2 of the first descriptor with payload.
5044 * Thus we cannot assume buf2 is always fully filled if it is not
5045 * the last descriptor. Otherwise, the length of buf2 of the second
5046 * descriptor will be calculated wrong and cause an oops.
5047 *
5048 * If this is the last descriptor, 'plen' is the length of the
5049 * received packet that was transferred to system memory.
5050 * Otherwise, it is the accumulated number of bytes that have been
5051 * transferred for the current packet.
5052 *
5053 * Thus 'plen - len' always gives the correct length of buf2.
5054 */
5055
5056 /* Not GMAC4 and not last descriptor */
5057 if (priv->plat->core_type != DWMAC_CORE_GMAC4 && (status & rx_not_ls))
5058 return priv->dma_conf.dma_buf_sz;
5059
5060 /* GMAC4 or last descriptor */
5061 plen = stmmac_get_rx_frame_len(priv, p, coe);
5062
5063 return plen - len;
5064 }
5065
/* Queue a single XDP frame on TX @queue.
 *
 * @dma_map: true when the frame must be DMA-mapped here (ndo_xdp_xmit
 * redirects; also passed true for zero-copy XDP_TX by
 * stmmac_xdp_xmit_back()), false when the frame sits in an already
 * mapped page-pool page that only needs a device sync.
 *
 * Returns STMMAC_XDP_TX on success, or STMMAC_XDP_CONSUMED when the
 * frame is dropped: ring too full, frame exceeding the EST max-SDU for
 * this queue, or DMA mapping failure. Called with the netdev TX queue
 * lock held (see stmmac_xdp_xmit_back()).
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	/* Keep headroom on the ring for the regular xmit path */
	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	/* Enforce the per-queue max SDU when EST (802.1Qbv) is active */
	if (priv->est && priv->est->enable &&
	    priv->est->max_sdu[queue] &&
	    xdpf->len > priv->est->max_sdu[queue]) {
		priv->xstats.max_sdu_txq_drop[queue]++;
		return STMMAC_XDP_CONSUMED;
	}

	/* Pick the descriptor matching the ring layout in use */
	if (likely(priv->extend_desc))
		tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_desc = &tx_q->dma_entx[entry].basic;
	else
		tx_desc = tx_q->dma_tx + entry;

	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		/* Page-pool page is already mapped: derive the payload DMA
		 * address from the frame layout and just sync it out.
		 */
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	tx_q->tx_skbuff_dma[entry].buf = dma_addr;
	tx_q->tx_skbuff_dma[entry].map_as_page = false;
	tx_q->tx_skbuff_dma[entry].len = xdpf->len;
	tx_q->tx_skbuff_dma[entry].last_segment = true;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;

	/* Remember the frame so stmmac_tx_clean() can return it */
	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	/* Single-segment frame: first == last, OWN bit set here */
	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       csum, priv->mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Request a TX-complete interrupt every tx_coal_frames frames */
	if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		u64_stats_update_begin(&txq_stats->q_syncp);
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
		u64_stats_update_end(&txq_stats->q_syncp);
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

	entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
5148
stmmac_xdp_get_tx_queue(struct stmmac_priv * priv,int cpu)5149 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5150 int cpu)
5151 {
5152 int index = cpu;
5153
5154 if (unlikely(index < 0))
5155 index = 0;
5156
5157 while (index >= priv->plat->tx_queues_to_use)
5158 index -= priv->plat->tx_queues_to_use;
5159
5160 return index;
5161 }
5162
/* Handle the XDP_TX verdict: convert @xdp to an xdp_frame and transmit
 * it on the TX queue picked for this CPU.
 *
 * Returns STMMAC_XDP_TX on success, STMMAC_XDP_CONSUMED when dropped,
 * or STMMAC_XSK_CONSUMED when an XSK (zero-copy) buffer was dropped —
 * the latter tells the caller the buffer was already released here.
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	/* Zero-copy path: the buffer belongs to an XSK pool */
	bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	/* Serialize against the regular ndo_start_xmit path on this queue */
	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	/* For zero copy XDP_TX action, dma_map is true */
	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
	if (res == STMMAC_XDP_TX) {
		stmmac_flush_tx_descriptors(priv, queue);
	} else if (res == STMMAC_XDP_CONSUMED && zc) {
		/* xdp has been freed by xdp_convert_buff_to_frame(),
		 * no need to call xsk_buff_free() again, so return
		 * STMMAC_XSK_CONSUMED.
		 */
		res = STMMAC_XSK_CONSUMED;
		xdp_return_frame(xdpf);
	}

	__netif_tx_unlock(nq);

	return res;
}
5200
__stmmac_xdp_run_prog(struct stmmac_priv * priv,struct bpf_prog * prog,struct xdp_buff * xdp)5201 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5202 struct bpf_prog *prog,
5203 struct xdp_buff *xdp)
5204 {
5205 u32 act;
5206 int res;
5207
5208 act = bpf_prog_run_xdp(prog, xdp);
5209 switch (act) {
5210 case XDP_PASS:
5211 res = STMMAC_XDP_PASS;
5212 break;
5213 case XDP_TX:
5214 res = stmmac_xdp_xmit_back(priv, xdp);
5215 break;
5216 case XDP_REDIRECT:
5217 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5218 res = STMMAC_XDP_CONSUMED;
5219 else
5220 res = STMMAC_XDP_REDIRECT;
5221 break;
5222 default:
5223 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5224 fallthrough;
5225 case XDP_ABORTED:
5226 trace_xdp_exception(priv->dev, prog, act);
5227 fallthrough;
5228 case XDP_DROP:
5229 res = STMMAC_XDP_CONSUMED;
5230 break;
5231 }
5232
5233 return res;
5234 }
5235
stmmac_xdp_run_prog(struct stmmac_priv * priv,struct xdp_buff * xdp)5236 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5237 struct xdp_buff *xdp)
5238 {
5239 struct bpf_prog *prog;
5240 int res;
5241
5242 prog = READ_ONCE(priv->xdp_prog);
5243 if (!prog) {
5244 res = STMMAC_XDP_PASS;
5245 goto out;
5246 }
5247
5248 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5249 out:
5250 return ERR_PTR(-res);
5251 }
5252
stmmac_finalize_xdp_rx(struct stmmac_priv * priv,int xdp_status)5253 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5254 int xdp_status)
5255 {
5256 int cpu = smp_processor_id();
5257 int queue;
5258
5259 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5260
5261 if (xdp_status & STMMAC_XDP_TX)
5262 stmmac_tx_timer_arm(priv, queue);
5263
5264 if (xdp_status & STMMAC_XDP_REDIRECT)
5265 xdp_do_flush();
5266 }
5267
stmmac_construct_skb_zc(struct stmmac_channel * ch,struct xdp_buff * xdp)5268 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5269 struct xdp_buff *xdp)
5270 {
5271 unsigned int metasize = xdp->data - xdp->data_meta;
5272 unsigned int datasize = xdp->data_end - xdp->data;
5273 struct sk_buff *skb;
5274
5275 skb = napi_alloc_skb(&ch->rxtx_napi,
5276 xdp->data_end - xdp->data_hard_start);
5277 if (unlikely(!skb))
5278 return NULL;
5279
5280 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5281 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5282 if (metasize)
5283 skb_metadata_set(skb, metasize);
5284
5285 return skb;
5286 }
5287
/* Deliver one zero-copy RX frame to the stack: build an skb from the
 * XSK buffer, apply timestamping, VLAN stripping, checksum offload and
 * RSS hash, then hand it to GRO and account the queue statistics.
 * @p is the frame's descriptor, @np the next descriptor (used for the
 * context-descriptor based RX timestamp lookup).
 */
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->xstats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	if (priv->hw->hw_vlan_en)
		/* MAC level stripping. */
		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
	else
		/* Driver level stripping. */
		stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	/* Trust the HW checksum only when RX COE is on and the frame is
	 * an IP ethertype the engine handles.
	 */
	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
	u64_stats_update_end(&rxq_stats->napi_syncp);
}
5331
/* Refill up to @budget RX descriptors of @queue with fresh XSK pool
 * buffers and hand them back to the DMA.
 *
 * Returns false if the XSK pool ran out of buffers (callers use this
 * to report "failure" and keep NAPI polling), true otherwise.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	budget = min(budget, stmmac_rx_dirty(priv, queue));

	/* Stop at the DMA's current position to never overwrite a
	 * descriptor the hardware may still be using.
	 */
	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		if (priv->extend_desc)
			rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			rx_desc = rx_q->dma_rx + entry;

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		/* No split header in the zero-copy path: buffer 2 unused */
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		/* NOTE(review): same counter scheme as stmmac_rx_refill();
		 * with rx_coal_frames > 0 the counter wraps every iteration
		 * so the RX watchdog ends up requested only when frame
		 * coalescing is disabled — confirm intent.
		 */
		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Publish the descriptor fields before flipping ownership */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}

	/* Only bump the tail pointer if at least one descriptor was armed */
	if (rx_desc) {
		rx_q->dirty_rx = entry;
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->dirty_rx * sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue);
	}

	return ret;
}
5389
/* Recover the driver's per-buffer context from an XSK xdp_buff. */
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	/* In XDP zero copy data path, xdp field in struct xdp_buff_xsk is used
	 * to represent incoming packet, whereas cb field in the same structure
	 * is used to store driver specific info. Thus, struct stmmac_xdp_buff
	 * is laid on top of xdp and cb fields of struct xdp_buff_xsk, so the
	 * cast below is safe by construction.
	 */
	return (struct stmmac_xdp_buff *)xdp;
}
5399
/* Zero-copy (XSK) RX NAPI handler for @queue: walk the ring up to
 * @limit descriptors, run the XDP program on each complete frame and
 * dispatch/recycle the XSK buffers accordingly.
 *
 * Returns the number of frames processed; returns @limit when the XSK
 * pool could not be refilled (so NAPI keeps polling), unless the pool
 * uses the need_wakeup flag, which is updated here instead.
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	u32 rx_errors = 0, rx_dropped = 0;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head;

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		if (priv->extend_desc) {
			rx_head = (void *)rx_q->dma_erx;
			desc_size = sizeof(struct dma_extended_desc);
		} else {
			rx_head = (void *)rx_q->dma_rx;
			desc_size = sizeof(struct dma_desc);
		}

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		struct stmmac_xdp_buff *ctx;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* Resume a frame left half-processed by the previous NAPI
		 * poll (budget ran out mid-frame).
		 */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

read_again:
		if (count >= limit)
			break;

		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Batch refills to amortize the tail-pointer update */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		if (priv->extend_desc)
			p = (struct dma_desc *)(rx_q->dma_erx + entry);
		else
			p = rx_q->dma_rx + entry;

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
						priv->dma_conf.dma_rx_size);
		next_entry = rx_q->cur_rx;

		if (priv->extend_desc)
			np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
		else
			np = rx_q->dma_rx + next_entry;

		prefetch(np);

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				rx_errors++;
		}

		/* On error, drain the remaining descriptors of the frame */
		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		/* Stash descriptor pointers for XDP hints (kfuncs) */
		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
		ctx->priv = priv;
		ctx->desc = p;
		ctx->ndesc = np;

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			fallthrough;
		case STMMAC_XSK_CONSUMED:
			/* Buffer already released by the TX path */
			rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			/* Buffer ownership moved to the TX/redirect path */
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Budget exhausted mid-frame: save state for the next poll */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	/* Cooperate with the AF_XDP application via need_wakeup when the
	 * pool supports it, instead of burning the full budget.
	 */
	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	return failure ? limit : (int)count;
}
5585
5586 /**
5587 * stmmac_rx - manage the receive process
5588 * @priv: driver private structure
5589 * @limit: napi bugget
5590 * @queue: RX queue index.
5591 * Description : this the function called by the napi poll method.
5592 * It gets all the frames inside the ring.
5593 */
stmmac_rx(struct stmmac_priv * priv,int limit,u32 queue)5594 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5595 {
5596 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5597 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5598 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5599 struct stmmac_channel *ch = &priv->channel[queue];
5600 unsigned int count = 0, error = 0, len = 0;
5601 int status = 0, coe = priv->hw->rx_csum;
5602 unsigned int next_entry = rx_q->cur_rx;
5603 enum dma_data_direction dma_dir;
5604 unsigned int desc_size;
5605 struct sk_buff *skb = NULL;
5606 struct stmmac_xdp_buff ctx;
5607 int xdp_status = 0;
5608 int bufsz;
5609
5610 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5611 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5612 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5613
5614 if (netif_msg_rx_status(priv)) {
5615 void *rx_head;
5616
5617 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5618 if (priv->extend_desc) {
5619 rx_head = (void *)rx_q->dma_erx;
5620 desc_size = sizeof(struct dma_extended_desc);
5621 } else {
5622 rx_head = (void *)rx_q->dma_rx;
5623 desc_size = sizeof(struct dma_desc);
5624 }
5625
5626 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5627 rx_q->dma_rx_phy, desc_size);
5628 }
5629 while (count < limit) {
5630 unsigned int buf1_len = 0, buf2_len = 0;
5631 enum pkt_hash_types hash_type;
5632 struct stmmac_rx_buffer *buf;
5633 struct dma_desc *np, *p;
5634 int entry;
5635 u32 hash;
5636
5637 if (!count && rx_q->state_saved) {
5638 skb = rx_q->state.skb;
5639 error = rx_q->state.error;
5640 len = rx_q->state.len;
5641 } else {
5642 rx_q->state_saved = false;
5643 skb = NULL;
5644 error = 0;
5645 len = 0;
5646 }
5647
5648 read_again:
5649 if (count >= limit)
5650 break;
5651
5652 buf1_len = 0;
5653 buf2_len = 0;
5654 entry = next_entry;
5655 buf = &rx_q->buf_pool[entry];
5656
5657 if (priv->extend_desc)
5658 p = (struct dma_desc *)(rx_q->dma_erx + entry);
5659 else
5660 p = rx_q->dma_rx + entry;
5661
5662 /* read the status of the incoming frame */
5663 status = stmmac_rx_status(priv, &priv->xstats, p);
5664 /* check if managed by the DMA otherwise go ahead */
5665 if (unlikely(status & dma_own))
5666 break;
5667
5668 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx,
5669 priv->dma_conf.dma_rx_size);
5670 next_entry = rx_q->cur_rx;
5671
5672 if (priv->extend_desc)
5673 np = (struct dma_desc *)(rx_q->dma_erx + next_entry);
5674 else
5675 np = rx_q->dma_rx + next_entry;
5676
5677 prefetch(np);
5678
5679 if (priv->extend_desc)
5680 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5681 if (unlikely(status == discard_frame)) {
5682 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5683 buf->page = NULL;
5684 error = 1;
5685 if (!priv->hwts_rx_en)
5686 rx_errors++;
5687 }
5688
5689 if (unlikely(error && (status & rx_not_ls)))
5690 goto read_again;
5691 if (unlikely(error)) {
5692 dev_kfree_skb(skb);
5693 skb = NULL;
5694 count++;
5695 continue;
5696 }
5697
5698 /* Buffer is good. Go on. */
5699
5700 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5701 len += buf1_len;
5702 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5703 len += buf2_len;
5704
5705 /* ACS is disabled; strip manually. */
5706 if (likely(!(status & rx_not_ls))) {
5707 if (buf2_len) {
5708 buf2_len -= ETH_FCS_LEN;
5709 len -= ETH_FCS_LEN;
5710 } else if (buf1_len) {
5711 buf1_len -= ETH_FCS_LEN;
5712 len -= ETH_FCS_LEN;
5713 }
5714 }
5715
5716 if (!skb) {
5717 unsigned int pre_len, sync_len;
5718
5719 dma_sync_single_for_cpu(priv->device, buf->addr,
5720 buf1_len, dma_dir);
5721 net_prefetch(page_address(buf->page) +
5722 buf->page_offset);
5723
5724 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5725 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5726 buf->page_offset, buf1_len, true);
5727
5728 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5729 buf->page_offset;
5730
5731 ctx.priv = priv;
5732 ctx.desc = p;
5733 ctx.ndesc = np;
5734
5735 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5736 /* Due xdp_adjust_tail: DMA sync for_device
5737 * cover max len CPU touch
5738 */
5739 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5740 buf->page_offset;
5741 sync_len = max(sync_len, pre_len);
5742
5743 /* For Not XDP_PASS verdict */
5744 if (IS_ERR(skb)) {
5745 unsigned int xdp_res = -PTR_ERR(skb);
5746
5747 if (xdp_res & STMMAC_XDP_CONSUMED) {
5748 page_pool_put_page(rx_q->page_pool,
5749 virt_to_head_page(ctx.xdp.data),
5750 sync_len, true);
5751 buf->page = NULL;
5752 rx_dropped++;
5753
5754 /* Clear skb as it was set as
5755 * status by XDP program.
5756 */
5757 skb = NULL;
5758
5759 if (unlikely((status & rx_not_ls)))
5760 goto read_again;
5761
5762 count++;
5763 continue;
5764 } else if (xdp_res & (STMMAC_XDP_TX |
5765 STMMAC_XDP_REDIRECT)) {
5766 xdp_status |= xdp_res;
5767 buf->page = NULL;
5768 skb = NULL;
5769 count++;
5770 continue;
5771 }
5772 }
5773 }
5774
5775 if (!skb) {
5776 unsigned int head_pad_len;
5777
5778 /* XDP program may expand or reduce tail */
5779 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5780
5781 skb = napi_build_skb(page_address(buf->page),
5782 rx_q->napi_skb_frag_size);
5783 if (!skb) {
5784 page_pool_recycle_direct(rx_q->page_pool,
5785 buf->page);
5786 rx_dropped++;
5787 count++;
5788 goto drain_data;
5789 }
5790
5791 /* XDP program may adjust header */
5792 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5793 skb_reserve(skb, head_pad_len);
5794 skb_put(skb, buf1_len);
5795 skb_mark_for_recycle(skb);
5796 buf->page = NULL;
5797 } else if (buf1_len) {
5798 dma_sync_single_for_cpu(priv->device, buf->addr,
5799 buf1_len, dma_dir);
5800 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5801 buf->page, buf->page_offset, buf1_len,
5802 priv->dma_conf.dma_buf_sz);
5803 buf->page = NULL;
5804 }
5805
5806 if (buf2_len) {
5807 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5808 buf2_len, dma_dir);
5809 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5810 buf->sec_page, 0, buf2_len,
5811 priv->dma_conf.dma_buf_sz);
5812 buf->sec_page = NULL;
5813 }
5814
5815 drain_data:
5816 if (likely(status & rx_not_ls))
5817 goto read_again;
5818 if (!skb)
5819 continue;
5820
5821 /* Got entire packet into SKB. Finish it. */
5822
5823 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5824
5825 if (priv->hw->hw_vlan_en)
5826 /* MAC level stripping. */
5827 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5828 else
5829 /* Driver level stripping. */
5830 stmmac_rx_vlan(priv->dev, skb);
5831
5832 skb->protocol = eth_type_trans(skb, priv->dev);
5833
5834 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5835 (status & csum_none))
5836 skb_checksum_none_assert(skb);
5837 else
5838 skb->ip_summed = CHECKSUM_UNNECESSARY;
5839
5840 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5841 skb_set_hash(skb, hash, hash_type);
5842
5843 skb_record_rx_queue(skb, queue);
5844 napi_gro_receive(&ch->rx_napi, skb);
5845 skb = NULL;
5846
5847 rx_packets++;
5848 rx_bytes += len;
5849 count++;
5850 }
5851
5852 if (status & rx_not_ls || skb) {
5853 rx_q->state_saved = true;
5854 rx_q->state.skb = skb;
5855 rx_q->state.error = error;
5856 rx_q->state.len = len;
5857 }
5858
5859 stmmac_finalize_xdp_rx(priv, xdp_status);
5860
5861 stmmac_rx_refill(priv, queue);
5862
5863 u64_stats_update_begin(&rxq_stats->napi_syncp);
5864 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5865 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5866 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5867 u64_stats_update_end(&rxq_stats->napi_syncp);
5868
5869 priv->xstats.rx_dropped += rx_dropped;
5870 priv->xstats.rx_errors += rx_errors;
5871
5872 return count;
5873 }
5874
stmmac_napi_poll_rx(struct napi_struct * napi,int budget)5875 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5876 {
5877 struct stmmac_channel *ch =
5878 container_of(napi, struct stmmac_channel, rx_napi);
5879 struct stmmac_priv *priv = ch->priv_data;
5880 struct stmmac_rxq_stats *rxq_stats;
5881 u32 chan = ch->index;
5882 int work_done;
5883
5884 rxq_stats = &priv->xstats.rxq_stats[chan];
5885 u64_stats_update_begin(&rxq_stats->napi_syncp);
5886 u64_stats_inc(&rxq_stats->napi.poll);
5887 u64_stats_update_end(&rxq_stats->napi_syncp);
5888
5889 work_done = stmmac_rx(priv, budget, chan);
5890 if (work_done < budget && napi_complete_done(napi, work_done)) {
5891 unsigned long flags;
5892
5893 spin_lock_irqsave(&ch->lock, flags);
5894 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5895 spin_unlock_irqrestore(&ch->lock, flags);
5896 }
5897
5898 return work_done;
5899 }
5900
stmmac_napi_poll_tx(struct napi_struct * napi,int budget)5901 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5902 {
5903 struct stmmac_channel *ch =
5904 container_of(napi, struct stmmac_channel, tx_napi);
5905 struct stmmac_priv *priv = ch->priv_data;
5906 struct stmmac_txq_stats *txq_stats;
5907 bool pending_packets = false;
5908 u32 chan = ch->index;
5909 int work_done;
5910
5911 txq_stats = &priv->xstats.txq_stats[chan];
5912 u64_stats_update_begin(&txq_stats->napi_syncp);
5913 u64_stats_inc(&txq_stats->napi.poll);
5914 u64_stats_update_end(&txq_stats->napi_syncp);
5915
5916 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5917 work_done = min(work_done, budget);
5918
5919 if (work_done < budget && napi_complete_done(napi, work_done)) {
5920 unsigned long flags;
5921
5922 spin_lock_irqsave(&ch->lock, flags);
5923 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5924 spin_unlock_irqrestore(&ch->lock, flags);
5925 }
5926
5927 /* TX still have packet to handle, check if we need to arm tx timer */
5928 if (pending_packets)
5929 stmmac_tx_timer_arm(priv, chan);
5930
5931 return work_done;
5932 }
5933
/* Combined RX/TX NAPI poll handler used for XDP zero-copy channels:
 * reclaims TX descriptors, processes zero-copy RX, and only exits
 * polling mode once both directions are fully drained.
 */
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	bool tx_pending_packets = false;
	int rx_done, tx_done, rxtx_done;
	struct stmmac_rxq_stats *rxq_stats;
	struct stmmac_txq_stats *txq_stats;
	u32 chan = ch->index;

	/* Count this poll in both the RX and TX per-queue statistics */
	rxq_stats = &priv->xstats.rxq_stats[chan];
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	txq_stats = &priv->xstats.txq_stats[chan];
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	/* stmmac_tx_clean() may report more completions than @budget;
	 * clamp before combining it with the RX count.
	 */
	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep polling
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work done are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still have packet to handle, check if we need to arm tx timer */
	if (tx_pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	/* napi_complete_done() was already called above, so never return
	 * the full budget here: NAPI treats work_done == budget as "keep
	 * polling".
	 */
	return min(rxtx_done, budget - 1);
}
5986
5987 /**
5988 * stmmac_tx_timeout
5989 * @dev : Pointer to net device structure
5990 * @txqueue: the index of the hanging transmit queue
5991 * Description: this function is called when a packet transmission fails to
5992 * complete within a reasonable time. The driver will mark the error in the
5993 * netdev structure and arrange for the device to be reset to a sane state
5994 * in order to transmit a new packet.
5995 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* @txqueue is unused: recovery resets the whole device (see the
	 * kernel-doc above), not a single queue.
	 */
	stmmac_global_err(priv);
}
6002
6003 /**
6004 * stmmac_set_rx_mode - entry point for multicast addressing
6005 * @dev : pointer to the device structure
6006 * Description:
6007 * This function is a driver entry point which gets called by the kernel
6008 * whenever multicast addresses must be enabled/disabled.
6009 * Return value:
6010 * void.
6011 *
6012 * FIXME: This may need RXC to be running, but it may be called with BH
6013 * disabled, which means we can't call phylink_rx_clk_stop*().
6014 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Program the MAC address filtering from the netdev's current
	 * address lists and flags.
	 */
	stmmac_set_filter(priv, priv->hw, dev);
}
6021
6022 /**
6023 * stmmac_change_mtu - entry point to change MTU size for the device.
6024 * @dev : device pointer.
6025 * @new_mtu : the new MTU size for the device.
6026 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
6027 * to drive packet transmission. Ethernet has an MTU of 1500 octets
6028 * (ETH_DATA_LEN). This value can be changed with ifconfig.
6029 * Return value:
6030 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6031 * file on failure.
6032 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	/* Keep the caller's unaligned MTU: this is what gets written to
	 * dev->mtu and passed to stmmac_setup_dma_desc(), while the
	 * aligned copy below is only used for the FIFO size checks.
	 */
	const int mtu = new_mtu;
	int ret;

	/* Fall back to the HW-reported FIFO size if the platform did not
	 * provide one.
	 */
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* The TX FIFO is split between the TX queues in use */
	txfifosz /= priv->plat->tx_queues_to_use;

	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		/* Only tear down the running interface once the new DMA
		 * configuration has been allocated successfully.
		 */
		__stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		/* __stmmac_open() took over the descriptor resources; only
		 * the container itself is freed here.
		 */
		kfree(dma_conf);

		stmmac_set_rx_mode(dev);
	}

	WRITE_ONCE(dev->mtu, mtu);
	netdev_update_features(dev);

	return 0;
}
6087
/* Constrain the requested netdev features to what the hardware and
 * platform configuration actually support, and record the resulting
 * TSO state.
 */
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Without an RX checksum engine, RXCSUM cannot be offered */
	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	/* Without a TX checksum engine, drop every TX csum offload */
	if (!priv->plat->tx_coe)
		features &= ~NETIF_F_CSUM_MASK;

	/* Some GMAC devices have a bugged Jumbo frame support that
	 * needs to have the Tx COE disabled for oversized frames
	 * (due to limited buffer sizes). In this case we disable
	 * the TX csum insertion in the TDES and not use SF.
	 */
	if (priv->plat->bugged_jumbo && dev->mtu > ETH_DATA_LEN)
		features &= ~NETIF_F_CSUM_MASK;

	/* Track the TSO state requested via ethtool, but only when both
	 * the platform and the DMA capabilities allow TSO at all.
	 */
	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && priv->dma_cap.tsoen)
		priv->tso = !!(features & NETIF_F_TSO);

	return features;
}
6117
/* Apply the new feature set to the hardware: RX checksum engine,
 * split-header on every RX channel, and MAC-level VLAN stripping.
 */
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	priv->hw->rx_csum = (features & NETIF_F_RXCSUM) ?
			    priv->plat->rx_coe : 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_capable) {
		/* Split header only makes sense with RX checksum active */
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
		u32 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* Select MAC-level vs driver-level VLAN tag stripping */
	priv->hw->hw_vlan_en = !!(features & NETIF_F_HW_VLAN_CTAG_RX);

	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_hw_vlan_mode(priv, priv->hw);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}
6152
/* Handle the MAC-level (non-DMA) interrupt sources: wakeup accounting,
 * EST and FPE events, LPI entry/exit tracking, per-queue MTL status and
 * the timestamp interrupt.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 queues_count;
	u32 queue;
	bool xmac;

	xmac = dwmac_is_xmac(priv->plat->core_type);
	/* Walk enough MTL queues to cover both directions */
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv, priv->dev,
				      &priv->xstats, tx_cnt);

	if (stmmac_fpe_supported(priv))
		stmmac_fpe_irq_status(priv);

	/* To handle GMAC own interrupts */
	if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
		int status = stmmac_host_irq_status(priv, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++)
			stmmac_host_mtl_irq_status(priv, priv->hw, queue);

		stmmac_timestamp_interrupt(priv, priv);
	}
}
6192
6193 /**
6194 * stmmac_interrupt - main ISR
6195 * @irq: interrupt number.
6196 * @dev_id: to pass the net device pointer.
6197 * Description: this is the main driver interrupt service routine.
6198 * It can call:
6199 * o DMA service routine (to manage incoming frame reception and transmission
6200 * status)
6201 * o Core interrupts to manage: remote wake-up, management counter, LPI
6202 * interrupts.
6203 */
static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Ignore spurious interrupts while the adapter is down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check ASP error if it isn't delivered via an individual IRQ;
	 * a detected safety event short-circuits the rest of the handler.
	 */
	if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
		return IRQ_HANDLED;

	/* To handle Common (MAC-level) interrupts */
	stmmac_common_interrupt(priv);

	/* To handle DMA interrupts */
	stmmac_dma_interrupt(priv);

	return IRQ_HANDLED;
}
6225
/* Dedicated IRQ handler for the MAC-level interrupt line, used when the
 * platform wires MAC interrupts separately from the DMA ones.
 */
static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Ignore spurious interrupts while the adapter is down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* To handle Common interrupts */
	stmmac_common_interrupt(priv);

	return IRQ_HANDLED;
}
6240
/* Dedicated IRQ handler for the safety-feature (ASP) interrupt line. */
static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *)dev_id;
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Ignore spurious interrupts while the adapter is down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	/* Check if a fatal error happened */
	stmmac_safety_feat_interrupt(priv);

	return IRQ_HANDLED;
}
6255
/* Per-TX-queue MSI handler: @data is the TX queue itself, so the owning
 * stmmac_priv is recovered via two nested container_of() steps.
 */
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = tx_q->queue_index;
	struct stmmac_priv *priv;
	int status;

	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	/* Ignore spurious interrupts while the adapter is down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);

	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		stmmac_bump_dma_threshold(priv, chan);
	} else if (unlikely(status == tx_hard_error)) {
		stmmac_tx_err(priv, chan);
	}

	return IRQ_HANDLED;
}
6282
/* Per-RX-queue MSI handler: @data is the RX queue itself, so the owning
 * stmmac_priv is recovered via two nested container_of() steps.
 */
static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
{
	struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = rx_q->queue_index;
	struct stmmac_priv *priv;

	dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	/* Ignore spurious interrupts while the adapter is down */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	stmmac_napi_check(priv, chan, DMA_DIR_RX);

	return IRQ_HANDLED;
}
6301
6302 /**
6303 * stmmac_ioctl - Entry point for the Ioctl
6304 * @dev: Device pointer.
6305 * @rq: An IOCTL specific structure, that can contain a pointer to
6306 * a proprietary structure used to pass information to the driver.
6307 * @cmd: IOCTL command
6308 * Description:
6309 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6310 */
stmmac_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)6311 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6312 {
6313 struct stmmac_priv *priv = netdev_priv (dev);
6314 int ret = -EOPNOTSUPP;
6315
6316 if (!netif_running(dev))
6317 return -EINVAL;
6318
6319 switch (cmd) {
6320 case SIOCGMIIPHY:
6321 case SIOCGMIIREG:
6322 case SIOCSMIIREG:
6323 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6324 break;
6325 default:
6326 break;
6327 }
6328
6329 return ret;
6330 }
6331
stmmac_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)6332 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6333 void *cb_priv)
6334 {
6335 struct stmmac_priv *priv = cb_priv;
6336 int ret = -EOPNOTSUPP;
6337
6338 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6339 return ret;
6340
6341 __stmmac_disable_all_queues(priv);
6342
6343 switch (type) {
6344 case TC_SETUP_CLSU32:
6345 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6346 break;
6347 case TC_SETUP_CLSFLOWER:
6348 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6349 break;
6350 default:
6351 break;
6352 }
6353
6354 stmmac_enable_all_queues(priv);
6355 return ret;
6356 }
6357
/* List of flow block callbacks registered via flow_block_cb_setup_simple() */
static LIST_HEAD(stmmac_block_cb_list);
6359
/* ndo_setup_tc entry point: dispatch each tc offload type to its
 * dedicated stmmac_tc_* implementation.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_QUERY_CAPS:
		return stmmac_tc_query_caps(priv, priv, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return stmmac_tc_setup_mqprio(priv, priv, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
6385
stmmac_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)6386 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6387 struct net_device *sb_dev)
6388 {
6389 int gso = skb_shinfo(skb)->gso_type;
6390
6391 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6392 /*
6393 * There is no way to determine the number of TSO/USO
6394 * capable Queues. Let's use always the Queue 0
6395 * because if TSO/USO is supported then at least this
6396 * one will be capable.
6397 */
6398 return 0;
6399 }
6400
6401 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6402 }
6403
/* ndo_set_mac_address: update the netdev address and mirror it into the
 * MAC's unicast address register 0. The device is runtime-resumed for
 * the register write.
 */
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
	phylink_rx_clk_stop_unblock(priv->phylink);

	/* Success also falls through here: only the runtime PM reference
	 * needs releasing on both paths.
	 */
set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
6426
6427 #ifdef CONFIG_DEBUG_FS
/* Root debugfs directory shared by all stmmac devices */
static struct dentry *stmmac_fs_dir;
6429
/* Dump a descriptor ring to @seq, one line per descriptor, printing the
 * DMA address followed by the four descriptor words. @extend_desc
 * selects extended vs basic descriptor layout for the stride and the
 * location of the basic words.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	struct dma_extended_desc *ep = head;
	struct dma_desc *dp = head;
	unsigned int desc_size = extend_desc ? sizeof(*ep) : sizeof(*dp);
	int i;

	for (i = 0; i < size; i++) {
		/* In extended rings the basic words live at the head of
		 * each extended descriptor.
		 */
		struct dma_desc *p = extend_desc ? &ep[i].basic : &dp[i];
		dma_addr_t dma_addr = dma_phy_addr + i * desc_size;

		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
			   i, &dma_addr,
			   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			   le32_to_cpu(p->des2), le32_to_cpu(p->des3));
	}
}
6452
/* debugfs "descriptors_status": dump every RX and TX descriptor ring of
 * a running interface. TBS-enabled TX queues are skipped since their
 * basic ring is not used.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_count = priv->plat->rx_queues_to_use;
	u32 tx_count = priv->plat->tx_queues_to_use;
	u32 queue;

	/* Rings only exist while the interface is up */
	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
6498 DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6499
/* debugfs "dma_cap": pretty-print the parsed DMA HW feature registers
 * (priv->dma_cap). Several fields are encoded logarithmically in HW and
 * are expanded here via BIT()/shift arithmetic.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	/* The multi_addr field means a count on XGMAC, a flag elsewhere */
	if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* Older cores report RX COE as two typed flags instead of one */
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->core_type == DWMAC_CORE_XGMAC) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %d\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %d\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
	return 0;
}
6672 DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6673
6674 /* Use network device events to rename debugfs file entries.
6675 */
stmmac_device_event(struct notifier_block * unused,unsigned long event,void * ptr)6676 static int stmmac_device_event(struct notifier_block *unused,
6677 unsigned long event, void *ptr)
6678 {
6679 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
6680 struct stmmac_priv *priv = netdev_priv(dev);
6681
6682 if (dev->netdev_ops != &stmmac_netdev_ops)
6683 goto done;
6684
6685 switch (event) {
6686 case NETDEV_CHANGENAME:
6687 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
6688 break;
6689 }
6690 done:
6691 return NOTIFY_DONE;
6692 }
6693
/* Netdev notifier used to keep debugfs entry names in sync (see the
 * comment above stmmac_device_event()).
 */
static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
6697
/* Create the per-device debugfs directory and its entries. */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Hold RTNL so dev->name stays stable while the entries are
	 * created (renames are handled by stmmac_device_event()).
	 */
	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
6717
/* Remove the per-device debugfs directory and everything under it. */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
6724 #endif /* CONFIG_DEBUG_FS */
6725
stmmac_vid_crc32_le(__le16 vid_le)6726 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6727 {
6728 unsigned char *data = (unsigned char *)&vid_le;
6729 unsigned char data_byte = 0;
6730 u32 crc = ~0x0;
6731 u32 temp = 0;
6732 int i, bits;
6733
6734 bits = get_bitmask_order(VLAN_VID_MASK);
6735 for (i = 0; i < bits; i++) {
6736 if ((i % 8) == 0)
6737 data_byte = data[i / 8];
6738
6739 temp = ((crc & 1) ^ data_byte) & 1;
6740 crc >>= 1;
6741 data_byte >>= 1;
6742
6743 if (temp)
6744 crc ^= 0xedb88320;
6745 }
6746
6747 return crc;
6748 }
6749
/* Recompute the VLAN filter configuration from priv->active_vlans and
 * program it into the hardware. Without VLAN hash support, only a
 * single VID (besides VID 0, which always passes) can be matched via
 * the perfect-match register; more than that is rejected.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	u16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	/* Fold every active VID into the hash filter bitmap: the CRC is
	 * bit-reversed and the top 4 bits select one of 16 hash bits.
	 */
	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* vid still holds the last VID visited by the loop above */
		pmatch = vid;
		hash = 0;
	}

	/* Defer the register write until the interface is running */
	if (!netif_running(priv->dev))
		return 0;

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
6777
6778 /* FIXME: This may need RXC to be running, but it may be called with BH
6779 * disabled, which means we can't call phylink_rx_clk_stop*().
6780 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int num_double_vlans;
	bool is_double = false;
	int ret;

	/* The filter registers need the device runtime-resumed */
	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	/* Tentatively add the VID, then roll back on any HW failure so
	 * active_vlans and num_double_vlans stay consistent with HW.
	 */
	set_bit(vid, priv->active_vlans);
	num_double_vlans = priv->num_double_vlans + is_double;
	ret = stmmac_vlan_update(priv, num_double_vlans);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret) {
			/* Undo the bit and reprogram the previous state */
			clear_bit(vid, priv->active_vlans);
			stmmac_vlan_update(priv, priv->num_double_vlans);
			goto err_pm_put;
		}
	}

	/* Commit the new double-tagged VLAN count only on full success */
	priv->num_double_vlans = num_double_vlans;

err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
6819
6820 /* FIXME: This may need RXC to be running, but it may be called with BH
6821 * disabled, which means we can't call phylink_rx_clk_stop*().
6822 */
/* ndo_vlan_rx_kill_vid: remove a VLAN from the RX filter.
 *
 * Mirror image of stmmac_vlan_rx_add_vid(): clear the bitmap bit, update
 * the hash filter, then the perfect-match filter, restoring the previous
 * state on any hardware failure.
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int num_double_vlans;
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* 802.1ad (QinQ) outer tags count as "double" VLANs */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);
	/* Tentative count, only committed once both filter updates succeed */
	num_double_vlans = priv->num_double_vlans - is_double;
	ret = stmmac_vlan_update(priv, num_double_vlans);
	if (ret) {
		/* Hash update failed: re-add the VID to the bitmap */
		set_bit(vid, priv->active_vlans);
		goto del_vlan_error;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret) {
			/* Roll back bitmap and hash to the previous state */
			set_bit(vid, priv->active_vlans);
			stmmac_vlan_update(priv, priv->num_double_vlans);
			goto del_vlan_error;
		}
	}

	priv->num_double_vlans = num_double_vlans;

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
6861
stmmac_vlan_restore(struct stmmac_priv * priv)6862 static void stmmac_vlan_restore(struct stmmac_priv *priv)
6863 {
6864 if (!(priv->dev->features & NETIF_F_VLAN_FEATURES))
6865 return;
6866
6867 if (priv->hw->num_vlan)
6868 stmmac_restore_hw_vlan_rx_fltr(priv, priv->dev, priv->hw);
6869
6870 stmmac_vlan_update(priv, priv->num_double_vlans);
6871 }
6872
stmmac_bpf(struct net_device * dev,struct netdev_bpf * bpf)6873 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6874 {
6875 struct stmmac_priv *priv = netdev_priv(dev);
6876
6877 switch (bpf->command) {
6878 case XDP_SETUP_PROG:
6879 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6880 case XDP_SETUP_XSK_POOL:
6881 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6882 bpf->xsk.queue_id);
6883 default:
6884 return -EOPNOTSUPP;
6885 }
6886 }
6887
/* ndo_xdp_xmit: transmit a batch of XDP frames.
 *
 * The TX queue is shared with the regular stack path, so the netdev TX
 * queue lock is held for the whole batch.  Returns the number of frames
 * queued; frames past the first descriptor-ring-full failure are left to
 * the caller.
 */
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	/* Pick a TX queue based on the current CPU */
	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Kick the DMA and (re)arm the TX coalescing timer */
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
6929
/* Quiesce and tear down one RX queue: mask its RX DMA interrupt (under
 * the channel lock), stop the RX DMA channel, then free the descriptor
 * ring and buffers.
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
6942
/* (Re)build and start one RX queue: allocate and initialize its
 * descriptor ring, program the DMA channel base/tail pointers and buffer
 * size (XSK frame size when a pool is attached), start RX DMA and
 * finally unmask the RX interrupt.  Failures are logged and leave the
 * queue disabled.
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	u32 buf_size;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, rx_q->queue_index);

	/* Tail pointer sits just past the buffers already allocated */
	rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num *
						 sizeof(struct dma_desc));
	stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
			       rx_q->rx_tail_addr, rx_q->queue_index);

	if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
		/* AF_XDP zero-copy: DMA buffer size comes from the pool */
		buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      buf_size,
				      rx_q->queue_index);
	} else {
		stmmac_set_dma_bfsize(priv, priv->ioaddr,
				      priv->dma_conf.dma_buf_sz,
				      rx_q->queue_index);
	}

	stmmac_start_rx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
6992
/* Quiesce and tear down one TX queue: mask its TX DMA interrupt (under
 * the channel lock), stop the TX DMA channel, then free the descriptor
 * ring and buffers.
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
7005
/* (Re)build and start one TX queue: allocate and initialize its
 * descriptor ring, program the DMA channel, re-enable TBS if available
 * on this queue, start TX DMA and unmask the TX interrupt.  Failures are
 * logged and leave the queue disabled.
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, tx_q->queue_index);

	/* Re-enable time-based scheduling if this queue supports it */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index);

	tx_q->tx_tail_addr = tx_q->dma_tx_phy;
	stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
			       tx_q->tx_tail_addr, tx_q->queue_index);

	stmmac_start_tx_dma(priv, queue);

	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
7045
/* Full datapath teardown used when reconfiguring XDP: stop the stack's
 * TX path and NAPI first, then IRQs and DMA, and only then release the
 * descriptor resources and disable the MAC.  Counterpart of
 * stmmac_xdp_open().
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	/* No TX coalescing timers may fire after this point */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
7078
/* Bring the datapath back up after an XDP reconfiguration: allocate and
 * initialize descriptor rings, program every DMA channel, enable the MAC
 * and DMA, request IRQs, and finally re-enable NAPI and the stack TX
 * path.  Counterpart of stmmac_xdp_release(); errors unwind in reverse
 * order via the labels at the bottom.
 */
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	u32 buf_size;
	bool sph_en;
	u32 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	stmmac_reset_queues_param(priv);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		/* Keep per-channel IRQs masked until everything is ready */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		/* Tail pointer sits just past the buffers already allocated */
		rx_q->rx_tail_addr = rx_q->dma_rx_phy +
				     (rx_q->buf_alloc_num *
				      sizeof(struct dma_desc));
		stmmac_set_rx_tail_ptr(priv, priv->ioaddr,
				       rx_q->rx_tail_addr, chan);

		if (rx_q->xsk_pool && rx_q->buf_alloc_num) {
			/* AF_XDP zero-copy: buffer size comes from the pool */
			buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      buf_size,
					      rx_q->queue_index);
		} else {
			stmmac_set_dma_bfsize(priv, priv->ioaddr,
					      priv->dma_conf.dma_buf_sz,
					      rx_q->queue_index);
		}

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		tx_q->tx_tail_addr = tx_q->dma_tx_phy;
		stmmac_set_tx_tail_ptr(priv, priv->ioaddr,
				       tx_q->tx_tail_addr, chan);

		/* TX coalescing timer, armed from the transmit path */
		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}
7185
/* ndo_xsk_wakeup: kick the rx/tx NAPI for an AF_XDP queue so userspace
 * submissions/completions get processed.  Validates that the device is
 * up, XDP is enabled, the queue index is in range for both directions,
 * and an XSK pool is actually attached.
 */
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
7221
/* ndo_get_stats64: aggregate per-queue packet/byte counters and the
 * driver-wide error counters into @stats.  The per-queue counters are
 * read under u64_stats seqcount retry loops so 64-bit values are
 * consistent on 32-bit hosts; TX bytes and TX packets live behind
 * different syncps (xmit path vs NAPI completion) and are read
 * separately.
 */
static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u32 tx_cnt = priv->plat->tx_queues_to_use;
	u32 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	int q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 tx_packets;
		u64 tx_bytes;

		do {
			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
			tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 rx_packets;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
			rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}

	/* Global (non-per-queue) counters maintained by the driver */
	stats->rx_dropped = priv->xstats.rx_dropped;
	stats->rx_errors = priv->xstats.rx_errors;
	stats->tx_dropped = priv->xstats.tx_dropped;
	stats->tx_errors = priv->xstats.tx_errors;
	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
	stats->rx_length_errors = priv->xstats.rx_length;
	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
}
7274
/* net_device callbacks for stmmac interfaces, including the XDP/XSK
 * entry points and hardware timestamping get/set.
 */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_get_stats64 = stmmac_get_stats64,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
7297
/* Service-task handler for a requested adapter reset: if a reset was
 * requested and the interface is not already going down, close and
 * reopen the device under rtnl.  STMMAC_RESETING serializes concurrent
 * reset attempts; STMMAC_DOWN keeps other paths from racing the
 * close/open cycle.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Wait for any in-flight reset to finish before starting ours */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
7319
/* Workqueue entry point for deferred driver maintenance; currently only
 * runs the reset subtask, then clears the "scheduled" flag so the task
 * can be queued again.
 */
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
7328
stmmac_print_actphyif(struct stmmac_priv * priv)7329 static void stmmac_print_actphyif(struct stmmac_priv *priv)
7330 {
7331 const char **phyif_table;
7332 const char *actphyif_str;
7333 size_t phyif_table_size;
7334
7335 switch (priv->plat->core_type) {
7336 case DWMAC_CORE_MAC100:
7337 return;
7338
7339 case DWMAC_CORE_GMAC:
7340 case DWMAC_CORE_GMAC4:
7341 phyif_table = stmmac_dwmac_actphyif;
7342 phyif_table_size = ARRAY_SIZE(stmmac_dwmac_actphyif);
7343 break;
7344
7345 case DWMAC_CORE_XGMAC:
7346 phyif_table = stmmac_dwxgmac_phyif;
7347 phyif_table_size = ARRAY_SIZE(stmmac_dwxgmac_phyif);
7348 break;
7349 }
7350
7351 if (priv->dma_cap.actphyif < phyif_table_size)
7352 actphyif_str = phyif_table[priv->dma_cap.actphyif];
7353 else
7354 actphyif_str = NULL;
7355
7356 if (!actphyif_str)
7357 actphyif_str = "unknown";
7358
7359 dev_info(priv->device, "Active PHY interface: %s (%u)\n",
7360 actphyif_str, priv->dma_cap.actphyif);
7361 }
7362
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 * Return: 0 on success, a negative errno from HWIF init, HW quirks or
 * PCS init on failure.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		if (priv->dma_cap.hash_tb_sz) {
			/* hash_tb_sz encodes the table size as 2^(sz+5) bins */
			priv->hw->multicast_filter_bins =
					(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
					ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = 0;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

		stmmac_print_actphyif(priv);
	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
	}

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	/* Clamp the configured queue counts and FIFO sizes to what the
	 * hardware actually implements, warning when the platform asked
	 * for more.
	 */
	if (priv->dma_cap.number_rx_queues &&
	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
		dev_warn(priv->device,
			 "Number of Rx queues (%u) exceeds dma capability\n",
			 priv->plat->rx_queues_to_use);
		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
	}
	if (priv->dma_cap.number_tx_queues &&
	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
		dev_warn(priv->device,
			 "Number of Tx queues (%u) exceeds dma capability\n",
			 priv->plat->tx_queues_to_use);
		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
	}

	if (priv->dma_cap.rx_fifo_size &&
	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
		dev_warn(priv->device,
			 "Rx FIFO size (%u) exceeds dma capability\n",
			 priv->plat->rx_fifo_size);
		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
	}
	if (priv->dma_cap.tx_fifo_size &&
	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
		dev_warn(priv->device,
			 "Tx FIFO size (%u) exceeds dma capability\n",
			 priv->plat->tx_fifo_size);
		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
	}

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
	     priv->plat->core_type == DWMAC_CORE_XGMAC) &&
	    !priv->plat->riwt_off) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	/* Unimplemented PCS init (as indicated by stmmac_do_callback()
	 * perversely returning -EINVAL) is non-fatal.
	 */
	ret = stmmac_mac_pcs_init(priv);
	if (ret != -EINVAL)
		return ret;

	return 0;
}
7505
stmmac_napi_add(struct net_device * dev)7506 static void stmmac_napi_add(struct net_device *dev)
7507 {
7508 struct stmmac_priv *priv = netdev_priv(dev);
7509 u32 queue, maxq;
7510
7511 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7512
7513 for (queue = 0; queue < maxq; queue++) {
7514 struct stmmac_channel *ch = &priv->channel[queue];
7515
7516 ch->priv_data = priv;
7517 ch->index = queue;
7518 spin_lock_init(&ch->lock);
7519
7520 if (queue < priv->plat->rx_queues_to_use) {
7521 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7522 }
7523 if (queue < priv->plat->tx_queues_to_use) {
7524 netif_napi_add_tx(dev, &ch->tx_napi,
7525 stmmac_napi_poll_tx);
7526 }
7527 if (queue < priv->plat->rx_queues_to_use &&
7528 queue < priv->plat->tx_queues_to_use) {
7529 netif_napi_add(dev, &ch->rxtx_napi,
7530 stmmac_napi_poll_rxtx);
7531 }
7532 }
7533 }
7534
stmmac_napi_del(struct net_device * dev)7535 static void stmmac_napi_del(struct net_device *dev)
7536 {
7537 struct stmmac_priv *priv = netdev_priv(dev);
7538 u32 queue, maxq;
7539
7540 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7541
7542 for (queue = 0; queue < maxq; queue++) {
7543 struct stmmac_channel *ch = &priv->channel[queue];
7544
7545 if (queue < priv->plat->rx_queues_to_use)
7546 netif_napi_del(&ch->rx_napi);
7547 if (queue < priv->plat->tx_queues_to_use)
7548 netif_napi_del(&ch->tx_napi);
7549 if (queue < priv->plat->rx_queues_to_use &&
7550 queue < priv->plat->tx_queues_to_use) {
7551 netif_napi_del(&ch->rxtx_napi);
7552 }
7553 }
7554 }
7555
/* Change the number of active RX/TX queues (ethtool channels).  The
 * interface is closed if running, the NAPI contexts are re-registered
 * for the new counts, the RSS indirection table is re-spread over the
 * new RX queue count (unless userspace configured it explicitly), and
 * the interface is reopened.
 */
int stmmac_reinit_queues(struct net_device *dev, u32 rx_cnt, u32 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0, i;

	if (netif_running(dev))
		stmmac_release(dev);

	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
7580
/* Change the RX/TX descriptor ring sizes (ethtool ringparam), bouncing
 * the interface if it is currently running so the rings are rebuilt.
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	bool was_running = netif_running(dev);

	if (was_running)
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	return was_running ? stmmac_open(dev) : 0;
}
7597
/* XDP metadata op: extract the RX hardware timestamp for the current
 * frame.  Returns -ENODATA when RX timestamping is disabled or the
 * descriptor carries no valid timestamp.  The clock-domain-crossing
 * correction (cdc_error_adj) is subtracted before conversion.
 */
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct dma_desc *desc_contains_ts = ctx->desc;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc = ctx->desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (dwmac_is_xmac(priv->plat->core_type))
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
		ns -= priv->plat->cdc_error_adj;
		*timestamp = ns_to_ktime(ns);
		return 0;
	}

	return -ENODATA;
}
7624
/* XDP metadata kfuncs exposed to BPF programs (RX timestamp only) */
static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
	.xmo_rx_timestamp		= stmmac_xdp_rx_timestamp,
};
7628
/* devlink runtime setter for the "phc_coarse_adj" boolean parameter.
 * Toggles the PTP timestamping between fine (TSCFUPDT set) and coarse
 * update mode, then reprograms the subsecond increment/addend to match.
 */
static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
	struct stmmac_priv *priv = dl_priv->stmmac_priv;

	priv->tsfupdt_coarse = ctx->val.vbool;

	if (priv->tsfupdt_coarse)
		priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
	else
		priv->systime_flags |= PTP_TCR_TSCFUPDT;

	/* In Coarse mode, we can use a smaller subsecond increment, let's
	 * reconfigure the systime, subsecond increment and addend.
	 */
	stmmac_update_subsecond_increment(priv);

	return 0;
}
7650
/* devlink runtime getter for the "phc_coarse_adj" boolean parameter. */
static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);

	ctx->val.vbool = dl_priv->stmmac_priv->tsfupdt_coarse;

	return 0;
}
7662
/* Driver-specific devlink parameters: "phc_coarse_adj" selects coarse
 * vs fine PTP clock adjustment at runtime.
 */
static const struct devlink_param stmmac_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     stmmac_dl_ts_coarse_get,
			     stmmac_dl_ts_coarse_set, NULL),
};
7670
/* None of the generic devlink operations are implemented; the driver
 * only exposes the parameter table registered alongside.
 */
static const struct devlink_ops stmmac_devlink_ops = {};
7673
/* Allocate and register the devlink instance and its parameters.
 * Silently skipped (returns 0 with priv->devlink left NULL) when the
 * hardware has no timestamping support or no PTP clock rate, since the
 * exposed parameters would be meaningless.  On parameter-registration
 * failure the devlink allocation is released.
 */
static int stmmac_register_devlink(struct stmmac_priv *priv)
{
	struct stmmac_devlink_priv *dl_priv;
	int ret;

	/* For now, what is exposed over devlink is only relevant when
	 * timestamping is available and we have a valid ptp clock rate
	 */
	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
	    !priv->plat->clk_ptp_rate)
		return 0;

	priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
				      priv->device);
	if (!priv->devlink)
		return -ENOMEM;

	dl_priv = devlink_priv(priv->devlink);
	dl_priv->stmmac_priv = priv;

	ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
				      ARRAY_SIZE(stmmac_devlink_params));
	if (ret)
		goto dl_free;

	devlink_register(priv->devlink);
	return 0;

dl_free:
	devlink_free(priv->devlink);

	return ret;
}
7707
stmmac_unregister_devlink(struct stmmac_priv * priv)7708 static void stmmac_unregister_devlink(struct stmmac_priv *priv)
7709 {
7710 if (!priv->devlink)
7711 return;
7712
7713 devlink_unregister(priv->devlink);
7714 devlink_params_unregister(priv->devlink, stmmac_devlink_params,
7715 ARRAY_SIZE(stmmac_devlink_params));
7716 devlink_free(priv->devlink);
7717 }
7718
/**
 * stmmac_plat_dat_alloc - allocate a platform data structure with defaults
 * @dev: device used for the devm allocation
 *
 * Return: a zeroed, devm-managed plat_stmmacenet_data populated with the
 * driver-wide defaults below, or NULL on allocation failure.
 */
struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
{
	struct plat_stmmacenet_data *plat_dat;
	int i;

	plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
	if (!plat_dat)
		return NULL;

	/* Set the defaults:
	 * - phy autodetection
	 * - determine GMII_Address CR field from CSR clock
	 * - allow MTU up to JUMBO_LEN
	 * - hash table size
	 * - one unicast filter entry
	 */
	plat_dat->phy_addr = -1;
	plat_dat->clk_csr = -1;
	plat_dat->maxmtu = JUMBO_LEN;
	plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
	plat_dat->unicast_filter_entries = 1;

	/* Set the mtl defaults */
	plat_dat->tx_queues_to_use = 1;
	plat_dat->rx_queues_to_use = 1;

	/* Setup the default RX queue channel map (queue i -> channel i) */
	for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
		plat_dat->rx_queues_cfg[i].chan = i;

	return plat_dat;
}
EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
7752
/**
 * __stmmac_dvr_probe - core probe: allocate, initialise and register the netdev
 * @device: struct device of the platform/PCI parent
 * @plat_dat: platform configuration (queues, flags, resets, callbacks)
 * @res: MMIO base, MAC address and IRQ resources discovered by the caller
 *
 * Allocates the net_device + stmmac_priv, copies resources into priv,
 * brings the IP out of reset, probes the hardware capabilities and derives
 * the netdev feature flags from them, then registers MDIO, PCS, phylink,
 * devlink and finally the net_device itself. On failure everything acquired
 * so far is unwound via the goto ladder at the bottom (labels are in
 * reverse order of acquisition).
 *
 * Return: 0 on success, negative errno otherwise.
 */
static int __stmmac_dvr_probe(struct device *device,
			      struct plat_stmmacenet_data *plat_dat,
			      struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	u32 rxq;
	int i, ret = 0;

	/* Device-managed allocation: freed automatically on probe failure */
	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	/* Seqcount init for the per-queue u64 statistics */
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
	}

	priv->xstats.pcpu_stats =
		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
	if (!priv->xstats.pcpu_stats)
		return -ENOMEM;

	stmmac_set_ethtool_ops(ndev);
	/* "pause" is a module parameter (pause time default) */
	priv->pause_time = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	/* Copy all IRQ resources; per-queue vectors are optional and may be 0 */
	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->sfty_irq = res->sfty_irq;
	priv->sfty_ce_irq = res->sfty_ce_irq;
	priv->sfty_ue_irq = res->sfty_ue_irq;
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		priv->rx_irq[i] = res->rx_irq[i];
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
		priv->tx_irq[i] = res->tx_irq[i];

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Wait a bit for the reset to take effect */
	udelay(10);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	/* Advertise HW TC offload only if the TC backend initialises */
	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	/* TSO needs both the platform flag and the DMA capability bit */
	if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) {
		ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
		if (priv->plat->core_type == DWMAC_CORE_GMAC4)
			ndev->hw_features |= NETIF_F_GSO_UDP_L4;
		priv->tso = true;
		dev_info(priv->device, "TSO feature enabled\n");
	}

	/* Split Header: capable if the HW supports it and the platform does
	 * not explicitly disable it
	 */
	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_capable = true;
		priv->sph_active = priv->sph_capable;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			/* Wide mask rejected: fall back to 32-bit DMA */
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (dwmac_is_xmac(priv->plat->core_type)) {
		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		priv->hw->hw_vlan_en = true;
	}
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins)
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	/* "tc" module parameter: transmit threshold */
	priv->xstats.threshold = tc;

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	ndev->vlan_features |= ndev->features;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;

	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	/* Warn if the platform's maxmtu is smaller than the minimum MTU,
	 * otherwise clamp the maximum MTU above to the platform's maxmtu.
	 */
	if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);
	else if (priv->plat->maxmtu < ndev->max_mtu)
		ndev->max_mtu = priv->plat->maxmtu;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	stmmac_fpe_init(priv);

	stmmac_check_pcs_mode(priv);

	/* Keep the device powered during the rest of probe; the matching
	 * pm_runtime_put() is at the end of this function.
	 */
	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	ret = stmmac_mdio_register(ndev);
	if (ret < 0) {
		dev_err_probe(priv->device, ret,
			      "MDIO bus (id: %d) registration failed\n",
			      priv->plat->bus_id);
		goto error_mdio_register;
	}

	ret = stmmac_pcs_setup(ndev);
	if (ret)
		goto error_pcs_setup;

	ret = stmmac_phylink_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = stmmac_register_devlink(priv);
	if (ret)
		goto error_devlink_setup;

	/* register_netdev() must come last: once it returns, the interface
	 * is visible to userspace and ndo callbacks may run.
	 */
	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

	/* Error unwinding: labels in reverse order of acquisition */
error_netdev_register:
	stmmac_unregister_devlink(priv);
error_devlink_setup:
	phylink_destroy(priv->phylink);
error_phy_setup:
	stmmac_pcs_clean(ndev);
error_pcs_setup:
	stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
8055
8056 /**
8057 * stmmac_dvr_probe
8058 * @dev: device pointer
8059 * @plat_dat: platform data pointer
8060 * @res: stmmac resource pointer
8061 * Description: this is the main probe function used to
8062 * call the alloc_etherdev, allocate the priv structure.
8063 * Return:
8064 * returns 0 on success, otherwise errno.
8065 */
stmmac_dvr_probe(struct device * dev,struct plat_stmmacenet_data * plat_dat,struct stmmac_resources * res)8066 int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
8067 struct stmmac_resources *res)
8068 {
8069 int ret;
8070
8071 if (plat_dat->init) {
8072 ret = plat_dat->init(dev, plat_dat->bsp_priv);
8073 if (ret)
8074 return ret;
8075 }
8076
8077 ret = __stmmac_dvr_probe(dev, plat_dat, res);
8078 if (ret && plat_dat->exit)
8079 plat_dat->exit(dev, plat_dat->bsp_priv);
8080
8081 return ret;
8082 }
8083 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
8084
/**
 * stmmac_dvr_remove
 * @dev: device pointer
 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
 * changes the link status, releases the DMA descriptor rings.
 * Teardown mirrors __stmmac_dvr_probe() in reverse order: the netdev is
 * unregistered first so no ndo callback can race with the rest of the
 * cleanup.
 */
void stmmac_dvr_remove(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	netdev_info(priv->dev, "%s: removing driver", __func__);

	/* Resume the device so registers are accessible during teardown */
	pm_runtime_get_sync(dev);

	unregister_netdev(ndev);

#ifdef CONFIG_DEBUG_FS
	stmmac_exit_fs(ndev);
#endif
	stmmac_unregister_devlink(priv);

	phylink_destroy(priv->phylink);
	/* Put the IP back into reset */
	if (priv->plat->stmmac_rst)
		reset_control_assert(priv->plat->stmmac_rst);
	reset_control_assert(priv->plat->stmmac_ahb_rst);

	stmmac_pcs_clean(ndev);
	stmmac_mdio_unregister(ndev);

	destroy_workqueue(priv->wq);
	mutex_destroy(&priv->lock);
	bitmap_free(priv->af_xdp_zc_qps);

	/* Balance the pm_runtime_get_sync() above without triggering a
	 * runtime suspend of a device that is going away.
	 */
	pm_runtime_disable(dev);
	pm_runtime_put_noidle(dev);

	if (priv->plat->exit)
		priv->plat->exit(dev, priv->plat->bsp_priv);
}
EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
8126
/**
 * stmmac_suspend - suspend callback
 * @dev: device pointer
 * Description: this is the function to suspend the device and it is called
 * by the platform driver to stop the network queue, release the resources,
 * program the PMT register (for WoL), clean and release driver resources.
 */
int stmmac_suspend(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	u32 chan;

	/* Interface down: nothing to quiesce, but still run the platform's
	 * own suspend hook.
	 */
	if (!ndev || !netif_running(ndev))
		goto suspend_bsp;

	mutex_lock(&priv->lock);

	netif_device_detach(ndev);

	stmmac_disable_all_queues(priv);

	/* Cancel the per-channel TX coalescing timers */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	if (priv->eee_sw_timer_en) {
		priv->tx_path_in_lpi_mode = false;
		timer_delete_sync(&priv->eee_ctrl_timer);
	}

	/* Stop TX/RX DMA */
	stmmac_stop_all_dma(priv);

	stmmac_legacy_serdes_power_down(priv);

	/* Enable Power down mode by programming the PMT regs */
	if (priv->wolopts) {
		/* Wake-on-LAN requested: keep the MAC powered and armed */
		stmmac_pmt(priv, priv->hw, priv->wolopts);
		priv->irq_wake = 1;
	} else {
		/* No WoL: fully disable the MAC and switch pinctrl to sleep */
		stmmac_mac_set(priv, priv->ioaddr, false);
		pinctrl_pm_select_sleep_state(priv->device);
	}

	mutex_unlock(&priv->lock);

	/* phylink_suspend() needs the RTNL lock; tell it whether WoL is on
	 * so it can keep the link alive if needed.
	 */
	rtnl_lock();
	phylink_suspend(priv->phylink, !!priv->wolopts);
	rtnl_unlock();

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);

suspend_bsp:
	if (priv->plat->suspend)
		return priv->plat->suspend(dev, priv->plat->bsp_priv);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_suspend);
8187
/* Rewind the software RX ring indices of @queue to the start of the ring */
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rxq = &priv->dma_conf.rx_queue[queue];

	rxq->dirty_rx = 0;
	rxq->cur_rx = 0;
}
8195
/* Rewind the software TX ring indices of @queue, clear the cached TSO MSS
 * and reset the matching BQL/netdev TX queue state.
 */
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *txq = &priv->dma_conf.tx_queue[queue];

	txq->mss = 0;
	txq->dirty_tx = 0;
	txq->cur_tx = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
8206
8207 /**
8208 * stmmac_reset_queues_param - reset queue parameters
8209 * @priv: device pointer
8210 */
stmmac_reset_queues_param(struct stmmac_priv * priv)8211 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
8212 {
8213 u32 rx_cnt = priv->plat->rx_queues_to_use;
8214 u32 tx_cnt = priv->plat->tx_queues_to_use;
8215 u32 queue;
8216
8217 for (queue = 0; queue < rx_cnt; queue++)
8218 stmmac_reset_rx_queue(priv, queue);
8219
8220 for (queue = 0; queue < tx_cnt; queue++)
8221 stmmac_reset_tx_queue(priv, queue);
8222 }
8223
/**
 * stmmac_resume - resume callback
 * @dev: device pointer
 * Description: when resume this function is invoked to setup the DMA and CORE
 * in a usable state.
 */
int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret;

	/* Platform resume runs first so clocks/power are back before we
	 * touch any register.
	 */
	if (priv->plat->resume) {
		ret = priv->plat->resume(dev, priv->plat->bsp_priv);
		if (ret)
			return ret;
	}

	if (!netif_running(ndev))
		return 0;

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console).
	 */
	if (priv->wolopts) {
		mutex_lock(&priv->lock);
		stmmac_pmt(priv, priv->hw, 0);
		mutex_unlock(&priv->lock);
		priv->irq_wake = 0;
	} else {
		pinctrl_pm_select_default_state(priv->device);
		/* reset the phy so that it's ready */
		if (priv->mii)
			stmmac_mdio_reset(priv->mii);
	}

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
		ret = stmmac_legacy_serdes_power_up(priv);
		if (ret < 0)
			return ret;
	}

	rtnl_lock();

	/* Prepare the PHY to resume, ensuring that its clocks which are
	 * necessary for the MAC DMA reset to complete are running
	 */
	phylink_prepare_resume(priv->phylink);

	mutex_lock(&priv->lock);

	/* Descriptor rings and software indices are rebuilt from scratch */
	stmmac_reset_queues_param(priv);

	stmmac_free_tx_skbufs(priv);
	stmmac_clear_descriptors(priv, &priv->dma_conf);

	ret = stmmac_hw_setup(ndev);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		stmmac_legacy_serdes_power_down(priv);
		mutex_unlock(&priv->lock);
		rtnl_unlock();
		return ret;
	}

	stmmac_init_timestamping(priv);

	stmmac_init_coalesce(priv);
	/* Keep the RX clock running while reprogramming the filters */
	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_rx_mode(ndev);
	phylink_rx_clk_stop_unblock(priv->phylink);

	stmmac_vlan_restore(priv);

	stmmac_enable_all_queues(priv);
	stmmac_enable_all_dma_irq(priv);

	mutex_unlock(&priv->lock);

	/* phylink_resume() must be called after the hardware has been
	 * initialised because it may bring the link up immediately in a
	 * workqueue thread, which will race with initialisation.
	 */
	phylink_resume(priv->phylink);
	rtnl_unlock();

	netif_device_attach(ndev);

	return 0;
}
EXPORT_SYMBOL_GPL(stmmac_resume);
8318
8319 /* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n */
8320 DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
8321 EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8322
8323 #ifndef MODULE
stmmac_cmdline_opt(char * str)8324 static int __init stmmac_cmdline_opt(char *str)
8325 {
8326 char *opt;
8327
8328 if (!str || !*str)
8329 return 1;
8330 while ((opt = strsep(&str, ",")) != NULL) {
8331 if (!strncmp(opt, "debug:", 6)) {
8332 if (kstrtoint(opt + 6, 0, &debug))
8333 goto err;
8334 } else if (!strncmp(opt, "phyaddr:", 8)) {
8335 if (kstrtoint(opt + 8, 0, &phyaddr))
8336 goto err;
8337 } else if (!strncmp(opt, "tc:", 3)) {
8338 if (kstrtoint(opt + 3, 0, &tc))
8339 goto err;
8340 } else if (!strncmp(opt, "watchdog:", 9)) {
8341 if (kstrtoint(opt + 9, 0, &watchdog))
8342 goto err;
8343 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8344 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8345 goto err;
8346 } else if (!strncmp(opt, "pause:", 6)) {
8347 if (kstrtoint(opt + 6, 0, &pause))
8348 goto err;
8349 } else if (!strncmp(opt, "eee_timer:", 10)) {
8350 if (kstrtoint(opt + 10, 0, &eee_timer))
8351 goto err;
8352 } else if (!strncmp(opt, "chain_mode:", 11)) {
8353 if (kstrtoint(opt + 11, 0, &chain_mode))
8354 goto err;
8355 }
8356 }
8357 return 1;
8358
8359 err:
8360 pr_err("%s: ERROR broken module parameter conversion", __func__);
8361 return 1;
8362 }
8363
8364 __setup("stmmaceth=", stmmac_cmdline_opt);
8365 #endif /* MODULE */
8366
stmmac_init(void)8367 static int __init stmmac_init(void)
8368 {
8369 #ifdef CONFIG_DEBUG_FS
8370 /* Create debugfs main directory if it doesn't exist yet */
8371 if (!stmmac_fs_dir)
8372 stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
8373 register_netdevice_notifier(&stmmac_notifier);
8374 #endif
8375
8376 return 0;
8377 }
8378
stmmac_exit(void)8379 static void __exit stmmac_exit(void)
8380 {
8381 #ifdef CONFIG_DEBUG_FS
8382 unregister_netdevice_notifier(&stmmac_notifier);
8383 debugfs_remove_recursive(stmmac_fs_dir);
8384 #endif
8385 }
8386
8387 module_init(stmmac_init)
8388 module_exit(stmmac_exit)
8389
8390 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
8391 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
8392 MODULE_LICENSE("GPL");
8393