1 // SPDX-License-Identifier: GPL-2.0-only
2 /*******************************************************************************
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
4 ST Ethernet IPs are built around a Synopsys IP Core.
5
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
7
8
9 Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
10
11 Documentation available at:
12 http://www.stlinux.com
13 Support available at:
14 https://bugzilla.stlinux.com/
15 *******************************************************************************/
16
17 #include <linux/circ_buf.h>
18 #include <linux/clk.h>
19 #include <linux/kernel.h>
20 #include <linux/interrupt.h>
21 #include <linux/ip.h>
22 #include <linux/tcp.h>
23 #include <linux/skbuff.h>
24 #include <linux/ethtool.h>
25 #include <linux/if_ether.h>
26 #include <linux/crc32.h>
27 #include <linux/mii.h>
28 #include <linux/if.h>
29 #include <linux/if_vlan.h>
30 #include <linux/dma-mapping.h>
31 #include <linux/slab.h>
32 #include <linux/pm_runtime.h>
33 #include <linux/pm_wakeirq.h>
34 #include <linux/prefetch.h>
35 #include <linux/pinctrl/consumer.h>
36 #ifdef CONFIG_DEBUG_FS
37 #include <linux/debugfs.h>
38 #include <linux/seq_file.h>
39 #endif /* CONFIG_DEBUG_FS */
40 #include <linux/net_tstamp.h>
41 #include <linux/phylink.h>
42 #include <linux/udp.h>
43 #include <linux/bpf_trace.h>
44 #include <net/devlink.h>
45 #include <net/page_pool/helpers.h>
46 #include <net/pkt_cls.h>
47 #include <net/xdp_sock_drv.h>
48 #include "stmmac_ptp.h"
49 #include "stmmac_fpe.h"
50 #include "stmmac.h"
51 #include "stmmac_pcs.h"
52 #include "stmmac_xdp.h"
53 #include <linux/reset.h>
54 #include <linux/of_mdio.h>
55 #include "dwmac1000.h"
56 #include "dwxgmac2.h"
57 #include "hwif.h"
58
/* As long as the interface is active, we keep the timestamping counter enabled
 * with fine resolution and binary rollover. This avoids non-monotonic behavior
 * (clock jumps) when changing timestamping settings at runtime.
 */
63 #define STMMAC_HWTS_ACTIVE (PTP_TCR_TSENA | PTP_TCR_TSCTRLSSR)
64
65 #define STMMAC_ALIGN(x) ALIGN(ALIGN(x, SMP_CACHE_BYTES), 16)
66 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
67
/* Module parameters */
#define TX_TIMEO	5000
static int watchdog = TX_TIMEO;
module_param(watchdog, int, 0644);
MODULE_PARM_DESC(watchdog, "Transmit timeout in milliseconds (default 5s)");

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");

static int phyaddr = -1;
module_param(phyaddr, int, 0444);
MODULE_PARM_DESC(phyaddr, "Physical device address");

/* TX clean threshold: a quarter of the configured TX ring size */
#define STMMAC_TX_THRESH(x)	((x)->dma_conf.dma_tx_size / 4)

/* Limit to make sure XDP TX and slow path can coexist */
#define STMMAC_XSK_TX_BUDGET_MAX	256
#define STMMAC_TX_XSK_AVAIL		16
#define STMMAC_RX_FILL_BATCH		16

#define STMMAC_XDP_PASS		0
#define STMMAC_XDP_CONSUMED	BIT(0)
#define STMMAC_XDP_TX		BIT(1)
#define STMMAC_XDP_REDIRECT	BIT(2)
#define STMMAC_XSK_CONSUMED	BIT(3)

/* 0xdead is a sentinel value: stmmac_verify_args() warns if the user
 * overrode this obsolete parameter.
 */
static int flow_ctrl = 0xdead;
module_param(flow_ctrl, int, 0644);
MODULE_PARM_DESC(flow_ctrl, "Flow control ability [on/off] (obsolete)");

static int pause = PAUSE_TIME;
module_param(pause, int, 0644);
MODULE_PARM_DESC(pause, "Flow Control Pause Time (units of 512 bit times)");

#define TC_DEFAULT 64
static int tc = TC_DEFAULT;
module_param(tc, int, 0644);
MODULE_PARM_DESC(tc, "DMA threshold control value");

/* This is unused - kept only so existing module configurations that set
 * it do not break.
 */
#define DEFAULT_BUFSIZE	1536
static int buf_sz = DEFAULT_BUFSIZE;
module_param(buf_sz, int, 0644);
MODULE_PARM_DESC(buf_sz, "DMA buffer size");

static const u32 default_msg_level = (NETIF_MSG_DRV | NETIF_MSG_PROBE |
				      NETIF_MSG_LINK | NETIF_MSG_IFUP |
				      NETIF_MSG_IFDOWN | NETIF_MSG_TIMER);

#define STMMAC_DEFAULT_LPI_TIMER	1000
static unsigned int eee_timer = STMMAC_DEFAULT_LPI_TIMER;
module_param(eee_timer, uint, 0644);
MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
/* Convert a microsecond LPI timeout into an absolute jiffies deadline */
#define STMMAC_LPI_T(x) (jiffies + usecs_to_jiffies(x))

/* By default the driver will use the ring mode to manage tx and rx descriptors,
 * but allow user to force to use the chain instead of the ring
 */
static unsigned int chain_mode;
module_param(chain_mode, int, 0444);
MODULE_PARM_DESC(chain_mode, "To use chain instead of ring mode");
130
/* Printable names for the dwmac PHY interface selection values
 * (PHY_INTF_SEL_*); indices not listed remain NULL.
 */
static const char *stmmac_dwmac_actphyif[8] = {
	[PHY_INTF_SEL_GMII_MII] = "GMII/MII",
	[PHY_INTF_SEL_RGMII] = "RGMII",
	[PHY_INTF_SEL_SGMII] = "SGMII",
	[PHY_INTF_SEL_TBI] = "TBI",
	[PHY_INTF_SEL_RMII] = "RMII",
	[PHY_INTF_SEL_RTBI] = "RTBI",
	[PHY_INTF_SEL_SMII] = "SMII",
	[PHY_INTF_SEL_REVMII] = "REVMII",
};

/* Printable names for the dwxgmac PHY interface values; only GMII and
 * RGMII are defined, the remaining indices stay NULL.
 */
static const char *stmmac_dwxgmac_phyif[4] = {
	[PHY_INTF_GMII] = "GMII",
	[PHY_INTF_RGMII] = "RGMII",
};
146
147 static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
148 /* For MSI interrupts handling */
149 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id);
150 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id);
151 static irqreturn_t stmmac_msi_intr_tx(int irq, void *data);
152 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data);
153 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
154 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
155 static void stmmac_reset_queues_param(struct stmmac_priv *priv);
156 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
157 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
158 static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
159 u32 rxmode, u32 chan);
160 static void stmmac_vlan_restore(struct stmmac_priv *priv);
161
162 #ifdef CONFIG_DEBUG_FS
163 static const struct net_device_ops stmmac_netdev_ops;
164 static void stmmac_init_fs(struct net_device *dev);
165 static void stmmac_exit_fs(struct net_device *dev);
166 #endif
167
168 #define STMMAC_COAL_TIMER(x) (ns_to_ktime((x) * NSEC_PER_USEC))
169
/* Devlink instance private data: just a back-pointer to the driver state */
struct stmmac_devlink_priv {
	struct stmmac_priv *stmmac_priv;
};

/* Driver-specific devlink parameter IDs, allocated above the generic range */
enum stmmac_dl_param_id {
	STMMAC_DEVLINK_PARAM_ID_BASE = DEVLINK_PARAM_GENERIC_ID_MAX,
	STMMAC_DEVLINK_PARAM_ID_TS_COARSE,
};
178
179 /**
180 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
181 * @bsp_priv: BSP private data structure (unused)
182 * @clk_tx_i: the transmit clock
183 * @interface: the selected interface mode
184 * @speed: the speed that the MAC will be operating at
185 *
186 * Set the transmit clock rate for the MAC, normally 2.5MHz for 10Mbps,
187 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
188 * MII, GMII, RGMII and RMII interface modes. Platforms can hook this into
189 * the plat_data->set_clk_tx_rate method directly, call it via their own
190 * implementation, or implement their own method should they have more
191 * complex requirements. It is intended to only be used in this method.
192 *
193 * plat_data->clk_tx_i must be filled in.
194 */
int stmmac_set_clk_tx_rate(void *bsp_priv, struct clk *clk_tx_i,
			   phy_interface_t interface, int speed)
{
	long rate;

	rate = rgmii_clock(speed);
	if (rate < 0) {
		/* rgmii_clock() only knows 10, 100 and 1000Mbps; silently
		 * accept anything else (e.g. 2500Mbps) rather than spitting
		 * errors here.
		 */
		return 0;
	}

	return clk_set_rate(clk_tx_i, rate);
}
EXPORT_SYMBOL_GPL(stmmac_set_clk_tx_rate);
210
211 /**
212 * stmmac_axi_blen_to_mask() - convert a burst length array to reg value
213 * @regval: pointer to a u32 for the resulting register value
214 * @blen: pointer to an array of u32 containing the burst length values in bytes
215 * @len: the number of entries in the @blen array
216 */
void stmmac_axi_blen_to_mask(u32 *regval, const u32 *blen, size_t len)
{
	u32 field = 0;
	size_t i;

	for (i = 0; i < len; i++) {
		u32 burst = blen[i];

		/* Zero entries are simply not programmed. */
		if (burst == 0)
			continue;

		/* Only powers of two in the range 4..256 inclusive are
		 * representable in the register field.
		 */
		if (!is_power_of_2(burst) || burst < 4 || burst > 256) {
			pr_err("stmmac: invalid burst length %u at index %zu\n",
			       burst, i);
			continue;
		}

		/* The field starts at burst = 4 and each successive bit
		 * doubles the burst length, so dividing the (power of two)
		 * burst by four yields exactly the bit to set.
		 */
		field |= burst >> 2;
	}

	*regval = FIELD_PREP(DMA_AXI_BLEN_MASK, field);
}
EXPORT_SYMBOL_GPL(stmmac_axi_blen_to_mask);
248
249 /**
250 * stmmac_verify_args - verify the driver parameters.
251 * Description: it checks the driver parameters and set a default in case of
252 * errors.
253 */
stmmac_verify_args(void)254 static void stmmac_verify_args(void)
255 {
256 if (unlikely(watchdog < 0))
257 watchdog = TX_TIMEO;
258 if (unlikely((pause < 0) || (pause > 0xffff)))
259 pause = PAUSE_TIME;
260
261 if (flow_ctrl != 0xdead)
262 pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configuration\n");
263 }
264
__stmmac_disable_all_queues(struct stmmac_priv * priv)265 static void __stmmac_disable_all_queues(struct stmmac_priv *priv)
266 {
267 u8 rx_queues_cnt = priv->plat->rx_queues_to_use;
268 u8 tx_queues_cnt = priv->plat->tx_queues_to_use;
269 u8 maxq = max(rx_queues_cnt, tx_queues_cnt);
270 u8 queue;
271
272 for (queue = 0; queue < maxq; queue++) {
273 struct stmmac_channel *ch = &priv->channel[queue];
274
275 if (stmmac_xdp_is_enabled(priv) &&
276 test_bit(queue, priv->af_xdp_zc_qps)) {
277 napi_disable(&ch->rxtx_napi);
278 continue;
279 }
280
281 if (queue < rx_queues_cnt)
282 napi_disable(&ch->rx_napi);
283 if (queue < tx_queues_cnt)
284 napi_disable(&ch->tx_napi);
285 }
286 }
287
288 /**
289 * stmmac_disable_all_queues - Disable all queues
290 * @priv: driver private structure
291 */
stmmac_disable_all_queues(struct stmmac_priv * priv)292 static void stmmac_disable_all_queues(struct stmmac_priv *priv)
293 {
294 u8 rx_queues_cnt = priv->plat->rx_queues_to_use;
295 struct stmmac_rx_queue *rx_q;
296 u8 queue;
297
298 /* synchronize_rcu() needed for pending XDP buffers to drain */
299 for (queue = 0; queue < rx_queues_cnt; queue++) {
300 rx_q = &priv->dma_conf.rx_queue[queue];
301 if (rx_q->xsk_pool) {
302 synchronize_rcu();
303 break;
304 }
305 }
306
307 __stmmac_disable_all_queues(priv);
308 }
309
310 /**
311 * stmmac_enable_all_queues - Enable all queues
312 * @priv: driver private structure
313 */
stmmac_enable_all_queues(struct stmmac_priv * priv)314 static void stmmac_enable_all_queues(struct stmmac_priv *priv)
315 {
316 u8 rx_queues_cnt = priv->plat->rx_queues_to_use;
317 u8 tx_queues_cnt = priv->plat->tx_queues_to_use;
318 u8 maxq = max(rx_queues_cnt, tx_queues_cnt);
319 u8 queue;
320
321 for (queue = 0; queue < maxq; queue++) {
322 struct stmmac_channel *ch = &priv->channel[queue];
323
324 if (stmmac_xdp_is_enabled(priv) &&
325 test_bit(queue, priv->af_xdp_zc_qps)) {
326 napi_enable(&ch->rxtx_napi);
327 continue;
328 }
329
330 if (queue < rx_queues_cnt)
331 napi_enable(&ch->rx_napi);
332 if (queue < tx_queues_cnt)
333 napi_enable(&ch->tx_napi);
334 }
335 }
336
stmmac_service_event_schedule(struct stmmac_priv * priv)337 static void stmmac_service_event_schedule(struct stmmac_priv *priv)
338 {
339 if (!test_bit(STMMAC_DOWN, &priv->state) &&
340 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state))
341 queue_work(priv->wq, &priv->service_task);
342 }
343
/* Mark the link as down, flag that a reset is required and kick the
 * service task to handle it.
 */
static void stmmac_global_err(struct stmmac_priv *priv)
{
	netif_carrier_off(priv->dev);
	set_bit(STMMAC_RESET_REQUESTED, &priv->state);
	stmmac_service_event_schedule(priv);
}
350
/* Dump @len bytes of packet data at @buf to the kernel log (debug only) */
static void print_pkt(unsigned char *buf, int len)
{
	pr_debug("len = %d byte, buf addr: 0x%p\n", len, buf);
	print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len);
}
356
/* Number of free descriptors in the given TX ring */
static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_dma_conf *dma_conf = &priv->dma_conf;

	return CIRC_SPACE(dma_conf->tx_queue[queue].cur_tx,
			  dma_conf->tx_queue[queue].dirty_tx,
			  dma_conf->dma_tx_size);
}
364
stmmac_get_tx_desc_size(struct stmmac_priv * priv,struct stmmac_tx_queue * tx_q)365 static size_t stmmac_get_tx_desc_size(struct stmmac_priv *priv,
366 struct stmmac_tx_queue *tx_q)
367 {
368 if (priv->extend_desc)
369 return sizeof(struct dma_extended_desc);
370 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
371 return sizeof(struct dma_edesc);
372 else
373 return sizeof(struct dma_desc);
374 }
375
stmmac_get_tx_desc(struct stmmac_priv * priv,struct stmmac_tx_queue * tx_q,unsigned int index)376 static struct dma_desc *stmmac_get_tx_desc(struct stmmac_priv *priv,
377 struct stmmac_tx_queue *tx_q,
378 unsigned int index)
379 {
380 if (priv->extend_desc)
381 return &tx_q->dma_etx[index].basic;
382 else if (tx_q->tbs & STMMAC_TBS_AVAIL)
383 return &tx_q->dma_entx[index].basic;
384 else
385 return &tx_q->dma_tx[index];
386 }
387
stmmac_set_queue_tx_tail_ptr(struct stmmac_priv * priv,struct stmmac_tx_queue * tx_q,unsigned int chan,unsigned int index)388 static void stmmac_set_queue_tx_tail_ptr(struct stmmac_priv *priv,
389 struct stmmac_tx_queue *tx_q,
390 unsigned int chan, unsigned int index)
391 {
392 size_t desc_size;
393 u32 tx_tail_addr;
394
395 desc_size = stmmac_get_tx_desc_size(priv, tx_q);
396
397 tx_tail_addr = tx_q->dma_tx_phy + index * desc_size;
398 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_tail_addr, chan);
399 }
400
stmmac_get_rx_desc_size(struct stmmac_priv * priv)401 static size_t stmmac_get_rx_desc_size(struct stmmac_priv *priv)
402 {
403 if (priv->extend_desc)
404 return sizeof(struct dma_extended_desc);
405 else
406 return sizeof(struct dma_desc);
407 }
408
stmmac_get_rx_desc(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,unsigned int index)409 static struct dma_desc *stmmac_get_rx_desc(struct stmmac_priv *priv,
410 struct stmmac_rx_queue *rx_q,
411 unsigned int index)
412 {
413 if (priv->extend_desc)
414 return &rx_q->dma_erx[index].basic;
415 else
416 return &rx_q->dma_rx[index];
417 }
418
stmmac_set_queue_rx_tail_ptr(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,unsigned int chan,unsigned int index)419 static void stmmac_set_queue_rx_tail_ptr(struct stmmac_priv *priv,
420 struct stmmac_rx_queue *rx_q,
421 unsigned int chan, unsigned int index)
422 {
423 /* This only needs to deal with normal descriptors as enhanced
424 * descriptiors are only supported with dwmac1000 (<v4.0) which
425 * does not implement .set_rx_tail_ptr
426 */
427 u32 rx_tail_addr = rx_q->dma_rx_phy + index * sizeof(struct dma_desc);
428
429 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_tail_addr, chan);
430 }
431
stmmac_set_queue_rx_buf_size(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,unsigned int chan)432 static void stmmac_set_queue_rx_buf_size(struct stmmac_priv *priv,
433 struct stmmac_rx_queue *rx_q,
434 unsigned int chan)
435 {
436 u32 buf_size;
437
438 if (rx_q->xsk_pool && rx_q->buf_alloc_num)
439 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool);
440 else
441 buf_size = priv->dma_conf.dma_buf_sz;
442
443 stmmac_set_dma_bfsize(priv, priv->ioaddr, buf_size, chan);
444 }
445
446 /**
447 * stmmac_rx_dirty - Get RX queue dirty
448 * @priv: driver private structure
449 * @queue: RX queue index
450 */
/**
 * stmmac_rx_dirty - Get RX queue dirty
 * @priv: driver private structure
 * @queue: RX queue index
 */
static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_dma_conf *dma_conf = &priv->dma_conf;

	return CIRC_CNT(dma_conf->rx_queue[queue].cur_rx,
			dma_conf->rx_queue[queue].dirty_rx,
			dma_conf->dma_rx_size);
}
458
stmmac_eee_tx_busy(struct stmmac_priv * priv)459 static bool stmmac_eee_tx_busy(struct stmmac_priv *priv)
460 {
461 u8 tx_cnt = priv->plat->tx_queues_to_use;
462 u8 queue;
463
464 /* check if all TX queues have the work finished */
465 for (queue = 0; queue < tx_cnt; queue++) {
466 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
467
468 if (tx_q->dirty_tx != tx_q->cur_tx)
469 return true; /* still unfinished work */
470 }
471
472 return false;
473 }
474
/* (Re)arm the software LPI entry timer tx_lpi_timer microseconds from now
 * (STMMAC_LPI_T converts the usec value to an absolute jiffies deadline).
 */
static void stmmac_restart_sw_lpi_timer(struct stmmac_priv *priv)
{
	mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer));
}
479
480 /**
481 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
482 * @priv: driver private structure
483 * Description: this function is to verify and enter in LPI mode in case of
484 * EEE.
485 */
stmmac_try_to_start_sw_lpi(struct stmmac_priv * priv)486 static void stmmac_try_to_start_sw_lpi(struct stmmac_priv *priv)
487 {
488 if (stmmac_eee_tx_busy(priv)) {
489 stmmac_restart_sw_lpi_timer(priv);
490 return;
491 }
492
493 /* Check and enter in LPI mode */
494 if (!priv->tx_path_in_lpi_mode)
495 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED,
496 priv->tx_lpi_clk_stop, 0);
497 }
498
499 /**
500 * stmmac_stop_sw_lpi - stop transmitting LPI
501 * @priv: driver private structure
502 * Description: When using software-controlled LPI, stop transmitting LPI state.
503 */
static void stmmac_stop_sw_lpi(struct stmmac_priv *priv)
{
	/* Stop the timer first so it cannot re-enter LPI behind our back */
	timer_delete_sync(&priv->eee_ctrl_timer);
	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
	priv->tx_path_in_lpi_mode = false;
}
510
511 /**
512 * stmmac_eee_ctrl_timer - EEE TX SW timer.
513 * @t: timer_list struct containing private info
514 * Description:
515 * if there is no data transfer and if we are not in LPI state,
516 * then MAC Transmitter can be moved to LPI state.
517 */
static void stmmac_eee_ctrl_timer(struct timer_list *t)
{
	/* Recover the owning stmmac_priv from the embedded timer */
	struct stmmac_priv *priv = timer_container_of(priv, t, eee_ctrl_timer);

	stmmac_try_to_start_sw_lpi(priv);
}
524
/* stmmac_get_tx_hwtstamp - get HW TX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the timestamp from the descriptor and pass it to
 * the stack, performing some sanity checks along the way.
 */
stmmac_get_tx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct sk_buff * skb)533 static void stmmac_get_tx_hwtstamp(struct stmmac_priv *priv,
534 struct dma_desc *p, struct sk_buff *skb)
535 {
536 struct skb_shared_hwtstamps shhwtstamp;
537 bool found = false;
538 u64 ns = 0;
539
540 if (!priv->hwts_tx_en)
541 return;
542
543 /* exit if skb doesn't support hw tstamp */
544 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)))
545 return;
546
547 /* check tx tstamp status */
548 if (stmmac_get_tx_timestamp_status(priv, p)) {
549 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns);
550 found = true;
551 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
552 found = true;
553 }
554
555 if (found) {
556 ns -= priv->plat->cdc_error_adj;
557
558 memset(&shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
559 shhwtstamp.hwtstamp = ns_to_ktime(ns);
560
561 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns);
562 /* pass tstamp to stack */
563 skb_tstamp_tx(skb, &shhwtstamp);
564 }
565 }
566
/* stmmac_get_rx_hwtstamp - get HW RX timestamps
 * @priv: driver private structure
 * @p : descriptor pointer
 * @np : next descriptor pointer
 * @skb : the socket buffer
 * Description :
 * This function will read the received packet's timestamp from the descriptor
 * and pass it to the stack. It also performs some sanity checks.
 */
stmmac_get_rx_hwtstamp(struct stmmac_priv * priv,struct dma_desc * p,struct dma_desc * np,struct sk_buff * skb)576 static void stmmac_get_rx_hwtstamp(struct stmmac_priv *priv, struct dma_desc *p,
577 struct dma_desc *np, struct sk_buff *skb)
578 {
579 struct skb_shared_hwtstamps *shhwtstamp = NULL;
580 struct dma_desc *desc = p;
581 u64 ns = 0;
582
583 if (!priv->hwts_rx_en)
584 return;
585 /* For GMAC4, the valid timestamp is from CTX next desc. */
586 if (dwmac_is_xmac(priv->plat->core_type))
587 desc = np;
588
589 /* Check if timestamp is available */
590 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) {
591 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
592
593 ns -= priv->plat->cdc_error_adj;
594
595 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns);
596 shhwtstamp = skb_hwtstamps(skb);
597 memset(shhwtstamp, 0, sizeof(struct skb_shared_hwtstamps));
598 shhwtstamp->hwtstamp = ns_to_ktime(ns);
599 } else {
600 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n");
601 }
602 }
603
/* Reprogram the sub-second increment and addend registers from the PTP
 * reference clock rate, caching the increment for later adjustments.
 */
static void stmmac_update_subsecond_increment(struct stmmac_priv *priv)
{
	bool xmac = dwmac_is_xmac(priv->plat->core_type);
	u32 sec_inc = 0;
	u64 temp = 0;

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* program Sub Second Increment reg */
	stmmac_config_sub_second_increment(priv, priv->ptpaddr,
					   priv->plat->clk_ptp_rate,
					   xmac, &sec_inc);
	temp = div_u64(1000000000ULL, sec_inc);

	/* Store sub second increment for later use */
	priv->sub_second_inc = sec_inc;

	/* calculate default added value:
	 * formula is :
	 * addend = (2^32)/freq_div_ratio;
	 * where, freq_div_ratio = 1e9ns/sec_inc
	 */
	temp = (u64)(temp << 32);
	priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate);
	stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend);
}
630
631 /**
632 * stmmac_hwtstamp_set - control hardware timestamping.
633 * @dev: device pointer.
634 * @config: the timestamping configuration.
635 * @extack: netlink extended ack structure for error reporting.
636 * Description:
637 * This function configures the MAC to enable/disable both outgoing(TX)
638 * and incoming(RX) packets time stamping based on user input.
639 * Return Value:
640 * 0 on success and an appropriate -ve integer on failure.
641 */
static int stmmac_hwtstamp_set(struct net_device *dev,
			       struct kernel_hwtstamp_config *config,
			       struct netlink_ext_ack *extack)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	/* PTP_TCR field values accumulated by the filter translation below */
	u32 ptp_v2 = 0;
	u32 tstamp_all = 0;
	u32 ptp_over_ipv4_udp = 0;
	u32 ptp_over_ipv6_udp = 0;
	u32 ptp_over_ethernet = 0;
	u32 snap_type_sel = 0;
	u32 ts_master_en = 0;
	u32 ts_event_en = 0;

	if (!(priv->dma_cap.time_stamp || priv->adv_ts)) {
		NL_SET_ERR_MSG_MOD(extack, "No support for HW time stamping");
		priv->hwts_tx_en = 0;
		priv->hwts_rx_en = 0;

		return -EOPNOTSUPP;
	}

	if (!netif_running(dev)) {
		NL_SET_ERR_MSG_MOD(extack,
				   "Cannot change timestamping configuration while down");
		return -ENODEV;
	}

	netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n",
		   __func__, config->flags, config->tx_type, config->rx_filter);

	if (config->tx_type != HWTSTAMP_TX_OFF &&
	    config->tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;

	if (priv->adv_ts) {
		/* Advanced (PTPv2-capable) timestamping: translate the
		 * requested RX filter into PTP_TCR field values.
		 */
		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			/* time stamp no incoming packet at all */
			config->rx_filter = HWTSTAMP_FILTER_NONE;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
			/* PTP v1, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			/* 'xmac' hardware can support Sync, Pdelay_Req and
			 * Pdelay_resp by setting bit14 and bits17/16 to 01
			 * This leaves Delay_Req timestamps out.
			 * Enable all events *and* general purpose message
			 * timestamping
			 */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
			/* PTP v1, UDP, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_SYNC;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
			/* PTP v1, UDP, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
			/* PTP v2, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for all event messages */
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
			/* PTP v2, UDP, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
			/* PTP v2, UDP, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_EVENT:
			/* PTP v2/802.AS1 any layer, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			snap_type_sel = PTP_TCR_SNAPTYPSEL_1;
			if (priv->synopsys_id < DWMAC_CORE_4_10)
				ts_event_en = PTP_TCR_TSEVNTENA;
			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_SYNC:
			/* PTP v2/802.AS1, any layer, Sync packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_SYNC;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for SYNC messages only */
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
			/* PTP v2/802.AS1, any layer, Delay_req packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V2_DELAY_REQ;
			ptp_v2 = PTP_TCR_TSVER2ENA;
			/* take time stamp for Delay_Req messages only */
			ts_master_en = PTP_TCR_TSMSTRENA;
			ts_event_en = PTP_TCR_TSEVNTENA;

			ptp_over_ipv4_udp = PTP_TCR_TSIPV4ENA;
			ptp_over_ipv6_udp = PTP_TCR_TSIPV6ENA;
			ptp_over_ethernet = PTP_TCR_TSIPENA;
			break;

		case HWTSTAMP_FILTER_NTP_ALL:
		case HWTSTAMP_FILTER_ALL:
			/* time stamp any incoming packet */
			config->rx_filter = HWTSTAMP_FILTER_ALL;
			tstamp_all = PTP_TCR_TSENALL;
			break;

		default:
			return -ERANGE;
		}
	} else {
		/* Basic (PTPv1-only) timestamping: anything other than
		 * "off" degrades to the v1 L4 event filter.
		 */
		switch (config->rx_filter) {
		case HWTSTAMP_FILTER_NONE:
			config->rx_filter = HWTSTAMP_FILTER_NONE;
			break;
		default:
			/* PTP v1, UDP, any kind of event packet */
			config->rx_filter = HWTSTAMP_FILTER_PTP_V1_L4_EVENT;
			break;
		}
	}
	/* Cache the enables consulted by the RX/TX datapaths */
	priv->hwts_rx_en = config->rx_filter != HWTSTAMP_FILTER_NONE;
	priv->hwts_tx_en = config->tx_type == HWTSTAMP_TX_ON;

	/* Use fine correction unless coarse update mode was selected
	 * (tsfupdt_coarse is set elsewhere - presumably via the
	 * TS_COARSE devlink parameter; confirm against that handler).
	 */
	priv->systime_flags = STMMAC_HWTS_ACTIVE;
	if (!priv->tsfupdt_coarse)
		priv->systime_flags |= PTP_TCR_TSCFUPDT;

	if (priv->hwts_tx_en || priv->hwts_rx_en) {
		priv->systime_flags |= tstamp_all | ptp_v2 |
				       ptp_over_ethernet | ptp_over_ipv6_udp |
				       ptp_over_ipv4_udp | ts_event_en |
				       ts_master_en | snap_type_sel;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags);

	/* Remember the accepted configuration for stmmac_hwtstamp_get() */
	priv->tstamp_config = *config;

	return 0;
}
831
832 /**
833 * stmmac_hwtstamp_get - read hardware timestamping.
834 * @dev: device pointer.
835 * @config: the timestamping configuration.
836 * Description:
837 * This function obtain the current hardware timestamping settings
838 * as requested.
839 */
stmmac_hwtstamp_get(struct net_device * dev,struct kernel_hwtstamp_config * config)840 static int stmmac_hwtstamp_get(struct net_device *dev,
841 struct kernel_hwtstamp_config *config)
842 {
843 struct stmmac_priv *priv = netdev_priv(dev);
844
845 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp))
846 return -EOPNOTSUPP;
847
848 *config = priv->tstamp_config;
849
850 return 0;
851 }
852
853 /**
854 * stmmac_init_tstamp_counter - init hardware timestamping counter
855 * @priv: driver private structure
856 * @systime_flags: timestamping flags
857 * Description:
858 * Initialize hardware counter for packet timestamping.
859 * This is valid as long as the interface is open and not suspended.
860 * Will be rerun after resuming from suspend, case in which the timestamping
861 * flags updated by stmmac_hwtstamp_set() also need to be restored.
862 */
static int stmmac_init_tstamp_counter(struct stmmac_priv *priv,
				      u32 systime_flags)
{
	struct timespec64 now;

	/* The sub-second increment computation divides by this rate, so a
	 * zero rate would be invalid (and divide by zero).
	 */
	if (!priv->plat->clk_ptp_rate) {
		netdev_err(priv->dev, "Invalid PTP clock rate");
		return -EINVAL;
	}

	stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags);
	priv->systime_flags = systime_flags;

	stmmac_update_subsecond_increment(priv);

	/* initialize system time */
	ktime_get_real_ts64(&now);

	/* lower 32 bits of tv_sec are safe until y2106 */
	stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec);

	return 0;
}
886
887 /**
888 * stmmac_init_timestamping - initialise timestamping
889 * @priv: driver private structure
890 * Description: this is to verify if the HW supports the PTPv1 or PTPv2.
891 * This is done by looking at the HW cap. register.
892 * This function also registers the ptp driver.
893 */
static int stmmac_init_timestamping(struct stmmac_priv *priv)
{
	bool xmac = dwmac_is_xmac(priv->plat->core_type);
	int ret;

	/* Give the platform a chance to set up the PTP clock frequency */
	if (priv->plat->ptp_clk_freq_config)
		priv->plat->ptp_clk_freq_config(priv);

	if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) {
		netdev_info(priv->dev, "PTP not supported by HW\n");
		return -EOPNOTSUPP;
	}

	/* Start the counter with fine correction; stmmac_hwtstamp_set()
	 * refines the flags later based on user configuration.
	 */
	ret = stmmac_init_tstamp_counter(priv, STMMAC_HWTS_ACTIVE |
					 PTP_TCR_TSCFUPDT);
	if (ret) {
		netdev_warn(priv->dev, "PTP init failed\n");
		return ret;
	}

	priv->adv_ts = 0;
	/* Check if adv_ts can be enabled for dwmac 4.x / xgmac core */
	if (xmac && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;
	/* Dwmac 3.x core with extend_desc can support adv_ts */
	else if (priv->extend_desc && priv->dma_cap.atime_stamp)
		priv->adv_ts = 1;

	if (priv->dma_cap.time_stamp)
		netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n");

	if (priv->adv_ts)
		netdev_info(priv->dev,
			    "IEEE 1588-2008 Advanced Timestamp supported\n");

	/* Start from a clean slate: timestamping disabled until requested */
	memset(&priv->tstamp_config, 0, sizeof(priv->tstamp_config));
	priv->hwts_tx_en = 0;
	priv->hwts_rx_en = 0;

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);

	return 0;
}
938
stmmac_setup_ptp(struct stmmac_priv * priv)939 static void stmmac_setup_ptp(struct stmmac_priv *priv)
940 {
941 int ret;
942
943 ret = clk_prepare_enable(priv->plat->clk_ptp_ref);
944 if (ret < 0)
945 netdev_warn(priv->dev,
946 "failed to enable PTP reference clock: %pe\n",
947 ERR_PTR(ret));
948
949 if (stmmac_init_timestamping(priv) == 0)
950 stmmac_ptp_register(priv);
951 }
952
/* Tear down PTP support: unregister the PTP clock driver first, then gate
 * the PTP reference clock that stmmac_setup_ptp() enabled.
 */
static void stmmac_release_ptp(struct stmmac_priv *priv)
{
	stmmac_ptp_unregister(priv);
	clk_disable_unprepare(priv->plat->clk_ptp_ref);
}
958
stmmac_legacy_serdes_power_down(struct stmmac_priv * priv)959 static void stmmac_legacy_serdes_power_down(struct stmmac_priv *priv)
960 {
961 if (priv->plat->serdes_powerdown && priv->legacy_serdes_is_powered)
962 priv->plat->serdes_powerdown(priv->dev, priv->plat->bsp_priv);
963
964 priv->legacy_serdes_is_powered = false;
965 }
966
stmmac_legacy_serdes_power_up(struct stmmac_priv * priv)967 static int stmmac_legacy_serdes_power_up(struct stmmac_priv *priv)
968 {
969 int ret;
970
971 if (!priv->plat->serdes_powerup)
972 return 0;
973
974 ret = priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv);
975 if (ret < 0)
976 netdev_err(priv->dev, "SerDes powerup failed\n");
977 else
978 priv->legacy_serdes_is_powered = true;
979
980 return ret;
981 }
982
983 /**
984 * stmmac_mac_flow_ctrl - Configure flow control in all queues
985 * @priv: driver private structure
986 * @duplex: duplex passed to the next function
987 * @flow_ctrl: desired flow control modes
988 * Description: It is used for configuring the flow control in all queues
989 */
stmmac_mac_flow_ctrl(struct stmmac_priv * priv,u32 duplex,unsigned int flow_ctrl)990 static void stmmac_mac_flow_ctrl(struct stmmac_priv *priv, u32 duplex,
991 unsigned int flow_ctrl)
992 {
993 u8 tx_cnt = priv->plat->tx_queues_to_use;
994
995 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time,
996 tx_cnt);
997 }
998
/* phylink .mac_get_caps callback: refresh and return the MAC capabilities,
 * restricted by the DMA capabilities and the platform maximum speed.
 */
static unsigned long stmmac_mac_get_caps(struct phylink_config *config,
					 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* Refresh the MAC-specific capabilities */
	stmmac_mac_update_caps(priv);

	/* Strip half-duplex modes when the HW cap register says the core
	 * cannot do half duplex.
	 */
	if (priv->hw_cap_support && !priv->dma_cap.half_duplex)
		priv->hw->link.caps &= ~(MAC_1000HD | MAC_100HD | MAC_10HD);

	config->mac_capabilities = priv->hw->link.caps;

	if (priv->plat->max_speed)
		phylink_limit_mac_speed(config, priv->plat->max_speed);

	/* Read back after phylink_limit_mac_speed() so any limiting it
	 * applied to config->mac_capabilities is reflected in the result.
	 */
	return config->mac_capabilities;
}
1017
/* phylink .mac_select_pcs callback: a platform-provided PCS takes
 * precedence; otherwise fall back to the integrated PCS when it supports
 * the requested interface, or no PCS at all.
 */
static struct phylink_pcs *stmmac_mac_select_pcs(struct phylink_config *config,
						 phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	if (priv->plat->select_pcs) {
		struct phylink_pcs *pcs = priv->plat->select_pcs(priv,
								 interface);

		if (!IS_ERR(pcs))
			return pcs;
	}

	if (priv->integrated_pcs &&
	    test_bit(interface, priv->integrated_pcs->pcs.supported_interfaces))
		return &priv->integrated_pcs->pcs;

	return NULL;
}
1036
/* phylink .mac_config callback: intentionally a no-op for this driver. */
static void stmmac_mac_config(struct phylink_config *config, unsigned int mode,
			      const struct phylink_link_state *state)
{
	/* Nothing to do, xpcs_config() handles everything */
}
1042
/* phylink .mac_finish callback: forward to the optional platform glue
 * hook; always reports success.
 */
static int stmmac_mac_finish(struct phylink_config *config, unsigned int mode,
			     phy_interface_t interface)
{
	struct net_device *ndev = to_net_dev(config->dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!priv->plat->mac_finish)
		return 0;

	priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface);

	return 0;
}
1055
/* phylink .mac_link_down callback: disable the MAC datapath and propagate
 * the link-down event to the EEE and frame-preemption logic.
 */
static void stmmac_mac_link_down(struct phylink_config *config,
				 unsigned int mode, phy_interface_t interface)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	stmmac_mac_set(priv, priv->ioaddr, false);
	if (priv->dma_cap.eee)
		stmmac_set_eee_pls(priv, priv->hw, false);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, false);
}
1068
/* phylink .mac_link_up callback: program the MAC control register for the
 * negotiated interface/speed/duplex, configure flow control and the TX
 * clock, then enable the MAC and propagate link-up to EEE/FPE logic.
 */
static void stmmac_mac_link_up(struct phylink_config *config,
			       struct phy_device *phy,
			       unsigned int mode, phy_interface_t interface,
			       int speed, int duplex,
			       bool tx_pause, bool rx_pause)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	unsigned int flow_ctrl;
	u32 old_ctrl, ctrl;
	int ret;

	/* Some platforms require the SerDes to be brought up only after the
	 * PHY has reported link-up.
	 */
	if (priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)
		stmmac_legacy_serdes_power_up(priv);

	old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG);
	ctrl = old_ctrl & ~priv->hw->link.speed_mask;

	/* Select the speed bits for the interface; return early on a speed
	 * this interface mode has no configuration for.
	 */
	if (interface == PHY_INTERFACE_MODE_USXGMII) {
		switch (speed) {
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_5000:
			ctrl |= priv->hw->link.xgmii.speed5000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.xgmii.speed2500;
			break;
		default:
			return;
		}
	} else if (interface == PHY_INTERFACE_MODE_XLGMII) {
		switch (speed) {
		case SPEED_100000:
			ctrl |= priv->hw->link.xlgmii.speed100000;
			break;
		case SPEED_50000:
			ctrl |= priv->hw->link.xlgmii.speed50000;
			break;
		case SPEED_40000:
			ctrl |= priv->hw->link.xlgmii.speed40000;
			break;
		case SPEED_25000:
			ctrl |= priv->hw->link.xlgmii.speed25000;
			break;
		case SPEED_10000:
			ctrl |= priv->hw->link.xgmii.speed10000;
			break;
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		default:
			return;
		}
	} else {
		switch (speed) {
		case SPEED_2500:
			ctrl |= priv->hw->link.speed2500;
			break;
		case SPEED_1000:
			ctrl |= priv->hw->link.speed1000;
			break;
		case SPEED_100:
			ctrl |= priv->hw->link.speed100;
			break;
		case SPEED_10:
			ctrl |= priv->hw->link.speed10;
			break;
		default:
			return;
		}
	}

	/* Glue-layer hook for platform-specific speed fixups */
	if (priv->plat->fix_mac_speed)
		priv->plat->fix_mac_speed(priv->plat->bsp_priv, interface,
					  speed, mode);

	if (!duplex)
		ctrl &= ~priv->hw->link.duplex;
	else
		ctrl |= priv->hw->link.duplex;

	/* Flow Control operation */
	if (rx_pause && tx_pause)
		flow_ctrl = FLOW_AUTO;
	else if (rx_pause && !tx_pause)
		flow_ctrl = FLOW_RX;
	else if (!rx_pause && tx_pause)
		flow_ctrl = FLOW_TX;
	else
		flow_ctrl = FLOW_OFF;

	stmmac_mac_flow_ctrl(priv, duplex, flow_ctrl);

	/* Skip the register write when nothing changed */
	if (ctrl != old_ctrl)
		writel(ctrl, priv->ioaddr + MAC_CTRL_REG);

	if (priv->plat->set_clk_tx_rate) {
		ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv,
						  priv->plat->clk_tx_i,
						  interface, speed);
		if (ret < 0)
			netdev_err(priv->dev,
				   "failed to configure %s transmit clock for %dMbps: %pe\n",
				   phy_modes(interface), speed, ERR_PTR(ret));
	}

	/* Enable the MAC datapath and propagate link-up to EEE/FPE logic */
	stmmac_mac_set(priv, priv->ioaddr, true);
	if (priv->dma_cap.eee)
		stmmac_set_eee_pls(priv, priv->hw, true);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_link_state_handle(&priv->fpe_cfg.mmsv, true);

	if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY)
		stmmac_hwtstamp_correct_latency(priv, priv);
}
1189
/* phylink .mac_disable_tx_lpi callback: stop the software LPI timer, take
 * the MAC out of LPI and reset the EEE timers.
 */
static void stmmac_mac_disable_tx_lpi(struct phylink_config *config)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	/* NOTE(review): eee_active is cleared without priv->lock held,
	 * unlike eee_enabled below — confirm this asymmetry is intended.
	 */
	priv->eee_active = false;

	mutex_lock(&priv->lock);

	priv->eee_enabled = false;

	netdev_dbg(priv->dev, "disable EEE\n");
	priv->eee_sw_timer_en = false;
	timer_delete_sync(&priv->eee_ctrl_timer);
	stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0);
	priv->tx_path_in_lpi_mode = false;

	/* Zero the LS timer while keeping the default TW timer */
	stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS);
	mutex_unlock(&priv->lock);
}
1209
/* phylink .mac_enable_tx_lpi callback: enable EEE with the given LPI
 * timer; falls back to the software LPI timer when the hardware timer
 * cannot be configured. Always returns 0.
 */
static int stmmac_mac_enable_tx_lpi(struct phylink_config *config, u32 timer,
				    bool tx_clk_stop)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));
	int ret;

	/* NOTE(review): tx_lpi_timer/eee_active are written before taking
	 * priv->lock while eee_enabled is updated under it — confirm the
	 * locking asymmetry is intentional.
	 */
	priv->tx_lpi_timer = timer;
	priv->eee_active = true;

	mutex_lock(&priv->lock);

	priv->eee_enabled = true;

	/* Update the transmit clock stop according to PHY capability if
	 * the platform allows
	 */
	if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP)
		priv->tx_lpi_clk_stop = tx_clk_stop;

	stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS,
			     STMMAC_DEFAULT_TWT_LS);

	/* Try to configure the hardware timer. */
	ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER,
				  priv->tx_lpi_clk_stop, priv->tx_lpi_timer);

	if (ret) {
		/* Hardware timer mode not supported, or value out of range.
		 * Fall back to using software LPI mode
		 */
		priv->eee_sw_timer_en = true;
		stmmac_restart_sw_lpi_timer(priv);
	}

	mutex_unlock(&priv->lock);
	netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n");

	return 0;
}
1249
/* phylink .mac_wol_set callback: arm or disarm the wakeup source and
 * publish the new WoL options under the driver lock.
 */
static int stmmac_mac_wol_set(struct phylink_config *config, u32 wolopts,
			      const u8 *sopass)
{
	struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev));

	device_set_wakeup_enable(priv->device, wolopts != 0);

	mutex_lock(&priv->lock);
	priv->wolopts = wolopts;
	mutex_unlock(&priv->lock);

	return 0;
}
1263
/* MAC callbacks handed to phylink by stmmac_phylink_setup() */
static const struct phylink_mac_ops stmmac_phylink_mac_ops = {
	.mac_get_caps = stmmac_mac_get_caps,
	.mac_select_pcs = stmmac_mac_select_pcs,
	.mac_config = stmmac_mac_config,
	.mac_finish = stmmac_mac_finish,
	.mac_link_down = stmmac_mac_link_down,
	.mac_link_up = stmmac_mac_link_up,
	.mac_disable_tx_lpi = stmmac_mac_disable_tx_lpi,
	.mac_enable_tx_lpi = stmmac_mac_enable_tx_lpi,
	.mac_wol_set = stmmac_mac_wol_set,
};
1275
1276 /**
1277 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1278 * @priv: driver private structure
1279 * Description: this is to verify if the HW supports the PCS.
1280 * Physical Coding Sublayer (PCS) interface that can be used when the MAC is
1281 * configured for the TBI, RTBI, or SGMII PHY interface.
1282 */
stmmac_check_pcs_mode(struct stmmac_priv * priv)1283 static void stmmac_check_pcs_mode(struct stmmac_priv *priv)
1284 {
1285 int interface = priv->plat->phy_interface;
1286 int speed = priv->plat->mac_port_sel_speed;
1287
1288 if (priv->dma_cap.pcs && interface == PHY_INTERFACE_MODE_SGMII) {
1289 netdev_dbg(priv->dev, "PCS SGMII support enabled\n");
1290
1291 switch (speed) {
1292 case SPEED_10:
1293 case SPEED_100:
1294 case SPEED_1000:
1295 priv->hw->reverse_sgmii_enable = true;
1296 break;
1297
1298 default:
1299 dev_warn(priv->device, "invalid port speed\n");
1300 fallthrough;
1301 case 0:
1302 priv->hw->reverse_sgmii_enable = false;
1303 break;
1304 }
1305 }
1306 }
1307
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success (or when no PHY attach is needed), -ENODEV when no PHY can
 * be found, or the phylink connect error.
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int mode = priv->plat->phy_interface;
	struct fwnode_handle *phy_fwnode;
	struct fwnode_handle *fwnode;
	struct ethtool_keee eee;
	u32 dev_flags = 0;
	int ret;

	/* Nothing to attach when phylink does not expect a PHY */
	if (!phylink_expects_phy(priv->phylink))
		return 0;

	/* NOTE(review): with xpcs in C73 autoneg mode no PHY is attached —
	 * confirm DW_AN_C73 always implies a PHY-less link.
	 */
	if (priv->hw->xpcs &&
	    xpcs_get_an_mode(priv->hw->xpcs, mode) == DW_AN_C73)
		return 0;

	fwnode = dev_fwnode(priv->device);
	if (fwnode)
		phy_fwnode = fwnode_get_phy_node(fwnode);
	else
		phy_fwnode = NULL;

	if (priv->plat->flags & STMMAC_FLAG_KEEP_PREAMBLE_BEFORE_SFD)
		dev_flags |= PHY_F_KEEP_PREAMBLE_BEFORE_SFD;

	/* Some DT bindings do not set-up the PHY handle. Let's try to
	 * manually parse it
	 */
	if (!phy_fwnode || IS_ERR(phy_fwnode)) {
		int addr = priv->plat->phy_addr;
		struct phy_device *phydev;

		if (addr < 0) {
			netdev_err(priv->dev, "no phy found\n");
			return -ENODEV;
		}

		phydev = mdiobus_get_phy(priv->mii, addr);
		if (!phydev) {
			netdev_err(priv->dev, "no phy at addr %d\n", addr);
			return -ENODEV;
		}

		phydev->dev_flags |= dev_flags;

		ret = phylink_connect_phy(priv->phylink, phydev);
	} else {
		/* Drop the reference taken by fwnode_get_phy_node();
		 * phylink looks the PHY up again from @fwnode.
		 */
		fwnode_handle_put(phy_fwnode);
		ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, dev_flags);
	}

	if (ret) {
		netdev_err(priv->dev, "cannot attach to PHY (error: %pe)\n",
			   ERR_PTR(ret));
		return ret;
	}

	/* Configure phylib's copy of the LPI timer. Normally,
	 * phylink_config.lpi_timer_default would do this, but there is a
	 * chance that userspace could change the eee_timer setting via sysfs
	 * before the first open. Thus, preserve existing behaviour.
	 */
	if (!phylink_ethtool_get_eee(priv->phylink, &eee)) {
		eee.tx_lpi_timer = priv->tx_lpi_timer;
		phylink_ethtool_set_eee(priv->phylink, &eee);
	}

	return 0;
}
1386
/**
 * stmmac_phylink_setup - create and configure the phylink instance
 * @priv: driver private structure
 * Description: fills in &priv->phylink_config (supported interfaces,
 * EEE/LPI capabilities, WoL support) from the DMA capabilities and the
 * platform data, then creates the phylink instance.
 * Return: 0 on success or the error from phylink_create().
 */
static int stmmac_phylink_setup(struct stmmac_priv *priv)
{
	struct phylink_config *config;
	struct phylink_pcs *pcs;
	struct phylink *phylink;

	config = &priv->phylink_config;

	config->dev = &priv->dev->dev;
	config->type = PHYLINK_NETDEV;
	config->mac_managed_pm = true;

	/* Stmmac always requires an RX clock for hardware initialization */
	config->mac_requires_rxc = true;

	/* Disable EEE RX clock stop to ensure VLAN register access works
	 * correctly.
	 */
	if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI) &&
	    !(priv->dev->features & NETIF_F_VLAN_FEATURES))
		config->eee_rx_clk_stop_enable = true;

	/* Set the default transmit clock stop bit based on the platform glue */
	priv->tx_lpi_clk_stop = priv->plat->flags &
				STMMAC_FLAG_EN_TX_LPI_CLOCKGATING;

	config->default_an_inband = priv->plat->default_an_inband;

	/* Get the PHY interface modes (at the PHY end of the link) that
	 * are supported by the platform.
	 */
	if (priv->plat->get_interfaces)
		priv->plat->get_interfaces(priv, priv->plat->bsp_priv,
					   config->supported_interfaces);

	/* Set the platform/firmware specified interface mode if the
	 * supported interfaces have not already been provided using
	 * phy_interface as a last resort.
	 */
	if (phy_interface_empty(config->supported_interfaces))
		__set_bit(priv->plat->phy_interface,
			  config->supported_interfaces);

	/* If we have an xpcs, it defines which PHY interfaces are supported. */
	if (priv->hw->xpcs)
		pcs = xpcs_to_phylink_pcs(priv->hw->xpcs);
	else
		pcs = priv->hw->phylink_pcs;

	if (pcs)
		phy_interface_or(config->supported_interfaces,
				 config->supported_interfaces,
				 pcs->supported_interfaces);

	/* Some platforms, e.g. iMX8MP, wire lpi_intr_o to the same interrupt
	 * used for stmmac's main interrupts, which leads to interrupt storms.
	 * STMMAC_FLAG_EEE_DISABLE allows EEE to be disabled on such platforms.
	 */
	if (priv->dma_cap.eee &&
	    !(priv->plat->flags & STMMAC_FLAG_EEE_DISABLE)) {
		/* The GMAC 3.74a databook states that EEE is only supported
		 * in MII, GMII, and RGMII interfaces.
		 */
		__set_bit(PHY_INTERFACE_MODE_MII, config->lpi_interfaces);
		__set_bit(PHY_INTERFACE_MODE_GMII, config->lpi_interfaces);
		phy_interface_set_rgmii(config->lpi_interfaces);

		/* If we have a non-integrated PCS, assume that it is connected
		 * to the GMAC using GMII or another EEE compatible interface,
		 * and thus all PCS-supported interfaces support LPI.
		 */
		if (pcs)
			phy_interface_or(config->lpi_interfaces,
					 config->lpi_interfaces,
					 pcs->supported_interfaces);

		/* All full duplex speeds above 100Mbps are supported */
		config->lpi_capabilities = ~(MAC_1000FD - 1) | MAC_100FD;
		config->lpi_timer_default = eee_timer * 1000;
		config->eee_enabled_default = true;
	}

	/* WoL: either legacy PHY-based wakeup or MAC PMT-based wakeup,
	 * depending on platform flags and HW capabilities.
	 */
	config->wol_phy_speed_ctrl = true;
	if (priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL) {
		config->wol_phy_legacy = true;
	} else {
		if (priv->dma_cap.pmt_remote_wake_up)
			config->wol_mac_support |= WAKE_UCAST;
		if (priv->dma_cap.pmt_magic_frame)
			config->wol_mac_support |= WAKE_MAGIC;
	}

	phylink = phylink_create(config, dev_fwnode(priv->device),
				 priv->plat->phy_interface,
				 &stmmac_phylink_mac_ops);
	if (IS_ERR(phylink))
		return PTR_ERR(phylink);

	priv->phylink = phylink;
	return 0;
}
1488
stmmac_display_rx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1489 static void stmmac_display_rx_rings(struct stmmac_priv *priv,
1490 struct stmmac_dma_conf *dma_conf)
1491 {
1492 u8 rx_cnt = priv->plat->rx_queues_to_use;
1493 unsigned int desc_size;
1494 void *head_rx;
1495 u8 queue;
1496
1497 /* Display RX rings */
1498 for (queue = 0; queue < rx_cnt; queue++) {
1499 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1500
1501 pr_info("\tRX Queue %u rings\n", queue);
1502
1503 head_rx = stmmac_get_rx_desc(priv, rx_q, 0);
1504 desc_size = stmmac_get_rx_desc_size(priv);
1505
1506 /* Display RX ring */
1507 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true,
1508 rx_q->dma_rx_phy, desc_size);
1509 }
1510 }
1511
stmmac_display_tx_rings(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1512 static void stmmac_display_tx_rings(struct stmmac_priv *priv,
1513 struct stmmac_dma_conf *dma_conf)
1514 {
1515 u8 tx_cnt = priv->plat->tx_queues_to_use;
1516 unsigned int desc_size;
1517 void *head_tx;
1518 u8 queue;
1519
1520 /* Display TX rings */
1521 for (queue = 0; queue < tx_cnt; queue++) {
1522 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1523
1524 pr_info("\tTX Queue %d rings\n", queue);
1525
1526 head_tx = stmmac_get_tx_desc(priv, tx_q, 0);
1527 desc_size = stmmac_get_tx_desc_size(priv, tx_q);
1528
1529 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false,
1530 tx_q->dma_tx_phy, desc_size);
1531 }
1532 }
1533
/* Dump all RX and TX descriptor rings (debug aid). */
static void stmmac_display_rings(struct stmmac_priv *priv,
				 struct stmmac_dma_conf *dma_conf)
{
	stmmac_display_rx_rings(priv, dma_conf);
	stmmac_display_tx_rings(priv, dma_conf);
}
1543
stmmac_rx_offset(struct stmmac_priv * priv)1544 static unsigned int stmmac_rx_offset(struct stmmac_priv *priv)
1545 {
1546 if (stmmac_xdp_is_enabled(priv))
1547 return XDP_PACKET_HEADROOM;
1548
1549 return NET_SKB_PAD;
1550 }
1551
stmmac_set_bfsize(int mtu)1552 static int stmmac_set_bfsize(int mtu)
1553 {
1554 int ret;
1555
1556 if (mtu >= BUF_SIZE_8KiB)
1557 ret = BUF_SIZE_16KiB;
1558 else if (mtu >= BUF_SIZE_4KiB)
1559 ret = BUF_SIZE_8KiB;
1560 else if (mtu >= BUF_SIZE_2KiB)
1561 ret = BUF_SIZE_4KiB;
1562 else if (mtu > DEFAULT_BUFSIZE)
1563 ret = BUF_SIZE_2KiB;
1564 else
1565 ret = DEFAULT_BUFSIZE;
1566
1567 return ret;
1568 }
1569
1570 /**
1571 * stmmac_clear_rx_descriptors - clear RX descriptors
1572 * @priv: driver private structure
1573 * @dma_conf: structure to take the dma data
1574 * @queue: RX queue index
1575 * Description: this function is called to clear the RX descriptors
1576 * in case of both basic and extended descriptors are used.
1577 */
stmmac_clear_rx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1578 static void stmmac_clear_rx_descriptors(struct stmmac_priv *priv,
1579 struct stmmac_dma_conf *dma_conf,
1580 u32 queue)
1581 {
1582 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1583 struct dma_desc *desc;
1584 int i;
1585
1586 /* Clear the RX descriptors */
1587 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1588 desc = stmmac_get_rx_desc(priv, rx_q, i);
1589
1590 stmmac_init_rx_desc(priv, desc, priv->use_riwt,
1591 priv->descriptor_mode,
1592 (i == dma_conf->dma_rx_size - 1),
1593 dma_conf->dma_buf_sz);
1594 }
1595 }
1596
1597 /**
1598 * stmmac_clear_tx_descriptors - clear tx descriptors
1599 * @priv: driver private structure
1600 * @dma_conf: structure to take the dma data
1601 * @queue: TX queue index.
1602 * Description: this function is called to clear the TX descriptors
1603 * in case of both basic and extended descriptors are used.
1604 */
stmmac_clear_tx_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1605 static void stmmac_clear_tx_descriptors(struct stmmac_priv *priv,
1606 struct stmmac_dma_conf *dma_conf,
1607 u32 queue)
1608 {
1609 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
1610 int i;
1611
1612 /* Clear the TX descriptors */
1613 for (i = 0; i < dma_conf->dma_tx_size; i++) {
1614 int last = (i == (dma_conf->dma_tx_size - 1));
1615 struct dma_desc *p;
1616
1617 p = stmmac_get_tx_desc(priv, tx_q, i);
1618 stmmac_init_tx_desc(priv, p, priv->descriptor_mode, last);
1619 }
1620 }
1621
1622 /**
1623 * stmmac_clear_descriptors - clear descriptors
1624 * @priv: driver private structure
1625 * @dma_conf: structure to take the dma data
1626 * Description: this function is called to clear the TX and RX descriptors
1627 * in case of both basic and extended descriptors are used.
1628 */
stmmac_clear_descriptors(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)1629 static void stmmac_clear_descriptors(struct stmmac_priv *priv,
1630 struct stmmac_dma_conf *dma_conf)
1631 {
1632 u8 rx_queue_cnt = priv->plat->rx_queues_to_use;
1633 u8 tx_queue_cnt = priv->plat->tx_queues_to_use;
1634 u8 queue;
1635
1636 /* Clear the RX descriptors */
1637 for (queue = 0; queue < rx_queue_cnt; queue++)
1638 stmmac_clear_rx_descriptors(priv, dma_conf, queue);
1639
1640 /* Clear the TX descriptors */
1641 for (queue = 0; queue < tx_queue_cnt; queue++)
1642 stmmac_clear_tx_descriptors(priv, dma_conf, queue);
1643 }
1644
/**
 * stmmac_init_rx_buffers - init the RX descriptor buffer.
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @p: descriptor pointer
 * @i: descriptor index
 * @flags: gfp flag (note: currently unused — the allocation mask is built
 *         locally from GFP_ATOMIC below)
 * @queue: RX queue index
 * Description: this function is called to allocate a receive buffer, perform
 * the DMA mapping and init the descriptor.
 * Return: 0 on success, -ENOMEM when a page pool allocation fails.
 */
static int stmmac_init_rx_buffers(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  struct dma_desc *p,
				  int i, gfp_t flags, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Restrict the allocation to 32-bit addressable memory when the
	 * host DMA width cannot address more.
	 */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	/* Primary buffer (reused if already allocated) */
	if (!buf->page) {
		buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->page)
			return -ENOMEM;
		buf->page_offset = stmmac_rx_offset(priv);
	}

	/* Secondary buffer is only populated when split-header is active */
	if (priv->sph_active && !buf->sec_page) {
		buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
		if (!buf->sec_page)
			return -ENOMEM;

		buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
	} else {
		buf->sec_page = NULL;
		stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
	}

	buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

	stmmac_set_desc_addr(priv, p, buf->addr);
	if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB)
		stmmac_init_desc3(priv, p);

	return 0;
}
1695
1696 /**
1697 * stmmac_free_rx_buffer - free RX dma buffers
1698 * @priv: private structure
1699 * @rx_q: RX queue
1700 * @i: buffer index.
1701 */
stmmac_free_rx_buffer(struct stmmac_priv * priv,struct stmmac_rx_queue * rx_q,int i)1702 static void stmmac_free_rx_buffer(struct stmmac_priv *priv,
1703 struct stmmac_rx_queue *rx_q,
1704 int i)
1705 {
1706 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1707
1708 if (buf->page)
1709 page_pool_put_full_page(rx_q->page_pool, buf->page, false);
1710 buf->page = NULL;
1711
1712 if (buf->sec_page)
1713 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false);
1714 buf->sec_page = NULL;
1715 }
1716
/**
 * stmmac_free_tx_buffer - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * @i: buffer index.
 */
static void stmmac_free_tx_buffer(struct stmmac_priv *priv,
				  struct stmmac_dma_conf *dma_conf,
				  u32 queue, int i)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];

	/* Unmap the DMA buffer; XDP_TX buffers are deliberately excluded
	 * from unmapping here.
	 */
	if (tx_q->tx_skbuff_dma[i].buf &&
	    tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) {
		if (tx_q->tx_skbuff_dma[i].map_as_page)
			dma_unmap_page(priv->device,
				       tx_q->tx_skbuff_dma[i].buf,
				       tx_q->tx_skbuff_dma[i].len,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(priv->device,
					 tx_q->tx_skbuff_dma[i].buf,
					 tx_q->tx_skbuff_dma[i].len,
					 DMA_TO_DEVICE);
	}

	/* Return XDP frames (both XDP_TX and ndo_xdp_xmit) */
	if (tx_q->xdpf[i] &&
	    (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX ||
	     tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) {
		xdp_return_frame(tx_q->xdpf[i]);
		tx_q->xdpf[i] = NULL;
	}

	/* XSK TX frames are only counted here; presumably they are
	 * completed in bulk elsewhere via the xsk_frames_done counter.
	 */
	if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX)
		tx_q->xsk_frames_done++;

	if (tx_q->tx_skbuff[i] &&
	    tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) {
		dev_kfree_skb_any(tx_q->tx_skbuff[i]);
		tx_q->tx_skbuff[i] = NULL;
	}

	tx_q->tx_skbuff_dma[i].buf = 0;
	tx_q->tx_skbuff_dma[i].map_as_page = false;
}
1763
1764 /**
1765 * dma_free_rx_skbufs - free RX dma buffers
1766 * @priv: private structure
1767 * @dma_conf: structure to take the dma data
1768 * @queue: RX queue index
1769 */
dma_free_rx_skbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1770 static void dma_free_rx_skbufs(struct stmmac_priv *priv,
1771 struct stmmac_dma_conf *dma_conf,
1772 u32 queue)
1773 {
1774 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1775 int i;
1776
1777 for (i = 0; i < dma_conf->dma_rx_size; i++)
1778 stmmac_free_rx_buffer(priv, rx_q, i);
1779 }
1780
/* Populate every descriptor of an RX queue with page-pool buffers,
 * counting successful allocations in buf_alloc_num. Stops and returns the
 * error on the first allocation failure.
 */
static int stmmac_alloc_rx_buffers(struct stmmac_priv *priv,
				   struct stmmac_dma_conf *dma_conf,
				   u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct dma_desc *desc = stmmac_get_rx_desc(priv, rx_q, i);
		int err = stmmac_init_rx_buffers(priv, dma_conf, desc, i,
						 flags, queue);

		if (err)
			return err;

		rx_q->buf_alloc_num++;
	}

	return 0;
}
1804
1805 /**
1806 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1807 * @priv: private structure
1808 * @dma_conf: structure to take the dma data
1809 * @queue: RX queue index
1810 */
dma_free_rx_xskbufs(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)1811 static void dma_free_rx_xskbufs(struct stmmac_priv *priv,
1812 struct stmmac_dma_conf *dma_conf,
1813 u32 queue)
1814 {
1815 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
1816 int i;
1817
1818 for (i = 0; i < dma_conf->dma_rx_size; i++) {
1819 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
1820
1821 if (!buf->xdp)
1822 continue;
1823
1824 xsk_buff_free(buf->xdp);
1825 buf->xdp = NULL;
1826 }
1827 }
1828
/* Fill an RX queue's descriptors with zero-copy buffers from the XSK
 * pool. Returns -ENOMEM as soon as the pool runs out of buffers.
 */
static int stmmac_alloc_rx_buffers_zc(struct stmmac_priv *priv,
				      struct stmmac_dma_conf *dma_conf,
				      u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	int i;

	/* struct stmmac_xdp_buff is using cb field (maximum size of 24 bytes)
	 * in struct xdp_buff_xsk to stash driver specific information. Thus,
	 * use this macro to make sure no size violations.
	 */
	XSK_CHECK_PRIV_TYPE(struct stmmac_xdp_buff);

	for (i = 0; i < dma_conf->dma_rx_size; i++) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i];
		struct dma_desc *desc;

		buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
		if (!buf->xdp)
			return -ENOMEM;

		desc = stmmac_get_rx_desc(priv, rx_q, i);
		stmmac_set_desc_addr(priv, desc,
				     xsk_buff_xdp_get_dma(buf->xdp));
		rx_q->buf_alloc_num++;
	}

	return 0;
}
1862
/* Return the XSK buffer pool for @queue when XDP is enabled and the queue
 * is registered for AF_XDP zero-copy, NULL otherwise.
 */
static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue)
{
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		return xsk_get_pool_from_qid(priv->dev, queue);

	return NULL;
}
1870
/**
 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, -ENOMEM if RX buffer allocation fails.
 */
static int __init_dma_rx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue, gfp_t flags)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	void *des;
	int ret;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_rx_phy=0x%08x\n", __func__,
		  (u32)rx_q->dma_rx_phy);

	stmmac_clear_rx_descriptors(priv, dma_conf, queue);

	/* Drop any previously registered memory model before deciding
	 * between XSK zero-copy and the regular page pool below.
	 */
	xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq);

	rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	if (rx_q->xsk_pool) {
		/* Zero-copy mode: buffers come from the XSK pool, not the
		 * driver's page pool.
		 */
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_XSK_BUFF_POOL,
						   NULL));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n",
			    queue);
		xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq);
	} else {
		WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq,
						   MEM_TYPE_PAGE_POOL,
						   rx_q->page_pool));
		netdev_info(priv->dev,
			    "Register MEM_TYPE_PAGE_POOL RxQ-%d\n",
			    queue);
	}

	if (rx_q->xsk_pool) {
		/* RX XDP ZC buffer pool may not be populated, e.g.
		 * xdpsock TX-only.
		 */
		stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue);
	} else {
		ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags);
		if (ret < 0)
			return -ENOMEM;
	}

	/* Setup the chained descriptor addresses */
	if (priv->descriptor_mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			des = rx_q->dma_erx;
		else
			des = rx_q->dma_rx;

		stmmac_mode_init(priv, des, rx_q->dma_rx_phy,
				 dma_conf->dma_rx_size, priv->extend_desc);
	}

	return 0;
}
1940
/**
 * init_dma_rx_desc_rings - init every RX descriptor ring in use
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: initializes all RX queues; on failure, unwinds the buffers
 * of the failing queue and of every queue initialized before it.
 * Return: 0 on success, negative errno from __init_dma_rx_desc_rings().
 */
static int init_dma_rx_desc_rings(struct net_device *dev,
				  struct stmmac_dma_conf *dma_conf,
				  gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_count = priv->plat->rx_queues_to_use;
	int queue;
	int ret;

	/* RX INITIALIZATION */
	netif_dbg(priv, probe, priv->dev,
		  "SKB addresses:\nskb\t\tskb data\tdma data\n");

	for (queue = 0; queue < rx_count; queue++) {
		ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags);
		if (ret)
			goto err_init_rx_buffers;
	}

	return 0;

err_init_rx_buffers:
	/* Note: the failing queue is cleaned up first (it may have been
	 * partially populated), then all previously initialized queues.
	 */
	while (queue >= 0) {
		struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];

		if (rx_q->xsk_pool)
			dma_free_rx_xskbufs(priv, dma_conf, queue);
		else
			dma_free_rx_skbufs(priv, dma_conf, queue);

		rx_q->buf_alloc_num = 0;
		rx_q->xsk_pool = NULL;

		queue--;
	}

	return ret;
}
1979
/* Record a fresh DMA mapping in TX ring bookkeeping slot @entry.
 * The per-frame completion flags (last_segment, is_jumbo) are reset so
 * stale state from the slot's previous use cannot leak into this frame.
 */
static void stmmac_set_tx_dma_entry(struct stmmac_tx_queue *tx_q,
				    unsigned int entry,
				    enum stmmac_txbuf_type type,
				    dma_addr_t addr, size_t len,
				    bool map_as_page)
{
	tx_q->tx_skbuff_dma[entry].last_segment = false;
	tx_q->tx_skbuff_dma[entry].is_jumbo = false;
	tx_q->tx_skbuff_dma[entry].buf_type = type;
	tx_q->tx_skbuff_dma[entry].map_as_page = map_as_page;
	tx_q->tx_skbuff_dma[entry].buf = addr;
	tx_q->tx_skbuff_dma[entry].len = len;
}
1993
/* Record a DMA mapping of SKB-owned memory in TX ring slot @entry.
 * Thin wrapper around stmmac_set_tx_dma_entry() with the buffer type
 * fixed to STMMAC_TXBUF_T_SKB.
 */
static void stmmac_set_tx_skb_dma_entry(struct stmmac_tx_queue *tx_q,
					unsigned int entry, dma_addr_t addr,
					size_t len, bool map_as_page)
{
	stmmac_set_tx_dma_entry(tx_q, entry, STMMAC_TXBUF_T_SKB, addr, len,
				map_as_page);
}
2001
/* Mark TX ring slot @entry as carrying the last segment of a frame */
static void stmmac_set_tx_dma_last_segment(struct stmmac_tx_queue *tx_q,
					   unsigned int entry)
{
	tx_q->tx_skbuff_dma[entry].last_segment = true;
}
2007
/**
 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
 * @priv: driver private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: this function initializes the DMA TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: always 0.
 */
static int __init_dma_tx_desc_rings(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf,
				    u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	netif_dbg(priv, probe, priv->dev,
		  "(%s) dma_tx_phy=0x%08x\n", __func__,
		  (u32)tx_q->dma_tx_phy);

	/* Setup the chained descriptor addresses */
	if (priv->descriptor_mode == STMMAC_CHAIN_MODE) {
		if (priv->extend_desc)
			stmmac_mode_init(priv, tx_q->dma_etx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 1);
		/* TBS descriptors use their own layout; chain init is
		 * skipped for them.
		 */
		else if (!(tx_q->tbs & STMMAC_TBS_AVAIL))
			stmmac_mode_init(priv, tx_q->dma_tx,
					 tx_q->dma_tx_phy,
					 dma_conf->dma_tx_size, 0);
	}

	tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue);

	/* Start with an empty ring: cleared descriptors and no mapped
	 * buffers or SKBs.
	 */
	for (i = 0; i < dma_conf->dma_tx_size; i++) {
		struct dma_desc *p;

		p = stmmac_get_tx_desc(priv, tx_q, i);
		stmmac_clear_desc(priv, p);
		stmmac_set_tx_skb_dma_entry(tx_q, i, 0, 0, false);

		tx_q->tx_skbuff[i] = NULL;
	}

	return 0;
}
2054
init_dma_tx_desc_rings(struct net_device * dev,struct stmmac_dma_conf * dma_conf)2055 static int init_dma_tx_desc_rings(struct net_device *dev,
2056 struct stmmac_dma_conf *dma_conf)
2057 {
2058 struct stmmac_priv *priv = netdev_priv(dev);
2059 u8 tx_queue_cnt;
2060 u8 queue;
2061
2062 tx_queue_cnt = priv->plat->tx_queues_to_use;
2063
2064 for (queue = 0; queue < tx_queue_cnt; queue++)
2065 __init_dma_tx_desc_rings(priv, dma_conf, queue);
2066
2067 return 0;
2068 }
2069
/**
 * init_dma_desc_rings - init the RX/TX descriptor rings
 * @dev: net device structure
 * @dma_conf: structure to take the dma data
 * @flags: gfp flag.
 * Description: this function initializes the DMA RX/TX descriptors
 * and allocates the socket buffers. It supports the chained and ring
 * modes.
 * Return: 0 on success, negative errno on RX or TX init failure.
 */
static int init_dma_desc_rings(struct net_device *dev,
			       struct stmmac_dma_conf *dma_conf,
			       gfp_t flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret;

	ret = init_dma_rx_desc_rings(dev, dma_conf, flags);
	if (ret)
		return ret;

	ret = init_dma_tx_desc_rings(dev, dma_conf);

	/* Descriptors are cleared (and optionally dumped) even if TX init
	 * reported an error; the TX ret is still returned to the caller.
	 */
	stmmac_clear_descriptors(priv, dma_conf);

	if (netif_msg_hw(priv))
		stmmac_display_rings(priv, dma_conf);

	return ret;
}
2099
/**
 * dma_free_tx_skbufs - free TX dma buffers
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: releases every TX buffer of @queue and, for XSK queues,
 * reports any freed zero-copy frames back to the pool.
 */
static void dma_free_tx_skbufs(struct stmmac_priv *priv,
			       struct stmmac_dma_conf *dma_conf,
			       u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	int i;

	/* xsk_frames_done is presumably accumulated by
	 * stmmac_free_tx_buffer() for XSK-owned slots — confirm in its
	 * definition.
	 */
	tx_q->xsk_frames_done = 0;

	for (i = 0; i < dma_conf->dma_tx_size; i++)
		stmmac_free_tx_buffer(priv, dma_conf, queue, i);

	if (tx_q->xsk_pool && tx_q->xsk_frames_done) {
		/* Tell user space the zero-copy frames completed, then
		 * drop our reference to the pool.
		 */
		xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);
		tx_q->xsk_frames_done = 0;
		tx_q->xsk_pool = NULL;
	}
}
2124
2125 /**
2126 * stmmac_free_tx_skbufs - free TX skb buffers
2127 * @priv: private structure
2128 */
stmmac_free_tx_skbufs(struct stmmac_priv * priv)2129 static void stmmac_free_tx_skbufs(struct stmmac_priv *priv)
2130 {
2131 u8 tx_queue_cnt = priv->plat->tx_queues_to_use;
2132 u8 queue;
2133
2134 for (queue = 0; queue < tx_queue_cnt; queue++)
2135 dma_free_tx_skbufs(priv, &priv->dma_conf, queue);
2136 }
2137
/**
 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: releases the RX buffers, descriptor memory, XDP rxq
 * registration and page pool of a single queue, in that order.
 */
static void __free_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	size_t size;
	void *addr;

	/* Release the DMA RX socket buffers */
	if (rx_q->xsk_pool)
		dma_free_rx_xskbufs(priv, dma_conf, queue);
	else
		dma_free_rx_skbufs(priv, dma_conf, queue);

	rx_q->buf_alloc_num = 0;
	rx_q->xsk_pool = NULL;

	/* Free DMA regions of consistent memory previously allocated */
	if (priv->extend_desc)
		addr = rx_q->dma_erx;
	else
		addr = rx_q->dma_rx;

	size = stmmac_get_rx_desc_size(priv) * dma_conf->dma_rx_size;

	dma_free_coherent(priv->device, size, addr, rx_q->dma_rx_phy);

	/* Unregister before destroying the page pool the rxq may still
	 * reference via its memory model.
	 */
	if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq))
		xdp_rxq_info_unreg(&rx_q->xdp_rxq);

	kfree(rx_q->buf_pool);
	if (rx_q->page_pool)
		page_pool_destroy(rx_q->page_pool);
}
2178
free_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2179 static void free_dma_rx_desc_resources(struct stmmac_priv *priv,
2180 struct stmmac_dma_conf *dma_conf)
2181 {
2182 u8 rx_count = priv->plat->rx_queues_to_use;
2183 u8 queue;
2184
2185 /* Free RX queue resources */
2186 for (queue = 0; queue < rx_count; queue++)
2187 __free_dma_rx_desc_resources(priv, dma_conf, queue);
2188 }
2189
2190 /**
2191 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2192 * @priv: private structure
2193 * @dma_conf: structure to take the dma data
2194 * @queue: TX queue index
2195 */
__free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf,u32 queue)2196 static void __free_dma_tx_desc_resources(struct stmmac_priv *priv,
2197 struct stmmac_dma_conf *dma_conf,
2198 u32 queue)
2199 {
2200 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
2201 size_t size;
2202 void *addr;
2203
2204 /* Release the DMA TX socket buffers */
2205 dma_free_tx_skbufs(priv, dma_conf, queue);
2206
2207 if (priv->extend_desc) {
2208 addr = tx_q->dma_etx;
2209 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) {
2210 addr = tx_q->dma_entx;
2211 } else {
2212 addr = tx_q->dma_tx;
2213 }
2214
2215 size = stmmac_get_tx_desc_size(priv, tx_q) * dma_conf->dma_tx_size;
2216
2217 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy);
2218
2219 kfree(tx_q->tx_skbuff_dma);
2220 kfree(tx_q->tx_skbuff);
2221 }
2222
free_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2223 static void free_dma_tx_desc_resources(struct stmmac_priv *priv,
2224 struct stmmac_dma_conf *dma_conf)
2225 {
2226 u8 tx_count = priv->plat->tx_queues_to_use;
2227 u8 queue;
2228
2229 /* Free TX queue resources */
2230 for (queue = 0; queue < tx_count; queue++)
2231 __free_dma_tx_desc_resources(priv, dma_conf, queue);
2232 }
2233
/**
 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: RX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 * Return: 0 on success, negative errno on allocation/registration failure.
 * On failure, partially allocated resources are left for the caller to
 * release (presumably via free_dma_rx_desc_resources() — confirm at the
 * call sites).
 */
static int __alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	bool xdp_prog = stmmac_xdp_is_enabled(priv);
	struct page_pool_params pp_params = { 0 };
	unsigned int dma_buf_sz_pad, num_pages;
	unsigned int napi_id;
	size_t size;
	void *addr;
	int ret;

	/* Each buffer needs room for the RX headroom, the payload and the
	 * skb_shared_info tail (for build_skb/XDP); round up to whole pages.
	 */
	dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz +
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
	num_pages = DIV_ROUND_UP(dma_buf_sz_pad, PAGE_SIZE);

	rx_q->queue_index = queue;
	rx_q->priv_data = priv;
	rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE;

	pp_params.flags = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV;
	pp_params.pool_size = dma_conf->dma_rx_size;
	pp_params.order = order_base_2(num_pages);
	pp_params.nid = dev_to_node(priv->device);
	pp_params.dev = priv->device;
	/* XDP_TX may write the buffer back to the device */
	pp_params.dma_dir = xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE;
	pp_params.offset = stmmac_rx_offset(priv);
	pp_params.max_len = dma_conf->dma_buf_sz;

	if (priv->sph_active) {
		/* Split-header mode: no headroom offset; fold it into the
		 * syncable length instead.
		 */
		pp_params.offset = 0;
		pp_params.max_len += stmmac_rx_offset(priv);
	}

	rx_q->page_pool = page_pool_create(&pp_params);
	if (IS_ERR(rx_q->page_pool)) {
		ret = PTR_ERR(rx_q->page_pool);
		/* NULL so the error path does not destroy an ERR_PTR */
		rx_q->page_pool = NULL;
		return ret;
	}

	rx_q->buf_pool = kzalloc_objs(*rx_q->buf_pool, dma_conf->dma_rx_size);
	if (!rx_q->buf_pool)
		return -ENOMEM;

	size = stmmac_get_rx_desc_size(priv) * dma_conf->dma_rx_size;

	addr = dma_alloc_coherent(priv->device, size, &rx_q->dma_rx_phy,
				  GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	if (priv->extend_desc)
		rx_q->dma_erx = addr;
	else
		rx_q->dma_rx = addr;

	/* AF_XDP zero-copy queues are serviced by the combined rxtx NAPI */
	if (stmmac_xdp_is_enabled(priv) &&
	    test_bit(queue, priv->af_xdp_zc_qps))
		napi_id = ch->rxtx_napi.napi_id;
	else
		napi_id = ch->rx_napi.napi_id;

	ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, queue, napi_id);
	if (ret) {
		netdev_err(priv->dev, "Failed to register xdp rxq info\n");
		return -EINVAL;
	}

	return 0;
}
2317
alloc_dma_rx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2318 static int alloc_dma_rx_desc_resources(struct stmmac_priv *priv,
2319 struct stmmac_dma_conf *dma_conf)
2320 {
2321 u8 rx_count = priv->plat->rx_queues_to_use;
2322 u8 queue;
2323 int ret;
2324
2325 /* RX queues buffers and DMA */
2326 for (queue = 0; queue < rx_count; queue++) {
2327 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue);
2328 if (ret)
2329 goto err_dma;
2330 }
2331
2332 return 0;
2333
2334 err_dma:
2335 free_dma_rx_desc_resources(priv, dma_conf);
2336
2337 return ret;
2338 }
2339
/**
 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * @queue: TX queue index
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 * Return: 0 on success, -ENOMEM on allocation failure. Partially
 * allocated arrays are left for the caller's error path to free.
 */
static int __alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
					 struct stmmac_dma_conf *dma_conf,
					 u32 queue)
{
	struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue];
	size_t size;
	void *addr;

	tx_q->queue_index = queue;
	tx_q->priv_data = priv;

	/* Per-slot DMA bookkeeping (addresses, lengths, flags) */
	tx_q->tx_skbuff_dma = kzalloc_objs(*tx_q->tx_skbuff_dma,
					   dma_conf->dma_tx_size);
	if (!tx_q->tx_skbuff_dma)
		return -ENOMEM;

	/* Per-slot SKB back-pointers for completion handling */
	tx_q->tx_skbuff = kzalloc_objs(struct sk_buff *, dma_conf->dma_tx_size);
	if (!tx_q->tx_skbuff)
		return -ENOMEM;

	size = stmmac_get_tx_desc_size(priv, tx_q) * dma_conf->dma_tx_size;

	addr = dma_alloc_coherent(priv->device, size,
				  &tx_q->dma_tx_phy, GFP_KERNEL);
	if (!addr)
		return -ENOMEM;

	/* Store the ring base under the pointer matching its layout:
	 * extended, TBS (enhanced) or basic descriptors.
	 */
	if (priv->extend_desc)
		tx_q->dma_etx = addr;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		tx_q->dma_entx = addr;
	else
		tx_q->dma_tx = addr;

	return 0;
}
2386
alloc_dma_tx_desc_resources(struct stmmac_priv * priv,struct stmmac_dma_conf * dma_conf)2387 static int alloc_dma_tx_desc_resources(struct stmmac_priv *priv,
2388 struct stmmac_dma_conf *dma_conf)
2389 {
2390 u8 tx_count = priv->plat->tx_queues_to_use;
2391 u8 queue;
2392 int ret;
2393
2394 /* TX queues buffers and DMA */
2395 for (queue = 0; queue < tx_count; queue++) {
2396 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue);
2397 if (ret)
2398 goto err_dma;
2399 }
2400
2401 return 0;
2402
2403 err_dma:
2404 free_dma_tx_desc_resources(priv, dma_conf);
2405 return ret;
2406 }
2407
/**
 * alloc_dma_desc_resources - alloc TX/RX resources.
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: according to which descriptor can be used (extend or basic)
 * this function allocates the resources for TX and RX paths. In case of
 * reception, for example, it pre-allocated the RX socket buffer in order to
 * allow zero-copy mechanism.
 */
static int alloc_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	int ret;

	/* RX allocation first, then TX */
	ret = alloc_dma_rx_desc_resources(priv, dma_conf);
	if (ret)
		return ret;

	return alloc_dma_tx_desc_resources(priv, dma_conf);
}
2430
/**
 * free_dma_desc_resources - free dma desc resources
 * @priv: private structure
 * @dma_conf: structure to take the dma data
 * Description: releases TX resources first, RX second; the ordering is
 * intentional (see comment below) and must not be swapped.
 */
static void free_dma_desc_resources(struct stmmac_priv *priv,
				    struct stmmac_dma_conf *dma_conf)
{
	/* Release the DMA TX socket buffers */
	free_dma_tx_desc_resources(priv, dma_conf);

	/* Release the DMA RX socket buffers later
	 * to ensure all pending XDP_TX buffers are returned.
	 */
	free_dma_rx_desc_resources(priv, dma_conf);
}
2447
2448 /**
2449 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2450 * @priv: driver private structure
2451 * Description: It is used for enabling the rx queues in the MAC
2452 */
stmmac_mac_enable_rx_queues(struct stmmac_priv * priv)2453 static void stmmac_mac_enable_rx_queues(struct stmmac_priv *priv)
2454 {
2455 u8 rx_queues_count = priv->plat->rx_queues_to_use;
2456 u8 queue;
2457 u8 mode;
2458
2459 for (queue = 0; queue < rx_queues_count; queue++) {
2460 mode = priv->plat->rx_queues_cfg[queue].mode_to_use;
2461 stmmac_rx_queue_enable(priv, priv->hw, mode, queue);
2462 }
2463 }
2464
/**
 * stmmac_start_rx_dma - start RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This starts a RX DMA channel. The debug message is emitted before the
 * hardware operation.
 */
static void stmmac_start_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan);
	stmmac_start_rx(priv, priv->ioaddr, chan);
}
2477
/**
 * stmmac_start_tx_dma - start TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This starts a TX DMA channel. The debug message is emitted before the
 * hardware operation.
 */
static void stmmac_start_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan);
	stmmac_start_tx(priv, priv->ioaddr, chan);
}
2490
/**
 * stmmac_stop_rx_dma - stop RX DMA channel
 * @priv: driver private structure
 * @chan: RX channel index
 * Description:
 * This stops a RX DMA channel. The debug message is emitted before the
 * hardware operation.
 */
static void stmmac_stop_rx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan);
	stmmac_stop_rx(priv, priv->ioaddr, chan);
}
2503
/**
 * stmmac_stop_tx_dma - stop TX DMA channel
 * @priv: driver private structure
 * @chan: TX channel index
 * Description:
 * This stops a TX DMA channel. The debug message is emitted before the
 * hardware operation.
 */
static void stmmac_stop_tx_dma(struct stmmac_priv *priv, u32 chan)
{
	netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan);
	stmmac_stop_tx(priv, priv->ioaddr, chan);
}
2516
stmmac_enable_all_dma_irq(struct stmmac_priv * priv)2517 static void stmmac_enable_all_dma_irq(struct stmmac_priv *priv)
2518 {
2519 u8 rx_channels_count = priv->plat->rx_queues_to_use;
2520 u8 tx_channels_count = priv->plat->tx_queues_to_use;
2521 u8 dma_csr_ch = max(rx_channels_count, tx_channels_count);
2522 u8 chan;
2523
2524 for (chan = 0; chan < dma_csr_ch; chan++) {
2525 struct stmmac_channel *ch = &priv->channel[chan];
2526 unsigned long flags;
2527
2528 spin_lock_irqsave(&ch->lock, flags);
2529 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
2530 spin_unlock_irqrestore(&ch->lock, flags);
2531 }
2532 }
2533
2534 /**
2535 * stmmac_start_all_dma - start all RX and TX DMA channels
2536 * @priv: driver private structure
2537 * Description:
2538 * This starts all the RX and TX DMA channels
2539 */
stmmac_start_all_dma(struct stmmac_priv * priv)2540 static void stmmac_start_all_dma(struct stmmac_priv *priv)
2541 {
2542 u8 rx_channels_count = priv->plat->rx_queues_to_use;
2543 u8 tx_channels_count = priv->plat->tx_queues_to_use;
2544 u8 chan;
2545
2546 for (chan = 0; chan < rx_channels_count; chan++)
2547 stmmac_start_rx_dma(priv, chan);
2548
2549 for (chan = 0; chan < tx_channels_count; chan++)
2550 stmmac_start_tx_dma(priv, chan);
2551 }
2552
2553 /**
2554 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2555 * @priv: driver private structure
2556 * Description:
2557 * This stops the RX and TX DMA channels
2558 */
stmmac_stop_all_dma(struct stmmac_priv * priv)2559 static void stmmac_stop_all_dma(struct stmmac_priv *priv)
2560 {
2561 u8 rx_channels_count = priv->plat->rx_queues_to_use;
2562 u8 tx_channels_count = priv->plat->tx_queues_to_use;
2563 u8 chan;
2564
2565 for (chan = 0; chan < rx_channels_count; chan++)
2566 stmmac_stop_rx_dma(priv, chan);
2567
2568 for (chan = 0; chan < tx_channels_count; chan++)
2569 stmmac_stop_tx_dma(priv, chan);
2570 }
2571
/**
 * stmmac_dma_operation_mode - HW DMA operation mode
 * @priv: driver private structure
 * Description: it is used for configuring the DMA operation mode register in
 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
 * The threshold value 'tc' is a file-scope variable (presumably a module
 * parameter — confirm at its definition).
 */
static void stmmac_dma_operation_mode(struct stmmac_priv *priv)
{
	u8 rx_channels_count = priv->plat->rx_queues_to_use;
	u8 tx_channels_count = priv->plat->tx_queues_to_use;
	int rxfifosz = priv->plat->rx_fifo_size;
	int txfifosz = priv->plat->tx_fifo_size;
	u32 txmode = 0;
	u32 rxmode = 0;
	u8 qmode = 0;
	u8 chan;

	/* Fall back to the HW-reported FIFO sizes when the platform does
	 * not specify them.
	 */
	if (rxfifosz == 0)
		rxfifosz = priv->dma_cap.rx_fifo_size;
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */
	if (dwmac_is_xmac(priv->plat->core_type)) {
		rxfifosz /= rx_channels_count;
		txfifosz /= tx_channels_count;
	}

	if (priv->plat->force_thresh_dma_mode) {
		txmode = tc;
		rxmode = tc;
	} else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) {
		/*
		 * In case of GMAC, SF mode can be enabled
		 * to perform the TX COE in HW. This depends on:
		 * 1) TX COE if actually supported
		 * 2) There is no bugged Jumbo frame support
		 * that needs to not insert csum in the TDES.
		 */
		txmode = SF_DMA_MODE;
		rxmode = SF_DMA_MODE;
		priv->xstats.threshold = SF_DMA_MODE;
	} else {
		/* Default: threshold TX, store-and-forward RX */
		txmode = tc;
		rxmode = SF_DMA_MODE;
	}

	/* configure all channels */
	for (chan = 0; chan < rx_channels_count; chan++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];

		qmode = priv->plat->rx_queues_cfg[chan].mode_to_use;

		stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan,
				   rxfifosz, qmode);

		stmmac_set_queue_rx_buf_size(priv, rx_q, chan);
	}

	for (chan = 0; chan < tx_channels_count; chan++) {
		qmode = priv->plat->tx_queues_cfg[chan].mode_to_use;

		stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan,
				   txfifosz, qmode);
	}
}
2638
stmmac_xsk_request_timestamp(void * _priv)2639 static void stmmac_xsk_request_timestamp(void *_priv)
2640 {
2641 struct stmmac_metadata_request *meta_req = _priv;
2642
2643 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc);
2644 *meta_req->set_ic = true;
2645 }
2646
/* AF_XDP TX metadata hook: fetch the HW TX timestamp for a completed
 * descriptor. Tries the descriptor's own timestamp first, then the MAC
 * timestamp register as fallback; applies the CDC error adjustment.
 * Returns 0 when TX timestamping is disabled or no timestamp was found.
 */
static u64 stmmac_xsk_fill_timestamp(void *_priv)
{
	struct stmmac_xsk_tx_complete *tx_compl = _priv;
	struct stmmac_priv *priv = tx_compl->priv;
	struct dma_desc *desc = tx_compl->desc;
	bool found = false;
	u64 ns = 0;

	if (!priv->hwts_tx_en)
		return 0;

	/* check tx tstamp status */
	if (stmmac_get_tx_timestamp_status(priv, desc)) {
		stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns);
		found = true;
	} else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) {
		found = true;
	}

	if (found) {
		/* Compensate for the clock-domain-crossing error */
		ns -= priv->plat->cdc_error_adj;
		return ns_to_ktime(ns);
	}

	return 0;
}
2673
/* AF_XDP TX metadata hook: program the requested launch time into the
 * enhanced descriptor when Time-Based Scheduling is enabled; silently
 * ignored otherwise.
 */
static void stmmac_xsk_request_launch_time(u64 launch_time, void *_priv)
{
	struct timespec64 ts = ns_to_timespec64(launch_time);
	struct stmmac_metadata_request *meta_req = _priv;

	if (meta_req->tbs & STMMAC_TBS_EN)
		stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec,
				    ts.tv_nsec);
}
2683
/* AF_XDP TX metadata callbacks passed to xsk_tx_metadata_request() /
 * xsk_tx_metadata_complete() on the XSK TX and completion paths.
 */
static const struct xsk_tx_metadata_ops stmmac_xsk_tx_metadata_ops = {
	.tmo_request_timestamp = stmmac_xsk_request_timestamp,
	.tmo_fill_timestamp = stmmac_xsk_fill_timestamp,
	.tmo_request_launch_time = stmmac_xsk_request_launch_time,
};
2689
/* Transmit up to @budget frames from the AF_XDP zero-copy pool on @queue.
 * Shares the TX ring with the regular (slow) path, so it stops early when
 * the ring drops below STMMAC_TX_XSK_AVAIL or the carrier is down.
 * Returns true only when budget remains AND the pool had no more pending
 * descriptors (i.e. all outstanding XSK TX work is done).
 */
static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue);
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
	struct xsk_buff_pool *pool = tx_q->xsk_pool;
	unsigned int entry = tx_q->cur_tx;
	struct dma_desc *tx_desc = NULL;
	struct xdp_desc xdp_desc;
	bool work_done = true;
	u32 tx_set_ic_bit = 0;

	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	budget = min(budget, stmmac_tx_avail(priv, queue));

	for (; budget > 0; budget--) {
		struct stmmac_metadata_request meta_req;
		struct xsk_tx_metadata *meta = NULL;
		dma_addr_t dma_addr;
		bool set_ic;

		/* We are sharing with slow path and stop XSK TX desc submission when
		 * available TX ring is less than threshold.
		 */
		if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) ||
		    !netif_carrier_ok(priv->dev)) {
			work_done = false;
			break;
		}

		if (!xsk_tx_peek_desc(pool, &xdp_desc))
			break;

		/* Drop frames exceeding the EST per-queue max SDU; the
		 * peeked descriptor is consumed (counted as a drop).
		 */
		if (priv->est && priv->est->enable &&
		    priv->est->max_sdu[queue] &&
		    xdp_desc.len > priv->est->max_sdu[queue]) {
			priv->xstats.max_sdu_txq_drop[queue]++;
			continue;
		}

		tx_desc = stmmac_get_tx_desc(priv, tx_q, entry);
		dma_addr = xsk_buff_raw_get_dma(pool, xdp_desc.addr);
		meta = xsk_buff_get_metadata(pool, xdp_desc.addr);
		xsk_buff_raw_dma_sync_for_device(pool, dma_addr, xdp_desc.len);

		/* To return XDP buffer to XSK pool, we simple call
		 * xsk_tx_completed(), so we don't need to fill up
		 * 'buf' and 'xdpf'.
		 */
		stmmac_set_tx_dma_entry(tx_q, entry, STMMAC_TXBUF_T_XSK_TX,
					0, xdp_desc.len, false);
		stmmac_set_tx_dma_last_segment(tx_q, entry);

		tx_q->xdpf[entry] = NULL;

		stmmac_set_desc_addr(priv, tx_desc, dma_addr);

		tx_q->tx_count_frames++;

		/* Interrupt coalescing: raise IC only every
		 * tx_coal_frames-th frame (never when coalescing is off).
		 */
		if (!priv->tx_coal_frames[queue])
			set_ic = false;
		else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
			set_ic = true;
		else
			set_ic = false;

		meta_req.priv = priv;
		meta_req.tx_desc = tx_desc;
		meta_req.set_ic = &set_ic;
		meta_req.tbs = tx_q->tbs;
		meta_req.edesc = &tx_q->dma_entx[entry];
		xsk_tx_metadata_request(meta, &stmmac_xsk_tx_metadata_ops,
					&meta_req);
		if (set_ic) {
			tx_q->tx_count_frames = 0;
			stmmac_set_tx_ic(priv, tx_desc);
			tx_set_ic_bit++;
		}

		/* Single-buffer frame; hands ownership to the DMA */
		stmmac_prepare_tx_desc(priv, tx_desc, 1, xdp_desc.len,
				       csum, priv->descriptor_mode, true, true,
				       xdp_desc.len);

		stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

		xsk_tx_metadata_to_compl(meta,
					 &tx_q->tx_skbuff_dma[entry].xsk_meta);

		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
		entry = tx_q->cur_tx;
	}
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->napi_syncp);

	if (tx_desc) {
		stmmac_flush_tx_descriptors(priv, queue);
		xsk_tx_release(pool);
	}

	/* Return true if all of the 3 conditions are met
	 * a) TX Budget is still available
	 * b) work_done = true when XSK TX desc peek is empty (no more
	 * pending XSK TX for transmission)
	 */
	return !!budget && work_done;
}
2800
/* Raise the global DMA threshold 'tc' by 64 (capped at 256) on TX errors
 * that indicate the threshold is too low, and reprogram the channel's
 * operation mode accordingly. No-op once store-and-forward is in use.
 * Note: 'tc' is shared across channels, but the mode is reprogrammed only
 * for @chan here.
 */
static void stmmac_bump_dma_threshold(struct stmmac_priv *priv, u32 chan)
{
	if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) {
		tc += 64;

		if (priv->plat->force_thresh_dma_mode)
			stmmac_set_dma_operation_mode(priv, tc, tc, chan);
		else
			stmmac_set_dma_operation_mode(priv, tc, SF_DMA_MODE,
						      chan);

		priv->xstats.threshold = tc;
	}
}
2815
2816 /**
2817 * stmmac_tx_clean - to manage the transmission completion
2818 * @priv: driver private structure
2819 * @budget: napi budget limiting this functions packet handling
2820 * @queue: TX queue index
2821 * @pending_packets: signal to arm the TX coal timer
2822 * Description: it reclaims the transmit resources after transmission completes.
2823 * If some packets still needs to be handled, due to TX coalesce, set
2824 * pending_packets to true to make NAPI arm the TX coal timer.
2825 */
static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue,
			   bool *pending_packets)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	unsigned int bytes_compl = 0, pkts_compl = 0;
	unsigned int entry, xmits = 0, count = 0;
	u32 tx_packets = 0, tx_errors = 0;

	/* Serialize against the xmit path for this queue */
	__netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue));

	tx_q->xsk_frames_done = 0;

	entry = tx_q->dirty_tx;

	/* Try to clean all TX complete frame in 1 shot */
	while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) {
		struct xdp_frame *xdpf;
		struct sk_buff *skb;
		struct dma_desc *p;
		int status;

		/* Recover what kind of buffer this descriptor carried:
		 * an XDP frame, a regular skb, or neither (e.g. XSK zero-copy).
		 */
		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX ||
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdpf = tx_q->xdpf[entry];
			skb = NULL;
		} else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			xdpf = NULL;
			skb = tx_q->tx_skbuff[entry];
		} else {
			xdpf = NULL;
			skb = NULL;
		}

		p = stmmac_get_tx_desc(priv, tx_q, entry);
		status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr);
		/* Check if the descriptor is owned by the DMA */
		if (unlikely(status & tx_dma_own))
			break;

		count++;

		/* Make sure descriptor fields are read after reading
		 * the own bit.
		 */
		dma_rmb();

		/* Just consider the last segment and ...*/
		if (likely(!(status & tx_not_ls))) {
			/* ... verify the status error condition */
			if (unlikely(status & tx_err)) {
				tx_errors++;
				if (unlikely(status & tx_err_bump_tc))
					stmmac_bump_dma_threshold(priv, queue);
			} else {
				tx_packets++;
			}
			if (skb) {
				stmmac_get_tx_hwtstamp(priv, p, skb);
			} else if (tx_q->xsk_pool &&
				   xp_tx_metadata_enabled(tx_q->xsk_pool)) {
				struct stmmac_xsk_tx_complete tx_compl = {
					.priv = priv,
					.desc = p,
				};

				/* Report completion metadata back to the XSK
				 * layer for this descriptor.
				 */
				xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta,
							 &stmmac_xsk_tx_metadata_ops,
							 &tx_compl);
			}
		}

		/* Unmap the buffer; XDP_TX buffers come from our own RX pool
		 * and are not unmapped here.
		 */
		if (likely(tx_q->tx_skbuff_dma[entry].buf &&
			   tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) {
			if (tx_q->tx_skbuff_dma[entry].map_as_page)
				dma_unmap_page(priv->device,
					       tx_q->tx_skbuff_dma[entry].buf,
					       tx_q->tx_skbuff_dma[entry].len,
					       DMA_TO_DEVICE);
			else
				dma_unmap_single(priv->device,
						 tx_q->tx_skbuff_dma[entry].buf,
						 tx_q->tx_skbuff_dma[entry].len,
						 DMA_TO_DEVICE);
			tx_q->tx_skbuff_dma[entry].buf = 0;
			tx_q->tx_skbuff_dma[entry].len = 0;
			tx_q->tx_skbuff_dma[entry].map_as_page = false;
		}

		/* This looks at tx_q->tx_skbuff_dma[tx_q->dirty_tx].is_jumbo
		 * and tx_q->tx_skbuff_dma[tx_q->dirty_tx].last_segment
		 */
		stmmac_clean_desc3(priv, tx_q, p);

		tx_q->tx_skbuff_dma[entry].last_segment = false;
		tx_q->tx_skbuff_dma[entry].is_jumbo = false;

		/* XDP_TX frames are returned in NAPI context */
		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) {
			xdp_return_frame_rx_napi(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		/* ndo_xdp_xmit frames use the generic return path */
		if (xdpf &&
		    tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) {
			xdp_return_frame(xdpf);
			tx_q->xdpf[entry] = NULL;
		}

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX)
			tx_q->xsk_frames_done++;

		if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) {
			if (likely(skb)) {
				pkts_compl++;
				bytes_compl += skb->len;
				dev_consume_skb_any(skb);
				tx_q->tx_skbuff[entry] = NULL;
			}
		}

		stmmac_release_tx_desc(priv, p, priv->descriptor_mode);

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
	}
	tx_q->dirty_tx = entry;

	netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue),
				  pkts_compl, bytes_compl);

	/* Restart the SW queue if it was stopped and enough descriptors
	 * have been reclaimed.
	 */
	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev,
								queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {

		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	if (tx_q->xsk_pool) {
		bool work_done;

		if (tx_q->xsk_frames_done)
			xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done);

		if (xsk_uses_need_wakeup(tx_q->xsk_pool))
			xsk_set_tx_need_wakeup(tx_q->xsk_pool);

		/* For XSK TX, we try to send as many as possible.
		 * If XSK work done (XSK TX desc empty and budget still
		 * available), return "budget - 1" to reenable TX IRQ.
		 * Else, return "budget" to make NAPI continue polling.
		 */
		work_done = stmmac_xdp_xmit_zc(priv, queue,
					       STMMAC_XSK_TX_BUDGET_MAX);
		if (work_done)
			xmits = budget - 1;
		else
			xmits = budget;
	}

	if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode)
		stmmac_restart_sw_lpi_timer(priv);

	/* We still have pending packets, let's call for a new scheduling */
	if (tx_q->dirty_tx != tx_q->cur_tx)
		*pending_packets = true;

	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_add(&txq_stats->napi.tx_packets, tx_packets);
	u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets);
	u64_stats_inc(&txq_stats->napi.tx_clean);
	u64_stats_update_end(&txq_stats->napi_syncp);

	priv->xstats.tx_errors += tx_errors;

	__netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue));

	/* Combine decisions from TX clean and XSK TX */
	return max(count, xmits);
}
3007
3008 /**
3009 * stmmac_tx_err - to manage the tx error
3010 * @priv: driver private structure
3011 * @chan: channel index
3012 * Description: it cleans the descriptors and restarts the transmission
3013 * in case of transmission errors.
3014 */
static void stmmac_tx_err(struct stmmac_priv *priv, u32 chan)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];

	/* Stop the SW queue first so no new frames are queued while the
	 * channel is being torn down.
	 */
	netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan));

	/* Halt the DMA, drop all in-flight buffers, reinitialize the
	 * descriptor ring and restart the channel from a clean state.
	 * The order of these calls matters.
	 */
	stmmac_stop_tx_dma(priv, chan);
	dma_free_tx_skbufs(priv, &priv->dma_conf, chan);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan);
	stmmac_reset_tx_queue(priv, chan);
	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, chan);
	stmmac_start_tx_dma(priv, chan);

	priv->xstats.tx_errors++;
	netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan));
}
3032
3033 /**
3034 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
3035 * @priv: driver private structure
3036 * @txmode: TX operating mode
3037 * @rxmode: RX operating mode
3038 * @chan: channel index
3039 * Description: it is used for configuring of the DMA operation mode in
3040 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
3041 * mode.
3042 */
static void stmmac_set_dma_operation_mode(struct stmmac_priv *priv, u32 txmode,
					  u32 rxmode, u32 chan)
{
	u8 rxq_mode = priv->plat->rx_queues_cfg[chan].mode_to_use;
	u8 txq_mode = priv->plat->tx_queues_cfg[chan].mode_to_use;
	u8 nrxq = priv->plat->rx_queues_to_use;
	u8 ntxq = priv->plat->tx_queues_to_use;
	int rx_fifo = priv->plat->rx_fifo_size;
	int tx_fifo = priv->plat->tx_fifo_size;

	/* Fall back to the HW capability values when the platform did not
	 * provide FIFO sizes.
	 */
	if (rx_fifo == 0)
		rx_fifo = priv->dma_cap.rx_fifo_size;
	if (tx_fifo == 0)
		tx_fifo = priv->dma_cap.tx_fifo_size;

	/* The FIFO is shared between channels: use the per-queue share */
	rx_fifo /= nrxq;
	tx_fifo /= ntxq;

	stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rx_fifo, rxq_mode);
	stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, tx_fifo, txq_mode);
}
3065
stmmac_safety_feat_interrupt(struct stmmac_priv * priv)3066 static bool stmmac_safety_feat_interrupt(struct stmmac_priv *priv)
3067 {
3068 int ret;
3069
3070 ret = stmmac_safety_feat_irq_status(priv, priv->dev,
3071 priv->ioaddr, priv->dma_cap.asp, &priv->sstats);
3072 if (ret && (ret != -EINVAL)) {
3073 stmmac_global_err(priv);
3074 return true;
3075 }
3076
3077 return false;
3078 }
3079
static int stmmac_napi_check(struct stmmac_priv *priv, u32 chan, u32 dir)
{
	int status = stmmac_dma_interrupt_status(priv, priv->ioaddr,
						 &priv->xstats, chan, dir);
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
	struct stmmac_channel *ch = &priv->channel[chan];
	struct napi_struct *rx_napi;
	struct napi_struct *tx_napi;
	unsigned long flags;

	/* When an XSK pool is attached, the combined rxtx NAPI instance
	 * services the queue instead of the per-direction ones.
	 */
	rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi;
	tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) {
		if (napi_schedule_prep(rx_napi)) {
			/* Mask the RX DMA irq under the channel lock before
			 * scheduling NAPI; the poll routine re-enables it.
			 */
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(rx_napi);
		}
	}

	if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) {
		if (napi_schedule_prep(tx_napi)) {
			/* Same masking protocol for the TX direction */
			spin_lock_irqsave(&ch->lock, flags);
			stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
			spin_unlock_irqrestore(&ch->lock, flags);
			__napi_schedule(tx_napi);
		}
	}

	return status;
}
3114
3115 /**
3116 * stmmac_dma_interrupt - DMA ISR
3117 * @priv: driver private structure
3118 * Description: this is the DMA ISR. It is called by the main ISR.
3119 * It calls the dwmac dma routine and schedule poll method in case of some
3120 * work can be done.
3121 */
stmmac_dma_interrupt(struct stmmac_priv * priv)3122 static void stmmac_dma_interrupt(struct stmmac_priv *priv)
3123 {
3124 u8 tx_channel_count = priv->plat->tx_queues_to_use;
3125 u8 rx_channel_count = priv->plat->rx_queues_to_use;
3126 u8 channels_to_check = tx_channel_count > rx_channel_count ?
3127 tx_channel_count : rx_channel_count;
3128 int status[MAX_T(u32, MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES)];
3129 u8 chan;
3130
3131 /* Make sure we never check beyond our status buffer. */
3132 if (WARN_ON_ONCE(channels_to_check > ARRAY_SIZE(status)))
3133 channels_to_check = ARRAY_SIZE(status);
3134
3135 for (chan = 0; chan < channels_to_check; chan++)
3136 status[chan] = stmmac_napi_check(priv, chan,
3137 DMA_DIR_RXTX);
3138
3139 for (chan = 0; chan < tx_channel_count; chan++) {
3140 if (unlikely(status[chan] & tx_hard_error_bump_tc)) {
3141 /* Try to bump up the dma threshold on this failure */
3142 stmmac_bump_dma_threshold(priv, chan);
3143 } else if (unlikely(status[chan] == tx_hard_error)) {
3144 stmmac_tx_err(priv, chan);
3145 }
3146 }
3147 }
3148
3149 /**
3150 * stmmac_mmc_setup: setup the Mac Management Counters (MMC)
3151 * @priv: driver private structure
3152 * Description: this masks the MMC irq, in fact, the counters are managed in SW.
3153 */
stmmac_mmc_setup(struct stmmac_priv * priv)3154 static void stmmac_mmc_setup(struct stmmac_priv *priv)
3155 {
3156 unsigned int mode = MMC_CNTRL_RESET_ON_READ | MMC_CNTRL_COUNTER_RESET |
3157 MMC_CNTRL_PRESET | MMC_CNTRL_FULL_HALF_PRESET;
3158
3159 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr);
3160
3161 if (priv->dma_cap.rmon) {
3162 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode);
3163 memset(&priv->mmc, 0, sizeof(struct stmmac_counters));
3164 } else
3165 netdev_info(priv->dev, "No MAC Management Counters available\n");
3166 }
3167
3168 /**
3169 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3170 * @priv: driver private structure
3171 * Description:
3172 * new GMAC chip generations have a new register to indicate the
3173 * presence of the optional feature/functions.
3174 * This can be also used to override the value passed through the
3175 * platform and necessary for old MAC10/100 and GMAC chips.
3176 */
stmmac_get_hw_features(struct stmmac_priv * priv)3177 static int stmmac_get_hw_features(struct stmmac_priv *priv)
3178 {
3179 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0;
3180 }
3181
3182 /**
3183 * stmmac_check_ether_addr - check if the MAC addr is valid
3184 * @priv: driver private structure
3185 * Description:
3186 * it is to verify if the MAC address is valid, in case of failures it
3187 * generates a random MAC address
3188 */
stmmac_check_ether_addr(struct stmmac_priv * priv)3189 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
3190 {
3191 u8 addr[ETH_ALEN];
3192
3193 if (!is_valid_ether_addr(priv->dev->dev_addr)) {
3194 stmmac_get_umac_addr(priv, priv->hw, addr, 0);
3195 if (is_valid_ether_addr(addr))
3196 eth_hw_addr_set(priv->dev, addr);
3197 else
3198 eth_hw_addr_random(priv->dev);
3199 dev_info(priv->device, "device MAC address %pM\n",
3200 priv->dev->dev_addr);
3201 }
3202 }
3203
int stmmac_get_phy_intf_sel(phy_interface_t interface)
{
	/* All four RGMII delay variants map to the same selection */
	if (phy_interface_mode_is_rgmii(interface))
		return PHY_INTF_SEL_RGMII;

	switch (interface) {
	case PHY_INTERFACE_MODE_MII:
	case PHY_INTERFACE_MODE_GMII:
		return PHY_INTF_SEL_GMII_MII;
	case PHY_INTERFACE_MODE_RMII:
		return PHY_INTF_SEL_RMII;
	case PHY_INTERFACE_MODE_REVMII:
		return PHY_INTF_SEL_REVMII;
	default:
		return -EINVAL;
	}
}
EXPORT_SYMBOL_GPL(stmmac_get_phy_intf_sel);
3221
stmmac_prereset_configure(struct stmmac_priv * priv)3222 static int stmmac_prereset_configure(struct stmmac_priv *priv)
3223 {
3224 struct plat_stmmacenet_data *plat_dat = priv->plat;
3225 phy_interface_t interface;
3226 struct phylink_pcs *pcs;
3227 int phy_intf_sel, ret;
3228
3229 if (!plat_dat->set_phy_intf_sel)
3230 return 0;
3231
3232 interface = plat_dat->phy_interface;
3233
3234 /* Check whether this mode uses a PCS */
3235 pcs = stmmac_mac_select_pcs(&priv->phylink_config, interface);
3236 if (priv->integrated_pcs && pcs == &priv->integrated_pcs->pcs) {
3237 /* Request the phy_intf_sel from the integrated PCS */
3238 phy_intf_sel = stmmac_integrated_pcs_get_phy_intf_sel(pcs,
3239 interface);
3240 } else {
3241 phy_intf_sel = stmmac_get_phy_intf_sel(interface);
3242 }
3243
3244 if (phy_intf_sel < 0) {
3245 netdev_err(priv->dev,
3246 "failed to get phy_intf_sel for %s: %pe\n",
3247 phy_modes(interface), ERR_PTR(phy_intf_sel));
3248 return phy_intf_sel;
3249 }
3250
3251 ret = plat_dat->set_phy_intf_sel(plat_dat->bsp_priv, phy_intf_sel);
3252 if (ret == -EINVAL)
3253 netdev_err(priv->dev, "platform does not support %s\n",
3254 phy_modes(interface));
3255 else if (ret < 0)
3256 netdev_err(priv->dev,
3257 "platform failed to set interface %s: %pe\n",
3258 phy_modes(interface), ERR_PTR(ret));
3259
3260 return ret;
3261 }
3262
3263 /**
3264 * stmmac_init_dma_engine - DMA init.
3265 * @priv: driver private structure
3266 * Description:
3267 * It inits the DMA invoking the specific MAC/GMAC callback.
3268 * Some DMA parameters can be passed from the platform;
3269 * in case of these are not passed a default is kept for the MAC or GMAC.
3270 */
static int stmmac_init_dma_engine(struct stmmac_priv *priv)
{
	u8 rx_channels_count = priv->plat->rx_queues_to_use;
	u8 tx_channels_count = priv->plat->tx_queues_to_use;
	u8 dma_csr_ch = max(rx_channels_count, tx_channels_count);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	int ret = 0;
	u8 chan;

	/* Program the glue-logic interface selection (if the platform
	 * provides a hook) before the SW reset.
	 */
	ret = stmmac_prereset_configure(priv);
	if (ret)
		return ret;

	ret = stmmac_reset(priv);
	if (ret) {
		netdev_err(priv->dev, "Failed to reset the dma\n");
		return ret;
	}

	/* DMA Configuration */
	stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg);

	if (priv->plat->axi)
		stmmac_axi(priv, priv->ioaddr, priv->plat->axi);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		/* Keep both RX and TX DMA irqs masked for now */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_channels_count; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		stmmac_set_queue_rx_tail_ptr(priv, rx_q, chan,
					     rx_q->buf_alloc_num);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_channels_count; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		stmmac_set_queue_tx_tail_ptr(priv, tx_q, chan, 0);
	}

	return ret;
}
3326
static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	u32 timer_us = priv->tx_coal_timer[queue];
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	/* TX coalesce timer disabled for this queue */
	if (!timer_us)
		return;

	ch = &priv->channel[queue];
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	/* Arm timer only if napi is not already scheduled.
	 * Try to cancel any timer if napi is scheduled, timer will be armed
	 * again in the next scheduled napi.
	 */
	if (napi_is_scheduled(napi))
		hrtimer_try_to_cancel(&tx_q->txtimer);
	else
		hrtimer_start(&tx_q->txtimer, STMMAC_COAL_TIMER(timer_us),
			      HRTIMER_MODE_REL);
}
3351
3352 /**
3353 * stmmac_tx_timer - mitigation sw timer for tx.
3354 * @t: data pointer
3355 * Description:
3356 * This is the timer handler to directly invoke the stmmac_tx_clean.
3357 */
static enum hrtimer_restart stmmac_tx_timer(struct hrtimer *t)
{
	struct stmmac_tx_queue *tx_q = container_of(t, struct stmmac_tx_queue, txtimer);
	struct stmmac_priv *priv = tx_q->priv_data;
	struct stmmac_channel *ch;
	struct napi_struct *napi;

	ch = &priv->channel[tx_q->queue_index];
	/* XSK queues are serviced by the combined rxtx NAPI instance */
	napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi;

	if (likely(napi_schedule_prep(napi))) {
		unsigned long flags;

		/* Mask the TX DMA irq under the channel lock before handing
		 * the queue to NAPI; the poll routine re-enables it.
		 */
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule(napi);
	}

	/* One-shot timer: re-armed by stmmac_tx_timer_arm() as needed */
	return HRTIMER_NORESTART;
}
3379
3380 /**
3381 * stmmac_init_coalesce - init mitigation options.
3382 * @priv: driver private structure
3383 * Description:
3384 * This inits the coalesce parameters: i.e. timer rate,
3385 * timer handler and default threshold used for enabling the
3386 * interrupt on completion bit.
3387 */
stmmac_init_coalesce(struct stmmac_priv * priv)3388 static void stmmac_init_coalesce(struct stmmac_priv *priv)
3389 {
3390 u8 tx_channel_count = priv->plat->tx_queues_to_use;
3391 u8 rx_channel_count = priv->plat->rx_queues_to_use;
3392 u8 chan;
3393
3394 for (chan = 0; chan < tx_channel_count; chan++) {
3395 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
3396
3397 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES;
3398 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER;
3399
3400 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
3401 }
3402
3403 for (chan = 0; chan < rx_channel_count; chan++)
3404 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES;
3405 }
3406
stmmac_set_rings_length(struct stmmac_priv * priv)3407 static void stmmac_set_rings_length(struct stmmac_priv *priv)
3408 {
3409 u8 rx_channels_count = priv->plat->rx_queues_to_use;
3410 u8 tx_channels_count = priv->plat->tx_queues_to_use;
3411 u8 chan;
3412
3413 /* set TX ring length */
3414 for (chan = 0; chan < tx_channels_count; chan++)
3415 stmmac_set_tx_ring_len(priv, priv->ioaddr,
3416 (priv->dma_conf.dma_tx_size - 1), chan);
3417
3418 /* set RX ring length */
3419 for (chan = 0; chan < rx_channels_count; chan++)
3420 stmmac_set_rx_ring_len(priv, priv->ioaddr,
3421 (priv->dma_conf.dma_rx_size - 1), chan);
3422 }
3423
3424 /**
3425 * stmmac_set_tx_queue_weight - Set TX queue weight
3426 * @priv: driver private structure
3427 * Description: It is used for setting TX queues weight
3428 */
stmmac_set_tx_queue_weight(struct stmmac_priv * priv)3429 static void stmmac_set_tx_queue_weight(struct stmmac_priv *priv)
3430 {
3431 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3432 u32 weight;
3433 u8 queue;
3434
3435 for (queue = 0; queue < tx_queues_count; queue++) {
3436 weight = priv->plat->tx_queues_cfg[queue].weight;
3437 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue);
3438 }
3439 }
3440
3441 /**
3442 * stmmac_configure_cbs - Configure CBS in TX queue
3443 * @priv: driver private structure
3444 * Description: It is used for configuring CBS in AVB TX queues
3445 */
stmmac_configure_cbs(struct stmmac_priv * priv)3446 static void stmmac_configure_cbs(struct stmmac_priv *priv)
3447 {
3448 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3449 u32 mode_to_use;
3450 u8 queue;
3451
3452 /* queue 0 is reserved for legacy traffic */
3453 for (queue = 1; queue < tx_queues_count; queue++) {
3454 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use;
3455 if (mode_to_use == MTL_QUEUE_DCB)
3456 continue;
3457
3458 stmmac_config_cbs(priv, priv->hw,
3459 priv->plat->tx_queues_cfg[queue].send_slope,
3460 priv->plat->tx_queues_cfg[queue].idle_slope,
3461 priv->plat->tx_queues_cfg[queue].high_credit,
3462 priv->plat->tx_queues_cfg[queue].low_credit,
3463 queue);
3464 }
3465 }
3466
3467 /**
3468 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3469 * @priv: driver private structure
3470 * Description: It is used for mapping RX queues to RX dma channels
3471 */
stmmac_rx_queue_dma_chan_map(struct stmmac_priv * priv)3472 static void stmmac_rx_queue_dma_chan_map(struct stmmac_priv *priv)
3473 {
3474 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3475 u8 queue;
3476 u32 chan;
3477
3478 for (queue = 0; queue < rx_queues_count; queue++) {
3479 chan = priv->plat->rx_queues_cfg[queue].chan;
3480 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan);
3481 }
3482 }
3483
3484 /**
3485 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3486 * @priv: driver private structure
3487 * Description: It is used for configuring the RX Queue Priority
3488 */
stmmac_mac_config_rx_queues_prio(struct stmmac_priv * priv)3489 static void stmmac_mac_config_rx_queues_prio(struct stmmac_priv *priv)
3490 {
3491 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3492 u8 queue;
3493 u32 prio;
3494
3495 for (queue = 0; queue < rx_queues_count; queue++) {
3496 if (!priv->plat->rx_queues_cfg[queue].use_prio)
3497 continue;
3498
3499 prio = priv->plat->rx_queues_cfg[queue].prio;
3500 stmmac_rx_queue_prio(priv, priv->hw, prio, queue);
3501 }
3502 }
3503
3504 /**
3505 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3506 * @priv: driver private structure
3507 * Description: It is used for configuring the TX Queue Priority
3508 */
stmmac_mac_config_tx_queues_prio(struct stmmac_priv * priv)3509 static void stmmac_mac_config_tx_queues_prio(struct stmmac_priv *priv)
3510 {
3511 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3512 u8 queue;
3513 u32 prio;
3514
3515 for (queue = 0; queue < tx_queues_count; queue++) {
3516 if (!priv->plat->tx_queues_cfg[queue].use_prio)
3517 continue;
3518
3519 prio = priv->plat->tx_queues_cfg[queue].prio;
3520 stmmac_tx_queue_prio(priv, priv->hw, prio, queue);
3521 }
3522 }
3523
3524 /**
3525 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3526 * @priv: driver private structure
3527 * Description: It is used for configuring the RX queue routing
3528 */
stmmac_mac_config_rx_queues_routing(struct stmmac_priv * priv)3529 static void stmmac_mac_config_rx_queues_routing(struct stmmac_priv *priv)
3530 {
3531 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3532 u8 packet;
3533 u8 queue;
3534
3535 for (queue = 0; queue < rx_queues_count; queue++) {
3536 /* no specific packet type routing specified for the queue */
3537 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0)
3538 continue;
3539
3540 packet = priv->plat->rx_queues_cfg[queue].pkt_route;
3541 stmmac_rx_queue_routing(priv, priv->hw, packet, queue);
3542 }
3543 }
3544
stmmac_mac_config_rss(struct stmmac_priv * priv)3545 static void stmmac_mac_config_rss(struct stmmac_priv *priv)
3546 {
3547 if (!priv->dma_cap.rssen || !priv->plat->rss_en) {
3548 priv->rss.enable = false;
3549 return;
3550 }
3551
3552 if (priv->dev->features & NETIF_F_RXHASH)
3553 priv->rss.enable = true;
3554 else
3555 priv->rss.enable = false;
3556
3557 stmmac_rss_configure(priv, priv->hw, &priv->rss,
3558 priv->plat->rx_queues_to_use);
3559 }
3560
3561 /**
3562 * stmmac_mtl_configuration - Configure MTL
3563 * @priv: driver private structure
3564 * Description: It is used for configuring MTL
3565 */
stmmac_mtl_configuration(struct stmmac_priv * priv)3566 static void stmmac_mtl_configuration(struct stmmac_priv *priv)
3567 {
3568 u8 rx_queues_count = priv->plat->rx_queues_to_use;
3569 u8 tx_queues_count = priv->plat->tx_queues_to_use;
3570
3571 if (tx_queues_count > 1)
3572 stmmac_set_tx_queue_weight(priv);
3573
3574 /* Configure MTL RX algorithms */
3575 if (rx_queues_count > 1)
3576 stmmac_prog_mtl_rx_algorithms(priv, priv->hw,
3577 priv->plat->rx_sched_algorithm);
3578
3579 /* Configure MTL TX algorithms */
3580 if (tx_queues_count > 1)
3581 stmmac_prog_mtl_tx_algorithms(priv, priv->hw,
3582 priv->plat->tx_sched_algorithm);
3583
3584 /* Configure CBS in AVB TX queues */
3585 if (tx_queues_count > 1)
3586 stmmac_configure_cbs(priv);
3587
3588 /* Map RX MTL to DMA channels */
3589 stmmac_rx_queue_dma_chan_map(priv);
3590
3591 /* Enable MAC RX Queues */
3592 stmmac_mac_enable_rx_queues(priv);
3593
3594 /* Set RX priorities */
3595 if (rx_queues_count > 1)
3596 stmmac_mac_config_rx_queues_prio(priv);
3597
3598 /* Set TX priorities */
3599 if (tx_queues_count > 1)
3600 stmmac_mac_config_tx_queues_prio(priv);
3601
3602 /* Set RX routing */
3603 if (rx_queues_count > 1)
3604 stmmac_mac_config_rx_queues_routing(priv);
3605
3606 /* Receive Side Scaling */
3607 if (rx_queues_count > 1)
3608 stmmac_mac_config_rss(priv);
3609 }
3610
stmmac_safety_feat_configuration(struct stmmac_priv * priv)3611 static void stmmac_safety_feat_configuration(struct stmmac_priv *priv)
3612 {
3613 if (priv->dma_cap.asp) {
3614 netdev_info(priv->dev, "Enabling Safety Features\n");
3615 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp,
3616 priv->plat->safety_feat_cfg);
3617 } else {
3618 netdev_info(priv->dev, "No Safety Features support found\n");
3619 }
3620 }
3621
3622 /* STM32MP25xx (dwmac v5.3) states "Do not enable time-based scheduling for
3623 * channels on which the TSO feature is enabled." If we have a skb for a
3624 * channel which has TBS enabled, fall back to software GSO.
3625 */
stmmac_tso_channel_permitted(struct stmmac_priv * priv,unsigned int chan)3626 static bool stmmac_tso_channel_permitted(struct stmmac_priv *priv,
3627 unsigned int chan)
3628 {
3629 /* TSO and TBS cannot co-exist */
3630 return !(priv->dma_conf.tx_queue[chan].tbs & STMMAC_TBS_AVAIL);
3631 }
3632
3633 /**
3634 * stmmac_hw_setup - setup mac in a usable state.
3635 * @dev : pointer to the device structure.
3636 * Description:
3637 * this is the main function to setup the HW in a usable state because the
3638 * dma engine is reset, the core registers are configured (e.g. AXI,
3639 * Checksum features, timers). The DMA is ready to start receiving and
3640 * transmitting.
3641 * Return value:
3642 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3643 * file on failure.
3644 */
static int stmmac_hw_setup(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	bool sph_en;
	u8 chan;
	int ret;

	/* Make sure RX clock is enabled */
	if (priv->hw->phylink_pcs)
		phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs);

	/* Note that clk_rx_i must be running for reset to complete. This
	 * clock may also be required when setting the MAC address.
	 *
	 * Block the receive clock stop for LPI mode at the PHY in case
	 * the link is established with EEE mode active.
	 */
	phylink_rx_clk_stop_block(priv->phylink);

	/* DMA initialization and SW reset */
	ret = stmmac_init_dma_engine(priv);
	if (ret < 0) {
		phylink_rx_clk_stop_unblock(priv->phylink);
		netdev_err(priv->dev, "%s: DMA engine initialization failed\n",
			   __func__);
		return ret;
	}

	/* Copy the MAC addr into the HW */
	stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0);
	phylink_rx_clk_stop_unblock(priv->phylink);

	/* Initialize the MAC Core */
	stmmac_core_init(priv, priv->hw, dev);

	/* Initialize MTL*/
	stmmac_mtl_configuration(priv);

	/* Initialize Safety Features */
	stmmac_safety_feat_configuration(priv);

	/* Disable RX checksum offload in SW state when the core lacks IPC */
	ret = stmmac_rx_ipc(priv, priv->hw);
	if (!ret) {
		netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n");
		priv->plat->rx_coe = STMMAC_RX_COE_NONE;
		priv->hw->rx_csum = 0;
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Set the HW DMA mode and the COE */
	stmmac_dma_operation_mode(priv);

	stmmac_mmc_setup(priv);

	/* Program the per-queue RX interrupt watchdog (irq coalescing) */
	if (priv->use_riwt) {
		u32 queue;

		for (queue = 0; queue < rx_cnt; queue++) {
			if (!priv->rx_riwt[queue])
				priv->rx_riwt[queue] = DEF_DMA_RIWT;

			stmmac_rx_watchdog(priv, priv->ioaddr,
					   priv->rx_riwt[queue], queue);
		}
	}

	/* set TX and RX rings length */
	stmmac_set_rings_length(priv);

	/* Enable TSO, except on channels where TBS is enabled (they are
	 * mutually exclusive, see stmmac_tso_channel_permitted()).
	 */
	if (priv->dma_cap.tsoen && priv->plat->flags & STMMAC_FLAG_TSO_EN) {
		for (chan = 0; chan < tx_cnt; chan++) {
			if (!stmmac_tso_channel_permitted(priv, chan))
				continue;

			stmmac_enable_tso(priv, priv->ioaddr, 1, chan);
		}
	}

	/* Enable Split Header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
	for (chan = 0; chan < rx_cnt; chan++)
		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);


	/* VLAN Tag Insertion */
	if (priv->dma_cap.vlins)
		stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT);

	/* TBS */
	for (chan = 0; chan < tx_cnt; chan++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan];
		int enable = tx_q->tbs & STMMAC_TBS_AVAIL;

		stmmac_enable_tbs(priv, priv->ioaddr, enable, chan);
	}

	/* Configure real RX and TX queues */
	netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use);
	netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use);

	/* Start the ball rolling... */
	stmmac_start_all_dma(priv);

	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_hw_vlan_mode(priv, priv->hw);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}
3759
static void stmmac_free_irq(struct net_device *dev,
			    enum request_irq_err irq_err, int irq_idx)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_msi *msi = priv->msi;
	int j;

	/* Unwind IRQ requests in reverse order of allocation: entering the
	 * switch at the failing stage and falling through releases every IRQ
	 * that was successfully requested before it.
	 */
	switch (irq_err) {
	case REQ_IRQ_ERR_ALL:
		irq_idx = priv->plat->tx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_TX:
		/* Free per-queue TX vectors below the failed index */
		for (j = irq_idx - 1; msi && j >= 0; j--) {
			if (msi->tx_irq[j] > 0) {
				irq_set_affinity_hint(msi->tx_irq[j], NULL);
				free_irq(msi->tx_irq[j],
					 &priv->dma_conf.tx_queue[j]);
			}
		}
		irq_idx = priv->plat->rx_queues_to_use;
		fallthrough;
	case REQ_IRQ_ERR_RX:
		/* Free per-queue RX vectors below the failed index */
		for (j = irq_idx - 1; msi && j >= 0; j--) {
			if (msi->rx_irq[j] > 0) {
				irq_set_affinity_hint(msi->rx_irq[j], NULL);
				free_irq(msi->rx_irq[j],
					 &priv->dma_conf.rx_queue[j]);
			}
		}

		/* Dedicated lines are only freed when distinct from dev->irq */
		if (msi && msi->sfty_ue_irq > 0 && msi->sfty_ue_irq != dev->irq)
			free_irq(msi->sfty_ue_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_UE:
		if (msi && msi->sfty_ce_irq > 0 && msi->sfty_ce_irq != dev->irq)
			free_irq(msi->sfty_ce_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY_CE:
		if (priv->wol_irq > 0 && priv->wol_irq != dev->irq)
			free_irq(priv->wol_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_SFTY:
		if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq)
			free_irq(priv->sfty_irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_WOL:
		/* The main MAC line was the first one requested */
		free_irq(dev->irq, dev);
		fallthrough;
	case REQ_IRQ_ERR_MAC:
	case REQ_IRQ_ERR_NO:
		/* If MAC IRQ request error, no more IRQ to free */
		break;
	}
}
3814
stmmac_msi_init(struct stmmac_priv * priv,struct stmmac_resources * res)3815 static int stmmac_msi_init(struct stmmac_priv *priv,
3816 struct stmmac_resources *res)
3817 {
3818 int i;
3819
3820 priv->msi = devm_kmalloc(priv->device, sizeof(*priv->msi), GFP_KERNEL);
3821 if (!priv->msi)
3822 return -ENOMEM;
3823
3824 priv->msi->sfty_ce_irq = res->sfty_ce_irq;
3825 priv->msi->sfty_ue_irq = res->sfty_ue_irq;
3826
3827 for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
3828 priv->msi->rx_irq[i] = res->rx_irq[i];
3829 for (i = 0; i < MTL_MAX_TX_QUEUES; i++)
3830 priv->msi->tx_irq[i] = res->tx_irq[i];
3831
3832 return 0;
3833 }
3834
/* Request one interrupt line per source for platforms with multi-MSI
 * support: the common MAC line, optional dedicated WoL and safety
 * (common/CE/UE) lines, and one vector per RX and per TX queue.
 *
 * On any failure the request_irq() error is returned and everything
 * requested so far is released via stmmac_free_irq(); the irq_err /
 * irq_idx pair records how far we got so the rollback frees exactly
 * the right set of lines.
 */
static int stmmac_request_irq_multi_msi(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_msi *msi = priv->msi;
	enum request_irq_err irq_err;
	int irq_idx = 0;
	char *int_name;
	int ret;
	int i;

	/* For common interrupt */
	int_name = msi->int_name_mac;
	sprintf(int_name, "%s:%s", dev->name, "mac");
	ret = request_irq(dev->irq, stmmac_mac_interrupt,
			  0, int_name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: alloc mac MSI %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		int_name = msi->int_name_wol;
		sprintf(int_name, "%s:%s", dev->name, "wol");
		ret = request_irq(priv->wol_irq,
				  stmmac_mac_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc wol MSI %d (error: %d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the common Safety Feature Correctible/Uncorrectible
	 * Error line in case of another line is used
	 */
	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
		int_name = msi->int_name_sfty;
		sprintf(int_name, "%s:%s", dev->name, "safety");
		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty MSI %d (error: %d)\n",
				   __func__, priv->sfty_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Correctible Error line in
	 * case of another line is used
	 */
	if (msi->sfty_ce_irq > 0 && msi->sfty_ce_irq != dev->irq) {
		int_name = msi->int_name_sfty_ce;
		sprintf(int_name, "%s:%s", dev->name, "safety-ce");
		ret = request_irq(msi->sfty_ce_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ce MSI %d (error: %d)\n",
				   __func__, msi->sfty_ce_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_CE;
			goto irq_error;
		}
	}

	/* Request the Safety Feature Uncorrectible Error line in
	 * case of another line is used
	 */
	if (msi->sfty_ue_irq > 0 && msi->sfty_ue_irq != dev->irq) {
		int_name = msi->int_name_sfty_ue;
		sprintf(int_name, "%s:%s", dev->name, "safety-ue");
		ret = request_irq(msi->sfty_ue_irq,
				  stmmac_safety_interrupt,
				  0, int_name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc sfty ue MSI %d (error: %d)\n",
				   __func__, msi->sfty_ue_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY_UE;
			goto irq_error;
		}
	}

	/* Request Rx MSI irq: one vector per RX queue, each with its own
	 * handler and the queue struct as cookie.  A zero vector means
	 * the platform did not provide one for that queue.
	 */
	for (i = 0; i < priv->plat->rx_queues_to_use; i++) {
		if (i >= MTL_MAX_RX_QUEUES)
			break;
		if (msi->rx_irq[i] == 0)
			continue;

		int_name = msi->int_name_rx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "rx", i);
		ret = request_irq(msi->rx_irq[i],
				  stmmac_msi_intr_rx,
				  0, int_name, &priv->dma_conf.rx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc rx-%d MSI %d (error: %d)\n",
				   __func__, i, msi->rx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_RX;
			irq_idx = i;
			goto irq_error;
		}
		/* Spread queue vectors round-robin across online CPUs */
		irq_set_affinity_hint(msi->rx_irq[i],
				      cpumask_of(i % num_online_cpus()));
	}

	/* Request Tx MSI irq: same scheme as RX above */
	for (i = 0; i < priv->plat->tx_queues_to_use; i++) {
		if (i >= MTL_MAX_TX_QUEUES)
			break;
		if (msi->tx_irq[i] == 0)
			continue;

		int_name = msi->int_name_tx_irq[i];
		sprintf(int_name, "%s:%s-%d", dev->name, "tx", i);
		ret = request_irq(msi->tx_irq[i],
				  stmmac_msi_intr_tx,
				  0, int_name, &priv->dma_conf.tx_queue[i]);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: alloc tx-%d MSI %d (error: %d)\n",
				   __func__, i, msi->tx_irq[i], ret);
			irq_err = REQ_IRQ_ERR_TX;
			irq_idx = i;
			goto irq_error;
		}
		irq_set_affinity_hint(msi->tx_irq[i],
				      cpumask_of(i % num_online_cpus()));
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, irq_idx);
	return ret;
}
3983
/* Request the interrupt lines when all sources share one IRQ (plus
 * optional dedicated WoL and safety lines).  The lines are requested
 * IRQF_SHARED and the handlers demultiplex the sources.  On failure,
 * the lines requested so far are released via stmmac_free_irq().
 */
static int stmmac_request_irq_single(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	enum request_irq_err irq_err;
	int ret;

	ret = request_irq(dev->irq, stmmac_interrupt,
			  IRQF_SHARED, dev->name, dev);
	if (unlikely(ret < 0)) {
		netdev_err(priv->dev,
			   "%s: ERROR: allocating the IRQ %d (error: %d)\n",
			   __func__, dev->irq, ret);
		irq_err = REQ_IRQ_ERR_MAC;
		goto irq_error;
	}

	/* Request the Wake IRQ in case of another line
	 * is used for WoL
	 */
	if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) {
		ret = request_irq(priv->wol_irq, stmmac_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the WoL IRQ %d (%d)\n",
				   __func__, priv->wol_irq, ret);
			irq_err = REQ_IRQ_ERR_WOL;
			goto irq_error;
		}
	}

	/* Request the common Safety Feature Correctible/Uncorrectible
	 * Error line in case of another line is used
	 */
	if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) {
		ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt,
				  IRQF_SHARED, dev->name, dev);
		if (unlikely(ret < 0)) {
			netdev_err(priv->dev,
				   "%s: ERROR: allocating the sfty IRQ %d (%d)\n",
				   __func__, priv->sfty_irq, ret);
			irq_err = REQ_IRQ_ERR_SFTY;
			goto irq_error;
		}
	}

	return 0;

irq_error:
	stmmac_free_irq(dev, irq_err, 0);
	return ret;
}
4036
stmmac_request_irq(struct net_device * dev)4037 static int stmmac_request_irq(struct net_device *dev)
4038 {
4039 struct stmmac_priv *priv = netdev_priv(dev);
4040 int ret;
4041
4042 /* Request the IRQ lines */
4043 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN)
4044 ret = stmmac_request_irq_multi_msi(dev);
4045 else
4046 ret = stmmac_request_irq_single(dev);
4047
4048 return ret;
4049 }
4050
4051 /**
4052 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
4053 * @priv: driver private structure
4054 * @mtu: MTU to setup the dma queue and buf with
4055 * Description: Allocate and generate a dma_conf based on the provided MTU.
4056 * Allocate the Tx/Rx DMA queue and init them.
4057 * Return value:
4058 * the dma_conf allocated struct on success and an appropriate ERR_PTR on failure.
4059 */
static struct stmmac_dma_conf *
stmmac_setup_dma_desc(struct stmmac_priv *priv, unsigned int mtu)
{
	struct stmmac_dma_conf *dma_conf;
	int bfsize, ret;
	u8 chan;

	dma_conf = kzalloc_obj(*dma_conf);
	if (!dma_conf) {
		netdev_err(priv->dev, "%s: DMA conf allocation failed\n",
			   __func__);
		return ERR_PTR(-ENOMEM);
	}

	/* Returns 0 or BUF_SIZE_16KiB if mtu > 8KiB and dwmac4 or ring mode */
	bfsize = stmmac_set_16kib_bfsize(priv, mtu);
	if (bfsize < 0)
		bfsize = 0;

	/* Fall back to an MTU-derived buffer size when the core did not
	 * force the 16KiB layout above.
	 */
	if (bfsize < BUF_SIZE_16KiB)
		bfsize = stmmac_set_bfsize(mtu);

	dma_conf->dma_buf_sz = bfsize;
	/* Chose the tx/rx size from the already defined one in the
	 * priv struct. (if defined)
	 */
	dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size;
	dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size;

	if (!dma_conf->dma_tx_size)
		dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE;
	if (!dma_conf->dma_rx_size)
		dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE;

	/* Earlier check for TBS: the per-queue TBS availability flag must
	 * be decided before descriptors are allocated, because TBS queues
	 * use the (larger) enhanced descriptor layout.
	 */
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) {
		struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan];
		int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en;

		/* Setup per-TXQ tbs flag before TX descriptor alloc */
		tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0;
	}

	ret = alloc_dma_desc_resources(priv, dma_conf);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto alloc_error;
	}

	ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	return dma_conf;

	/* goto-based unwind: free exactly what was acquired */
init_error:
	free_dma_desc_resources(priv, dma_conf);
alloc_error:
	kfree(dma_conf);
	return ERR_PTR(ret);
}
4125
4126 /**
4127 * __stmmac_open - open entry point of the driver
4128 * @dev : pointer to the device structure.
4129 * @dma_conf : structure to take the dma data
4130 * Description:
4131 * This function is the open entry point of the driver.
4132 * Return value:
4133 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4134 * file on failure.
4135 */
static int __stmmac_open(struct net_device *dev,
			 struct stmmac_dma_conf *dma_conf)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 chan;
	int ret;

	/* Carry over the per-queue TBS-enabled flags from the previous
	 * configuration into the freshly built dma_conf, then install the
	 * new configuration wholesale (dma_conf is copied, so the caller
	 * still owns and frees the passed-in struct).
	 */
	for (int i = 0; i < MTL_MAX_TX_QUEUES; i++)
		if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN)
			dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs;
	memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf));

	stmmac_reset_queues_param(priv);

	ret = stmmac_hw_setup(dev);
	if (ret < 0) {
		netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
		goto init_error;
	}

	stmmac_setup_ptp(priv);

	stmmac_init_coalesce(priv);

	phylink_start(priv->phylink);

	/* Re-program VLAN filters that were configured while down */
	stmmac_vlan_restore(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	stmmac_enable_all_queues(priv);
	netif_tx_start_all_queues(priv->dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	/* Unwind in reverse order of the bring-up above */
	phylink_stop(priv->phylink);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	stmmac_release_ptp(priv);
init_error:
	return ret;
}
4184
/* stmmac_open - ndo_open entry point
 * @dev: device pointer
 *
 * Allocates the DMA descriptor rings for the current MTU, resumes the
 * device (runtime PM), connects the PHY and powers up the SerDes (when
 * it must be up before the PHY link), then performs the common bring-up
 * in __stmmac_open().  Each failure path unwinds exactly the steps
 * already completed.
 *
 * Returns 0 on success or a negative errno.
 */
static int stmmac_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_dma_conf *dma_conf;
	int ret;

	/* Initialise the tx lpi timer, converting from msec to usec */
	if (!priv->tx_lpi_timer)
		priv->tx_lpi_timer = eee_timer * 1000;

	dma_conf = stmmac_setup_dma_desc(priv, dev->mtu);
	if (IS_ERR(dma_conf))
		return PTR_ERR(dma_conf);

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		goto err_dma_resources;

	ret = stmmac_init_phy(dev);
	if (ret)
		goto err_runtime_pm;

	if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
		ret = stmmac_legacy_serdes_power_up(priv);
		if (ret < 0)
			goto err_disconnect_phy;
	}

	ret = __stmmac_open(dev, dma_conf);
	if (ret)
		goto err_serdes;

	/* __stmmac_open() copied dma_conf into priv; the temporary struct
	 * is no longer needed.
	 */
	kfree(dma_conf);

	/* We may have called phylink_speed_down before */
	phylink_speed_up(priv->phylink);

	return ret;

err_serdes:
	stmmac_legacy_serdes_power_down(priv);
err_disconnect_phy:
	phylink_disconnect_phy(priv->phylink);
err_runtime_pm:
	pm_runtime_put(priv->device);
err_dma_resources:
	free_dma_desc_resources(priv, dma_conf);
	kfree(dma_conf);
	return ret;
}
4235
/* Common teardown: stop the link, NAPI, TX timers, IRQs and DMA, then
 * free the descriptor rings and PTP/FPE state.  The PHY disconnect and
 * runtime-PM release are left to the caller (stmmac_release()).
 */
static void __stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 chan;

	/* Stop and disconnect the PHY */
	phylink_stop(priv->phylink);

	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	netif_tx_disable(dev);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA and clear the descriptors */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	stmmac_release_ptp(priv);

	if (stmmac_fpe_supported(priv))
		ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
}
4265
4266 /**
4267 * stmmac_release - close entry point of the driver
4268 * @dev : device pointer.
4269 * Description:
4270 * This is the stop entry point of the driver.
4271 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* If the PHY or MAC has WoL enabled, then the PHY will not be
	 * suspended when phylink_stop() is called below. Set the PHY
	 * to its slowest speed to save power.
	 */
	if (device_may_wakeup(priv->device))
		phylink_speed_down(priv->phylink, false);

	__stmmac_release(dev);

	/* Finish what __stmmac_release() left for us: SerDes power,
	 * the PHY attachment, and the runtime-PM reference taken in
	 * stmmac_open().
	 */
	stmmac_legacy_serdes_power_down(priv);
	phylink_disconnect_phy(priv->phylink);
	pm_runtime_put(priv->device);

	return 0;
}
4291
/* Try to program a dedicated TX descriptor that makes the hardware
 * insert the skb's VLAN tag.  Returns true when a descriptor was
 * consumed (cur_tx advanced, OWN bit set) and the caller should mark
 * the packet for HW VLAN insertion; false when the feature is absent,
 * the skb carries no tag, or descriptor programming failed.
 */
static bool stmmac_vlan_insert(struct stmmac_priv *priv, struct sk_buff *skb,
			       struct stmmac_tx_queue *tx_q)
{
	struct dma_desc *p;
	u16 tag = 0x0;

	if (!priv->dma_cap.vlins || !skb_vlan_tag_present(skb))
		return false;

	tag = skb_vlan_tag_get(skb);

	/* TBS queues use the enhanced descriptor layout */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		p = &tx_q->dma_entx[tx_q->cur_tx].basic;
	else
		p = &tx_q->dma_tx[tx_q->cur_tx];

	if (stmmac_set_desc_vlan_tag(priv, p, tag, 0x0, 0x0))
		return false;

	/* Hand the context descriptor to the DMA only after it is fully
	 * written, then move on to the next ring slot.
	 */
	stmmac_set_tx_owner(priv, p);
	tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);
	return true;
}
4315
4316 /**
 * stmmac_tso_allocator - fill TX descriptors for a TSO payload buffer
4318 * @priv: driver private structure
4319 * @des: buffer start address
4320 * @total_len: total length to fill in descriptors
4321 * @last_segment: condition for the last descriptor
4322 * @queue: TX queue index
4323 * Description:
4324 * This function fills descriptor and request new descriptors according to
4325 * buffer length to fill
4326 */
static void stmmac_tso_allocator(struct stmmac_priv *priv, dma_addr_t des,
				 int total_len, bool last_segment, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct dma_desc *desc;
	u32 buff_size;
	int tmp_len;

	tmp_len = total_len;

	/* Split the buffer into TSO_MAX_BUFF_SIZE sized chunks, one
	 * descriptor per chunk, advancing cur_tx for each.
	 */
	while (tmp_len > 0) {
		dma_addr_t curr_addr;

		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
						 priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);

		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		curr_addr = des + (total_len - tmp_len);
		stmmac_set_desc_addr(priv, desc, curr_addr);
		buff_size = tmp_len >= TSO_MAX_BUFF_SIZE ?
			    TSO_MAX_BUFF_SIZE : tmp_len;

		/* LAST_SEGMENT is only set on the final chunk of the final
		 * buffer of the skb.
		 */
		stmmac_prepare_tso_tx_desc(priv, desc, 0, buff_size,
					   0, 1,
					   (last_segment) && (tmp_len <= TSO_MAX_BUFF_SIZE),
					   0, 0);

		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
}
4362
/* Publish the freshly written TX descriptors of @queue to the hardware
 * by advancing the queue's tail pointer to cur_tx.
 */
static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	/* The own bit must be the latest setting done when prepare the
	 * descriptor and then barrier is needed to make sure that
	 * all is coherent before granting the DMA engine.
	 */
	wmb();

	stmmac_set_queue_tx_tail_ptr(priv, tx_q, queue, tx_q->cur_tx);
}
4375
/* Compute the set of GSO types handled by the hardware (the TX path
 * routes skbs whose gso_type intersects this mask to stmmac_tso_xmit()).
 * An empty mask disables hardware segmentation offload entirely.
 */
static void stmmac_set_gso_types(struct stmmac_priv *priv, bool tso)
{
	u32 types = 0;

	if (tso) {
		/* Manage oversized TCP frames for GMAC4 device */
		types = SKB_GSO_TCPV4 | SKB_GSO_TCPV6;
		if (priv->plat->core_type == DWMAC_CORE_GMAC4)
			types |= SKB_GSO_UDP_L4;
	}

	priv->gso_enabled_types = types;
}
4387
/* Advertise TSO/USO in ndev->hw_features when the hardware supports it,
 * the platform enables it, and the DMA TxPBL setting is high enough,
 * then enable the matching GSO-type mask in the driver.
 */
static void stmmac_set_gso_features(struct net_device *ndev)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	const struct stmmac_dma_cfg *dma_cfg;
	int txpbl;

	if (priv->dma_cap.tsoen)
		dev_info(priv->device, "TSO supported\n");

	/* Platform must explicitly opt in */
	if (!(priv->plat->flags & STMMAC_FLAG_TSO_EN))
		return;

	if (!priv->dma_cap.tsoen) {
		dev_warn(priv->device, "platform requests unsupported TSO\n");
		return;
	}

	/* FIXME:
	 * STM32MP151 (v4.2 userver v4.0) states that TxPBL must be >= 4. It
	 * is not clear whether PBLx8 (which multiplies the PBL value by 8)
	 * influences this.
	 */
	dma_cfg = priv->plat->dma_cfg;
	txpbl = dma_cfg->txpbl ?: dma_cfg->pbl;
	if (txpbl < 4) {
		dev_warn(priv->device, "txpbl(%d) is too low for TSO\n", txpbl);
		return;
	}

	ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6;
	/* UDP segmentation offload is GMAC4-only */
	if (priv->plat->core_type == DWMAC_CORE_GMAC4)
		ndev->hw_features |= NETIF_F_GSO_UDP_L4;

	stmmac_set_gso_types(priv, true);

	dev_info(priv->device, "TSO feature enabled\n");
}
4425
stmmac_tso_header_size(struct sk_buff * skb)4426 static size_t stmmac_tso_header_size(struct sk_buff *skb)
4427 {
4428 size_t size;
4429
4430 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
4431 size = skb_transport_offset(skb) + sizeof(struct udphdr);
4432 else
4433 size = skb_tcp_all_headers(skb);
4434
4435 return size;
4436 }
4437
4438 /* STM32MP151 (dwmac v4.2) and STM32MP25xx (dwmac v5.3) states for TDES2 normal
4439 * (read format) descriptor that the maximum header length supported for the
4440 * TSO feature is 1023 bytes.
4441 *
4442 * While IPv4 is limited to MAC+VLAN+IPv4+ext+TCP+ext = 138 bytes, the IPv6
4443 * extension headers aren't similarly limited.
4444 *
4445 * Fall back to software GSO for these skbs. Also check that the MSS is >=
4446 * the recommended 64 bytes (documented in ETH_DMACxCR register description),
 * and that the header plus MSS is not larger than 16383 (documented in
4448 * "Building the Descriptor and the packet for the TSO feature").
4449 */
static bool stmmac_tso_valid_packet(struct sk_buff *skb)
{
	size_t header_len = stmmac_tso_header_size(skb);
	unsigned int gso_size = skb_shinfo(skb)->gso_size;

	/* NOTE(review): the block comment above says the header plus MSS
	 * must be "not larger than 16383" (i.e. <= 16383), while this
	 * check rejects exactly 16383 — confirm against the databook
	 * whether the limit is inclusive.
	 */
	return header_len <= 1023 && gso_size >= 64 &&
	       header_len + gso_size < 16383;
}
4458
4459 /**
4460 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4461 * @skb : the socket buffer
4462 * @dev : device pointer
4463 * Description: this is the transmit function that is called on TSO frames
4464 * (support available on GMAC4 and newer chips).
4465 * Diagram below show the ring programming in case of TSO frames:
4466 *
4467 * First Descriptor
4468 * --------
4469 * | DES0 |---> buffer1 = L2/L3/L4 header
4470 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4471 * | | width is 32-bit, but we never use it.
4472 * | | Also can be used as the most-significant 8-bits or 16-bits of
4473 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4474 * | | or 48-bit, and we always use it.
4475 * | DES2 |---> buffer1 len
4476 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4477 * --------
4478 * --------
4479 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4480 * | DES1 |---> same as the First Descriptor
4481 * | DES2 |---> buffer1 len
4482 * | DES3 |
4483 * --------
4484 * |
4485 * ...
4486 * |
4487 * --------
4488 * | DES0 |---> buffer1 = Split TCP Payload
4489 * | DES1 |---> same as the First Descriptor
4490 * | DES2 |---> buffer1 len
4491 * | DES3 |
4492 * --------
4493 *
4494 * mss is fixed when enable tso, so w/o programming the TDES3 ctx field.
4495 */
static netdev_tx_t stmmac_tso_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct dma_desc *desc, *first, *mss_desc = NULL;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int first_entry, tx_packets;
	struct stmmac_txq_stats *txq_stats;
	struct stmmac_tx_queue *tx_q;
	bool set_ic, is_last_segment;
	u32 pay_len, mss, queue;
	int i, first_tx, nfrags;
	u8 proto_hdr_len, hdr;
	dma_addr_t des;

	nfrags = skb_shinfo(skb)->nr_frags;
	queue = skb_get_queue_mapping(skb);

	tx_q = &priv->dma_conf.tx_queue[queue];
	txq_stats = &priv->xstats.txq_stats[queue];
	first_tx = tx_q->cur_tx;

	/* Compute header lengths: proto_hdr_len covers everything up to
	 * and including L4; hdr is just the L4 header length, programmed
	 * into the descriptor in 4-byte units below.
	 */
	proto_hdr_len = stmmac_tso_header_size(skb);
	if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4)
		hdr = sizeof(struct udphdr);
	else
		hdr = tcp_hdrlen(skb);

	/* Desc availability based on threshold should be enough safe */
	if (unlikely(stmmac_tx_avail(priv, queue) <
		(((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */

	mss = skb_shinfo(skb)->gso_size;

	/* set new MSS value if needed: this consumes an extra context
	 * descriptor, whose OWN bit is set last (see the mss_desc
	 * handling near the end of this function).
	 */
	if (mss != tx_q->mss) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			mss_desc = &tx_q->dma_tx[tx_q->cur_tx];

		stmmac_set_mss(priv, mss_desc, mss);
		tx_q->mss = mss;
		tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx,
						 priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]);
	}

	if (netif_msg_tx_queued(priv)) {
		pr_info("%s: hdrlen %d, hdr_len %d, pay_len %d, mss %d\n",
			__func__, hdr, proto_hdr_len, pay_len, mss);
		pr_info("\tskb->len %d, skb->data_len %d\n", skb->len,
			skb->data_len);
	}

	first_entry = tx_q->cur_tx;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		desc = &tx_q->dma_entx[first_entry].basic;
	else
		desc = &tx_q->dma_tx[first_entry];
	first = desc;

	/* first descriptor: fill Headers on Buf1 */
	des = dma_map_single(priv->device, skb->data, skb_headlen(skb),
			     DMA_TO_DEVICE);
	if (dma_mapping_error(priv->device, des))
		goto dma_map_err;

	stmmac_set_desc_addr(priv, first, des);
	stmmac_tso_allocator(priv, des + proto_hdr_len, pay_len,
			     (nfrags == 0), queue);

	/* In case two or more DMA transmit descriptors are allocated for this
	 * non-paged SKB data, the DMA buffer address should be saved to
	 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor,
	 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee
	 * that stmmac_tx_clean() does not unmap the entire DMA buffer too early
	 * since the tail areas of the DMA buffer can be accessed by DMA engine
	 * sooner or later.
	 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf
	 * corresponding to the last descriptor, stmmac_tx_clean() will unmap
	 * this DMA buffer right after the DMA engine completely finishes the
	 * full buffer transmission.
	 */
	stmmac_set_tx_skb_dma_entry(tx_q, tx_q->cur_tx, des, skb_headlen(skb),
				    false);

	/* Prepare fragments */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		des = skb_frag_dma_map(priv->device, frag, 0,
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, des))
			goto dma_map_err;

		stmmac_tso_allocator(priv, des, skb_frag_size(frag),
				     (i == nfrags - 1), queue);

		stmmac_set_tx_skb_dma_entry(tx_q, tx_q->cur_tx, des,
					    skb_frag_size(frag), true);
	}

	stmmac_set_tx_dma_last_segment(tx_q, tx_q->cur_tx);

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[tx_q->cur_tx] = skb;

	/* Manage tx mitigation: decide whether this packet's last
	 * descriptor should raise a completion interrupt, based on the
	 * per-queue frame coalescing threshold.
	 */
	tx_packets = CIRC_CNT(tx_q->cur_tx + 1, first_tx,
			      priv->dma_conf.dma_tx_size);
	tx_q->tx_count_frames += tx_packets;

	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		if (tx_q->tbs & STMMAC_TBS_AVAIL)
			desc = &tx_q->dma_entx[tx_q->cur_tx].basic;
		else
			desc = &tx_q->dma_tx[tx_q->cur_tx];

		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	tx_q->cur_tx = STMMAC_NEXT_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size);

	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	u64_stats_inc(&txq_stats->q.tx_tso_frames);
	u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first, priv->sarc_type);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
		     priv->hwts_tx_en)) {
		/* declare that device is doing timestamping */
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		stmmac_enable_tx_timestamp(priv, first);
	}

	/* If we only have one entry used, then the first entry is the last
	 * segment.
	 */
	is_last_segment = CIRC_CNT(tx_q->cur_tx, first_entry,
				   priv->dma_conf.dma_tx_size) == 1;

	/* Complete the first descriptor before granting the DMA */
	stmmac_prepare_tso_tx_desc(priv, first, 1, proto_hdr_len, 0, 1,
				   is_last_segment, hdr / 4,
				   skb->len - proto_hdr_len);

	/* If context desc is used to change MSS */
	if (mss_desc) {
		/* Make sure that first descriptor has been completely
		 * written, including its own bit. This is because MSS is
		 * actually before first descriptor, so we need to make
		 * sure that MSS's own bit is the last thing written.
		 */
		dma_wmb();
		stmmac_set_tx_owner(priv, mss_desc);
	}

	if (netif_msg_pktdata(priv)) {
		pr_info("%s: curr=%d dirty=%d f=%d, e=%d, f_p=%p, nfrags %d\n",
			__func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			tx_q->cur_tx, first, nfrags);
		pr_info(">>> frame to be transmitted: ");
		print_pkt(skb->data, skb_headlen(skb));
	}

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);
	skb_tx_timestamp(skb);

	/* Barrier + tail pointer update hands the ring to the DMA engine */
	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	dev_err(priv->device, "Tx dma map failed\n");
	dev_kfree_skb(skb);
	priv->xstats.tx_dropped++;
	return NETDEV_TX_OK;
}
4720
4721 /**
4722 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4723 * @skb: socket buffer to check
4724 *
4725 * Check if a packet has an ethertype that will trigger the IP header checks
4726 * and IP/TCP checksum engine of the stmmac core.
4727 *
4728 * Return: true if the ethertype can trigger the checksum engine, false
4729 * otherwise
4730 */
stmmac_has_ip_ethertype(struct sk_buff * skb)4731 static bool stmmac_has_ip_ethertype(struct sk_buff *skb)
4732 {
4733 int depth = 0;
4734 __be16 proto;
4735
4736 proto = __vlan_get_protocol(skb, eth_header_parse_protocol(skb),
4737 &depth);
4738
4739 return (depth <= ETH_HLEN) &&
4740 (proto == htons(ETH_P_IP) || proto == htons(ETH_P_IPV6));
4741 }
4742
4743 /**
4744 * stmmac_xmit - Tx entry point of the driver
4745 * @skb : the socket buffer
4746 * @dev : device pointer
4747 * Description : this is the tx entry point of the driver.
4748 * It programs the chain or the ring and supports oversized frames
4749 * and SG feature.
4750 */
static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bool enh_desc, has_vlan, set_ic, is_jumbo = false;
	struct stmmac_priv *priv = netdev_priv(dev);
	unsigned int nopaged_len = skb_headlen(skb);
	u32 queue = skb_get_queue_mapping(skb);
	int nfrags = skb_shinfo(skb)->nr_frags;
	unsigned int first_entry, tx_packets;
	struct stmmac_txq_stats *txq_stats;
	struct dma_desc *desc, *first_desc;
	struct stmmac_tx_queue *tx_q;
	int i, csum_insertion = 0;
	int entry, first_tx;
	dma_addr_t dma_addr;
	u32 sdu_len;

	/* Leave software-timer-driven LPI before starting a transmission */
	if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en)
		stmmac_stop_sw_lpi(priv);

	/* GSO packets of a type the hardware can segment take the TSO path */
	if (skb_is_gso(skb) &&
	    skb_shinfo(skb)->gso_type & priv->gso_enabled_types)
		return stmmac_tso_xmit(skb, dev);

	/* Enforce the per-queue EST max SDU limit, if configured */
	if (priv->est && priv->est->enable &&
	    priv->est->max_sdu[queue]) {
		sdu_len = skb->len;
		/* Add VLAN tag length if VLAN tag insertion offload is requested */
		if (priv->dma_cap.vlins && skb_vlan_tag_present(skb))
			sdu_len += VLAN_HLEN;
		if (sdu_len > priv->est->max_sdu[queue]) {
			priv->xstats.max_sdu_txq_drop[queue]++;
			goto max_sdu_err;
		}
	}

	/* Need one descriptor for the head plus one per fragment */
	if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
		if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) {
			netif_tx_stop_queue(netdev_get_tx_queue(priv->dev,
								queue));
			/* This is a hard error, log it. */
			netdev_err(priv->dev,
				   "%s: Tx Ring full when queue awake\n",
				   __func__);
		}
		return NETDEV_TX_BUSY;
	}

	tx_q = &priv->dma_conf.tx_queue[queue];
	first_tx = tx_q->cur_tx;

	/* Check if VLAN can be inserted by HW */
	has_vlan = stmmac_vlan_insert(priv, skb, tx_q);

	entry = tx_q->cur_tx;
	first_entry = entry;
	WARN_ON(tx_q->tx_skbuff[first_entry]);

	desc = stmmac_get_tx_desc(priv, tx_q, entry);
	first_desc = desc;

	if (has_vlan)
		stmmac_set_desc_vlan(priv, first_desc, STMMAC_VLAN_INSERT);

	enh_desc = priv->plat->enh_desc;
	/* To program the descriptors according to the size of the frame */
	if (enh_desc)
		is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc);

	csum_insertion = skb->ip_summed == CHECKSUM_PARTIAL;

	if (unlikely(is_jumbo)) {
		entry = stmmac_jumbo_frm(priv, tx_q, skb, csum_insertion);
		if (unlikely(entry < 0) && (entry != -EINVAL))
			goto dma_map_err;
	} else {
		bool last_segment = (nfrags == 0);

		/* Map the linear part of the skb for the first descriptor */
		dma_addr = dma_map_single(priv->device, skb->data,
					  nopaged_len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			goto dma_map_err;

		stmmac_set_tx_skb_dma_entry(tx_q, first_entry, dma_addr,
					    nopaged_len, false);

		stmmac_set_desc_addr(priv, first_desc, dma_addr);

		if (last_segment)
			stmmac_set_tx_dma_last_segment(tx_q, first_entry);

		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
			     priv->hwts_tx_en)) {
			/* declare that device is doing timestamping */
			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
			stmmac_enable_tx_timestamp(priv, first_desc);
		}

		/* Prepare the first descriptor without setting the OWN bit */
		stmmac_prepare_tx_desc(priv, first_desc, 1, nopaged_len,
				       csum_insertion, priv->descriptor_mode,
				       0, last_segment, skb->len);
	}

	if (priv->sarc_type)
		stmmac_set_desc_sarc(priv, first_desc, priv->sarc_type);

	/* STMMAC_TBS_EN can only be set if STMMAC_TBS_AVAIL has already
	 * been set, which means the underlying type of the descriptors
	 * will be struct stmmac_edesc. Therefore, it is safe to convert
	 * the basic descriptor to the enhanced descriptor here.
	 */
	if (tx_q->tbs & STMMAC_TBS_EN) {
		struct timespec64 ts = ns_to_timespec64(skb->tstamp);

		stmmac_set_desc_tbs(priv, dma_desc_to_edesc(first_desc),
				    ts.tv_sec, ts.tv_nsec);
	}

	/* Map and program one descriptor per paged fragment; the OWN bit
	 * is set on these immediately (first descriptor stays un-owned).
	 */
	for (i = 0; i < nfrags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		unsigned int frag_size = skb_frag_size(frag);
		bool last_segment = (i == (nfrags - 1));

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
		WARN_ON(tx_q->tx_skbuff[entry]);

		desc = stmmac_get_tx_desc(priv, tx_q, entry);

		dma_addr = skb_frag_dma_map(priv->device, frag, 0, frag_size,
					    DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			goto dma_map_err; /* should reuse desc w/o issues */

		stmmac_set_tx_skb_dma_entry(tx_q, entry, dma_addr, frag_size,
					    true);
		stmmac_set_desc_addr(priv, desc, dma_addr);

		/* Prepare the descriptor and set the own bit too */
		stmmac_prepare_tx_desc(priv, desc, 0, frag_size, csum_insertion,
				       priv->descriptor_mode, 1, last_segment,
				       skb->len);
	}

	stmmac_set_tx_dma_last_segment(tx_q, entry);

	/* Only the last descriptor gets to point to the skb. */
	tx_q->tx_skbuff[entry] = skb;

	/* According to the coalesce parameter the IC bit for the latest
	 * segment is reset and the timer re-started to clean the tx status.
	 * This approach takes care about the fragments: desc is the first
	 * element in case of no SG.
	 */
	tx_packets = CIRC_CNT(entry + 1, first_tx, priv->dma_conf.dma_tx_size);
	tx_q->tx_count_frames += tx_packets;

	/* Always request a completion interrupt for timestamped frames;
	 * otherwise follow the tx-frames coalescing setting (0 = never).
	 */
	if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en)
		set_ic = true;
	else if (!priv->tx_coal_frames[queue])
		set_ic = false;
	else if (tx_packets > priv->tx_coal_frames[queue])
		set_ic = true;
	else if ((tx_q->tx_count_frames %
		  priv->tx_coal_frames[queue]) < tx_packets)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		desc = stmmac_get_tx_desc(priv, tx_q, entry);
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, desc);
	}

	/* We've used all descriptors we need for this skb, however,
	 * advance cur_tx so that it references a fresh descriptor.
	 * ndo_start_xmit will fill this descriptor the next time it's
	 * called and stmmac_tx_clean may clean up to this descriptor.
	 */
	entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	if (netif_msg_pktdata(priv)) {
		netdev_dbg(priv->dev,
			   "%s: curr=%d dirty=%d f=%d, e=%d, first=%p, nfrags=%d",
			   __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry,
			   entry, first_desc, nfrags);

		netdev_dbg(priv->dev, ">>> frame to be transmitted: ");
		print_pkt(skb->data, skb->len);
	}

	/* Stop the queue if a maximally-fragmented skb may no longer fit */
	if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) {
		netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n",
			  __func__);
		netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue));
	}

	txq_stats = &priv->xstats.txq_stats[queue];
	u64_stats_update_begin(&txq_stats->q_syncp);
	u64_stats_add(&txq_stats->q.tx_bytes, skb->len);
	if (set_ic)
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
	u64_stats_update_end(&txq_stats->q_syncp);

	/* Set the OWN bit on the first descriptor now that all descriptors
	 * for this skb are populated.
	 */
	stmmac_set_tx_owner(priv, first_desc);

	netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len);

	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);
	skb_tx_timestamp(skb);
	stmmac_flush_tx_descriptors(priv, queue);
	stmmac_tx_timer_arm(priv, queue);

	return NETDEV_TX_OK;

dma_map_err:
	netdev_err(priv->dev, "Tx DMA map failed\n");
max_sdu_err:
	dev_kfree_skb(skb);
	priv->xstats.tx_dropped++;
	return NETDEV_TX_OK;
}
4977
/* Per-skb feature fixup: drop offload features the hardware cannot
 * honour for this particular packet/queue combination.
 */
static netdev_features_t stmmac_features_check(struct sk_buff *skb,
					       struct net_device *dev,
					       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u16 queue = skb_get_queue_mapping(skb);
	bool hw_csum_usable;

	/* Some DWMAC instances implement TX checksum offload only on a
	 * subset of queues, and the engine only recognises plain
	 * IPv4/IPv6 ethertypes (so e.g. most DSA-tagged frames miss it).
	 * In either case fall back to software checksumming; note this
	 * implicitly disables TSO too, see harmonize_features() in
	 * net/core/dev.c.
	 */
	hw_csum_usable = !priv->plat->tx_queues_cfg[queue].coe_unsupported &&
			 stmmac_has_ip_ethertype(skb);
	if (!hw_csum_usable)
		features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);

	if (skb_is_gso(skb)) {
		if (!stmmac_tso_channel_permitted(priv, queue) ||
		    !stmmac_tso_valid_packet(skb))
			features &= ~NETIF_F_GSO_MASK;

		/* When hardware TSO stays enabled, keep the VLAN tag in
		 * the packet payload: segments produced by the TSO
		 * engine would otherwise go out un-tagged.
		 */
		if (features & NETIF_F_GSO_MASK)
			features &= ~(NETIF_F_HW_VLAN_STAG_TX |
				      NETIF_F_HW_VLAN_CTAG_TX);
	}

	return vlan_features_check(skb, features);
}
5017
stmmac_rx_vlan(struct net_device * dev,struct sk_buff * skb)5018 static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb)
5019 {
5020 struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb);
5021 __be16 vlan_proto = veth->h_vlan_proto;
5022 u16 vlanid;
5023
5024 if ((vlan_proto == htons(ETH_P_8021Q) &&
5025 dev->features & NETIF_F_HW_VLAN_CTAG_RX) ||
5026 (vlan_proto == htons(ETH_P_8021AD) &&
5027 dev->features & NETIF_F_HW_VLAN_STAG_RX)) {
5028 /* pop the vlan tag */
5029 vlanid = ntohs(veth->h_vlan_TCI);
5030 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2);
5031 skb_pull(skb, VLAN_HLEN);
5032 __vlan_hwaccel_put_tag(skb, vlan_proto, vlanid);
5033 }
5034 }
5035
5036 /**
5037 * stmmac_rx_refill - refill used skb preallocated buffers
5038 * @priv: driver private structure
5039 * @queue: RX queue index
5040 * Description : this is to reallocate the skb for the reception process
5041 * that is based on zero-copy.
5042 */
static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int entry = rx_q->dirty_rx;
	gfp_t gfp = (GFP_ATOMIC | __GFP_NOWARN);

	/* Hosts with a narrow DMA width need buffers from the DMA32 zone */
	if (priv->dma_cap.host_dma_width <= 32)
		gfp |= GFP_DMA32;

	while (dirty-- > 0) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		struct dma_desc *p;
		bool use_rx_wd;

		p = stmmac_get_rx_desc(priv, rx_q, entry);

		if (!buf->page) {
			buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->page)
				break;
		}

		/* Split Header mode needs a second buffer for the payload */
		if (priv->sph_active && !buf->sec_page) {
			buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp);
			if (!buf->sec_page)
				break;

			buf->sec_addr = page_pool_get_dma_addr(buf->sec_page);
		}

		buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset;

		stmmac_set_desc_addr(priv, p, buf->addr);
		if (priv->sph_active)
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true);
		else
			stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false);
		stmmac_refill_desc3(priv, rx_q, p);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Request the RX watchdog only when RIWT is in use and frame
		 * coalescing still has frames outstanding.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Descriptor fields must be visible before OWN is granted */
		dma_wmb();
		stmmac_set_rx_owner(priv, p, use_rx_wd);

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}
	rx_q->dirty_rx = entry;
	stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
	/* Wake up Rx DMA from the suspend state if required */
	stmmac_enable_dma_reception(priv, priv->ioaddr, queue);
}
5103
stmmac_rx_buf1_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5104 static unsigned int stmmac_rx_buf1_len(struct stmmac_priv *priv,
5105 struct dma_desc *p,
5106 int status, unsigned int len)
5107 {
5108 unsigned int plen = 0, hlen = 0;
5109 int coe = priv->hw->rx_csum;
5110
5111 /* Not first descriptor, buffer is always zero */
5112 if (priv->sph_active && len)
5113 return 0;
5114
5115 /* First descriptor, get split header length */
5116 stmmac_get_rx_header_len(priv, p, &hlen);
5117 if (priv->sph_active && hlen) {
5118 priv->xstats.rx_split_hdr_pkt_n++;
5119 return hlen;
5120 }
5121
5122 /* First descriptor, not last descriptor and not split header */
5123 if (status & rx_not_ls)
5124 return priv->dma_conf.dma_buf_sz;
5125
5126 plen = stmmac_get_rx_frame_len(priv, p, coe);
5127
5128 /* First descriptor and last descriptor and not split header */
5129 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen);
5130 }
5131
stmmac_rx_buf2_len(struct stmmac_priv * priv,struct dma_desc * p,int status,unsigned int len)5132 static unsigned int stmmac_rx_buf2_len(struct stmmac_priv *priv,
5133 struct dma_desc *p,
5134 int status, unsigned int len)
5135 {
5136 int coe = priv->hw->rx_csum;
5137 unsigned int plen = 0;
5138
5139 /* Not split header, buffer is not available */
5140 if (!priv->sph_active)
5141 return 0;
5142
5143 /* For GMAC4, when split header is enabled, in some rare cases, the
5144 * hardware does not fill buf2 of the first descriptor with payload.
5145 * Thus we cannot assume buf2 is always fully filled if it is not
5146 * the last descriptor. Otherwise, the length of buf2 of the second
5147 * descriptor will be calculated wrong and cause an oops.
5148 *
5149 * If this is the last descriptor, 'plen' is the length of the
5150 * received packet that was transferred to system memory.
5151 * Otherwise, it is the accumulated number of bytes that have been
5152 * transferred for the current packet.
5153 *
5154 * Thus 'plen - len' always gives the correct length of buf2.
5155 */
5156
5157 /* Not GMAC4 and not last descriptor */
5158 if (priv->plat->core_type != DWMAC_CORE_GMAC4 && (status & rx_not_ls))
5159 return priv->dma_conf.dma_buf_sz;
5160
5161 /* GMAC4 or last descriptor */
5162 plen = stmmac_get_rx_frame_len(priv, p, coe);
5163
5164 return plen - len;
5165 }
5166
/* Queue one XDP frame on TX queue @queue. @dma_map selects the
 * ndo_xdp_xmit case (frame must be DMA-mapped here) versus XDP_TX
 * (frame already lives in a DMA-mapped page-pool/XSK page and only
 * needs a sync). Callers (e.g. stmmac_xdp_xmit_back()) serialize via
 * the netdev TX queue lock. Returns STMMAC_XDP_TX on success, or
 * STMMAC_XDP_CONSUMED when the frame cannot be sent (ring too full,
 * over the EST SDU limit, or DMA mapping failure).
 */
static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue,
				struct xdp_frame *xdpf, bool dma_map)
{
	struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue];
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	bool csum = !priv->plat->tx_queues_cfg[queue].coe_unsupported;
	unsigned int entry = tx_q->cur_tx;
	enum stmmac_txbuf_type buf_type;
	struct dma_desc *tx_desc;
	dma_addr_t dma_addr;
	bool set_ic;

	if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv))
		return STMMAC_XDP_CONSUMED;

	/* Enforce the per-queue EST max SDU limit, if configured */
	if (priv->est && priv->est->enable &&
	    priv->est->max_sdu[queue] &&
	    xdpf->len > priv->est->max_sdu[queue]) {
		priv->xstats.max_sdu_txq_drop[queue]++;
		return STMMAC_XDP_CONSUMED;
	}

	tx_desc = stmmac_get_tx_desc(priv, tx_q, entry);
	if (dma_map) {
		dma_addr = dma_map_single(priv->device, xdpf->data,
					  xdpf->len, DMA_TO_DEVICE);
		if (dma_mapping_error(priv->device, dma_addr))
			return STMMAC_XDP_CONSUMED;

		buf_type = STMMAC_TXBUF_T_XDP_NDO;
	} else {
		struct page *page = virt_to_page(xdpf->data);

		/* Page is already DMA-mapped; the payload sits after the
		 * xdp_frame header and its headroom.
		 */
		dma_addr = page_pool_get_dma_addr(page) + sizeof(*xdpf) +
			   xdpf->headroom;
		dma_sync_single_for_device(priv->device, dma_addr,
					   xdpf->len, DMA_BIDIRECTIONAL);

		buf_type = STMMAC_TXBUF_T_XDP_TX;
	}

	stmmac_set_tx_dma_entry(tx_q, entry, buf_type, dma_addr, xdpf->len,
				false);
	stmmac_set_tx_dma_last_segment(tx_q, entry);

	tx_q->xdpf[entry] = xdpf;

	stmmac_set_desc_addr(priv, tx_desc, dma_addr);

	stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len,
			       csum, priv->descriptor_mode, true, true,
			       xdpf->len);

	tx_q->tx_count_frames++;

	/* Match the slow TX path (stmmac_xmit): a zero tx_coal_frames
	 * setting means never request IC from coalescing; this also
	 * guards the modulo below against a division by zero.
	 */
	if (priv->tx_coal_frames[queue] &&
	    tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0)
		set_ic = true;
	else
		set_ic = false;

	if (set_ic) {
		tx_q->tx_count_frames = 0;
		stmmac_set_tx_ic(priv, tx_desc);
		u64_stats_update_begin(&txq_stats->q_syncp);
		u64_stats_inc(&txq_stats->q.tx_set_ic_bit);
		u64_stats_update_end(&txq_stats->q_syncp);
	}

	stmmac_enable_dma_transmission(priv, priv->ioaddr, queue);

	entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_tx_size);
	tx_q->cur_tx = entry;

	return STMMAC_XDP_TX;
}
5242
stmmac_xdp_get_tx_queue(struct stmmac_priv * priv,int cpu)5243 static int stmmac_xdp_get_tx_queue(struct stmmac_priv *priv,
5244 int cpu)
5245 {
5246 int index = cpu;
5247
5248 if (unlikely(index < 0))
5249 index = 0;
5250
5251 while (index >= priv->plat->tx_queues_to_use)
5252 index -= priv->plat->tx_queues_to_use;
5253
5254 return index;
5255 }
5256
/* Transmit an XDP_TX verdict buffer on a TX queue chosen from the
 * current CPU. Returns an STMMAC_XDP_*/STMMAC_XSK_* result code.
 */
static int stmmac_xdp_xmit_back(struct stmmac_priv *priv,
				struct xdp_buff *xdp)
{
	bool zc = !!(xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL);
	struct xdp_frame *xdpf = xdp_convert_buff_to_frame(xdp);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int queue;
	int res;

	if (unlikely(!xdpf))
		return STMMAC_XDP_CONSUMED;

	/* The TX ring is shared with the regular stack, so take the
	 * netdev TX queue lock for the chosen queue.
	 */
	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	/* For zero copy XDP_TX action, dma_map is true */
	res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, zc);
	if (res == STMMAC_XDP_TX) {
		stmmac_flush_tx_descriptors(priv, queue);
	} else if (res == STMMAC_XDP_CONSUMED && zc) {
		/* xdp has been freed by xdp_convert_buff_to_frame(),
		 * no need to call xsk_buff_free() again, so return
		 * STMMAC_XSK_CONSUMED.
		 */
		res = STMMAC_XSK_CONSUMED;
		xdp_return_frame(xdpf);
	}

	__netif_tx_unlock(nq);

	return res;
}
5294
__stmmac_xdp_run_prog(struct stmmac_priv * priv,struct bpf_prog * prog,struct xdp_buff * xdp)5295 static int __stmmac_xdp_run_prog(struct stmmac_priv *priv,
5296 struct bpf_prog *prog,
5297 struct xdp_buff *xdp)
5298 {
5299 u32 act;
5300 int res;
5301
5302 act = bpf_prog_run_xdp(prog, xdp);
5303 switch (act) {
5304 case XDP_PASS:
5305 res = STMMAC_XDP_PASS;
5306 break;
5307 case XDP_TX:
5308 res = stmmac_xdp_xmit_back(priv, xdp);
5309 break;
5310 case XDP_REDIRECT:
5311 if (xdp_do_redirect(priv->dev, xdp, prog) < 0)
5312 res = STMMAC_XDP_CONSUMED;
5313 else
5314 res = STMMAC_XDP_REDIRECT;
5315 break;
5316 default:
5317 bpf_warn_invalid_xdp_action(priv->dev, prog, act);
5318 fallthrough;
5319 case XDP_ABORTED:
5320 trace_xdp_exception(priv->dev, prog, act);
5321 fallthrough;
5322 case XDP_DROP:
5323 res = STMMAC_XDP_CONSUMED;
5324 break;
5325 }
5326
5327 return res;
5328 }
5329
stmmac_xdp_run_prog(struct stmmac_priv * priv,struct xdp_buff * xdp)5330 static struct sk_buff *stmmac_xdp_run_prog(struct stmmac_priv *priv,
5331 struct xdp_buff *xdp)
5332 {
5333 struct bpf_prog *prog;
5334 int res;
5335
5336 prog = READ_ONCE(priv->xdp_prog);
5337 if (!prog) {
5338 res = STMMAC_XDP_PASS;
5339 goto out;
5340 }
5341
5342 res = __stmmac_xdp_run_prog(priv, prog, xdp);
5343 out:
5344 return ERR_PTR(-res);
5345 }
5346
stmmac_finalize_xdp_rx(struct stmmac_priv * priv,int xdp_status)5347 static void stmmac_finalize_xdp_rx(struct stmmac_priv *priv,
5348 int xdp_status)
5349 {
5350 int cpu = smp_processor_id();
5351 int queue;
5352
5353 queue = stmmac_xdp_get_tx_queue(priv, cpu);
5354
5355 if (xdp_status & STMMAC_XDP_TX)
5356 stmmac_tx_timer_arm(priv, queue);
5357
5358 if (xdp_status & STMMAC_XDP_REDIRECT)
5359 xdp_do_flush();
5360 }
5361
stmmac_construct_skb_zc(struct stmmac_channel * ch,struct xdp_buff * xdp)5362 static struct sk_buff *stmmac_construct_skb_zc(struct stmmac_channel *ch,
5363 struct xdp_buff *xdp)
5364 {
5365 unsigned int metasize = xdp->data - xdp->data_meta;
5366 unsigned int datasize = xdp->data_end - xdp->data;
5367 struct sk_buff *skb;
5368
5369 skb = napi_alloc_skb(&ch->rxtx_napi,
5370 xdp->data_end - xdp->data_hard_start);
5371 if (unlikely(!skb))
5372 return NULL;
5373
5374 skb_reserve(skb, xdp->data - xdp->data_hard_start);
5375 memcpy(__skb_put(skb, datasize), xdp->data, datasize);
5376 if (metasize)
5377 skb_metadata_set(skb, metasize);
5378
5379 return skb;
5380 }
5381
/* Build an skb from a zero-copy XSK buffer and hand it to the stack. */
static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue,
				   struct dma_desc *p, struct dma_desc *np,
				   struct xdp_buff *xdp)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned int len = xdp->data_end - xdp->data;
	enum pkt_hash_types hash_type;
	int coe = priv->hw->rx_csum;
	struct sk_buff *skb;
	u32 hash;

	skb = stmmac_construct_skb_zc(ch, xdp);
	if (!skb) {
		priv->xstats.rx_dropped++;
		return;
	}

	stmmac_get_rx_hwtstamp(priv, p, np, skb);
	if (priv->hw->hw_vlan_en)
		/* MAC level stripping. */
		stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
	else
		/* Driver level stripping. */
		stmmac_rx_vlan(priv->dev, skb);
	skb->protocol = eth_type_trans(skb, priv->dev);

	/* Only trust the HW checksum when COE is on and the frame is a
	 * plain IPv4/IPv6 one the engine actually checksums.
	 */
	if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb))
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
		skb_set_hash(skb, hash, hash_type);

	skb_record_rx_queue(skb, queue);
	napi_gro_receive(&ch->rxtx_napi, skb);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.rx_pkt_n);
	u64_stats_add(&rxq_stats->napi.rx_bytes, len);
	u64_stats_update_end(&rxq_stats->napi_syncp);
}
5425
/* Refill up to @budget RX descriptors with fresh XSK buffers. Returns
 * false if the XSK pool ran out of buffers before the budget was met.
 */
static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int entry = rx_q->dirty_rx;
	struct dma_desc *rx_desc = NULL;
	bool ret = true;

	budget = min(budget, stmmac_rx_dirty(priv, queue));

	while (budget-- > 0 && entry != rx_q->cur_rx) {
		struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry];
		dma_addr_t dma_addr;
		bool use_rx_wd;

		if (!buf->xdp) {
			buf->xdp = xsk_buff_alloc(rx_q->xsk_pool);
			if (!buf->xdp) {
				ret = false;
				break;
			}
		}

		rx_desc = stmmac_get_rx_desc(priv, rx_q, entry);

		dma_addr = xsk_buff_xdp_get_dma(buf->xdp);
		stmmac_set_desc_addr(priv, rx_desc, dma_addr);
		stmmac_set_desc_sec_addr(priv, rx_desc, 0, false);
		stmmac_refill_desc3(priv, rx_q, rx_desc);

		rx_q->rx_count_frames++;
		rx_q->rx_count_frames += priv->rx_coal_frames[queue];
		if (rx_q->rx_count_frames > priv->rx_coal_frames[queue])
			rx_q->rx_count_frames = 0;

		/* Request the RX watchdog only when RIWT is in use and frame
		 * coalescing still has frames outstanding.
		 */
		use_rx_wd = !priv->rx_coal_frames[queue];
		use_rx_wd |= rx_q->rx_count_frames > 0;
		if (!priv->use_riwt)
			use_rx_wd = false;

		/* Descriptor fields must be visible before OWN is granted */
		dma_wmb();
		stmmac_set_rx_owner(priv, rx_desc, use_rx_wd);

		entry = STMMAC_NEXT_ENTRY(entry, priv->dma_conf.dma_rx_size);
	}

	/* Only move the tail pointer if at least one descriptor was armed */
	if (rx_desc) {
		rx_q->dirty_rx = entry;
		stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->dirty_rx);
	}

	return ret;
}
5478
/* In the XDP zero-copy path the incoming packet lives in the xdp field
 * of struct xdp_buff_xsk, and the adjacent cb field holds driver
 * context; struct stmmac_xdp_buff is laid out over both, which makes
 * the cast below valid.
 */
static struct stmmac_xdp_buff *xsk_buff_to_stmmac_ctx(struct xdp_buff *xdp)
{
	return (struct stmmac_xdp_buff *)xdp;
}
5488
/* NAPI RX handler for the XDP zero-copy (XSK) data path on @queue.
 * Processes up to @limit frames and returns the number consumed.
 */
static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue)
{
	struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	unsigned int count = 0, error = 0, len = 0;
	int dirty = stmmac_rx_dirty(priv, queue);
	unsigned int next_entry = rx_q->cur_rx;
	u32 rx_errors = 0, rx_dropped = 0;
	unsigned int desc_size;
	struct bpf_prog *prog;
	bool failure = false;
	int xdp_status = 0;
	int status = 0;

	if (netif_msg_rx_status(priv)) {
		void *rx_head = stmmac_get_rx_desc(priv, rx_q, 0);

		netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
		desc_size = stmmac_get_rx_desc_size(priv);

		stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
				    rx_q->dma_rx_phy, desc_size);
	}
	while (count < limit) {
		struct stmmac_rx_buffer *buf;
		struct stmmac_xdp_buff *ctx;
		unsigned int buf1_len = 0;
		struct dma_desc *np, *p;
		int entry;
		int res;

		/* Resume a packet whose descriptors straddled the previous
		 * NAPI budget, if any.
		 */
		if (!count && rx_q->state_saved) {
			error = rx_q->state.error;
			len = rx_q->state.len;
		} else {
			rx_q->state_saved = false;
			error = 0;
			len = 0;
		}

read_again:
		if (count >= limit)
			break;

		buf1_len = 0;
		entry = next_entry;
		buf = &rx_q->buf_pool[entry];

		/* Refill in batches so the ring does not run empty */
		if (dirty >= STMMAC_RX_FILL_BATCH) {
			failure = failure ||
				  !stmmac_rx_refill_zc(priv, queue, dirty);
			dirty = 0;
		}

		p = stmmac_get_rx_desc(priv, rx_q, entry);

		/* read the status of the incoming frame */
		status = stmmac_rx_status(priv, &priv->xstats, p);
		/* check if managed by the DMA otherwise go ahead */
		if (unlikely(status & dma_own))
			break;

		/* Prefetch the next RX descriptor */
		rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
						 priv->dma_conf.dma_rx_size);
		next_entry = rx_q->cur_rx;

		np = stmmac_get_rx_desc(priv, rx_q, next_entry);

		prefetch(np);

		/* Ensure a valid XSK buffer before proceed */
		if (!buf->xdp)
			break;

		if (priv->extend_desc)
			stmmac_rx_extended_status(priv, &priv->xstats,
						  rx_q->dma_erx + entry);
		if (unlikely(status == discard_frame)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			error = 1;
			if (!priv->hwts_rx_en)
				rx_errors++;
		}

		/* Keep consuming descriptors of an errored frame */
		if (unlikely(error && (status & rx_not_ls)))
			goto read_again;
		if (unlikely(error)) {
			count++;
			continue;
		}

		/* XSK pool expects RX frame 1:1 mapped to XSK buffer */
		if (likely(status & rx_not_ls)) {
			xsk_buff_free(buf->xdp);
			buf->xdp = NULL;
			dirty++;
			count++;
			goto read_again;
		}

		ctx = xsk_buff_to_stmmac_ctx(buf->xdp);
		ctx->priv = priv;
		ctx->desc = p;
		ctx->ndesc = np;

		/* XDP ZC Frame only support primary buffers for now */
		buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
		len += buf1_len;

		/* ACS is disabled; strip manually. */
		if (likely(!(status & rx_not_ls))) {
			buf1_len -= ETH_FCS_LEN;
			len -= ETH_FCS_LEN;
		}

		/* RX buffer is good and fit into a XSK pool buffer */
		buf->xdp->data_end = buf->xdp->data + buf1_len;
		xsk_buff_dma_sync_for_cpu(buf->xdp);

		prog = READ_ONCE(priv->xdp_prog);
		res = __stmmac_xdp_run_prog(priv, prog, buf->xdp);

		switch (res) {
		case STMMAC_XDP_PASS:
			stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp);
			xsk_buff_free(buf->xdp);
			break;
		case STMMAC_XDP_CONSUMED:
			xsk_buff_free(buf->xdp);
			fallthrough;
		case STMMAC_XSK_CONSUMED:
			rx_dropped++;
			break;
		case STMMAC_XDP_TX:
		case STMMAC_XDP_REDIRECT:
			xdp_status |= res;
			break;
		}

		buf->xdp = NULL;
		dirty++;
		count++;
	}

	/* Save partial-frame state for the next NAPI run */
	if (status & rx_not_ls) {
		rx_q->state_saved = true;
		rx_q->state.error = error;
		rx_q->state.len = len;
	}

	stmmac_finalize_xdp_rx(priv, xdp_status);

	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	priv->xstats.rx_dropped += rx_dropped;
	priv->xstats.rx_errors += rx_errors;

	if (xsk_uses_need_wakeup(rx_q->xsk_pool)) {
		if (failure || stmmac_rx_dirty(priv, queue) > 0)
			xsk_set_rx_need_wakeup(rx_q->xsk_pool);
		else
			xsk_clear_rx_need_wakeup(rx_q->xsk_pool);

		return (int)count;
	}

	/* Claim the whole budget on refill failure so NAPI repolls */
	return failure ? limit : (int)count;
}
5662
5663 /**
5664 * stmmac_rx - manage the receive process
5665 * @priv: driver private structure
5666 * @limit: napi bugget
5667 * @queue: RX queue index.
5668 * Description : this the function called by the napi poll method.
5669 * It gets all the frames inside the ring.
5670 */
stmmac_rx(struct stmmac_priv * priv,int limit,u32 queue)5671 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue)
5672 {
5673 u32 rx_errors = 0, rx_dropped = 0, rx_bytes = 0, rx_packets = 0;
5674 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue];
5675 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
5676 struct stmmac_channel *ch = &priv->channel[queue];
5677 unsigned int count = 0, error = 0, len = 0;
5678 int status = 0, coe = priv->hw->rx_csum;
5679 unsigned int next_entry = rx_q->cur_rx;
5680 enum dma_data_direction dma_dir;
5681 unsigned int desc_size;
5682 struct sk_buff *skb = NULL;
5683 struct stmmac_xdp_buff ctx;
5684 int xdp_status = 0;
5685 int bufsz;
5686
5687 dma_dir = page_pool_get_dma_dir(rx_q->page_pool);
5688 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE;
5689 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit);
5690
5691 if (netif_msg_rx_status(priv)) {
5692 void *rx_head = stmmac_get_rx_desc(priv, rx_q, 0);
5693
5694 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__);
5695 desc_size = stmmac_get_rx_desc_size(priv);
5696
5697 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true,
5698 rx_q->dma_rx_phy, desc_size);
5699 }
5700 while (count < limit) {
5701 unsigned int buf1_len = 0, buf2_len = 0;
5702 enum pkt_hash_types hash_type;
5703 struct stmmac_rx_buffer *buf;
5704 struct dma_desc *np, *p;
5705 int entry;
5706 u32 hash;
5707
5708 if (!count && rx_q->state_saved) {
5709 skb = rx_q->state.skb;
5710 error = rx_q->state.error;
5711 len = rx_q->state.len;
5712 } else {
5713 rx_q->state_saved = false;
5714 skb = NULL;
5715 error = 0;
5716 len = 0;
5717 }
5718
5719 read_again:
5720 if (count >= limit)
5721 break;
5722
5723 buf1_len = 0;
5724 buf2_len = 0;
5725 entry = next_entry;
5726 buf = &rx_q->buf_pool[entry];
5727
5728 p = stmmac_get_rx_desc(priv, rx_q, entry);
5729
5730 /* read the status of the incoming frame */
5731 status = stmmac_rx_status(priv, &priv->xstats, p);
5732 /* check if managed by the DMA otherwise go ahead */
5733 if (unlikely(status & dma_own))
5734 break;
5735
5736 rx_q->cur_rx = STMMAC_NEXT_ENTRY(rx_q->cur_rx,
5737 priv->dma_conf.dma_rx_size);
5738 next_entry = rx_q->cur_rx;
5739
5740 np = stmmac_get_rx_desc(priv, rx_q, next_entry);
5741
5742 prefetch(np);
5743
5744 if (priv->extend_desc)
5745 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry);
5746 if (unlikely(status == discard_frame)) {
5747 page_pool_put_page(rx_q->page_pool, buf->page, 0, true);
5748 buf->page = NULL;
5749 error = 1;
5750 if (!priv->hwts_rx_en)
5751 rx_errors++;
5752 }
5753
5754 if (unlikely(error && (status & rx_not_ls)))
5755 goto read_again;
5756 if (unlikely(error)) {
5757 dev_kfree_skb(skb);
5758 skb = NULL;
5759 count++;
5760 continue;
5761 }
5762
5763 /* Buffer is good. Go on. */
5764
5765 buf1_len = stmmac_rx_buf1_len(priv, p, status, len);
5766 len += buf1_len;
5767 buf2_len = stmmac_rx_buf2_len(priv, p, status, len);
5768 len += buf2_len;
5769
5770 /* ACS is disabled; strip manually. */
5771 if (likely(!(status & rx_not_ls))) {
5772 if (buf2_len) {
5773 buf2_len -= ETH_FCS_LEN;
5774 len -= ETH_FCS_LEN;
5775 } else if (buf1_len) {
5776 buf1_len -= ETH_FCS_LEN;
5777 len -= ETH_FCS_LEN;
5778 }
5779 }
5780
5781 if (!skb) {
5782 unsigned int pre_len, sync_len;
5783
5784 dma_sync_single_for_cpu(priv->device, buf->addr,
5785 buf1_len, dma_dir);
5786 net_prefetch(page_address(buf->page) +
5787 buf->page_offset);
5788
5789 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq);
5790 xdp_prepare_buff(&ctx.xdp, page_address(buf->page),
5791 buf->page_offset, buf1_len, true);
5792
5793 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5794 buf->page_offset;
5795
5796 ctx.priv = priv;
5797 ctx.desc = p;
5798 ctx.ndesc = np;
5799
5800 skb = stmmac_xdp_run_prog(priv, &ctx.xdp);
5801 /* Due xdp_adjust_tail: DMA sync for_device
5802 * cover max len CPU touch
5803 */
5804 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start -
5805 buf->page_offset;
5806 sync_len = max(sync_len, pre_len);
5807
5808 /* For Not XDP_PASS verdict */
5809 if (IS_ERR(skb)) {
5810 unsigned int xdp_res = -PTR_ERR(skb);
5811
5812 if (xdp_res & STMMAC_XDP_CONSUMED) {
5813 page_pool_put_page(rx_q->page_pool,
5814 virt_to_head_page(ctx.xdp.data),
5815 sync_len, true);
5816 buf->page = NULL;
5817 rx_dropped++;
5818
5819 /* Clear skb as it was set as
5820 * status by XDP program.
5821 */
5822 skb = NULL;
5823
5824 if (unlikely((status & rx_not_ls)))
5825 goto read_again;
5826
5827 count++;
5828 continue;
5829 } else if (xdp_res & (STMMAC_XDP_TX |
5830 STMMAC_XDP_REDIRECT)) {
5831 xdp_status |= xdp_res;
5832 buf->page = NULL;
5833 skb = NULL;
5834 count++;
5835 continue;
5836 }
5837 }
5838 }
5839
5840 if (!skb) {
5841 unsigned int head_pad_len;
5842
5843 /* XDP program may expand or reduce tail */
5844 buf1_len = ctx.xdp.data_end - ctx.xdp.data;
5845
5846 skb = napi_build_skb(page_address(buf->page),
5847 rx_q->napi_skb_frag_size);
5848 if (!skb) {
5849 page_pool_recycle_direct(rx_q->page_pool,
5850 buf->page);
5851 rx_dropped++;
5852 count++;
5853 goto drain_data;
5854 }
5855
5856 /* XDP program may adjust header */
5857 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start;
5858 skb_reserve(skb, head_pad_len);
5859 skb_put(skb, buf1_len);
5860 skb_mark_for_recycle(skb);
5861 buf->page = NULL;
5862 } else if (buf1_len) {
5863 dma_sync_single_for_cpu(priv->device, buf->addr,
5864 buf1_len, dma_dir);
5865 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5866 buf->page, buf->page_offset, buf1_len,
5867 priv->dma_conf.dma_buf_sz);
5868 buf->page = NULL;
5869 }
5870
5871 if (buf2_len) {
5872 dma_sync_single_for_cpu(priv->device, buf->sec_addr,
5873 buf2_len, dma_dir);
5874 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
5875 buf->sec_page, 0, buf2_len,
5876 priv->dma_conf.dma_buf_sz);
5877 buf->sec_page = NULL;
5878 }
5879
5880 drain_data:
5881 if (likely(status & rx_not_ls))
5882 goto read_again;
5883 if (!skb)
5884 continue;
5885
5886 /* Got entire packet into SKB. Finish it. */
5887
5888 stmmac_get_rx_hwtstamp(priv, p, np, skb);
5889
5890 if (priv->hw->hw_vlan_en)
5891 /* MAC level stripping. */
5892 stmmac_rx_hw_vlan(priv, priv->hw, p, skb);
5893 else
5894 /* Driver level stripping. */
5895 stmmac_rx_vlan(priv->dev, skb);
5896
5897 skb->protocol = eth_type_trans(skb, priv->dev);
5898
5899 if (unlikely(!coe) || !stmmac_has_ip_ethertype(skb) ||
5900 (status & csum_none))
5901 skb_checksum_none_assert(skb);
5902 else
5903 skb->ip_summed = CHECKSUM_UNNECESSARY;
5904
5905 if (!stmmac_get_rx_hash(priv, p, &hash, &hash_type))
5906 skb_set_hash(skb, hash, hash_type);
5907
5908 skb_record_rx_queue(skb, queue);
5909 napi_gro_receive(&ch->rx_napi, skb);
5910 skb = NULL;
5911
5912 rx_packets++;
5913 rx_bytes += len;
5914 count++;
5915 }
5916
5917 if (status & rx_not_ls || skb) {
5918 rx_q->state_saved = true;
5919 rx_q->state.skb = skb;
5920 rx_q->state.error = error;
5921 rx_q->state.len = len;
5922 }
5923
5924 stmmac_finalize_xdp_rx(priv, xdp_status);
5925
5926 stmmac_rx_refill(priv, queue);
5927
5928 u64_stats_update_begin(&rxq_stats->napi_syncp);
5929 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets);
5930 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes);
5931 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count);
5932 u64_stats_update_end(&rxq_stats->napi_syncp);
5933
5934 priv->xstats.rx_dropped += rx_dropped;
5935 priv->xstats.rx_errors += rx_errors;
5936
5937 return count;
5938 }
5939
stmmac_napi_poll_rx(struct napi_struct * napi,int budget)5940 static int stmmac_napi_poll_rx(struct napi_struct *napi, int budget)
5941 {
5942 struct stmmac_channel *ch =
5943 container_of(napi, struct stmmac_channel, rx_napi);
5944 struct stmmac_priv *priv = ch->priv_data;
5945 struct stmmac_rxq_stats *rxq_stats;
5946 u32 chan = ch->index;
5947 int work_done;
5948
5949 rxq_stats = &priv->xstats.rxq_stats[chan];
5950 u64_stats_update_begin(&rxq_stats->napi_syncp);
5951 u64_stats_inc(&rxq_stats->napi.poll);
5952 u64_stats_update_end(&rxq_stats->napi_syncp);
5953
5954 work_done = stmmac_rx(priv, budget, chan);
5955 if (work_done < budget && napi_complete_done(napi, work_done)) {
5956 unsigned long flags;
5957
5958 spin_lock_irqsave(&ch->lock, flags);
5959 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0);
5960 spin_unlock_irqrestore(&ch->lock, flags);
5961 }
5962
5963 return work_done;
5964 }
5965
stmmac_napi_poll_tx(struct napi_struct * napi,int budget)5966 static int stmmac_napi_poll_tx(struct napi_struct *napi, int budget)
5967 {
5968 struct stmmac_channel *ch =
5969 container_of(napi, struct stmmac_channel, tx_napi);
5970 struct stmmac_priv *priv = ch->priv_data;
5971 struct stmmac_txq_stats *txq_stats;
5972 bool pending_packets = false;
5973 u32 chan = ch->index;
5974 int work_done;
5975
5976 txq_stats = &priv->xstats.txq_stats[chan];
5977 u64_stats_update_begin(&txq_stats->napi_syncp);
5978 u64_stats_inc(&txq_stats->napi.poll);
5979 u64_stats_update_end(&txq_stats->napi_syncp);
5980
5981 work_done = stmmac_tx_clean(priv, budget, chan, &pending_packets);
5982 work_done = min(work_done, budget);
5983
5984 if (work_done < budget && napi_complete_done(napi, work_done)) {
5985 unsigned long flags;
5986
5987 spin_lock_irqsave(&ch->lock, flags);
5988 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1);
5989 spin_unlock_irqrestore(&ch->lock, flags);
5990 }
5991
5992 /* TX still have packet to handle, check if we need to arm tx timer */
5993 if (pending_packets)
5994 stmmac_tx_timer_arm(priv, chan);
5995
5996 return work_done;
5997 }
5998
/* Combined RX/TX NAPI handler used for zero-copy (XSK) channels: one poll
 * reclaims TX descriptors and processes zero-copy RX, then re-enables both
 * DMA interrupt directions only when all work completed within @budget.
 */
static int stmmac_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct stmmac_channel *ch =
		container_of(napi, struct stmmac_channel, rxtx_napi);
	struct stmmac_priv *priv = ch->priv_data;
	bool tx_pending_packets = false;
	int rx_done, tx_done, rxtx_done;
	struct stmmac_rxq_stats *rxq_stats;
	struct stmmac_txq_stats *txq_stats;
	u32 chan = ch->index;

	/* One shared poll services both directions, so both per-queue poll
	 * counters are bumped.
	 */
	rxq_stats = &priv->xstats.rxq_stats[chan];
	u64_stats_update_begin(&rxq_stats->napi_syncp);
	u64_stats_inc(&rxq_stats->napi.poll);
	u64_stats_update_end(&rxq_stats->napi_syncp);

	txq_stats = &priv->xstats.txq_stats[chan];
	u64_stats_update_begin(&txq_stats->napi_syncp);
	u64_stats_inc(&txq_stats->napi.poll);
	u64_stats_update_end(&txq_stats->napi_syncp);

	tx_done = stmmac_tx_clean(priv, budget, chan, &tx_pending_packets);
	tx_done = min(tx_done, budget);

	rx_done = stmmac_rx_zc(priv, budget, chan);

	rxtx_done = max(tx_done, rx_done);

	/* If either TX or RX work is not complete, return budget
	 * and keep pooling
	 */
	if (rxtx_done >= budget)
		return budget;

	/* all work done, exit the polling mode */
	if (napi_complete_done(napi, rxtx_done)) {
		unsigned long flags;

		spin_lock_irqsave(&ch->lock, flags);
		/* Both RX and TX work done are complete,
		 * so enable both RX & TX IRQs.
		 */
		stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* TX still have packet to handle, check if we need to arm tx timer */
	if (tx_pending_packets)
		stmmac_tx_timer_arm(priv, chan);

	/* NAPI requires that a completed poll reports strictly less than
	 * budget, hence the budget - 1 cap.
	 */
	return min(rxtx_done, budget - 1);
}
6051
6052 /**
6053 * stmmac_tx_timeout
6054 * @dev : Pointer to net device structure
6055 * @txqueue: the index of the hanging transmit queue
6056 * Description: this function is called when a packet transmission fails to
6057 * complete within a reasonable time. The driver will mark the error in the
6058 * netdev structure and arrange for the device to be reset to a sane state
6059 * in order to transmit a new packet.
6060 */
static void stmmac_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Any hung TX queue is treated as a global fault: escalate so the
	 * device can be reset to a sane state (see kernel-doc above).
	 * @txqueue is intentionally unused - recovery is device-wide.
	 */
	stmmac_global_err(priv);
}
6067
6068 /**
6069 * stmmac_set_rx_mode - entry point for multicast addressing
6070 * @dev : pointer to the device structure
6071 * Description:
6072 * This function is a driver entry point which gets called by the kernel
6073 * whenever multicast addresses must be enabled/disabled.
6074 * Return value:
6075 * void.
6076 *
6077 * FIXME: This may need RXC to be running, but it may be called with BH
6078 * disabled, which means we can't call phylink_rx_clk_stop*().
6079 */
static void stmmac_set_rx_mode(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Push the netdev's current RX filter configuration to the MAC */
	stmmac_set_filter(priv, priv->hw, dev);
}
6086
6087 /**
6088 * stmmac_change_mtu - entry point to change MTU size for the device.
6089 * @dev : device pointer.
6090 * @new_mtu : the new MTU size for the device.
6091 * Description: the Maximum Transfer Unit (MTU) is used by the network layer
6092 * to drive packet transmission. Ethernet has an MTU of 1500 octets
6093 * (ETH_DATA_LEN). This value can be changed with ifconfig.
6094 * Return value:
6095 * 0 on success and an appropriate (-)ve integer as defined in errno.h
6096 * file on failure.
6097 */
static int stmmac_change_mtu(struct net_device *dev, int new_mtu)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int txfifosz = priv->plat->tx_fifo_size;
	struct stmmac_dma_conf *dma_conf;
	/* Preserve the caller's MTU before new_mtu is rounded for the FIFO
	 * check below; this exact value is what gets committed to dev->mtu.
	 */
	const int mtu = new_mtu;
	int ret;

	/* Fall back to the FIFO size reported by the HW capability register
	 * when the platform did not provide one.
	 */
	if (txfifosz == 0)
		txfifosz = priv->dma_cap.tx_fifo_size;

	/* The TX FIFO is shared between all TX queues */
	txfifosz /= priv->plat->tx_queues_to_use;

	if (stmmac_xdp_is_enabled(priv) && new_mtu > ETH_DATA_LEN) {
		netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n");
		return -EINVAL;
	}

	new_mtu = STMMAC_ALIGN(new_mtu);

	/* If condition true, FIFO is too small or MTU too large */
	if ((txfifosz < new_mtu) || (new_mtu > BUF_SIZE_16KiB))
		return -EINVAL;

	if (netif_running(dev)) {
		netdev_dbg(priv->dev, "restarting interface to change its MTU\n");
		/* Try to allocate the new DMA conf with the new mtu before
		 * tearing anything down, so failure leaves the running
		 * interface untouched.
		 */
		dma_conf = stmmac_setup_dma_desc(priv, mtu);
		if (IS_ERR(dma_conf)) {
			netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n",
				   mtu);
			return PTR_ERR(dma_conf);
		}

		__stmmac_release(dev);

		ret = __stmmac_open(dev, dma_conf);
		if (ret) {
			free_dma_desc_resources(priv, dma_conf);
			kfree(dma_conf);
			netdev_err(priv->dev, "failed reopening the interface after MTU change\n");
			return ret;
		}

		/* __stmmac_open() copied the conf; only the container is
		 * freed here, not the descriptor resources.
		 */
		kfree(dma_conf);

		stmmac_set_rx_mode(dev);
	}

	WRITE_ONCE(dev->mtu, mtu);
	netdev_update_features(dev);

	return 0;
}
6152
/* ndo_fix_features: mask out offload bits the hardware cannot honour in
 * its current configuration.
 */
static netdev_features_t stmmac_fix_features(struct net_device *dev,
					     netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	bool drop_tx_csum;

	if (priv->plat->rx_coe == STMMAC_RX_COE_NONE)
		features &= ~NETIF_F_RXCSUM;

	/* TX checksum insertion needs TX COE. Some GMAC devices also have a
	 * bugged Jumbo frame support that needs the Tx COE disabled for
	 * oversized frames (due to limited buffer sizes); in that case the
	 * TX csum insertion in the TDES is disabled as well.
	 */
	drop_tx_csum = !priv->plat->tx_coe ||
		       (priv->plat->bugged_jumbo && dev->mtu > ETH_DATA_LEN);
	if (drop_tx_csum)
		features &= ~NETIF_F_CSUM_MASK;

	return features;
}
6174
/* ndo_set_features: apply the negotiated feature set to the hardware
 * (RX checksum offload, split-header, GSO types and VLAN stripping mode).
 */
static int stmmac_set_features(struct net_device *netdev,
			       netdev_features_t features)
{
	struct stmmac_priv *priv = netdev_priv(netdev);

	/* Keep the COE Type in case of csum is supporting */
	if (features & NETIF_F_RXCSUM)
		priv->hw->rx_csum = priv->plat->rx_coe;
	else
		priv->hw->rx_csum = 0;
	/* No check needed because rx_coe has been set before and it will be
	 * fixed in case of issue.
	 */
	stmmac_rx_ipc(priv, priv->hw);

	if (priv->sph_capable) {
		/* Split Header is only used together with RX checksum
		 * offload; update it on every RX channel.
		 */
		bool sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;
		u8 chan;

		for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++)
			stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	stmmac_set_gso_types(priv, features & NETIF_F_TSO);

	if (features & NETIF_F_HW_VLAN_CTAG_RX)
		priv->hw->hw_vlan_en = true;
	else
		priv->hw->hw_vlan_en = false;

	/* Programming the VLAN mode needs the RX clock running; block
	 * phylink from stopping it for the duration of the write.
	 */
	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_hw_vlan_mode(priv, priv->hw);
	phylink_rx_clk_stop_unblock(priv->phylink);

	return 0;
}
6211
/* Handle the non-DMA interrupt sources shared by all stmmac IRQ flavours:
 * wake-up accounting, EST, FPE and - on GMAC/XMAC cores - MAC core, per-queue
 * MTL and timestamp interrupts.
 */
static void stmmac_common_interrupt(struct stmmac_priv *priv)
{
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	u8 queues_count;
	bool xmac;
	u8 queue;

	xmac = dwmac_is_xmac(priv->plat->core_type);
	/* MTL status must be polled for every queue in use, RX or TX */
	queues_count = (rx_cnt > tx_cnt) ? rx_cnt : tx_cnt;

	if (priv->irq_wake)
		pm_wakeup_event(priv->device, 0);

	if (priv->dma_cap.estsel)
		stmmac_est_irq_status(priv, priv, priv->dev,
				      &priv->xstats, tx_cnt);

	if (stmmac_fpe_supported(priv))
		stmmac_fpe_irq_status(priv);

	/* To handle GMAC own interrupts */
	if (priv->plat->core_type == DWMAC_CORE_GMAC || xmac) {
		int status = stmmac_host_irq_status(priv, &priv->xstats);

		if (unlikely(status)) {
			/* For LPI we need to save the tx status */
			if (status & CORE_IRQ_TX_PATH_IN_LPI_MODE)
				priv->tx_path_in_lpi_mode = true;
			if (status & CORE_IRQ_TX_PATH_EXIT_LPI_MODE)
				priv->tx_path_in_lpi_mode = false;
		}

		for (queue = 0; queue < queues_count; queue++)
			stmmac_host_mtl_irq_status(priv, priv->hw, queue);

		stmmac_timestamp_interrupt(priv, priv);
	}
}
6251
6252 /**
6253 * stmmac_interrupt - main ISR
6254 * @irq: interrupt number.
6255 * @dev_id: to pass the net device pointer.
6256 * Description: this is the main driver interrupt service routine.
6257 * It can call:
6258 * o DMA service routine (to manage incoming frame reception and transmission
6259 * status)
6260 * o Core interrupts to manage: remote wake-up, management counter, LPI
6261 * interrupts.
6262 */
stmmac_interrupt(int irq,void * dev_id)6263 static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
6264 {
6265 struct net_device *dev = (struct net_device *)dev_id;
6266 struct stmmac_priv *priv = netdev_priv(dev);
6267
6268 /* Check if adapter is up */
6269 if (test_bit(STMMAC_DOWN, &priv->state))
6270 return IRQ_HANDLED;
6271
6272 /* Check ASP error if it isn't delivered via an individual IRQ */
6273 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv))
6274 return IRQ_HANDLED;
6275
6276 /* To handle Common interrupts */
6277 stmmac_common_interrupt(priv);
6278
6279 /* To handle DMA interrupts */
6280 stmmac_dma_interrupt(priv);
6281
6282 return IRQ_HANDLED;
6283 }
6284
stmmac_mac_interrupt(int irq,void * dev_id)6285 static irqreturn_t stmmac_mac_interrupt(int irq, void *dev_id)
6286 {
6287 struct net_device *dev = (struct net_device *)dev_id;
6288 struct stmmac_priv *priv = netdev_priv(dev);
6289
6290 /* Check if adapter is up */
6291 if (test_bit(STMMAC_DOWN, &priv->state))
6292 return IRQ_HANDLED;
6293
6294 /* To handle Common interrupts */
6295 stmmac_common_interrupt(priv);
6296
6297 return IRQ_HANDLED;
6298 }
6299
stmmac_safety_interrupt(int irq,void * dev_id)6300 static irqreturn_t stmmac_safety_interrupt(int irq, void *dev_id)
6301 {
6302 struct net_device *dev = (struct net_device *)dev_id;
6303 struct stmmac_priv *priv = netdev_priv(dev);
6304
6305 /* Check if adapter is up */
6306 if (test_bit(STMMAC_DOWN, &priv->state))
6307 return IRQ_HANDLED;
6308
6309 /* Check if a fatal error happened */
6310 stmmac_safety_feat_interrupt(priv);
6311
6312 return IRQ_HANDLED;
6313 }
6314
/* Per-TX-queue MSI handler: recover the owning stmmac_priv from the queue
 * pointer, schedule NAPI for the channel and react to TX error status.
 */
static irqreturn_t stmmac_msi_intr_tx(int irq, void *data)
{
	struct stmmac_tx_queue *tx_q = (struct stmmac_tx_queue *)data;
	struct stmmac_dma_conf *dma_conf;
	int chan = tx_q->queue_index;
	struct stmmac_priv *priv;
	int status;

	/* Walk back from the queue to its dma_conf, then to the priv that
	 * embeds it - no back-pointer is stored in the queue itself.
	 */
	dma_conf = container_of(tx_q, struct stmmac_dma_conf, tx_queue[chan]);
	priv = container_of(dma_conf, struct stmmac_priv, dma_conf);

	/* Check if adapter is up */
	if (test_bit(STMMAC_DOWN, &priv->state))
		return IRQ_HANDLED;

	status = stmmac_napi_check(priv, chan, DMA_DIR_TX);

	if (unlikely(status & tx_hard_error_bump_tc)) {
		/* Try to bump up the dma threshold on this failure */
		stmmac_bump_dma_threshold(priv, chan);
	} else if (unlikely(status == tx_hard_error)) {
		stmmac_tx_err(priv, chan);
	}

	return IRQ_HANDLED;
}
6341
stmmac_msi_intr_rx(int irq,void * data)6342 static irqreturn_t stmmac_msi_intr_rx(int irq, void *data)
6343 {
6344 struct stmmac_rx_queue *rx_q = (struct stmmac_rx_queue *)data;
6345 struct stmmac_dma_conf *dma_conf;
6346 int chan = rx_q->queue_index;
6347 struct stmmac_priv *priv;
6348
6349 dma_conf = container_of(rx_q, struct stmmac_dma_conf, rx_queue[chan]);
6350 priv = container_of(dma_conf, struct stmmac_priv, dma_conf);
6351
6352 /* Check if adapter is up */
6353 if (test_bit(STMMAC_DOWN, &priv->state))
6354 return IRQ_HANDLED;
6355
6356 stmmac_napi_check(priv, chan, DMA_DIR_RX);
6357
6358 return IRQ_HANDLED;
6359 }
6360
6361 /**
6362 * stmmac_ioctl - Entry point for the Ioctl
6363 * @dev: Device pointer.
6364 * @rq: An IOCTL specific structure, that can contain a pointer to
6365 * a proprietary structure used to pass information to the driver.
6366 * @cmd: IOCTL command
6367 * Description:
6368 * Currently it supports the phy_mii_ioctl(...) and HW time stamping.
6369 */
stmmac_ioctl(struct net_device * dev,struct ifreq * rq,int cmd)6370 static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
6371 {
6372 struct stmmac_priv *priv = netdev_priv (dev);
6373 int ret = -EOPNOTSUPP;
6374
6375 if (!netif_running(dev))
6376 return -EINVAL;
6377
6378 switch (cmd) {
6379 case SIOCGMIIPHY:
6380 case SIOCGMIIREG:
6381 case SIOCSMIIREG:
6382 ret = phylink_mii_ioctl(priv->phylink, rq, cmd);
6383 break;
6384 default:
6385 break;
6386 }
6387
6388 return ret;
6389 }
6390
stmmac_setup_tc_block_cb(enum tc_setup_type type,void * type_data,void * cb_priv)6391 static int stmmac_setup_tc_block_cb(enum tc_setup_type type, void *type_data,
6392 void *cb_priv)
6393 {
6394 struct stmmac_priv *priv = cb_priv;
6395 int ret = -EOPNOTSUPP;
6396
6397 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data))
6398 return ret;
6399
6400 __stmmac_disable_all_queues(priv);
6401
6402 switch (type) {
6403 case TC_SETUP_CLSU32:
6404 ret = stmmac_tc_setup_cls_u32(priv, priv, type_data);
6405 break;
6406 case TC_SETUP_CLSFLOWER:
6407 ret = stmmac_tc_setup_cls(priv, priv, type_data);
6408 break;
6409 default:
6410 break;
6411 }
6412
6413 stmmac_enable_all_queues(priv);
6414 return ret;
6415 }
6416
/* flow_block callbacks registered via flow_block_cb_setup_simple() below */
static LIST_HEAD(stmmac_block_cb_list);
6418
/* ndo_setup_tc: pure dispatcher from the TC setup type to the matching
 * stmmac TC offload helper.
 */
static int stmmac_setup_tc(struct net_device *ndev, enum tc_setup_type type,
			   void *type_data)
{
	struct stmmac_priv *priv = netdev_priv(ndev);

	switch (type) {
	case TC_QUERY_CAPS:
		return stmmac_tc_query_caps(priv, priv, type_data);
	case TC_SETUP_QDISC_MQPRIO:
		return stmmac_tc_setup_mqprio(priv, priv, type_data);
	case TC_SETUP_BLOCK:
		return flow_block_cb_setup_simple(type_data,
						  &stmmac_block_cb_list,
						  stmmac_setup_tc_block_cb,
						  priv, priv, true);
	case TC_SETUP_QDISC_CBS:
		return stmmac_tc_setup_cbs(priv, priv, type_data);
	case TC_SETUP_QDISC_TAPRIO:
		return stmmac_tc_setup_taprio(priv, priv, type_data);
	case TC_SETUP_QDISC_ETF:
		return stmmac_tc_setup_etf(priv, priv, type_data);
	default:
		return -EOPNOTSUPP;
	}
}
6444
stmmac_select_queue(struct net_device * dev,struct sk_buff * skb,struct net_device * sb_dev)6445 static u16 stmmac_select_queue(struct net_device *dev, struct sk_buff *skb,
6446 struct net_device *sb_dev)
6447 {
6448 int gso = skb_shinfo(skb)->gso_type;
6449
6450 if (gso & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6 | SKB_GSO_UDP_L4)) {
6451 /*
6452 * There is no way to determine the number of TSO/USO
6453 * capable Queues. Let's use always the Queue 0
6454 * because if TSO/USO is supported then at least this
6455 * one will be capable.
6456 */
6457 return 0;
6458 }
6459
6460 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues;
6461 }
6462
/* ndo_set_mac_address: update dev_addr and program the new address into
 * the MAC, with the device runtime-resumed for the register writes.
 */
static int stmmac_set_mac_address(struct net_device *ndev, void *addr)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	int ret = 0;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* Validates the address and copies it into ndev->dev_addr */
	ret = eth_mac_addr(ndev, addr);
	if (ret)
		goto set_mac_error;

	/* Writing the address register needs the RX clock running */
	phylink_rx_clk_stop_block(priv->phylink);
	stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0);
	phylink_rx_clk_stop_unblock(priv->phylink);

set_mac_error:
	pm_runtime_put(priv->device);

	return ret;
}
6485
6486 #ifdef CONFIG_DEBUG_FS
6487 static struct dentry *stmmac_fs_dir;
6488
/* Dump @size descriptors starting at @head to @seq, one line per entry,
 * showing the DMA address and the four basic descriptor words. With
 * @extend_desc the ring is laid out as dma_extended_desc entries, but only
 * the embedded basic words are printed.
 */
static void sysfs_display_ring(void *head, int size, int extend_desc,
			       struct seq_file *seq, dma_addr_t dma_phy_addr)
{
	struct dma_extended_desc *ep = head;
	struct dma_desc *p = head;
	unsigned int desc_size = extend_desc ? sizeof(*ep) : sizeof(*p);
	int i;

	for (i = 0; i < size; i++) {
		dma_addr_t dma_addr = dma_phy_addr + i * desc_size;

		seq_printf(seq, "%d [%pad]: 0x%x 0x%x 0x%x 0x%x\n",
			   i, &dma_addr,
			   le32_to_cpu(p->des0), le32_to_cpu(p->des1),
			   le32_to_cpu(p->des2), le32_to_cpu(p->des3));

		/* Step to the next entry in whichever layout is in use */
		if (extend_desc) {
			ep++;
			p = &ep->basic;
		} else {
			p++;
		}
	}
}
6511
/* debugfs "descriptors_status" show handler: dump every RX and TX DMA
 * descriptor ring. Nothing is printed while the interface is down, since
 * the rings only exist on an open device.
 */
static int stmmac_rings_status_show(struct seq_file *seq, void *v)
{
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_count = priv->plat->rx_queues_to_use;
	u8 tx_count = priv->plat->tx_queues_to_use;
	u8 queue;

	if ((dev->flags & IFF_UP) == 0)
		return 0;

	for (queue = 0; queue < rx_count; queue++) {
		struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

		seq_printf(seq, "RX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_erx,
					   priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy);
		} else {
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)rx_q->dma_rx,
					   priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy);
		}
	}

	for (queue = 0; queue < tx_count; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

		seq_printf(seq, "TX Queue %d:\n", queue);

		if (priv->extend_desc) {
			seq_printf(seq, "Extended descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_etx,
					   priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy);
		} else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) {
			/* TBS-capable rings are skipped here - NOTE(review):
			 * presumably because their enhanced descriptor layout
			 * doesn't match this dumper; confirm.
			 */
			seq_printf(seq, "Descriptor ring:\n");
			sysfs_display_ring((void *)tx_q->dma_tx,
					   priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy);
		}
	}

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_rings_status);
6558
/* debugfs "dma_cap" show handler: print the hardware capabilities parsed
 * into priv->dma_cap at probe time in human-readable form. Some fields are
 * stored as log2-encoded sizes and expanded here via BIT() shifts.
 */
static int stmmac_dma_cap_show(struct seq_file *seq, void *v)
{
	/* XGMAC timestamp source field (tssrc) decode table */
	static const char * const dwxgmac_timestamp_source[] = {
		"None",
		"Internal",
		"External",
		"Both",
	};
	/* Safety-feature level (asp) decode table; values 5-7 are reserved */
	static const char * const dwxgmac_safety_feature_desc[] = {
		"No",
		"All Safety Features with ECC and Parity",
		"All Safety Features without ECC or Parity",
		"All Safety Features with Parity Only",
		"ECC Only",
		"UNDEFINED",
		"UNDEFINED",
		"UNDEFINED",
	};
	struct net_device *dev = seq->private;
	struct stmmac_priv *priv = netdev_priv(dev);

	if (!priv->hw_cap_support) {
		seq_printf(seq, "DMA HW features not supported\n");
		return 0;
	}

	seq_printf(seq, "==============================\n");
	seq_printf(seq, "\tDMA HW features\n");
	seq_printf(seq, "==============================\n");

	seq_printf(seq, "\t10/100 Mbps: %s\n",
		   (priv->dma_cap.mbps_10_100) ? "Y" : "N");
	seq_printf(seq, "\t1000 Mbps: %s\n",
		   (priv->dma_cap.mbps_1000) ? "Y" : "N");
	seq_printf(seq, "\tHalf duplex: %s\n",
		   (priv->dma_cap.half_duplex) ? "Y" : "N");
	/* On XGMAC multi_addr is a count; on older cores it is a flag */
	if (priv->plat->core_type == DWMAC_CORE_XGMAC) {
		seq_printf(seq,
			   "\tNumber of Additional MAC address registers: %d\n",
			   priv->dma_cap.multi_addr);
	} else {
		seq_printf(seq, "\tHash Filter: %s\n",
			   (priv->dma_cap.hash_filter) ? "Y" : "N");
		seq_printf(seq, "\tMultiple MAC address registers: %s\n",
			   (priv->dma_cap.multi_addr) ? "Y" : "N");
	}
	seq_printf(seq, "\tPCS (TBI/SGMII/RTBI PHY interfaces): %s\n",
		   (priv->dma_cap.pcs) ? "Y" : "N");
	seq_printf(seq, "\tSMA (MDIO) Interface: %s\n",
		   (priv->dma_cap.sma_mdio) ? "Y" : "N");
	seq_printf(seq, "\tPMT Remote wake up: %s\n",
		   (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N");
	seq_printf(seq, "\tPMT Magic Frame: %s\n",
		   (priv->dma_cap.pmt_magic_frame) ? "Y" : "N");
	seq_printf(seq, "\tRMON module: %s\n",
		   (priv->dma_cap.rmon) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n",
		   (priv->dma_cap.time_stamp) ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n",
		   (priv->dma_cap.atime_stamp) ? "Y" : "N");
	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
		seq_printf(seq, "\tTimestamp System Time Source: %s\n",
			   dwxgmac_timestamp_source[priv->dma_cap.tssrc]);
	seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n",
		   (priv->dma_cap.eee) ? "Y" : "N");
	seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N");
	seq_printf(seq, "\tChecksum Offload in TX: %s\n",
		   (priv->dma_cap.tx_coe) ? "Y" : "N");
	/* RX COE reporting changed with core 4.00 / XGMAC */
	if (priv->synopsys_id >= DWMAC_CORE_4_00 ||
	    priv->plat->core_type == DWMAC_CORE_XGMAC) {
		seq_printf(seq, "\tIP Checksum Offload in RX: %s\n",
			   (priv->dma_cap.rx_coe) ? "Y" : "N");
	} else {
		seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type1) ? "Y" : "N");
		seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n",
			   (priv->dma_cap.rx_coe_type2) ? "Y" : "N");
		seq_printf(seq, "\tRXFIFO > 2048bytes: %s\n",
			   (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N");
	}
	seq_printf(seq, "\tNumber of Additional RX channel: %d\n",
		   priv->dma_cap.number_rx_channel);
	seq_printf(seq, "\tNumber of Additional TX channel: %d\n",
		   priv->dma_cap.number_tx_channel);
	seq_printf(seq, "\tNumber of Additional RX queues: %u\n",
		   priv->dma_cap.number_rx_queues);
	seq_printf(seq, "\tNumber of Additional TX queues: %u\n",
		   priv->dma_cap.number_tx_queues);
	seq_printf(seq, "\tEnhanced descriptors: %s\n",
		   (priv->dma_cap.enh_desc) ? "Y" : "N");
	seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size);
	seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size);
	/* hash_tb_sz is log2-encoded: size = 2^hash_tb_sz * 32, 0 = none */
	seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ?
		   (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0);
	seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N");
	seq_printf(seq, "\tNumber of PPS Outputs: %d\n",
		   priv->dma_cap.pps_out_num);
	seq_printf(seq, "\tSafety Features: %s\n",
		   dwxgmac_safety_feature_desc[priv->dma_cap.asp]);
	seq_printf(seq, "\tFlexible RX Parser: %s\n",
		   priv->dma_cap.frpsel ? "Y" : "N");
	seq_printf(seq, "\tEnhanced Addressing: %d\n",
		   priv->dma_cap.host_dma_width);
	seq_printf(seq, "\tReceive Side Scaling: %s\n",
		   priv->dma_cap.rssen ? "Y" : "N");
	seq_printf(seq, "\tVLAN Hash Filtering: %s\n",
		   priv->dma_cap.vlhash ? "Y" : "N");
	seq_printf(seq, "\tSplit Header: %s\n",
		   priv->dma_cap.sphen ? "Y" : "N");
	seq_printf(seq, "\tVLAN TX Insertion: %s\n",
		   priv->dma_cap.vlins ? "Y" : "N");
	seq_printf(seq, "\tDouble VLAN: %s\n",
		   priv->dma_cap.dvlan ? "Y" : "N");
	seq_printf(seq, "\tNumber of L3/L4 Filters: %d\n",
		   priv->dma_cap.l3l4fnum);
	seq_printf(seq, "\tARP Offloading: %s\n",
		   priv->dma_cap.arpoffsel ? "Y" : "N");
	seq_printf(seq, "\tEnhancements to Scheduled Traffic (EST): %s\n",
		   priv->dma_cap.estsel ? "Y" : "N");
	seq_printf(seq, "\tFrame Preemption (FPE): %s\n",
		   priv->dma_cap.fpesel ? "Y" : "N");
	seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n",
		   priv->dma_cap.tbssel ? "Y" : "N");
	seq_printf(seq, "\tNumber of DMA Channels Enabled for TBS: %d\n",
		   priv->dma_cap.tbs_ch_num);
	seq_printf(seq, "\tPer-Stream Filtering: %s\n",
		   priv->dma_cap.sgfsel ? "Y" : "N");
	seq_printf(seq, "\tTX Timestamp FIFO Depth: %lu\n",
		   BIT(priv->dma_cap.ttsfd) >> 1);
	seq_printf(seq, "\tNumber of Traffic Classes: %d\n",
		   priv->dma_cap.numtc);
	seq_printf(seq, "\tDCB Feature: %s\n",
		   priv->dma_cap.dcben ? "Y" : "N");
	seq_printf(seq, "\tIEEE 1588 High Word Register: %s\n",
		   priv->dma_cap.advthword ? "Y" : "N");
	seq_printf(seq, "\tPTP Offload: %s\n",
		   priv->dma_cap.ptoen ? "Y" : "N");
	seq_printf(seq, "\tOne-Step Timestamping: %s\n",
		   priv->dma_cap.osten ? "Y" : "N");
	seq_printf(seq, "\tPriority-Based Flow Control: %s\n",
		   priv->dma_cap.pfcen ? "Y" : "N");
	seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n",
		   BIT(priv->dma_cap.frpes) << 6);
	seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n",
		   BIT(priv->dma_cap.frpbs) << 6);
	seq_printf(seq, "\tParallel Instruction Processor Engines: %d\n",
		   priv->dma_cap.frppipe_num);
	seq_printf(seq, "\tNumber of Extended VLAN Tag Filters: %lu\n",
		   priv->dma_cap.nrvf_num ?
		   (BIT(priv->dma_cap.nrvf_num) << 1) : 0);
	seq_printf(seq, "\tWidth of the Time Interval Field in GCL: %d\n",
		   priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0);
	seq_printf(seq, "\tDepth of GCL: %lu\n",
		   priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0);
	seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n",
		   priv->dma_cap.cbtisel ? "Y" : "N");
	seq_printf(seq, "\tNumber of Auxiliary Snapshot Inputs: %d\n",
		   priv->dma_cap.aux_snapshot_n);
	seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n",
		   priv->dma_cap.pou_ost_en ? "Y" : "N");
	seq_printf(seq, "\tEnhanced DMA: %s\n",
		   priv->dma_cap.edma ? "Y" : "N");
	seq_printf(seq, "\tDifferent Descriptor Cache: %s\n",
		   priv->dma_cap.ediffc ? "Y" : "N");
	seq_printf(seq, "\tVxLAN/NVGRE: %s\n",
		   priv->dma_cap.vxn ? "Y" : "N");
	seq_printf(seq, "\tDebug Memory Interface: %s\n",
		   priv->dma_cap.dbgmem ? "Y" : "N");
	seq_printf(seq, "\tNumber of Policing Counters: %lu\n",
		   priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stmmac_dma_cap);
6732
/* Use network device events to rename debugfs file entries.
 */
static int stmmac_device_event(struct notifier_block *unused,
			       unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Ignore netdevs that are not driven by this driver */
	if (dev->netdev_ops != &stmmac_netdev_ops)
		goto done;

	switch (event) {
	case NETDEV_CHANGENAME:
		/* Keep the per-netdev debugfs directory name in sync
		 * with the new interface name.
		 */
		debugfs_change_name(priv->dbgfs_dir, "%s", dev->name);
		break;
	}
done:
	return NOTIFY_DONE;
}
6752
/* Netdevice notifier used to track interface renames; handled by
 * stmmac_device_event().
 */
static struct notifier_block stmmac_notifier = {
	.notifier_call = stmmac_device_event,
};
6756
/* Create the per-netdev debugfs directory and its read-only entries
 * (descriptor ring status and DMA HW capability dump).
 */
static void stmmac_init_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* NOTE(review): rtnl presumably serialises against a concurrent
	 * rename of dev->name while the directory is created — confirm.
	 */
	rtnl_lock();

	/* Create per netdev entries */
	priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);

	/* Entry to report DMA RX/TX rings */
	debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev,
			    &stmmac_rings_status_fops);

	/* Entry to report the DMA HW features */
	debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev,
			    &stmmac_dma_cap_fops);

	rtnl_unlock();
}
6776
/* Tear down the per-netdev debugfs directory created by stmmac_init_fs() */
static void stmmac_exit_fs(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	debugfs_remove_recursive(priv->dbgfs_dir);
}
6783 #endif /* CONFIG_DEBUG_FS */
6784
stmmac_vid_crc32_le(__le16 vid_le)6785 static u32 stmmac_vid_crc32_le(__le16 vid_le)
6786 {
6787 unsigned char *data = (unsigned char *)&vid_le;
6788 unsigned char data_byte = 0;
6789 u32 crc = ~0x0;
6790 u32 temp = 0;
6791 int i, bits;
6792
6793 bits = get_bitmask_order(VLAN_VID_MASK);
6794 for (i = 0; i < bits; i++) {
6795 if ((i % 8) == 0)
6796 data_byte = data[i / 8];
6797
6798 temp = ((crc & 1) ^ data_byte) & 1;
6799 crc >>= 1;
6800 data_byte >>= 1;
6801
6802 if (temp)
6803 crc ^= 0xedb88320;
6804 }
6805
6806 return crc;
6807 }
6808
/* Recompute VLAN RX filtering state from priv->active_vlans and program it
 * into the MAC.  With a hash-based filter (dma_cap.vlhash) a 16-bit hash is
 * built from the top 4 bits of each VID's bit-reversed CRC32; without it
 * only a single perfect-match VID can be used (the last VID iterated), and
 * since VID 0 always passes the filter, more than two active VIDs cannot
 * be supported.  Returns 0 on success or a negative errno.
 */
static int stmmac_vlan_update(struct stmmac_priv *priv, bool is_double)
{
	u32 crc, hash = 0;
	u16 pmatch = 0;
	int count = 0;
	u16 vid = 0;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		__le16 vid_le = cpu_to_le16(vid);
		/* Top 4 bits of the bit-reversed CRC select a hash bucket */
		crc = bitrev32(~stmmac_vid_crc32_le(vid_le)) >> 28;
		hash |= (1 << crc);
		count++;
	}

	if (!priv->dma_cap.vlhash) {
		if (count > 2) /* VID = 0 always passes filter */
			return -EOPNOTSUPP;

		/* Fall back to perfect match on the last VID seen above */
		pmatch = vid;
		hash = 0;
	}

	/* Defer HW programming until the interface is brought up */
	if (!netif_running(priv->dev))
		return 0;

	return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double);
}
6836
/* FIXME: This may need RXC to be running, but it may be called with BH
 * disabled, which means we can't call phylink_rx_clk_stop*().
 */
static int stmmac_vlan_rx_add_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int num_double_vlans;
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* 802.1ad S-tags count as double (stacked) VLANs */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	set_bit(vid, priv->active_vlans);
	/* Tentative new double-VLAN count; committed only once the HW
	 * accepts the update.  Passed as bool to stmmac_vlan_update():
	 * double-VLAN mode is on iff any 802.1ad VLAN is active.
	 */
	num_double_vlans = priv->num_double_vlans + is_double;
	ret = stmmac_vlan_update(priv, num_double_vlans);
	if (ret) {
		clear_bit(vid, priv->active_vlans);
		goto err_pm_put;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret) {
			/* Roll back: drop the VID and reprogram old state */
			clear_bit(vid, priv->active_vlans);
			stmmac_vlan_update(priv, priv->num_double_vlans);
			goto err_pm_put;
		}
	}

	priv->num_double_vlans = num_double_vlans;

err_pm_put:
	pm_runtime_put(priv->device);

	return ret;
}
6878
/* FIXME: This may need RXC to be running, but it may be called with BH
 * disabled, which means we can't call phylink_rx_clk_stop*().
 */
static int stmmac_vlan_rx_kill_vid(struct net_device *ndev, __be16 proto, u16 vid)
{
	struct stmmac_priv *priv = netdev_priv(ndev);
	unsigned int num_double_vlans;
	bool is_double = false;
	int ret;

	ret = pm_runtime_resume_and_get(priv->device);
	if (ret < 0)
		return ret;

	/* 802.1ad S-tags count as double (stacked) VLANs */
	if (be16_to_cpu(proto) == ETH_P_8021AD)
		is_double = true;

	clear_bit(vid, priv->active_vlans);
	/* Tentative new double-VLAN count; committed only once the HW
	 * accepts the update (mirror of stmmac_vlan_rx_add_vid()).
	 */
	num_double_vlans = priv->num_double_vlans - is_double;
	ret = stmmac_vlan_update(priv, num_double_vlans);
	if (ret) {
		/* Restore the VID on failure so SW and HW state agree */
		set_bit(vid, priv->active_vlans);
		goto del_vlan_error;
	}

	if (priv->hw->num_vlan) {
		ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid);
		if (ret) {
			/* Roll back: re-add the VID and reprogram old state */
			set_bit(vid, priv->active_vlans);
			stmmac_vlan_update(priv, priv->num_double_vlans);
			goto del_vlan_error;
		}
	}

	priv->num_double_vlans = num_double_vlans;

del_vlan_error:
	pm_runtime_put(priv->device);

	return ret;
}
6920
/* Reprogram the MAC's VLAN RX filters from the driver's software state
 * (priv->active_vlans / priv->num_double_vlans), e.g. after the HW has
 * been reset.  No-op if the netdev has no VLAN features enabled.
 */
static void stmmac_vlan_restore(struct stmmac_priv *priv)
{
	if (!(priv->dev->features & NETIF_F_VLAN_FEATURES))
		return;

	if (priv->hw->num_vlan)
		stmmac_restore_hw_vlan_rx_fltr(priv, priv->dev, priv->hw);

	stmmac_vlan_update(priv, priv->num_double_vlans);
}
6931
stmmac_bpf(struct net_device * dev,struct netdev_bpf * bpf)6932 static int stmmac_bpf(struct net_device *dev, struct netdev_bpf *bpf)
6933 {
6934 struct stmmac_priv *priv = netdev_priv(dev);
6935
6936 switch (bpf->command) {
6937 case XDP_SETUP_PROG:
6938 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack);
6939 case XDP_SETUP_XSK_POOL:
6940 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool,
6941 bpf->xsk.queue_id);
6942 default:
6943 return -EOPNOTSUPP;
6944 }
6945 }
6946
/* .ndo_xdp_xmit handler: transmit a batch of XDP frames on the TX queue
 * mapped to the current CPU.  Returns the number of frames queued (which
 * may be less than @num_frames if a descriptor ran out) or a negative
 * errno if the device is down or @flags is invalid.
 */
static int stmmac_xdp_xmit(struct net_device *dev, int num_frames,
			   struct xdp_frame **frames, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int cpu = smp_processor_id();
	struct netdev_queue *nq;
	int i, nxmit = 0;
	int queue;

	if (unlikely(test_bit(STMMAC_DOWN, &priv->state)))
		return -ENETDOWN;

	if (unlikely(flags & ~XDP_XMIT_FLAGS_MASK))
		return -EINVAL;

	queue = stmmac_xdp_get_tx_queue(priv, cpu);
	nq = netdev_get_tx_queue(priv->dev, queue);

	/* The TX queue is shared with the regular stack path */
	__netif_tx_lock(nq, cpu);
	/* Avoids TX time-out as we are sharing with slow path */
	txq_trans_cond_update(nq);

	for (i = 0; i < num_frames; i++) {
		int res;

		/* Stop at the first frame the ring cannot accept */
		res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true);
		if (res == STMMAC_XDP_CONSUMED)
			break;

		nxmit++;
	}

	if (flags & XDP_XMIT_FLUSH) {
		/* Kick the DMA and (re)arm the TX coalescing timer */
		stmmac_flush_tx_descriptors(priv, queue);
		stmmac_tx_timer_arm(priv, queue);
	}

	__netif_tx_unlock(nq);

	return nxmit;
}
6988
/* Quiesce one RX queue: mask its DMA interrupt, stop the RX DMA channel
 * and free its descriptor resources.  Counterpart of
 * stmmac_enable_rx_queue().
 */
void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	/* ch->lock protects the per-channel interrupt enable state */
	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_rx_dma(priv, queue);
	__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
}
7001
/* (Re)initialise and start one RX queue: allocate and init its descriptor
 * ring, program the DMA channel, start RX DMA and unmask its interrupt.
 * On allocation/init failure the queue is left disabled (errors are only
 * logged).  Counterpart of stmmac_disable_rx_queue().
 */
void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc RX desc.\n");
		return;
	}

	ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL);
	if (ret) {
		__free_dma_rx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init RX desc.\n");
		return;
	}

	/* Reset SW ring state and clear descriptors before DMA starts */
	stmmac_reset_rx_queue(priv, queue);
	stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    rx_q->dma_rx_phy, queue);

	stmmac_set_queue_rx_tail_ptr(priv, rx_q, queue, rx_q->buf_alloc_num);

	stmmac_set_queue_rx_buf_size(priv, rx_q, queue);

	stmmac_start_rx_dma(priv, queue);

	/* Unmask the RX interrupt only once the channel is running */
	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0);
	spin_unlock_irqrestore(&ch->lock, flags);
}
7038
/* Quiesce one TX queue: mask its DMA interrupt, stop the TX DMA channel
 * and free its descriptor resources.  Counterpart of
 * stmmac_enable_tx_queue().
 */
void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;

	/* ch->lock protects the per-channel interrupt enable state */
	spin_lock_irqsave(&ch->lock, flags);
	stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);

	stmmac_stop_tx_dma(priv, queue);
	__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
}
7051
/* (Re)initialise and start one TX queue: allocate and init its descriptor
 * ring, program the DMA channel (including TBS if available), start TX DMA
 * and unmask its interrupt.  On allocation/init failure the queue is left
 * disabled (errors are only logged).  Counterpart of
 * stmmac_disable_tx_queue().
 */
void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];
	struct stmmac_channel *ch = &priv->channel[queue];
	unsigned long flags;
	int ret;

	ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
	if (ret) {
		netdev_err(priv->dev, "Failed to alloc TX desc.\n");
		return;
	}

	ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue);
	if (ret) {
		__free_dma_tx_desc_resources(priv, &priv->dma_conf, queue);
		netdev_err(priv->dev, "Failed to init TX desc.\n");
		return;
	}

	/* Reset SW ring state and clear descriptors before DMA starts */
	stmmac_reset_tx_queue(priv, queue);
	stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue);

	stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
			    tx_q->dma_tx_phy, queue);

	/* Re-enable time-based scheduling if this queue supports it */
	if (tx_q->tbs & STMMAC_TBS_AVAIL)
		stmmac_enable_tbs(priv, priv->ioaddr, 1, queue);

	stmmac_set_queue_tx_tail_ptr(priv, tx_q, queue, 0);

	stmmac_start_tx_dma(priv, queue);

	/* Unmask the TX interrupt only once the channel is running */
	spin_lock_irqsave(&ch->lock, flags);
	stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1);
	spin_unlock_irqrestore(&ch->lock, flags);
}
7089
/* Tear down the datapath for an XDP reconfiguration: stop TX/NAPI, cancel
 * coalescing timers, release IRQs, stop all DMA, free descriptor resources
 * and disable the MAC.  Counterpart of stmmac_xdp_open().
 */
void stmmac_xdp_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 chan;

	/* Ensure tx function is not running */
	netif_tx_disable(dev);

	/* Disable NAPI process */
	stmmac_disable_all_queues(priv);

	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

	/* Free the IRQ lines */
	stmmac_free_irq(dev, REQ_IRQ_ERR_ALL, 0);

	/* Stop TX/RX DMA channels */
	stmmac_stop_all_dma(priv);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv, &priv->dma_conf);

	/* Disable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, false);

	/* set trans_start so we don't get spurious
	 * watchdogs during reset
	 */
	netif_trans_update(dev);
	netif_carrier_off(dev);
}
7122
/* Bring the datapath back up after an XDP reconfiguration: allocate and
 * init descriptor rings, program all RX/TX DMA channels, enable the MAC,
 * request IRQs and restart NAPI/TX.  Returns 0 on success or a negative
 * errno, unwinding via the goto cleanup chain on failure.  Counterpart of
 * stmmac_xdp_release().
 */
int stmmac_xdp_open(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	u8 dma_csr_ch = max(rx_cnt, tx_cnt);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	bool sph_en;
	u8 chan;
	int ret;

	ret = alloc_dma_desc_resources(priv, &priv->dma_conf);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors allocation failed\n",
			   __func__);
		goto dma_desc_error;
	}

	ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL);
	if (ret < 0) {
		netdev_err(dev, "%s: DMA descriptors initialization failed\n",
			   __func__);
		goto init_error;
	}

	stmmac_reset_queues_param(priv);

	/* DMA CSR Channel configuration */
	for (chan = 0; chan < dma_csr_ch; chan++) {
		stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan);
		/* Keep channel IRQs masked until everything is started */
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1);
	}

	/* Adjust Split header */
	sph_en = (priv->hw->rx_csum > 0) && priv->sph_active;

	/* DMA RX Channel Configuration */
	for (chan = 0; chan < rx_cnt; chan++) {
		rx_q = &priv->dma_conf.rx_queue[chan];

		stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    rx_q->dma_rx_phy, chan);

		stmmac_set_queue_rx_tail_ptr(priv, rx_q, chan,
					     rx_q->buf_alloc_num);

		stmmac_set_queue_rx_buf_size(priv, rx_q, chan);

		stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan);
	}

	/* DMA TX Channel Configuration */
	for (chan = 0; chan < tx_cnt; chan++) {
		tx_q = &priv->dma_conf.tx_queue[chan];

		stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg,
				    tx_q->dma_tx_phy, chan);

		stmmac_set_queue_tx_tail_ptr(priv, tx_q, chan, 0);

		/* Per-queue TX coalescing timer */
		hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	}

	/* Enable the MAC Rx/Tx */
	stmmac_mac_set(priv, priv->ioaddr, true);

	/* Start Rx & Tx DMA Channels */
	stmmac_start_all_dma(priv);

	ret = stmmac_request_irq(dev);
	if (ret)
		goto irq_error;

	/* Enable NAPI process*/
	stmmac_enable_all_queues(priv);
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);
	stmmac_enable_all_dma_irq(priv);

	return 0;

irq_error:
	for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
		hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);

init_error:
	free_dma_desc_resources(priv, &priv->dma_conf);
dma_desc_error:
	return ret;
}
7214
/* .ndo_xsk_wakeup handler: schedule the combined RX/TX NAPI for an
 * AF_XDP-enabled queue so pending zero-copy work is processed.  Validates
 * that the device is up, XDP is enabled, @queue is in range and the queue
 * actually has an XSK pool bound.
 */
int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct stmmac_rx_queue *rx_q;
	struct stmmac_tx_queue *tx_q;
	struct stmmac_channel *ch;

	if (test_bit(STMMAC_DOWN, &priv->state) ||
	    !netif_carrier_ok(priv->dev))
		return -ENETDOWN;

	if (!stmmac_xdp_is_enabled(priv))
		return -EINVAL;

	if (queue >= priv->plat->rx_queues_to_use ||
	    queue >= priv->plat->tx_queues_to_use)
		return -EINVAL;

	rx_q = &priv->dma_conf.rx_queue[queue];
	tx_q = &priv->dma_conf.tx_queue[queue];
	ch = &priv->channel[queue];

	if (!rx_q->xsk_pool && !tx_q->xsk_pool)
		return -EINVAL;

	/* If NAPI is already running, just mark the wakeup as missed */
	if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) {
		/* EQoS does not have per-DMA channel SW interrupt,
		 * so we schedule RX Napi straight-away.
		 */
		if (likely(napi_schedule_prep(&ch->rxtx_napi)))
			__napi_schedule(&ch->rxtx_napi);
	}

	return 0;
}
7250
/* .ndo_get_stats64 handler: aggregate per-queue packet/byte counters
 * (read consistently via u64_stats retry loops) and copy the error
 * counters accumulated in priv->xstats.
 */
static void stmmac_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	u8 tx_cnt = priv->plat->tx_queues_to_use;
	u8 rx_cnt = priv->plat->rx_queues_to_use;
	unsigned int start;
	u8 q;

	for (q = 0; q < tx_cnt; q++) {
		struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q];
		u64 tx_packets;
		u64 tx_bytes;

		/* tx_bytes is updated from the xmit path (q_syncp),
		 * tx_packets from the NAPI completion path (napi_syncp),
		 * hence the two separate retry loops.
		 */
		do {
			start = u64_stats_fetch_begin(&txq_stats->q_syncp);
			tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes);
		} while (u64_stats_fetch_retry(&txq_stats->q_syncp, start));
		do {
			start = u64_stats_fetch_begin(&txq_stats->napi_syncp);
			tx_packets = u64_stats_read(&txq_stats->napi.tx_packets);
		} while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes += tx_bytes;
	}

	for (q = 0; q < rx_cnt; q++) {
		struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q];
		u64 rx_packets;
		u64 rx_bytes;

		do {
			start = u64_stats_fetch_begin(&rxq_stats->napi_syncp);
			rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets);
			rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes);
		} while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start));

		stats->rx_packets += rx_packets;
		stats->rx_bytes += rx_bytes;
	}

	/* Error/drop counters are maintained globally in priv->xstats */
	stats->rx_dropped = priv->xstats.rx_dropped;
	stats->rx_errors = priv->xstats.rx_errors;
	stats->tx_dropped = priv->xstats.tx_dropped;
	stats->tx_errors = priv->xstats.tx_errors;
	stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier;
	stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision;
	stats->rx_length_errors = priv->xstats.rx_length;
	stats->rx_crc_errors = priv->xstats.rx_crc_errors;
	stats->rx_over_errors = priv->xstats.rx_overflow_cntr;
	stats->rx_missed_errors = priv->xstats.rx_missed_cntr;
}
7303
/* net_device operations for stmmac interfaces */
static const struct net_device_ops stmmac_netdev_ops = {
	.ndo_open = stmmac_open,
	.ndo_start_xmit = stmmac_xmit,
	.ndo_features_check = stmmac_features_check,
	.ndo_stop = stmmac_release,
	.ndo_change_mtu = stmmac_change_mtu,
	.ndo_fix_features = stmmac_fix_features,
	.ndo_set_features = stmmac_set_features,
	.ndo_set_rx_mode = stmmac_set_rx_mode,
	.ndo_tx_timeout = stmmac_tx_timeout,
	.ndo_eth_ioctl = stmmac_ioctl,
	.ndo_get_stats64 = stmmac_get_stats64,
	.ndo_setup_tc = stmmac_setup_tc,
	.ndo_select_queue = stmmac_select_queue,
	.ndo_set_mac_address = stmmac_set_mac_address,
	.ndo_vlan_rx_add_vid = stmmac_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = stmmac_vlan_rx_kill_vid,
	.ndo_bpf = stmmac_bpf,
	.ndo_xdp_xmit = stmmac_xdp_xmit,
	.ndo_xsk_wakeup = stmmac_xsk_wakeup,
	.ndo_hwtstamp_get = stmmac_hwtstamp_get,
	.ndo_hwtstamp_set = stmmac_hwtstamp_set,
};
7327
/* Service-task subtask: if a reset was requested (STMMAC_RESET_REQUESTED)
 * and the device is not down, reset the adapter by closing and reopening
 * the netdev under rtnl.  STMMAC_RESETING serialises concurrent resets.
 */
static void stmmac_reset_subtask(struct stmmac_priv *priv)
{
	if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state))
		return;
	if (test_bit(STMMAC_DOWN, &priv->state))
		return;

	netdev_err(priv->dev, "Reset adapter.\n");

	rtnl_lock();
	netif_trans_update(priv->dev);
	/* Busy-wait (sleeping) until any in-flight reset completes */
	while (test_and_set_bit(STMMAC_RESETING, &priv->state))
		usleep_range(1000, 2000);

	set_bit(STMMAC_DOWN, &priv->state);
	dev_close(priv->dev);
	dev_open(priv->dev, NULL);
	clear_bit(STMMAC_DOWN, &priv->state);
	clear_bit(STMMAC_RESETING, &priv->state);
	rtnl_unlock();
}
7349
/* Deferred service work: currently only runs the reset subtask, then
 * clears the scheduled flag so the task can be queued again.
 */
static void stmmac_service_task(struct work_struct *work)
{
	struct stmmac_priv *priv = container_of(work, struct stmmac_priv,
						service_task);

	stmmac_reset_subtask(priv);
	clear_bit(STMMAC_SERVICE_SCHED, &priv->state);
}
7358
stmmac_print_actphyif(struct stmmac_priv * priv)7359 static void stmmac_print_actphyif(struct stmmac_priv *priv)
7360 {
7361 const char **phyif_table;
7362 const char *actphyif_str;
7363 size_t phyif_table_size;
7364
7365 switch (priv->plat->core_type) {
7366 case DWMAC_CORE_MAC100:
7367 return;
7368
7369 case DWMAC_CORE_GMAC:
7370 case DWMAC_CORE_GMAC4:
7371 phyif_table = stmmac_dwmac_actphyif;
7372 phyif_table_size = ARRAY_SIZE(stmmac_dwmac_actphyif);
7373 break;
7374
7375 case DWMAC_CORE_XGMAC:
7376 phyif_table = stmmac_dwxgmac_phyif;
7377 phyif_table_size = ARRAY_SIZE(stmmac_dwxgmac_phyif);
7378 break;
7379 }
7380
7381 if (priv->dma_cap.actphyif < phyif_table_size)
7382 actphyif_str = phyif_table[priv->dma_cap.actphyif];
7383 else
7384 actphyif_str = NULL;
7385
7386 if (!actphyif_str)
7387 actphyif_str = "unknown";
7388
7389 dev_info(priv->device, "Active PHY interface: %s (%u)\n",
7390 actphyif_str, priv->dma_cap.actphyif);
7391 }
7392
/**
 * stmmac_hw_init - Init the MAC device
 * @priv: driver private structure
 * Description: this function is to configure the MAC device according to
 * some platform parameters or the HW capability register. It prepares the
 * driver to use either ring or chain modes and to setup either enhanced or
 * normal descriptors.
 * Returns 0 on success or a negative errno from HW interface init, HW
 * quirks or PCS init.
 */
static int stmmac_hw_init(struct stmmac_priv *priv)
{
	int ret;

	/* dwmac-sun8i only work in chain mode */
	if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I)
		chain_mode = 1;
	priv->chain_mode = !!chain_mode;

	/* Initialize HW Interface */
	ret = stmmac_hwif_init(priv);
	if (ret)
		return ret;

	/* Get the HW capability (new GMAC newer than 3.50a) */
	priv->hw_cap_support = stmmac_get_hw_features(priv);
	if (priv->hw_cap_support) {
		dev_info(priv->device, "DMA HW capability register supported\n");

		/* We can override some gmac/dma configuration fields: e.g.
		 * enh_desc, tx_coe (e.g. that are passed through the
		 * platform) with the values from the HW capability
		 * register (if supported).
		 */
		priv->plat->enh_desc = priv->dma_cap.enh_desc;
		priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up &&
				!(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL);
		if (priv->dma_cap.hash_tb_sz) {
			/* Encoded hash table size: bins = 2^sz * 32 */
			priv->hw->multicast_filter_bins =
				(BIT(priv->dma_cap.hash_tb_sz) << 5);
			priv->hw->mcast_bits_log2 =
				ilog2(priv->hw->multicast_filter_bins);
		}

		/* TXCOE doesn't work in thresh DMA mode */
		if (priv->plat->force_thresh_dma_mode)
			priv->plat->tx_coe = false;
		else
			priv->plat->tx_coe = priv->dma_cap.tx_coe;

		/* In case of GMAC4 rx_coe is from HW cap register. */
		priv->plat->rx_coe = priv->dma_cap.rx_coe;

		if (priv->dma_cap.rx_coe_type2)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE2;
		else if (priv->dma_cap.rx_coe_type1)
			priv->plat->rx_coe = STMMAC_RX_COE_TYPE1;

		stmmac_print_actphyif(priv);
	} else {
		dev_info(priv->device, "No HW DMA feature register supported\n");
	}

	if (priv->plat->rx_coe) {
		priv->hw->rx_csum = priv->plat->rx_coe;
		dev_info(priv->device, "RX Checksum Offload Engine supported\n");
		if (priv->synopsys_id < DWMAC_CORE_4_00)
			dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum);
	}
	if (priv->plat->tx_coe)
		dev_info(priv->device, "TX Checksum insertion supported\n");

	if (priv->plat->pmt) {
		dev_info(priv->device, "Wake-Up On Lan supported\n");
		device_set_wakeup_capable(priv->device, 1);
		devm_pm_set_wake_irq(priv->device, priv->wol_irq);
	}

	/* Clamp platform-requested queue counts to the HW capabilities */
	if (priv->dma_cap.number_rx_queues &&
	    priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) {
		dev_warn(priv->device,
			 "Number of Rx queues (%u) exceeds dma capability\n",
			 priv->plat->rx_queues_to_use);
		priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues;
	}
	if (priv->dma_cap.number_tx_queues &&
	    priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) {
		dev_warn(priv->device,
			 "Number of Tx queues (%u) exceeds dma capability\n",
			 priv->plat->tx_queues_to_use);
		priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues;
	}

	/* Likewise clamp the FIFO sizes */
	if (priv->dma_cap.rx_fifo_size &&
	    priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) {
		dev_warn(priv->device,
			 "Rx FIFO size (%u) exceeds dma capability\n",
			 priv->plat->rx_fifo_size);
		priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size;
	}
	if (priv->dma_cap.tx_fifo_size &&
	    priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) {
		dev_warn(priv->device,
			 "Tx FIFO size (%u) exceeds dma capability\n",
			 priv->plat->tx_fifo_size);
		priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size;
	}

	priv->hw->vlan_fail_q_en =
		(priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN);
	priv->hw->vlan_fail_q = priv->plat->vlan_fail_q;

	/* Run HW quirks, if any */
	if (priv->hwif_quirks) {
		ret = priv->hwif_quirks(priv);
		if (ret)
			return ret;
	}

	/* Set alternate descriptor size (which tells the hardware that
	 * descriptors are 8 32-bit words) when using extended descriptors
	 * with ring mode. Only applicable for pre-v4.0 cores. Platform glue
	 * is not expected to change this.
	 */
	priv->plat->dma_cfg->atds = priv->extend_desc &&
				    priv->descriptor_mode == STMMAC_RING_MODE;

	/* Rx Watchdog is available in the COREs newer than the 3.40.
	 * In some case, for example on bugged HW this feature
	 * has to be disable and this can be done by passing the
	 * riwt_off field from the platform.
	 */
	if ((priv->synopsys_id >= DWMAC_CORE_3_50 ||
	     priv->plat->core_type == DWMAC_CORE_XGMAC) &&
	    !priv->plat->riwt_off) {
		priv->use_riwt = 1;
		dev_info(priv->device,
			 "Enable RX Mitigation via HW Watchdog Timer\n");
	}

	/* Unimplemented PCS init (as indicated by stmmac_do_callback()
	 * perversely returning -EINVAL) is non-fatal.
	 */
	ret = stmmac_mac_pcs_init(priv);
	if (ret != -EINVAL)
		return ret;

	return 0;
}
7540
stmmac_napi_add(struct net_device * dev)7541 static void stmmac_napi_add(struct net_device *dev)
7542 {
7543 struct stmmac_priv *priv = netdev_priv(dev);
7544 u8 queue, maxq;
7545
7546 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7547
7548 for (queue = 0; queue < maxq; queue++) {
7549 struct stmmac_channel *ch = &priv->channel[queue];
7550
7551 ch->priv_data = priv;
7552 ch->index = queue;
7553 spin_lock_init(&ch->lock);
7554
7555 if (queue < priv->plat->rx_queues_to_use) {
7556 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx);
7557 }
7558 if (queue < priv->plat->tx_queues_to_use) {
7559 netif_napi_add_tx(dev, &ch->tx_napi,
7560 stmmac_napi_poll_tx);
7561 }
7562 if (queue < priv->plat->rx_queues_to_use &&
7563 queue < priv->plat->tx_queues_to_use) {
7564 netif_napi_add(dev, &ch->rxtx_napi,
7565 stmmac_napi_poll_rxtx);
7566 }
7567 }
7568 }
7569
stmmac_napi_del(struct net_device * dev)7570 static void stmmac_napi_del(struct net_device *dev)
7571 {
7572 struct stmmac_priv *priv = netdev_priv(dev);
7573 u8 queue, maxq;
7574
7575 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use);
7576
7577 for (queue = 0; queue < maxq; queue++) {
7578 struct stmmac_channel *ch = &priv->channel[queue];
7579
7580 if (queue < priv->plat->rx_queues_to_use)
7581 netif_napi_del(&ch->rx_napi);
7582 if (queue < priv->plat->tx_queues_to_use)
7583 netif_napi_del(&ch->tx_napi);
7584 if (queue < priv->plat->rx_queues_to_use &&
7585 queue < priv->plat->tx_queues_to_use) {
7586 netif_napi_del(&ch->rxtx_napi);
7587 }
7588 }
7589 }
7590
/* Change the number of active RX/TX queues (ethtool channels): stop the
 * interface if running, re-register the NAPI contexts for the new counts,
 * refresh the default RSS table (unless user-configured), then restart.
 * Returns 0 or the error from reopening the interface.
 */
int stmmac_reinit_queues(struct net_device *dev, u8 rx_cnt, u8 tx_cnt)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	int ret = 0, i;

	if (netif_running(dev))
		stmmac_release(dev);

	/* NAPI contexts depend on the queue counts, so rebuild them */
	stmmac_napi_del(dev);

	priv->plat->rx_queues_to_use = rx_cnt;
	priv->plat->tx_queues_to_use = tx_cnt;
	if (!netif_is_rxfh_configured(dev))
		for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
			priv->rss.table[i] = ethtool_rxfh_indir_default(i,
									rx_cnt);

	stmmac_napi_add(dev);

	if (netif_running(dev))
		ret = stmmac_open(dev);

	return ret;
}
7615
/* Change the RX/TX descriptor ring sizes (ethtool ringparam): stop the
 * interface if it was running, record the new sizes and restart.
 * Returns 0 or the error from reopening the interface.
 */
int stmmac_reinit_ringparam(struct net_device *dev, u32 rx_size, u32 tx_size)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	bool was_running = netif_running(dev);
	int ret = 0;

	if (was_running)
		stmmac_release(dev);

	priv->dma_conf.dma_rx_size = rx_size;
	priv->dma_conf.dma_tx_size = tx_size;

	if (was_running)
		ret = stmmac_open(dev);

	return ret;
}
7632
/* XDP metadata hook: extract the RX hardware timestamp for an XDP buffer.
 * Returns 0 with *timestamp filled in, or -ENODATA when RX timestamping
 * is off or no timestamp is available for this descriptor.
 */
static int stmmac_xdp_rx_timestamp(const struct xdp_md *_ctx, u64 *timestamp)
{
	const struct stmmac_xdp_buff *ctx = (void *)_ctx;
	struct dma_desc *desc_contains_ts = ctx->desc;
	struct stmmac_priv *priv = ctx->priv;
	struct dma_desc *ndesc = ctx->ndesc;
	struct dma_desc *desc = ctx->desc;
	u64 ns = 0;

	if (!priv->hwts_rx_en)
		return -ENODATA;

	/* For GMAC4, the valid timestamp is from CTX next desc. */
	if (dwmac_is_xmac(priv->plat->core_type))
		desc_contains_ts = ndesc;

	/* Check if timestamp is available */
	if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) {
		stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns);
		/* Compensate the clock-domain-crossing error, if any */
		ns -= priv->plat->cdc_error_adj;
		*timestamp = ns_to_ktime(ns);
		return 0;
	}

	return -ENODATA;
}
7659
/* XDP metadata operations: only RX timestamp extraction is supported */
static const struct xdp_metadata_ops stmmac_xdp_metadata_ops = {
	.xmo_rx_timestamp = stmmac_xdp_rx_timestamp,
};
7663
/* devlink "phc_coarse_adj" set handler: switch the PTP clock between
 * coarse and fine timestamp update modes (PTP_TCR_TSCFUPDT) and
 * reconfigure the subsecond increment accordingly.  Always returns 0.
 */
static int stmmac_dl_ts_coarse_set(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);
	struct stmmac_priv *priv = dl_priv->stmmac_priv;

	priv->tsfupdt_coarse = ctx->val.vbool;

	/* TSCFUPDT set = fine update mode, cleared = coarse mode */
	if (priv->tsfupdt_coarse)
		priv->systime_flags &= ~PTP_TCR_TSCFUPDT;
	else
		priv->systime_flags |= PTP_TCR_TSCFUPDT;

	/* In Coarse mode, we can use a smaller subsecond increment, let's
	 * reconfigure the systime, subsecond increment and addend.
	 */
	stmmac_update_subsecond_increment(priv);

	return 0;
}
7685
/* devlink "phc_coarse_adj" get handler: report whether the PTP clock is
 * currently in coarse timestamp update mode.  Always returns 0.
 */
static int stmmac_dl_ts_coarse_get(struct devlink *dl, u32 id,
				   struct devlink_param_gset_ctx *ctx,
				   struct netlink_ext_ack *extack)
{
	struct stmmac_devlink_priv *dl_priv = devlink_priv(dl);

	ctx->val.vbool = dl_priv->stmmac_priv->tsfupdt_coarse;

	return 0;
}
7697
/* Driver-specific devlink runtime parameters; currently only the
 * coarse/fine PTP adjustment mode toggle.
 */
static const struct devlink_param stmmac_devlink_params[] = {
	DEVLINK_PARAM_DRIVER(STMMAC_DEVLINK_PARAM_ID_TS_COARSE, "phc_coarse_adj",
			     DEVLINK_PARAM_TYPE_BOOL,
			     BIT(DEVLINK_PARAM_CMODE_RUNTIME),
			     stmmac_dl_ts_coarse_get,
			     stmmac_dl_ts_coarse_set, NULL),
};
7705
7706 /* None of the generic devlink parameters are implemented */
7707 static const struct devlink_ops stmmac_devlink_ops = {};
7708
stmmac_register_devlink(struct stmmac_priv * priv)7709 static int stmmac_register_devlink(struct stmmac_priv *priv)
7710 {
7711 struct stmmac_devlink_priv *dl_priv;
7712 int ret;
7713
7714 /* For now, what is exposed over devlink is only relevant when
7715 * timestamping is available and we have a valid ptp clock rate
7716 */
7717 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp) ||
7718 !priv->plat->clk_ptp_rate)
7719 return 0;
7720
7721 priv->devlink = devlink_alloc(&stmmac_devlink_ops, sizeof(*dl_priv),
7722 priv->device);
7723 if (!priv->devlink)
7724 return -ENOMEM;
7725
7726 dl_priv = devlink_priv(priv->devlink);
7727 dl_priv->stmmac_priv = priv;
7728
7729 ret = devlink_params_register(priv->devlink, stmmac_devlink_params,
7730 ARRAY_SIZE(stmmac_devlink_params));
7731 if (ret)
7732 goto dl_free;
7733
7734 devlink_register(priv->devlink);
7735 return 0;
7736
7737 dl_free:
7738 devlink_free(priv->devlink);
7739
7740 return ret;
7741 }
7742
stmmac_unregister_devlink(struct stmmac_priv * priv)7743 static void stmmac_unregister_devlink(struct stmmac_priv *priv)
7744 {
7745 if (!priv->devlink)
7746 return;
7747
7748 devlink_unregister(priv->devlink);
7749 devlink_params_unregister(priv->devlink, stmmac_devlink_params,
7750 ARRAY_SIZE(stmmac_devlink_params));
7751 devlink_free(priv->devlink);
7752 }
7753
stmmac_plat_dat_alloc(struct device * dev)7754 struct plat_stmmacenet_data *stmmac_plat_dat_alloc(struct device *dev)
7755 {
7756 struct plat_stmmacenet_data *plat_dat;
7757 int i;
7758
7759 plat_dat = devm_kzalloc(dev, sizeof(*plat_dat), GFP_KERNEL);
7760 if (!plat_dat)
7761 return NULL;
7762
7763 plat_dat->dma_cfg = &plat_dat->__dma_cfg;
7764
7765 /* Set the defaults:
7766 * - phy autodetection
7767 * - determine GMII_Address CR field from CSR clock
7768 * - allow MTU up to JUMBO_LEN
7769 * - hash table size
7770 * - one unicast filter entry
7771 */
7772 plat_dat->phy_addr = -1;
7773 plat_dat->clk_csr = -1;
7774 plat_dat->maxmtu = JUMBO_LEN;
7775 plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
7776 plat_dat->unicast_filter_entries = 1;
7777
7778 /* Set the mtl defaults */
7779 plat_dat->tx_queues_to_use = 1;
7780 plat_dat->rx_queues_to_use = 1;
7781
7782 /* Setup the default RX queue channel map */
7783 for (i = 0; i < ARRAY_SIZE(plat_dat->rx_queues_cfg); i++)
7784 plat_dat->rx_queues_cfg[i].chan = i;
7785
7786 return plat_dat;
7787 }
7788 EXPORT_SYMBOL_GPL(stmmac_plat_dat_alloc);
7789
/* __stmmac_dvr_probe - common probe body.
 * Allocates the net_device and driver private data, resets and identifies
 * the hardware, configures features and DMA masks, then registers MDIO,
 * PCS, phylink, devlink and finally the netdev. On failure all
 * partially-acquired resources are released in reverse order through the
 * error labels at the bottom.
 * @device: struct device backing this interface
 * @plat_dat: platform data (must carry a valid dma_cfg with pbl set)
 * @res: memory/IRQ/MAC resources discovered by the bus-specific probe
 * Return: 0 on success, negative errno otherwise.
 */
static int __stmmac_dvr_probe(struct device *device,
			      struct plat_stmmacenet_data *plat_dat,
			      struct stmmac_resources *res)
{
	struct net_device *ndev = NULL;
	struct stmmac_priv *priv;
	int i, ret = 0;
	u8 rxq;

	if (!plat_dat->dma_cfg || !plat_dat->dma_cfg->pbl) {
		dev_err(device, "invalid DMA configuration\n");
		return -EINVAL;
	}

	ndev = devm_alloc_etherdev_mqs(device, sizeof(struct stmmac_priv),
				       MTL_MAX_TX_QUEUES, MTL_MAX_RX_QUEUES);
	if (!ndev)
		return -ENOMEM;

	SET_NETDEV_DEV(ndev, device);

	priv = netdev_priv(ndev);
	priv->device = device;
	priv->dev = ndev;

	/* Initialise the u64 stats sync points before anything can read or
	 * update the counters.
	 */
	for (i = 0; i < MTL_MAX_RX_QUEUES; i++)
		u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp);
	for (i = 0; i < MTL_MAX_TX_QUEUES; i++) {
		u64_stats_init(&priv->xstats.txq_stats[i].q_syncp);
		u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp);
	}

	priv->xstats.pcpu_stats =
		devm_netdev_alloc_pcpu_stats(device, struct stmmac_pcpu_stats);
	if (!priv->xstats.pcpu_stats)
		return -ENOMEM;

	stmmac_set_ethtool_ops(ndev);
	priv->pause_time = pause;
	priv->plat = plat_dat;
	priv->ioaddr = res->addr;
	priv->dev->base_addr = (unsigned long)res->addr;
	priv->plat->dma_cfg->multi_msi_en =
		(priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN);

	priv->dev->irq = res->irq;
	priv->wol_irq = res->wol_irq;
	priv->sfty_irq = res->sfty_irq;

	if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) {
		ret = stmmac_msi_init(priv, res);
		if (ret)
			return ret;
	}

	if (!is_zero_ether_addr(res->mac))
		eth_hw_addr_set(priv->dev, res->mac);

	dev_set_drvdata(device, priv->dev);

	/* Verify driver arguments */
	stmmac_verify_args();

	priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL);
	if (!priv->af_xdp_zc_qps)
		return -ENOMEM;

	/* Allocate workqueue */
	priv->wq = create_singlethread_workqueue("stmmac_wq");
	if (!priv->wq) {
		dev_err(priv->device, "failed to create workqueue\n");
		ret = -ENOMEM;
		goto error_wq_init;
	}

	INIT_WORK(&priv->service_task, stmmac_service_task);

	timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0);

	/* Override with kernel parameters if supplied XXX CRS XXX
	 * this needs to have multiple instances
	 */
	if ((phyaddr >= 0) && (phyaddr <= 31))
		priv->plat->phy_addr = phyaddr;

	if (priv->plat->stmmac_rst) {
		ret = reset_control_assert(priv->plat->stmmac_rst);
		reset_control_deassert(priv->plat->stmmac_rst);
		/* Some reset controllers have only reset callback instead of
		 * assert + deassert callbacks pair.
		 */
		if (ret == -ENOTSUPP)
			reset_control_reset(priv->plat->stmmac_rst);
	}

	ret = reset_control_deassert(priv->plat->stmmac_ahb_rst);
	if (ret == -ENOTSUPP)
		dev_err(priv->device, "unable to bring out of ahb reset: %pe\n",
			ERR_PTR(ret));

	/* Wait a bit for the reset to take effect */
	udelay(10);

	/* Init MAC and get the capabilities */
	ret = stmmac_hw_init(priv);
	if (ret)
		goto error_hw_init;

	/* Only DWMAC core version 5.20 onwards supports HW descriptor prefetch.
	 */
	if (priv->synopsys_id < DWMAC_CORE_5_20)
		priv->plat->dma_cfg->dche = false;

	stmmac_check_ether_addr(priv);

	ndev->netdev_ops = &stmmac_netdev_ops;

	ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops;
	ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops;

	ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
			    NETIF_F_RXCSUM;
	ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT |
			     NETDEV_XDP_ACT_XSK_ZEROCOPY;

	/* Advertise TC offload only if the TC backend initialised */
	ret = stmmac_tc_init(priv, priv);
	if (!ret) {
		ndev->hw_features |= NETIF_F_HW_TC;
	}

	stmmac_set_gso_features(ndev);

	if (priv->dma_cap.sphen &&
	    !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) {
		ndev->hw_features |= NETIF_F_GRO;
		priv->sph_capable = true;
		priv->sph_active = priv->sph_capable;
		dev_info(priv->device, "SPH feature enabled\n");
	}

	/* Ideally our host DMA address width is the same as for the
	 * device. However, it may differ and then we have to use our
	 * host DMA width for allocation and the device DMA width for
	 * register handling.
	 */
	if (priv->plat->host_dma_width)
		priv->dma_cap.host_dma_width = priv->plat->host_dma_width;
	else
		priv->dma_cap.host_dma_width = priv->dma_cap.addr64;

	if (priv->dma_cap.host_dma_width) {
		ret = dma_set_mask_and_coherent(device,
				DMA_BIT_MASK(priv->dma_cap.host_dma_width));
		if (!ret) {
			dev_info(priv->device, "Using %d/%d bits DMA host/device width\n",
				 priv->dma_cap.host_dma_width, priv->dma_cap.addr64);

			/*
			 * If more than 32 bits can be addressed, make sure to
			 * enable enhanced addressing mode.
			 */
			if (IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT))
				priv->plat->dma_cfg->eame = true;
		} else {
			/* Fall back to a 32-bit mask before giving up */
			ret = dma_set_mask_and_coherent(device, DMA_BIT_MASK(32));
			if (ret) {
				dev_err(priv->device, "Failed to set DMA Mask\n");
				goto error_hw_init;
			}

			priv->dma_cap.host_dma_width = 32;
		}
	}

	ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA;
	ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
#ifdef STMMAC_VLAN_TAG_USED
	/* Both mac100 and gmac support receive VLAN tag detection */
	ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX;
	if (dwmac_is_xmac(priv->plat->core_type)) {
		ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
		priv->hw->hw_vlan_en = true;
	}
	if (priv->dma_cap.vlhash) {
		ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
		ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER;
	}
	if (priv->dma_cap.vlins)
		ndev->features |= NETIF_F_HW_VLAN_CTAG_TX;
#endif
	priv->msg_enable = netif_msg_init(debug, default_msg_level);

	priv->xstats.threshold = tc;

	/* Initialize RSS */
	rxq = priv->plat->rx_queues_to_use;
	netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key));
	for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++)
		priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq);

	if (priv->dma_cap.rssen && priv->plat->rss_en)
		ndev->features |= NETIF_F_RXHASH;

	ndev->vlan_features |= ndev->features;

	/* MTU range: 46 - hw-specific max */
	ndev->min_mtu = ETH_ZLEN - ETH_HLEN;

	if (priv->plat->core_type == DWMAC_CORE_XGMAC)
		ndev->max_mtu = XGMAC_JUMBO_LEN;
	else if (priv->plat->enh_desc || priv->synopsys_id >= DWMAC_CORE_4_00)
		ndev->max_mtu = JUMBO_LEN;
	else
		ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN);

	/* Warn if the platform's maxmtu is smaller than the minimum MTU,
	 * otherwise clamp the maximum MTU above to the platform's maxmtu.
	 */
	if (priv->plat->maxmtu < ndev->min_mtu)
		dev_warn(priv->device,
			 "%s: warning: maxmtu having invalid value (%d)\n",
			 __func__, priv->plat->maxmtu);
	else if (priv->plat->maxmtu < ndev->max_mtu)
		ndev->max_mtu = priv->plat->maxmtu;

	ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE;

	/* Setup channels NAPI */
	stmmac_napi_add(ndev);

	mutex_init(&priv->lock);

	stmmac_fpe_init(priv);

	stmmac_check_pcs_mode(priv);

	pm_runtime_get_noresume(device);
	pm_runtime_set_active(device);
	if (!pm_runtime_enabled(device))
		pm_runtime_enable(device);

	ret = stmmac_mdio_register(ndev);
	if (ret < 0) {
		dev_err_probe(priv->device, ret,
			      "MDIO bus (id: %d) registration failed\n",
			      priv->plat->bus_id);
		goto error_mdio_register;
	}

	ret = stmmac_pcs_setup(ndev);
	if (ret)
		goto error_pcs_setup;

	ret = stmmac_phylink_setup(priv);
	if (ret) {
		netdev_err(ndev, "failed to setup phy (%d)\n", ret);
		goto error_phy_setup;
	}

	ret = stmmac_register_devlink(priv);
	if (ret)
		goto error_devlink_setup;

	ret = register_netdev(ndev);
	if (ret) {
		dev_err(priv->device, "%s: ERROR %i registering the device\n",
			__func__, ret);
		goto error_netdev_register;
	}

#ifdef CONFIG_DEBUG_FS
	stmmac_init_fs(ndev);
#endif

	if (priv->plat->dump_debug_regs)
		priv->plat->dump_debug_regs(priv->plat->bsp_priv);

	/* Let pm_runtime_put() disable the clocks.
	 * If CONFIG_PM is not enabled, the clocks will stay powered.
	 */
	pm_runtime_put(device);

	return ret;

	/* Error unwind: release resources in reverse acquisition order */
error_netdev_register:
	stmmac_unregister_devlink(priv);
error_devlink_setup:
	phylink_destroy(priv->phylink);
error_phy_setup:
	stmmac_pcs_clean(ndev);
error_pcs_setup:
	stmmac_mdio_unregister(ndev);
error_mdio_register:
	stmmac_napi_del(ndev);
error_hw_init:
	destroy_workqueue(priv->wq);
error_wq_init:
	bitmap_free(priv->af_xdp_zc_qps);

	return ret;
}
8091
8092 /**
8093 * stmmac_dvr_probe
8094 * @dev: device pointer
8095 * @plat_dat: platform data pointer
8096 * @res: stmmac resource pointer
8097 * Description: this is the main probe function used to
8098 * call the alloc_etherdev, allocate the priv structure.
8099 * Return:
8100 * returns 0 on success, otherwise errno.
8101 */
stmmac_dvr_probe(struct device * dev,struct plat_stmmacenet_data * plat_dat,struct stmmac_resources * res)8102 int stmmac_dvr_probe(struct device *dev, struct plat_stmmacenet_data *plat_dat,
8103 struct stmmac_resources *res)
8104 {
8105 int ret;
8106
8107 if (plat_dat->init) {
8108 ret = plat_dat->init(dev, plat_dat->bsp_priv);
8109 if (ret)
8110 return ret;
8111 }
8112
8113 ret = __stmmac_dvr_probe(dev, plat_dat, res);
8114 if (ret && plat_dat->exit)
8115 plat_dat->exit(dev, plat_dat->bsp_priv);
8116
8117 return ret;
8118 }
8119 EXPORT_SYMBOL_GPL(stmmac_dvr_probe);
8120
8121 /**
8122 * stmmac_dvr_remove
8123 * @dev: device pointer
8124 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
8125 * changes the link status, releases the DMA descriptor rings.
8126 */
stmmac_dvr_remove(struct device * dev)8127 void stmmac_dvr_remove(struct device *dev)
8128 {
8129 struct net_device *ndev = dev_get_drvdata(dev);
8130 struct stmmac_priv *priv = netdev_priv(ndev);
8131
8132 netdev_info(priv->dev, "%s: removing driver", __func__);
8133
8134 pm_runtime_get_sync(dev);
8135
8136 unregister_netdev(ndev);
8137
8138 #ifdef CONFIG_DEBUG_FS
8139 stmmac_exit_fs(ndev);
8140 #endif
8141 stmmac_unregister_devlink(priv);
8142
8143 phylink_destroy(priv->phylink);
8144 if (priv->plat->stmmac_rst)
8145 reset_control_assert(priv->plat->stmmac_rst);
8146 reset_control_assert(priv->plat->stmmac_ahb_rst);
8147
8148 stmmac_pcs_clean(ndev);
8149 stmmac_mdio_unregister(ndev);
8150
8151 destroy_workqueue(priv->wq);
8152 mutex_destroy(&priv->lock);
8153 bitmap_free(priv->af_xdp_zc_qps);
8154
8155 pm_runtime_disable(dev);
8156 pm_runtime_put_noidle(dev);
8157
8158 if (priv->plat->exit)
8159 priv->plat->exit(dev, priv->plat->bsp_priv);
8160 }
8161 EXPORT_SYMBOL_GPL(stmmac_dvr_remove);
8162
8163 /**
8164 * stmmac_suspend - suspend callback
8165 * @dev: device pointer
8166 * Description: this is the function to suspend the device and it is called
8167 * by the platform driver to stop the network queue, release the resources,
8168 * program the PMT register (for WoL), clean and release driver resources.
8169 */
stmmac_suspend(struct device * dev)8170 int stmmac_suspend(struct device *dev)
8171 {
8172 struct net_device *ndev = dev_get_drvdata(dev);
8173 struct stmmac_priv *priv = netdev_priv(ndev);
8174 u8 chan;
8175
8176 if (!ndev || !netif_running(ndev))
8177 goto suspend_bsp;
8178
8179 mutex_lock(&priv->lock);
8180
8181 netif_device_detach(ndev);
8182
8183 stmmac_disable_all_queues(priv);
8184
8185 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++)
8186 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer);
8187
8188 if (priv->eee_sw_timer_en) {
8189 priv->tx_path_in_lpi_mode = false;
8190 timer_delete_sync(&priv->eee_ctrl_timer);
8191 }
8192
8193 /* Stop TX/RX DMA */
8194 stmmac_stop_all_dma(priv);
8195
8196 stmmac_legacy_serdes_power_down(priv);
8197
8198 /* Enable Power down mode by programming the PMT regs */
8199 if (priv->wolopts) {
8200 stmmac_pmt(priv, priv->hw, priv->wolopts);
8201 priv->irq_wake = 1;
8202 } else {
8203 stmmac_mac_set(priv, priv->ioaddr, false);
8204 pinctrl_pm_select_sleep_state(priv->device);
8205 }
8206
8207 mutex_unlock(&priv->lock);
8208
8209 rtnl_lock();
8210 phylink_suspend(priv->phylink, !!priv->wolopts);
8211 rtnl_unlock();
8212
8213 if (stmmac_fpe_supported(priv))
8214 ethtool_mmsv_stop(&priv->fpe_cfg.mmsv);
8215
8216 suspend_bsp:
8217 if (priv->plat->suspend)
8218 return priv->plat->suspend(dev, priv->plat->bsp_priv);
8219
8220 return 0;
8221 }
8222 EXPORT_SYMBOL_GPL(stmmac_suspend);
8223
/* Rewind the software state of one RX queue to the start of the ring */
static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue];

	rx_q->dirty_rx = 0;
	rx_q->cur_rx = 0;
}
8231
/* Rewind the software state of one TX queue: ring pointers, cached TSO
 * MSS, and the netdev queue's BQL accounting.
 */
static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue)
{
	struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue];

	tx_q->mss = 0;
	tx_q->dirty_tx = 0;
	tx_q->cur_tx = 0;

	netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue));
}
8242
8243 /**
8244 * stmmac_reset_queues_param - reset queue parameters
8245 * @priv: device pointer
8246 */
stmmac_reset_queues_param(struct stmmac_priv * priv)8247 static void stmmac_reset_queues_param(struct stmmac_priv *priv)
8248 {
8249 u8 rx_cnt = priv->plat->rx_queues_to_use;
8250 u8 tx_cnt = priv->plat->tx_queues_to_use;
8251 u8 queue;
8252
8253 for (queue = 0; queue < rx_cnt; queue++)
8254 stmmac_reset_rx_queue(priv, queue);
8255
8256 for (queue = 0; queue < tx_cnt; queue++)
8257 stmmac_reset_tx_queue(priv, queue);
8258 }
8259
8260 /**
8261 * stmmac_resume - resume callback
8262 * @dev: device pointer
8263 * Description: when resume this function is invoked to setup the DMA and CORE
8264 * in a usable state.
8265 */
stmmac_resume(struct device * dev)8266 int stmmac_resume(struct device *dev)
8267 {
8268 struct net_device *ndev = dev_get_drvdata(dev);
8269 struct stmmac_priv *priv = netdev_priv(ndev);
8270 int ret;
8271
8272 if (priv->plat->resume) {
8273 ret = priv->plat->resume(dev, priv->plat->bsp_priv);
8274 if (ret)
8275 return ret;
8276 }
8277
8278 if (!netif_running(ndev))
8279 return 0;
8280
8281 /* Power Down bit, into the PM register, is cleared
8282 * automatically as soon as a magic packet or a Wake-up frame
8283 * is received. Anyway, it's better to manually clear
8284 * this bit because it can generate problems while resuming
8285 * from another devices (e.g. serial console).
8286 */
8287 if (priv->wolopts) {
8288 mutex_lock(&priv->lock);
8289 stmmac_pmt(priv, priv->hw, 0);
8290 mutex_unlock(&priv->lock);
8291 priv->irq_wake = 0;
8292 } else {
8293 pinctrl_pm_select_default_state(priv->device);
8294 /* reset the phy so that it's ready */
8295 if (priv->mii)
8296 stmmac_mdio_reset(priv->mii);
8297 }
8298
8299 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP)) {
8300 ret = stmmac_legacy_serdes_power_up(priv);
8301 if (ret < 0)
8302 return ret;
8303 }
8304
8305 rtnl_lock();
8306
8307 /* Prepare the PHY to resume, ensuring that its clocks which are
8308 * necessary for the MAC DMA reset to complete are running
8309 */
8310 phylink_prepare_resume(priv->phylink);
8311
8312 mutex_lock(&priv->lock);
8313
8314 stmmac_reset_queues_param(priv);
8315
8316 stmmac_free_tx_skbufs(priv);
8317 stmmac_clear_descriptors(priv, &priv->dma_conf);
8318
8319 ret = stmmac_hw_setup(ndev);
8320 if (ret < 0) {
8321 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__);
8322 stmmac_legacy_serdes_power_down(priv);
8323 mutex_unlock(&priv->lock);
8324 rtnl_unlock();
8325 return ret;
8326 }
8327
8328 stmmac_init_timestamping(priv);
8329
8330 stmmac_init_coalesce(priv);
8331 phylink_rx_clk_stop_block(priv->phylink);
8332 stmmac_set_rx_mode(ndev);
8333 phylink_rx_clk_stop_unblock(priv->phylink);
8334
8335 stmmac_vlan_restore(priv);
8336
8337 stmmac_enable_all_queues(priv);
8338 stmmac_enable_all_dma_irq(priv);
8339
8340 mutex_unlock(&priv->lock);
8341
8342 /* phylink_resume() must be called after the hardware has been
8343 * initialised because it may bring the link up immediately in a
8344 * workqueue thread, which will race with initialisation.
8345 */
8346 phylink_resume(priv->phylink);
8347 rtnl_unlock();
8348
8349 netif_device_attach(ndev);
8350
8351 return 0;
8352 }
8353 EXPORT_SYMBOL_GPL(stmmac_resume);
8354
/* This is not the same as EXPORT_GPL_SIMPLE_DEV_PM_OPS() when CONFIG_PM=n:
 * here the ops structure is defined and exported regardless of CONFIG_PM.
 */
DEFINE_SIMPLE_DEV_PM_OPS(stmmac_simple_pm_ops, stmmac_suspend, stmmac_resume);
EXPORT_SYMBOL_GPL(stmmac_simple_pm_ops);
8358
8359 #ifndef MODULE
stmmac_cmdline_opt(char * str)8360 static int __init stmmac_cmdline_opt(char *str)
8361 {
8362 char *opt;
8363
8364 if (!str || !*str)
8365 return 1;
8366 while ((opt = strsep(&str, ",")) != NULL) {
8367 if (!strncmp(opt, "debug:", 6)) {
8368 if (kstrtoint(opt + 6, 0, &debug))
8369 goto err;
8370 } else if (!strncmp(opt, "phyaddr:", 8)) {
8371 if (kstrtoint(opt + 8, 0, &phyaddr))
8372 goto err;
8373 } else if (!strncmp(opt, "tc:", 3)) {
8374 if (kstrtoint(opt + 3, 0, &tc))
8375 goto err;
8376 } else if (!strncmp(opt, "watchdog:", 9)) {
8377 if (kstrtoint(opt + 9, 0, &watchdog))
8378 goto err;
8379 } else if (!strncmp(opt, "flow_ctrl:", 10)) {
8380 if (kstrtoint(opt + 10, 0, &flow_ctrl))
8381 goto err;
8382 } else if (!strncmp(opt, "pause:", 6)) {
8383 if (kstrtoint(opt + 6, 0, &pause))
8384 goto err;
8385 } else if (!strncmp(opt, "eee_timer:", 10)) {
8386 if (kstrtoint(opt + 10, 0, &eee_timer))
8387 goto err;
8388 } else if (!strncmp(opt, "chain_mode:", 11)) {
8389 if (kstrtoint(opt + 11, 0, &chain_mode))
8390 goto err;
8391 }
8392 }
8393 return 1;
8394
8395 err:
8396 pr_err("%s: ERROR broken module parameter conversion", __func__);
8397 return 1;
8398 }
8399
8400 __setup("stmmaceth=", stmmac_cmdline_opt);
8401 #endif /* MODULE */
8402
/* Module init: only sets up the shared debugfs root and a netdevice
 * notifier; per-device setup happens in the probe paths.
 */
static int __init stmmac_init(void)
{
#ifdef CONFIG_DEBUG_FS
	/* Create debugfs main directory if it doesn't exist yet */
	if (!stmmac_fs_dir)
		stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
	/* NOTE(review): notifier presumably keeps per-device debugfs entries
	 * in sync with netdev events — confirm against stmmac_notifier.
	 */
	register_netdevice_notifier(&stmmac_notifier);
#endif

	return 0;
}
8414
/* Module exit: undo stmmac_init() — drop the notifier and remove the
 * shared debugfs tree.
 */
static void __exit stmmac_exit(void)
{
#ifdef CONFIG_DEBUG_FS
	unregister_netdevice_notifier(&stmmac_notifier);
	debugfs_remove_recursive(stmmac_fs_dir);
#endif
}
8422
/* Module entry/exit points and metadata */
module_init(stmmac_init)
module_exit(stmmac_exit)

MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
MODULE_LICENSE("GPL");
8429