Lines Matching +full:queue +full:- +full:rx
1 // SPDX-License-Identifier: GPL-2.0-only
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
29 #include <linux/dma-mapping.h>
55  * with fine resolution and binary rollover. This avoids non-monotonic behavior
62 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
70 static int debug = -1;
72 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
74 static int phyaddr = -1;
78 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
79 #define STMMAC_RX_THRESH(x) ((x)->dma_conf.dma_rx_size / 4)
121 /* By default the driver will use the ring mode to manage tx and rx descriptors,
134 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue);
135 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue);
137 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue);
138 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue);
155 ret = clk_prepare_enable(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
158 ret = clk_prepare_enable(priv->plat->pclk); in stmmac_bus_clks_config()
160 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
163 if (priv->plat->clks_config) { in stmmac_bus_clks_config()
164 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
166 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
167 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
172 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
173 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
174 if (priv->plat->clks_config) in stmmac_bus_clks_config()
175 priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
183 * stmmac_verify_args - verify the driver parameters.
205 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in __stmmac_disable_all_queues()
206 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in __stmmac_disable_all_queues()
208 u32 queue; in __stmmac_disable_all_queues() local
210 for (queue = 0; queue < maxq; queue++) { in __stmmac_disable_all_queues()
211 struct stmmac_channel *ch = &priv->channel[queue]; in __stmmac_disable_all_queues()
214 test_bit(queue, priv->af_xdp_zc_qps)) { in __stmmac_disable_all_queues()
215 napi_disable(&ch->rxtx_napi); in __stmmac_disable_all_queues()
219 if (queue < rx_queues_cnt) in __stmmac_disable_all_queues()
220 napi_disable(&ch->rx_napi); in __stmmac_disable_all_queues()
221 if (queue < tx_queues_cnt) in __stmmac_disable_all_queues()
222 napi_disable(&ch->tx_napi); in __stmmac_disable_all_queues()
227 * stmmac_disable_all_queues - Disable all queues
232 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
234 u32 queue; in stmmac_disable_all_queues() local
237 for (queue = 0; queue < rx_queues_cnt; queue++) { in stmmac_disable_all_queues()
238 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_disable_all_queues()
239 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
249 * stmmac_enable_all_queues - Enable all queues
254 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
255 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
257 u32 queue; in stmmac_enable_all_queues() local
259 for (queue = 0; queue < maxq; queue++) { in stmmac_enable_all_queues()
260 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
263 test_bit(queue, priv->af_xdp_zc_qps)) { in stmmac_enable_all_queues()
264 napi_enable(&ch->rxtx_napi); in stmmac_enable_all_queues()
268 if (queue < rx_queues_cnt) in stmmac_enable_all_queues()
269 napi_enable(&ch->rx_napi); in stmmac_enable_all_queues()
270 if (queue < tx_queues_cnt) in stmmac_enable_all_queues()
271 napi_enable(&ch->tx_napi); in stmmac_enable_all_queues()
277 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
278 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
279 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
284 netif_carrier_off(priv->dev); in stmmac_global_err()
285 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
290 * stmmac_clk_csr_set - dynamically set the MDC clock
297 * changed at run-time and it is fixed (as reported in the driver
305 clk_rate = clk_get_rate(priv->plat->stmmac_clk); in stmmac_clk_csr_set()
314 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { in stmmac_clk_csr_set()
316 priv->clk_csr = STMMAC_CSR_20_35M; in stmmac_clk_csr_set()
318 priv->clk_csr = STMMAC_CSR_35_60M; in stmmac_clk_csr_set()
320 priv->clk_csr = STMMAC_CSR_60_100M; in stmmac_clk_csr_set()
322 priv->clk_csr = STMMAC_CSR_100_150M; in stmmac_clk_csr_set()
324 priv->clk_csr = STMMAC_CSR_150_250M; in stmmac_clk_csr_set()
326 priv->clk_csr = STMMAC_CSR_250_300M; in stmmac_clk_csr_set()
329 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { in stmmac_clk_csr_set()
331 priv->clk_csr = 0x03; in stmmac_clk_csr_set()
333 priv->clk_csr = 0x02; in stmmac_clk_csr_set()
335 priv->clk_csr = 0x01; in stmmac_clk_csr_set()
337 priv->clk_csr = 0; in stmmac_clk_csr_set()
340 if (priv->plat->has_xgmac) { in stmmac_clk_csr_set()
342 priv->clk_csr = 0x5; in stmmac_clk_csr_set()
344 priv->clk_csr = 0x4; in stmmac_clk_csr_set()
346 priv->clk_csr = 0x3; in stmmac_clk_csr_set()
348 priv->clk_csr = 0x2; in stmmac_clk_csr_set()
350 priv->clk_csr = 0x1; in stmmac_clk_csr_set()
352 priv->clk_csr = 0x0; in stmmac_clk_csr_set()
362 static inline u32 stmmac_tx_avail(struct stmmac_priv *priv, u32 queue) in stmmac_tx_avail() argument
364 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_avail()
367 if (tx_q->dirty_tx > tx_q->cur_tx) in stmmac_tx_avail()
368 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; in stmmac_tx_avail()
370 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
376 * stmmac_rx_dirty - Get RX queue dirty
378 * @queue: RX queue index
380 static inline u32 stmmac_rx_dirty(struct stmmac_priv *priv, u32 queue) in stmmac_rx_dirty() argument
382 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_dirty()
385 if (rx_q->dirty_rx <= rx_q->cur_rx) in stmmac_rx_dirty()
386 dirty = rx_q->cur_rx - rx_q->dirty_rx; in stmmac_rx_dirty()
388 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
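stmmac_tx_avail() and stmmac_rx_dirty() above are plain circular-ring arithmetic on the cur/dirty indexes, with one slot kept unused on the TX side so that cur == dirty means "empty". A minimal self-contained sketch of the same computation (names are illustrative, not the driver's):

    /* Minimal sketch of the ring arithmetic in stmmac_tx_avail() and
     * stmmac_rx_dirty(): cur and dirty index a ring of 'size' descriptors. */
    #include <assert.h>

    static unsigned int tx_avail(unsigned int dirty, unsigned int cur, unsigned int size)
    {
            /* One slot stays unused so that cur == dirty means "empty". */
            if (dirty > cur)
                    return dirty - cur - 1;
            return size - cur + dirty - 1;
    }

    static unsigned int rx_dirty(unsigned int dirty, unsigned int cur, unsigned int size)
    {
            if (dirty <= cur)
                    return cur - dirty;
            return size - dirty + cur;
    }

    int main(void)
    {
            assert(tx_avail(0, 0, 512) == 511);   /* empty ring: all but one entry usable */
            assert(tx_avail(10, 500, 512) == 21); /* wrapped producer/consumer case */
            assert(rx_dirty(5, 9, 512) == 4);     /* 4 descriptors owed back to the HW */
            return 0;
    }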
398 priv->eee_sw_timer_en = en ? 0 : 1; in stmmac_lpi_entry_timer_config()
399 tx_lpi_timer = en ? priv->tx_lpi_timer : 0; in stmmac_lpi_entry_timer_config()
400 stmmac_set_eee_lpi_timer(priv, priv->hw, tx_lpi_timer); in stmmac_lpi_entry_timer_config()
404  * stmmac_enable_eee_mode - check and enter LPI mode
411 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_eee_mode()
412 u32 queue; in stmmac_enable_eee_mode() local
415 for (queue = 0; queue < tx_cnt; queue++) { in stmmac_enable_eee_mode()
416 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_eee_mode()
418 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_enable_eee_mode()
419 return -EBUSY; /* still unfinished work */ in stmmac_enable_eee_mode()
423 if (!priv->tx_path_in_lpi_mode) in stmmac_enable_eee_mode()
424 stmmac_set_eee_mode(priv, priv->hw, in stmmac_enable_eee_mode()
425 priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLOCKGATING); in stmmac_enable_eee_mode()
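stmmac_enable_eee_mode() only programs LPI entry once every TX queue is drained (dirty_tx == cur_tx on all of them) and returns -EBUSY otherwise. A hedged standalone sketch of that gate, using illustrative types rather than the driver's structures:

    /* Hedged sketch of the "all TX queues drained" check performed before
     * entering LPI. Types and names are illustrative. */
    #include <errno.h>
    #include <stdio.h>

    struct demo_txq {
            unsigned int cur_tx;    /* next descriptor software will fill */
            unsigned int dirty_tx;  /* next descriptor awaiting HW completion */
    };

    /* Return 0 when LPI entry may be programmed, -EBUSY if any queue is busy. */
    static int try_enter_lpi(const struct demo_txq *q, unsigned int nqueues)
    {
            unsigned int i;

            for (i = 0; i < nqueues; i++)
                    if (q[i].dirty_tx != q[i].cur_tx)
                            return -EBUSY;  /* still unfinished work */

            /* All TX queues drained: the driver would set the EEE/LPI mode here. */
            return 0;
    }

    int main(void)
    {
            struct demo_txq qs[2] = { { 8, 8 }, { 5, 3 } };

            printf("%d\n", try_enter_lpi(qs, 2));   /* -EBUSY: queue 1 not drained */
            qs[1].dirty_tx = 5;
            printf("%d\n", try_enter_lpi(qs, 2));   /* 0: LPI entry allowed */
            return 0;
    }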
430 * stmmac_disable_eee_mode - disable and exit from LPI mode
437 if (!priv->eee_sw_timer_en) { in stmmac_disable_eee_mode()
442 stmmac_reset_eee_mode(priv, priv->hw); in stmmac_disable_eee_mode()
443 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_disable_eee_mode()
444 priv->tx_path_in_lpi_mode = false; in stmmac_disable_eee_mode()
448 * stmmac_eee_ctrl_timer - EEE TX SW timer.
459 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_ctrl_timer()
463 * stmmac_eee_init - init EEE
472 int eee_tw_timer = priv->eee_tw_timer; in stmmac_eee_init()
477 if (priv->hw->pcs == STMMAC_PCS_TBI || in stmmac_eee_init()
478 priv->hw->pcs == STMMAC_PCS_RTBI) in stmmac_eee_init()
482 if (!priv->dma_cap.eee) in stmmac_eee_init()
485 mutex_lock(&priv->lock); in stmmac_eee_init()
488 if (!priv->eee_active) { in stmmac_eee_init()
489 if (priv->eee_enabled) { in stmmac_eee_init()
490 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_eee_init()
492 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
493 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); in stmmac_eee_init()
494 if (priv->hw->xpcs) in stmmac_eee_init()
495 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
496 priv->plat->mult_fact_100ns, in stmmac_eee_init()
499 mutex_unlock(&priv->lock); in stmmac_eee_init()
503 if (priv->eee_active && !priv->eee_enabled) { in stmmac_eee_init()
504 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_eee_init()
505 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_eee_init()
507 if (priv->hw->xpcs) in stmmac_eee_init()
508 xpcs_config_eee(priv->hw->xpcs, in stmmac_eee_init()
509 priv->plat->mult_fact_100ns, in stmmac_eee_init()
513 if (priv->plat->has_gmac4 && priv->tx_lpi_timer <= STMMAC_ET_MAX) { in stmmac_eee_init()
514 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
515 priv->tx_path_in_lpi_mode = false; in stmmac_eee_init()
519 mod_timer(&priv->eee_ctrl_timer, in stmmac_eee_init()
520 STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_init()
523 mutex_unlock(&priv->lock); in stmmac_eee_init()
524 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_eee_init()
528 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
543 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
547 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) in stmmac_get_tx_hwtstamp()
552 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
554 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
559 ns -= priv->plat->cdc_error_adj; in stmmac_get_tx_hwtstamp()
564 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
570 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
586 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
589 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
593 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
594 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
596 ns -= priv->plat->cdc_error_adj; in stmmac_get_rx_hwtstamp()
598 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
601 shhwtstamp->hwtstamp = ns_to_ktime(ns); in stmmac_get_rx_hwtstamp()
603 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
608 * stmmac_hwtstamp_set - control hardware timestamping.
614  * and incoming (RX) packet timestamping based on user input.
616  * 0 on success and a negative error code on failure.
631 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
632 netdev_alert(priv->dev, "No support for HW time stamping\n"); in stmmac_hwtstamp_set()
633 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
634 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
636 return -EOPNOTSUPP; in stmmac_hwtstamp_set()
639 if (copy_from_user(&config, ifr->ifr_data, in stmmac_hwtstamp_set()
641 return -EFAULT; in stmmac_hwtstamp_set()
643 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
648 return -ERANGE; in stmmac_hwtstamp_set()
650 if (priv->adv_ts) { in stmmac_hwtstamp_set()
731 if (priv->synopsys_id < DWMAC_CORE_4_10) in stmmac_hwtstamp_set()
771 return -ERANGE; in stmmac_hwtstamp_set()
784 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); in stmmac_hwtstamp_set()
785 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
787 priv->systime_flags = STMMAC_HWTS_ACTIVE; in stmmac_hwtstamp_set()
789 if (priv->hwts_tx_en || priv->hwts_rx_en) { in stmmac_hwtstamp_set()
790 priv->systime_flags |= tstamp_all | ptp_v2 | in stmmac_hwtstamp_set()
796 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); in stmmac_hwtstamp_set()
798 memcpy(&priv->tstamp_config, &config, sizeof(config)); in stmmac_hwtstamp_set()
800 return copy_to_user(ifr->ifr_data, &config, in stmmac_hwtstamp_set()
801 sizeof(config)) ? -EFAULT : 0; in stmmac_hwtstamp_set()
805 * stmmac_hwtstamp_get - read hardware timestamping.
816 struct hwtstamp_config *config = &priv->tstamp_config; in stmmac_hwtstamp_get()
818 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
819 return -EOPNOTSUPP; in stmmac_hwtstamp_get()
821 return copy_to_user(ifr->ifr_data, config, in stmmac_hwtstamp_get()
822 sizeof(*config)) ? -EFAULT : 0; in stmmac_hwtstamp_get()
826 * stmmac_init_tstamp_counter - init hardware timestamping counter
837 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_tstamp_counter()
842 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_init_tstamp_counter()
843 return -EOPNOTSUPP; in stmmac_init_tstamp_counter()
845 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); in stmmac_init_tstamp_counter()
846 priv->systime_flags = systime_flags; in stmmac_init_tstamp_counter()
849 stmmac_config_sub_second_increment(priv, priv->ptpaddr, in stmmac_init_tstamp_counter()
850 priv->plat->clk_ptp_rate, in stmmac_init_tstamp_counter()
855 priv->sub_second_inc = sec_inc; in stmmac_init_tstamp_counter()
863 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_init_tstamp_counter()
864 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_init_tstamp_counter()
870 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); in stmmac_init_tstamp_counter()
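The default_addend computed above (div_u64(temp, clk_ptp_rate)) is the usual 32-bit fixed-point frequency ratio for this kind of timestamping unit: the addend is accumulated every PTP clock cycle and the accumulator overflows 2^32 at the desired counter rate. Not all intermediate steps appear in these matches, so the sketch below is an assumption about the formula rather than a copy of the driver code:

    /* Hedged sketch of the frequency-addend computation implied by
     * default_addend = div_u64(temp, clk_ptp_rate):
     * addend = 2^32 * counter_hz / clk_ptp_rate. Assumed formula, not the
     * driver's exact sequence. */
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t ptp_addend(uint64_t counter_hz, uint64_t clk_ptp_rate_hz)
    {
            return (uint32_t)((counter_hz << 32) / clk_ptp_rate_hz);
    }

    int main(void)
    {
            /* Example: 50 MHz effective counter driven from a 62.5 MHz PTP clock. */
            printf("addend = 0x%08x\n", ptp_addend(50000000ULL, 62500000ULL));
            return 0;
    }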
877 * stmmac_init_ptp - init PTP
885 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_ptp()
888 if (priv->plat->ptp_clk_freq_config) in stmmac_init_ptp()
889 priv->plat->ptp_clk_freq_config(priv); in stmmac_init_ptp()
895 priv->adv_ts = 0; in stmmac_init_ptp()
897 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
898 priv->adv_ts = 1; in stmmac_init_ptp()
900 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
901 priv->adv_ts = 1; in stmmac_init_ptp()
903 if (priv->dma_cap.time_stamp) in stmmac_init_ptp()
904 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_ptp()
906 if (priv->adv_ts) in stmmac_init_ptp()
907 netdev_info(priv->dev, in stmmac_init_ptp()
908 "IEEE 1588-2008 Advanced Timestamp supported\n"); in stmmac_init_ptp()
910 priv->hwts_tx_en = 0; in stmmac_init_ptp()
911 priv->hwts_rx_en = 0; in stmmac_init_ptp()
913 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_init_ptp()
921 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
926 * stmmac_mac_flow_ctrl - Configure flow control in all queues
933 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
935 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, in stmmac_mac_flow_ctrl()
936 priv->pause, tx_cnt); in stmmac_mac_flow_ctrl()
942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_select_pcs()
944 if (priv->hw->xpcs) in stmmac_mac_select_pcs()
945 return &priv->hw->xpcs->pcs; in stmmac_mac_select_pcs()
947 if (priv->hw->lynx_pcs) in stmmac_mac_select_pcs()
948 return priv->hw->lynx_pcs; in stmmac_mac_select_pcs()
961 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; in stmmac_fpe_link_state_handle()
962 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; in stmmac_fpe_link_state_handle()
963 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; in stmmac_fpe_link_state_handle()
964 bool *hs_enable = &fpe_cfg->hs_enable; in stmmac_fpe_link_state_handle()
967 stmmac_fpe_send_mpacket(priv, priv->ioaddr, fpe_cfg, in stmmac_fpe_link_state_handle()
978 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down()
980 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
981 priv->eee_active = false; in stmmac_mac_link_down()
982 priv->tx_lpi_enabled = false; in stmmac_mac_link_down()
983 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_down()
984 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
986 if (priv->dma_cap.fpesel) in stmmac_mac_link_down()
996 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up()
999 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_mac_link_up()
1000 priv->plat->serdes_powerup) in stmmac_mac_link_up()
1001 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); in stmmac_mac_link_up()
1003 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1004 ctrl = old_ctrl & ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
1009 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1012 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
1015 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
1023 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
1026 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
1029 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
1032 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
1035 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1038 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1041 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1049 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1052 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1055 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
1058 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
1065 priv->speed = speed; in stmmac_mac_link_up()
1067 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
1068 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); in stmmac_mac_link_up()
1071 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
1073 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
1077 priv->flow_ctrl = FLOW_AUTO; in stmmac_mac_link_up()
1079 priv->flow_ctrl = FLOW_RX; in stmmac_mac_link_up()
1081 priv->flow_ctrl = FLOW_TX; in stmmac_mac_link_up()
1083 priv->flow_ctrl = FLOW_OFF; in stmmac_mac_link_up()
1088 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1090 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
1091 if (phy && priv->dma_cap.eee) { in stmmac_mac_link_up()
1092 priv->eee_active = in stmmac_mac_link_up()
1093 phy_init_eee(phy, !(priv->plat->flags & in stmmac_mac_link_up()
1095 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_up()
1096 priv->tx_lpi_enabled = priv->eee_enabled; in stmmac_mac_link_up()
1097 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
1100 if (priv->dma_cap.fpesel) in stmmac_mac_link_up()
1103 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_mac_link_up()
1115 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1123 int interface = priv->plat->mac_interface; in stmmac_check_pcs_mode()
1125 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1130 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1131 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1133 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1134 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1140 * stmmac_init_phy - PHY initialization
1154 if (!phylink_expects_phy(priv->phylink)) in stmmac_init_phy()
1157 fwnode = priv->plat->port_node; in stmmac_init_phy()
1159 fwnode = dev_fwnode(priv->device); in stmmac_init_phy()
1166  /* Some DT bindings do not set up the PHY handle. Let's try to in stmmac_init_phy()
1170 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1174 netdev_err(priv->dev, "no phy found\n"); in stmmac_init_phy()
1175 return -ENODEV; in stmmac_init_phy()
1178 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1180 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1181 return -ENODEV; in stmmac_init_phy()
1184 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1187 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); in stmmac_init_phy()
1190 if (!priv->plat->pmt) { in stmmac_init_phy()
1193 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1194 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1195 device_set_wakeup_enable(priv->device, !!wol.wolopts); in stmmac_init_phy()
1203  /* Half-Duplex can only work with a single TX queue */ in stmmac_set_half_duplex()
1204 if (priv->plat->tx_queues_to_use > 1) in stmmac_set_half_duplex()
1205 priv->phylink_config.mac_capabilities &= in stmmac_set_half_duplex()
1208 priv->phylink_config.mac_capabilities |= in stmmac_set_half_duplex()
1215 int mode = priv->plat->phy_interface; in stmmac_phy_setup()
1220 priv->phylink_config.dev = &priv->dev->dev; in stmmac_phy_setup()
1221 priv->phylink_config.type = PHYLINK_NETDEV; in stmmac_phy_setup()
1222 priv->phylink_config.mac_managed_pm = true; in stmmac_phy_setup()
1224 mdio_bus_data = priv->plat->mdio_bus_data; in stmmac_phy_setup()
1226 priv->phylink_config.ovr_an_inband = in stmmac_phy_setup()
1227 mdio_bus_data->xpcs_an_inband; in stmmac_phy_setup()
1232 __set_bit(mode, priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1235 if (priv->hw->xpcs) in stmmac_phy_setup()
1236 xpcs_get_interfaces(priv->hw->xpcs, in stmmac_phy_setup()
1237 priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1239 priv->phylink_config.mac_capabilities = MAC_ASYM_PAUSE | MAC_SYM_PAUSE | in stmmac_phy_setup()
1248 max_speed = priv->plat->max_speed; in stmmac_phy_setup()
1250 phylink_limit_mac_speed(&priv->phylink_config, max_speed); in stmmac_phy_setup()
1252 fwnode = priv->plat->port_node; in stmmac_phy_setup()
1254 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1256 phylink = phylink_create(&priv->phylink_config, fwnode, in stmmac_phy_setup()
1261 priv->phylink = phylink; in stmmac_phy_setup()
1268 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1271 u32 queue; in stmmac_display_rx_rings() local
1273 /* Display RX rings */ in stmmac_display_rx_rings()
1274 for (queue = 0; queue < rx_cnt; queue++) { in stmmac_display_rx_rings()
1275 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_display_rx_rings()
1277 pr_info("\tRX Queue %u rings\n", queue); in stmmac_display_rx_rings()
1279 if (priv->extend_desc) { in stmmac_display_rx_rings()
1280 head_rx = (void *)rx_q->dma_erx; in stmmac_display_rx_rings()
1283 head_rx = (void *)rx_q->dma_rx; in stmmac_display_rx_rings()
1287 /* Display RX ring */ in stmmac_display_rx_rings()
1288 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, in stmmac_display_rx_rings()
1289 rx_q->dma_rx_phy, desc_size); in stmmac_display_rx_rings()
1296 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1299 u32 queue; in stmmac_display_tx_rings() local
1302 for (queue = 0; queue < tx_cnt; queue++) { in stmmac_display_tx_rings()
1303 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_display_tx_rings()
1305 pr_info("\tTX Queue %d rings\n", queue); in stmmac_display_tx_rings()
1307 if (priv->extend_desc) { in stmmac_display_tx_rings()
1308 head_tx = (void *)tx_q->dma_etx; in stmmac_display_tx_rings()
1310 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in stmmac_display_tx_rings()
1311 head_tx = (void *)tx_q->dma_entx; in stmmac_display_tx_rings()
1314 head_tx = (void *)tx_q->dma_tx; in stmmac_display_tx_rings()
1318 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, in stmmac_display_tx_rings()
1319 tx_q->dma_tx_phy, desc_size); in stmmac_display_tx_rings()
1326 /* Display RX ring */ in stmmac_display_rings()
1352 * stmmac_clear_rx_descriptors - clear RX descriptors
1355 * @queue: RX queue index
1356 * Description: this function is called to clear the RX descriptors
1361 u32 queue) in stmmac_clear_rx_descriptors() argument
1363 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_clear_rx_descriptors()
1366 /* Clear the RX descriptors */ in stmmac_clear_rx_descriptors()
1367 for (i = 0; i < dma_conf->dma_rx_size; i++) in stmmac_clear_rx_descriptors()
1368 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1369 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1370 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1371 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1372 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1374 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1375 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1376 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1377 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1381 * stmmac_clear_tx_descriptors - clear tx descriptors
1384 * @queue: TX queue index.
1390 u32 queue) in stmmac_clear_tx_descriptors() argument
1392 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_clear_tx_descriptors()
1396 for (i = 0; i < dma_conf->dma_tx_size; i++) { in stmmac_clear_tx_descriptors()
1397 int last = (i == (dma_conf->dma_tx_size - 1)); in stmmac_clear_tx_descriptors()
1400 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1401 p = &tx_q->dma_etx[i].basic; in stmmac_clear_tx_descriptors()
1402 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_clear_tx_descriptors()
1403 p = &tx_q->dma_entx[i].basic; in stmmac_clear_tx_descriptors()
1405 p = &tx_q->dma_tx[i]; in stmmac_clear_tx_descriptors()
1407 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
1412 * stmmac_clear_descriptors - clear descriptors
1415 * Description: this function is called to clear the TX and RX descriptors
1421 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1422 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1423 u32 queue; in stmmac_clear_descriptors() local
1425 /* Clear the RX descriptors */ in stmmac_clear_descriptors()
1426 for (queue = 0; queue < rx_queue_cnt; queue++) in stmmac_clear_descriptors()
1427 stmmac_clear_rx_descriptors(priv, dma_conf, queue); in stmmac_clear_descriptors()
1430 for (queue = 0; queue < tx_queue_cnt; queue++) in stmmac_clear_descriptors()
1431 stmmac_clear_tx_descriptors(priv, dma_conf, queue); in stmmac_clear_descriptors()
1435 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1441 * @queue: RX queue index
1448 int i, gfp_t flags, u32 queue) in stmmac_init_rx_buffers() argument
1450 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_init_rx_buffers()
1451 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_init_rx_buffers()
1454 if (priv->dma_cap.host_dma_width <= 32) in stmmac_init_rx_buffers()
1457 if (!buf->page) { in stmmac_init_rx_buffers()
1458 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1459 if (!buf->page) in stmmac_init_rx_buffers()
1460 return -ENOMEM; in stmmac_init_rx_buffers()
1461 buf->page_offset = stmmac_rx_offset(priv); in stmmac_init_rx_buffers()
1464 if (priv->sph && !buf->sec_page) { in stmmac_init_rx_buffers()
1465 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1466 if (!buf->sec_page) in stmmac_init_rx_buffers()
1467 return -ENOMEM; in stmmac_init_rx_buffers()
1469 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_init_rx_buffers()
1470 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_init_rx_buffers()
1472 buf->sec_page = NULL; in stmmac_init_rx_buffers()
1473 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_init_rx_buffers()
1476 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_init_rx_buffers()
1478 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1479 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) in stmmac_init_rx_buffers()
1486 * stmmac_free_rx_buffer - free RX dma buffers
1488 * @rx_q: RX queue
1495 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_free_rx_buffer()
1497 if (buf->page) in stmmac_free_rx_buffer()
1498 page_pool_put_full_page(rx_q->page_pool, buf->page, false); in stmmac_free_rx_buffer()
1499 buf->page = NULL; in stmmac_free_rx_buffer()
1501 if (buf->sec_page) in stmmac_free_rx_buffer()
1502 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); in stmmac_free_rx_buffer()
1503 buf->sec_page = NULL; in stmmac_free_rx_buffer()
1507  * stmmac_free_tx_buffer - free TX dma buffers
1510  * @queue: TX queue index
1515 u32 queue, int i) in stmmac_free_tx_buffer() argument
1517 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_free_tx_buffer()
1519 if (tx_q->tx_skbuff_dma[i].buf && in stmmac_free_tx_buffer()
1520 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { in stmmac_free_tx_buffer()
1521 if (tx_q->tx_skbuff_dma[i].map_as_page) in stmmac_free_tx_buffer()
1522 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1523 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1524 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1527 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1528 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1529 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1533 if (tx_q->xdpf[i] && in stmmac_free_tx_buffer()
1534 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_free_tx_buffer()
1535 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { in stmmac_free_tx_buffer()
1536 xdp_return_frame(tx_q->xdpf[i]); in stmmac_free_tx_buffer()
1537 tx_q->xdpf[i] = NULL; in stmmac_free_tx_buffer()
1540 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_free_tx_buffer()
1541 tx_q->xsk_frames_done++; in stmmac_free_tx_buffer()
1543 if (tx_q->tx_skbuff[i] && in stmmac_free_tx_buffer()
1544 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_free_tx_buffer()
1545 dev_kfree_skb_any(tx_q->tx_skbuff[i]); in stmmac_free_tx_buffer()
1546 tx_q->tx_skbuff[i] = NULL; in stmmac_free_tx_buffer()
1549 tx_q->tx_skbuff_dma[i].buf = 0; in stmmac_free_tx_buffer()
1550 tx_q->tx_skbuff_dma[i].map_as_page = false; in stmmac_free_tx_buffer()
1554 * dma_free_rx_skbufs - free RX dma buffers
1557 * @queue: RX queue index
1561 u32 queue) in dma_free_rx_skbufs() argument
1563 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_skbufs()
1566 for (i = 0; i < dma_conf->dma_rx_size; i++) in dma_free_rx_skbufs()
1572 u32 queue, gfp_t flags) in stmmac_alloc_rx_buffers() argument
1574 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers()
1577 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers()
1581 if (priv->extend_desc) in stmmac_alloc_rx_buffers()
1582 p = &((rx_q->dma_erx + i)->basic); in stmmac_alloc_rx_buffers()
1584 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers()
1587 queue); in stmmac_alloc_rx_buffers()
1591 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers()
1598 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1601 * @queue: RX queue index
1605 u32 queue) in dma_free_rx_xskbufs() argument
1607 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_xskbufs()
1610 for (i = 0; i < dma_conf->dma_rx_size; i++) { in dma_free_rx_xskbufs()
1611 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in dma_free_rx_xskbufs()
1613 if (!buf->xdp) in dma_free_rx_xskbufs()
1616 xsk_buff_free(buf->xdp); in dma_free_rx_xskbufs()
1617 buf->xdp = NULL; in dma_free_rx_xskbufs()
1623 u32 queue) in stmmac_alloc_rx_buffers_zc() argument
1625 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers_zc()
1634 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers_zc()
1639 if (priv->extend_desc) in stmmac_alloc_rx_buffers_zc()
1640 p = (struct dma_desc *)(rx_q->dma_erx + i); in stmmac_alloc_rx_buffers_zc()
1642 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers_zc()
1644 buf = &rx_q->buf_pool[i]; in stmmac_alloc_rx_buffers_zc()
1646 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1647 if (!buf->xdp) in stmmac_alloc_rx_buffers_zc()
1648 return -ENOMEM; in stmmac_alloc_rx_buffers_zc()
1650 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_alloc_rx_buffers_zc()
1652 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers_zc()
1658 static struct xsk_buff_pool *stmmac_get_xsk_pool(struct stmmac_priv *priv, u32 queue) in stmmac_get_xsk_pool() argument
1660 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) in stmmac_get_xsk_pool()
1663 return xsk_get_pool_from_qid(priv->dev, queue); in stmmac_get_xsk_pool()
1667 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1670 * @queue: RX queue index
1672 * Description: this function initializes the DMA RX descriptors
1678 u32 queue, gfp_t flags) in __init_dma_rx_desc_rings() argument
1680 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __init_dma_rx_desc_rings()
1683 netif_dbg(priv, probe, priv->dev, in __init_dma_rx_desc_rings()
1685 (u32)rx_q->dma_rx_phy); in __init_dma_rx_desc_rings()
1687 stmmac_clear_rx_descriptors(priv, dma_conf, queue); in __init_dma_rx_desc_rings()
1689 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1691 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1693 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1694 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1697 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1698 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1699 rx_q->queue_index); in __init_dma_rx_desc_rings()
1700 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1702 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1704 rx_q->page_pool)); in __init_dma_rx_desc_rings()
1705 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1706 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1707 rx_q->queue_index); in __init_dma_rx_desc_rings()
1710 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1711 /* RX XDP ZC buffer pool may not be populated, e.g. in __init_dma_rx_desc_rings()
1712 * xdpsock TX-only. in __init_dma_rx_desc_rings()
1714 stmmac_alloc_rx_buffers_zc(priv, dma_conf, queue); in __init_dma_rx_desc_rings()
1716 ret = stmmac_alloc_rx_buffers(priv, dma_conf, queue, flags); in __init_dma_rx_desc_rings()
1718 return -ENOMEM; in __init_dma_rx_desc_rings()
1722 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_rx_desc_rings()
1723 if (priv->extend_desc) in __init_dma_rx_desc_rings()
1724 stmmac_mode_init(priv, rx_q->dma_erx, in __init_dma_rx_desc_rings()
1725 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1726 dma_conf->dma_rx_size, 1); in __init_dma_rx_desc_rings()
1728 stmmac_mode_init(priv, rx_q->dma_rx, in __init_dma_rx_desc_rings()
1729 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1730 dma_conf->dma_rx_size, 0); in __init_dma_rx_desc_rings()
1741 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1742 int queue; in init_dma_rx_desc_rings() local
1745 /* RX INITIALIZATION */ in init_dma_rx_desc_rings()
1746 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1749 for (queue = 0; queue < rx_count; queue++) { in init_dma_rx_desc_rings()
1750 ret = __init_dma_rx_desc_rings(priv, dma_conf, queue, flags); in init_dma_rx_desc_rings()
1758 while (queue >= 0) { in init_dma_rx_desc_rings()
1759 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in init_dma_rx_desc_rings()
1761 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1762 dma_free_rx_xskbufs(priv, dma_conf, queue); in init_dma_rx_desc_rings()
1764 dma_free_rx_skbufs(priv, dma_conf, queue); in init_dma_rx_desc_rings()
1766 rx_q->buf_alloc_num = 0; in init_dma_rx_desc_rings()
1767 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1769 queue--; in init_dma_rx_desc_rings()
1776 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1779 * @queue: TX queue index
1786 u32 queue) in __init_dma_tx_desc_rings() argument
1788 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __init_dma_tx_desc_rings()
1791 netif_dbg(priv, probe, priv->dev, in __init_dma_tx_desc_rings()
1793 (u32)tx_q->dma_tx_phy); in __init_dma_tx_desc_rings()
1796 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_tx_desc_rings()
1797 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1798 stmmac_mode_init(priv, tx_q->dma_etx, in __init_dma_tx_desc_rings()
1799 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1800 dma_conf->dma_tx_size, 1); in __init_dma_tx_desc_rings()
1801 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) in __init_dma_tx_desc_rings()
1802 stmmac_mode_init(priv, tx_q->dma_tx, in __init_dma_tx_desc_rings()
1803 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1804 dma_conf->dma_tx_size, 0); in __init_dma_tx_desc_rings()
1807 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1809 for (i = 0; i < dma_conf->dma_tx_size; i++) { in __init_dma_tx_desc_rings()
1812 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1813 p = &((tx_q->dma_etx + i)->basic); in __init_dma_tx_desc_rings()
1814 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __init_dma_tx_desc_rings()
1815 p = &((tx_q->dma_entx + i)->basic); in __init_dma_tx_desc_rings()
1817 p = tx_q->dma_tx + i; in __init_dma_tx_desc_rings()
1821 tx_q->tx_skbuff_dma[i].buf = 0; in __init_dma_tx_desc_rings()
1822 tx_q->tx_skbuff_dma[i].map_as_page = false; in __init_dma_tx_desc_rings()
1823 tx_q->tx_skbuff_dma[i].len = 0; in __init_dma_tx_desc_rings()
1824 tx_q->tx_skbuff_dma[i].last_segment = false; in __init_dma_tx_desc_rings()
1825 tx_q->tx_skbuff[i] = NULL; in __init_dma_tx_desc_rings()
1836 u32 queue; in init_dma_tx_desc_rings() local
1838 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1840 for (queue = 0; queue < tx_queue_cnt; queue++) in init_dma_tx_desc_rings()
1841 __init_dma_tx_desc_rings(priv, dma_conf, queue); in init_dma_tx_desc_rings()
1847 * init_dma_desc_rings - init the RX/TX descriptor rings
1851 * Description: this function initializes the DMA RX/TX descriptors
1877 * dma_free_tx_skbufs - free TX dma buffers
1880 * @queue: TX queue index
1884 u32 queue) in dma_free_tx_skbufs() argument
1886 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in dma_free_tx_skbufs()
1889 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1891 for (i = 0; i < dma_conf->dma_tx_size; i++) in dma_free_tx_skbufs()
1892 stmmac_free_tx_buffer(priv, dma_conf, queue, i); in dma_free_tx_skbufs()
1894 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
1895 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in dma_free_tx_skbufs()
1896 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1897 tx_q->xsk_pool = NULL; in dma_free_tx_skbufs()
1902 * stmmac_free_tx_skbufs - free TX skb buffers
1907 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1908 u32 queue; in stmmac_free_tx_skbufs() local
1910 for (queue = 0; queue < tx_queue_cnt; queue++) in stmmac_free_tx_skbufs()
1911 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); in stmmac_free_tx_skbufs()
1915 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1918 * @queue: RX queue index
1922 u32 queue) in __free_dma_rx_desc_resources() argument
1924 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __free_dma_rx_desc_resources()
1926 /* Release the DMA RX socket buffers */ in __free_dma_rx_desc_resources()
1927 if (rx_q->xsk_pool) in __free_dma_rx_desc_resources()
1928 dma_free_rx_xskbufs(priv, dma_conf, queue); in __free_dma_rx_desc_resources()
1930 dma_free_rx_skbufs(priv, dma_conf, queue); in __free_dma_rx_desc_resources()
1932 rx_q->buf_alloc_num = 0; in __free_dma_rx_desc_resources()
1933 rx_q->xsk_pool = NULL; in __free_dma_rx_desc_resources()
1936 if (!priv->extend_desc) in __free_dma_rx_desc_resources()
1937 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1939 rx_q->dma_rx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1941 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
1943 rx_q->dma_erx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
1945 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) in __free_dma_rx_desc_resources()
1946 xdp_rxq_info_unreg(&rx_q->xdp_rxq); in __free_dma_rx_desc_resources()
1948 kfree(rx_q->buf_pool); in __free_dma_rx_desc_resources()
1949 if (rx_q->page_pool) in __free_dma_rx_desc_resources()
1950 page_pool_destroy(rx_q->page_pool); in __free_dma_rx_desc_resources()
1956 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
1957 u32 queue; in free_dma_rx_desc_resources() local
1959 /* Free RX queue resources */ in free_dma_rx_desc_resources()
1960 for (queue = 0; queue < rx_count; queue++) in free_dma_rx_desc_resources()
1961 __free_dma_rx_desc_resources(priv, dma_conf, queue); in free_dma_rx_desc_resources()
1965 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
1968 * @queue: TX queue index
1972 u32 queue) in __free_dma_tx_desc_resources() argument
1974 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __free_dma_tx_desc_resources()
1979 dma_free_tx_skbufs(priv, dma_conf, queue); in __free_dma_tx_desc_resources()
1981 if (priv->extend_desc) { in __free_dma_tx_desc_resources()
1983 addr = tx_q->dma_etx; in __free_dma_tx_desc_resources()
1984 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in __free_dma_tx_desc_resources()
1986 addr = tx_q->dma_entx; in __free_dma_tx_desc_resources()
1989 addr = tx_q->dma_tx; in __free_dma_tx_desc_resources()
1992 size *= dma_conf->dma_tx_size; in __free_dma_tx_desc_resources()
1994 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in __free_dma_tx_desc_resources()
1996 kfree(tx_q->tx_skbuff_dma); in __free_dma_tx_desc_resources()
1997 kfree(tx_q->tx_skbuff); in __free_dma_tx_desc_resources()
2003 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
2004 u32 queue; in free_dma_tx_desc_resources() local
2006 /* Free TX queue resources */ in free_dma_tx_desc_resources()
2007 for (queue = 0; queue < tx_count; queue++) in free_dma_tx_desc_resources()
2008 __free_dma_tx_desc_resources(priv, dma_conf, queue); in free_dma_tx_desc_resources()
2012 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2015 * @queue: RX queue index
2017  * this function allocates the RX resources for this queue. For reception it
2018  * pre-allocates the RX buffers (page pool) in order to allow the
2019  * zero-copy mechanism.
2023 u32 queue) in __alloc_dma_rx_desc_resources() argument
2025 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __alloc_dma_rx_desc_resources()
2026 struct stmmac_channel *ch = &priv->channel[queue]; in __alloc_dma_rx_desc_resources()
2033 rx_q->queue_index = queue; in __alloc_dma_rx_desc_resources()
2034 rx_q->priv_data = priv; in __alloc_dma_rx_desc_resources()
2037 pp_params.pool_size = dma_conf->dma_rx_size; in __alloc_dma_rx_desc_resources()
2038 num_pages = DIV_ROUND_UP(dma_conf->dma_buf_sz, PAGE_SIZE); in __alloc_dma_rx_desc_resources()
2040 pp_params.nid = dev_to_node(priv->device); in __alloc_dma_rx_desc_resources()
2041 pp_params.dev = priv->device; in __alloc_dma_rx_desc_resources()
2046 rx_q->page_pool = page_pool_create(&pp_params); in __alloc_dma_rx_desc_resources()
2047 if (IS_ERR(rx_q->page_pool)) { in __alloc_dma_rx_desc_resources()
2048 ret = PTR_ERR(rx_q->page_pool); in __alloc_dma_rx_desc_resources()
2049 rx_q->page_pool = NULL; in __alloc_dma_rx_desc_resources()
2053 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, in __alloc_dma_rx_desc_resources()
2054 sizeof(*rx_q->buf_pool), in __alloc_dma_rx_desc_resources()
2056 if (!rx_q->buf_pool) in __alloc_dma_rx_desc_resources()
2057 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2059 if (priv->extend_desc) { in __alloc_dma_rx_desc_resources()
2060 rx_q->dma_erx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2061 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2063 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2065 if (!rx_q->dma_erx) in __alloc_dma_rx_desc_resources()
2066 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2069 rx_q->dma_rx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2070 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2072 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2074 if (!rx_q->dma_rx) in __alloc_dma_rx_desc_resources()
2075 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2079 test_bit(queue, priv->af_xdp_zc_qps)) in __alloc_dma_rx_desc_resources()
2080 napi_id = ch->rxtx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2082 napi_id = ch->rx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2084 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, in __alloc_dma_rx_desc_resources()
2085 rx_q->queue_index, in __alloc_dma_rx_desc_resources()
2088 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); in __alloc_dma_rx_desc_resources()
2089 return -EINVAL; in __alloc_dma_rx_desc_resources()
2098 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
2099 u32 queue; in alloc_dma_rx_desc_resources() local
2102 /* RX queues buffers and DMA */ in alloc_dma_rx_desc_resources()
2103 for (queue = 0; queue < rx_count; queue++) { in alloc_dma_rx_desc_resources()
2104 ret = __alloc_dma_rx_desc_resources(priv, dma_conf, queue); in alloc_dma_rx_desc_resources()
2118 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2121 * @queue: TX queue index
2123  * this function allocates the TX resources for this queue: the descriptor
2124  * ring and the per-entry tx_skbuff/tx_skbuff_dma bookkeeping arrays used by
2125  * the transmit path.
2129 u32 queue) in __alloc_dma_tx_desc_resources() argument
2131 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __alloc_dma_tx_desc_resources()
2135 tx_q->queue_index = queue; in __alloc_dma_tx_desc_resources()
2136 tx_q->priv_data = priv; in __alloc_dma_tx_desc_resources()
2138 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2139 sizeof(*tx_q->tx_skbuff_dma), in __alloc_dma_tx_desc_resources()
2141 if (!tx_q->tx_skbuff_dma) in __alloc_dma_tx_desc_resources()
2142 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2144 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2147 if (!tx_q->tx_skbuff) in __alloc_dma_tx_desc_resources()
2148 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2150 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2152 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2157 size *= dma_conf->dma_tx_size; in __alloc_dma_tx_desc_resources()
2159 addr = dma_alloc_coherent(priv->device, size, in __alloc_dma_tx_desc_resources()
2160 &tx_q->dma_tx_phy, GFP_KERNEL); in __alloc_dma_tx_desc_resources()
2162 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2164 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2165 tx_q->dma_etx = addr; in __alloc_dma_tx_desc_resources()
2166 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2167 tx_q->dma_entx = addr; in __alloc_dma_tx_desc_resources()
2169 tx_q->dma_tx = addr; in __alloc_dma_tx_desc_resources()
2177 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
2178 u32 queue; in alloc_dma_tx_desc_resources() local
2182 for (queue = 0; queue < tx_count; queue++) { in alloc_dma_tx_desc_resources()
2183 ret = __alloc_dma_tx_desc_resources(priv, dma_conf, queue); in alloc_dma_tx_desc_resources()
2196 * alloc_dma_desc_resources - alloc TX/RX resources.
2200  * this function allocates the resources for the TX and RX paths. In case of
2201  * reception, for example, it pre-allocates the RX socket buffers in order to
2202  * allow the zero-copy mechanism.
2207 /* RX Allocation */ in alloc_dma_desc_resources()
2219 * free_dma_desc_resources - free dma desc resources
2229 /* Release the DMA RX socket buffers later in free_dma_desc_resources()
2236 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2238 * Description: It is used for enabling the rx queues in the MAC
2242 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
2243 int queue; in stmmac_mac_enable_rx_queues() local
2246 for (queue = 0; queue < rx_queues_count; queue++) { in stmmac_mac_enable_rx_queues()
2247 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
2248 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
2253 * stmmac_start_rx_dma - start RX DMA channel
2255 * @chan: RX channel index
2257  * This starts an RX DMA channel
2261 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
2262 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
2266 * stmmac_start_tx_dma - start TX DMA channel
2274 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
2275 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
2279 * stmmac_stop_rx_dma - stop RX DMA channel
2281 * @chan: RX channel index
2283  * This stops an RX DMA channel
2287 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
2288 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
2292 * stmmac_stop_tx_dma - stop TX DMA channel
2300 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
2301 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
2306 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_enable_all_dma_irq()
2307 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_enable_all_dma_irq()
2312 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_enable_all_dma_irq()
2315 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2316 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_enable_all_dma_irq()
2317 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2322 * stmmac_start_all_dma - start all RX and TX DMA channels
2325 * This starts all the RX and TX DMA channels
2329 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
2330 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
2341 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2344 * This stops the RX and TX DMA channels
2348 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
2349 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
2360 * stmmac_dma_operation_mode - HW DMA operation mode
2363 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2367 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
2368 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
2369 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
2370 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
2377 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
2379 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
2381 /* Adjust for real per queue fifo size */ in stmmac_dma_operation_mode()
2385 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
2388 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
2398 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
2406 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_dma_operation_mode()
2409 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2411 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
2414 if (rx_q->xsk_pool) { in stmmac_dma_operation_mode()
2415 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_dma_operation_mode()
2416 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2420 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2421 priv->dma_conf.dma_buf_sz, in stmmac_dma_operation_mode()
2427 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2429 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
2438 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); in stmmac_xsk_request_timestamp()
2439 *meta_req->set_ic = true; in stmmac_xsk_request_timestamp()
2445 struct stmmac_priv *priv = tx_compl->priv; in stmmac_xsk_fill_timestamp()
2446 struct dma_desc *desc = tx_compl->desc; in stmmac_xsk_fill_timestamp()
2450 if (!priv->hwts_tx_en) in stmmac_xsk_fill_timestamp()
2455 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_xsk_fill_timestamp()
2457 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_xsk_fill_timestamp()
2462 ns -= priv->plat->cdc_error_adj; in stmmac_xsk_fill_timestamp()
2474 static bool stmmac_xdp_xmit_zc(struct stmmac_priv *priv, u32 queue, u32 budget) in stmmac_xdp_xmit_zc() argument
2476 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_zc()
2477 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_zc()
2478 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_zc()
2479 struct xsk_buff_pool *pool = tx_q->xsk_pool; in stmmac_xdp_xmit_zc()
2480 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2486 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_zc()
2489 budget = min(budget, stmmac_tx_avail(priv, queue)); in stmmac_xdp_xmit_zc()
2491 while (budget-- > 0) { in stmmac_xdp_xmit_zc()
2500 if (unlikely(stmmac_tx_avail(priv, queue) < STMMAC_TX_XSK_AVAIL) || in stmmac_xdp_xmit_zc()
2501 !netif_carrier_ok(priv->dev)) { in stmmac_xdp_xmit_zc()
2509 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_zc()
2510 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_zc()
2511 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_zc()
2512 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_zc()
2514 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_zc()
2520 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; in stmmac_xdp_xmit_zc()
2526 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_xdp_xmit_zc()
2527 tx_q->xdpf[entry] = NULL; in stmmac_xdp_xmit_zc()
2529 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_zc()
2530 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; in stmmac_xdp_xmit_zc()
2531 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_zc()
2532 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_zc()
2536 tx_q->tx_count_frames++; in stmmac_xdp_xmit_zc()
2538 if (!priv->tx_coal_frames[queue]) in stmmac_xdp_xmit_zc()
2540 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_zc()
2551 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_zc()
2557 true, priv->mode, true, true, in stmmac_xdp_xmit_zc()
2560 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xdp_xmit_zc()
2563 &tx_q->tx_skbuff_dma[entry].xsk_meta); in stmmac_xdp_xmit_zc()
2565 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_zc()
2566 entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2568 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2569 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); in stmmac_xdp_xmit_zc()
2570 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2573 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xdp_xmit_zc()
2587 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { in stmmac_bump_dma_threshold()
2590 if (priv->plat->force_thresh_dma_mode) in stmmac_bump_dma_threshold()
2596 priv->xstats.threshold = tc; in stmmac_bump_dma_threshold()
2601 * stmmac_tx_clean - to manage the transmission completion
2604 * @queue: TX queue index
2610 static int stmmac_tx_clean(struct stmmac_priv *priv, int budget, u32 queue, in stmmac_tx_clean() argument
2613 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_clean()
2614 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tx_clean()
2619 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2621 tx_q->xsk_frames_done = 0; in stmmac_tx_clean()
2623 entry = tx_q->dirty_tx; in stmmac_tx_clean()
2626 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { in stmmac_tx_clean()
2632 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_tx_clean()
2633 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2634 xdpf = tx_q->xdpf[entry]; in stmmac_tx_clean()
2636 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2638 skb = tx_q->tx_skbuff[entry]; in stmmac_tx_clean()
2644 if (priv->extend_desc) in stmmac_tx_clean()
2645 p = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_tx_clean()
2646 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tx_clean()
2647 p = &tx_q->dma_entx[entry].basic; in stmmac_tx_clean()
2649 p = tx_q->dma_tx + entry; in stmmac_tx_clean()
2651 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2669 stmmac_bump_dma_threshold(priv, queue); in stmmac_tx_clean()
2675 } else if (tx_q->xsk_pool && in stmmac_tx_clean()
2676 xp_tx_metadata_enabled(tx_q->xsk_pool)) { in stmmac_tx_clean()
2682 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta, in stmmac_tx_clean()
2688 if (likely(tx_q->tx_skbuff_dma[entry].buf && in stmmac_tx_clean()
2689 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { in stmmac_tx_clean()
2690 if (tx_q->tx_skbuff_dma[entry].map_as_page) in stmmac_tx_clean()
2691 dma_unmap_page(priv->device, in stmmac_tx_clean()
2692 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2693 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2696 dma_unmap_single(priv->device, in stmmac_tx_clean()
2697 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2698 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2700 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_tx_clean()
2701 tx_q->tx_skbuff_dma[entry].len = 0; in stmmac_tx_clean()
2702 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_tx_clean()
2707 tx_q->tx_skbuff_dma[entry].last_segment = false; in stmmac_tx_clean()
2708 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_tx_clean()
2711 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { in stmmac_tx_clean()
2713 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2717 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2719 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2722 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_tx_clean()
2723 tx_q->xsk_frames_done++; in stmmac_tx_clean()
2725 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2728 bytes_compl += skb->len; in stmmac_tx_clean()
2730 tx_q->tx_skbuff[entry] = NULL; in stmmac_tx_clean()
2734 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2736 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_tx_clean()
2738 tx_q->dirty_tx = entry; in stmmac_tx_clean()
2740 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2743 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2744 queue))) && in stmmac_tx_clean()
2745 stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) { in stmmac_tx_clean()
2747 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2749 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2752 if (tx_q->xsk_pool) { in stmmac_tx_clean()
2755 if (tx_q->xsk_frames_done) in stmmac_tx_clean()
2756 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in stmmac_tx_clean()
2758 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) in stmmac_tx_clean()
2759 xsk_set_tx_need_wakeup(tx_q->xsk_pool); in stmmac_tx_clean()
2763 * available), return "budget - 1" to reenable TX IRQ. in stmmac_tx_clean()
2766 work_done = stmmac_xdp_xmit_zc(priv, queue, in stmmac_tx_clean()
2769 xmits = budget - 1; in stmmac_tx_clean()
2774 if (priv->eee_enabled && !priv->tx_path_in_lpi_mode && in stmmac_tx_clean()
2775 priv->eee_sw_timer_en) { in stmmac_tx_clean()
2777 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_tx_clean()
2781 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_tx_clean()
2784 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_tx_clean()
2785 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); in stmmac_tx_clean()
2786 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); in stmmac_tx_clean()
2787 u64_stats_inc(&txq_stats->napi.tx_clean); in stmmac_tx_clean()
2788 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_tx_clean()
2790 priv->xstats.tx_errors += tx_errors; in stmmac_tx_clean()
2792 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
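stmmac_tx_clean() above walks the ring from dirty_tx to cur_tx, releasing one descriptor per step and wrapping with STMMAC_GET_ENTRY, assumed here to be the usual power-of-two "(x + 1) & (size - 1)" increment. A compact stand-alone model of that walk (not driver code):

	#include <stdio.h>

	#define RING_NEXT(x, size)	(((x) + 1) & ((size) - 1))

	int main(void)
	{
		unsigned int size = 8;			/* ring length, power of two */
		unsigned int dirty = 5, cur = 2;	/* producer already wrapped */
		unsigned int cleaned = 0;

		while (dirty != cur && cleaned < size) {
			/* the real cleaner unmaps the buffer and frees the skb here */
			dirty = RING_NEXT(dirty, size);
			cleaned++;
		}
		printf("released %u descriptors, dirty_tx now %u\n", cleaned, dirty);
		return 0;
	}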
2799 * stmmac_tx_err - to manage the tx error
2807 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_tx_err()
2809 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2812 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2813 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2815 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2816 tx_q->dma_tx_phy, chan); in stmmac_tx_err()
2819 priv->xstats.tx_errors++; in stmmac_tx_err()
2820 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2824 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2827 * @rxmode: RX operating mode
2830 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2836 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2837 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2838 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2839 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2840 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2841 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2844 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2846 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2848 /* Adjust for real per queue fifo size */ in stmmac_set_dma_operation_mode()
2852 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2853 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
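The "Adjust for real per queue fifo size" step above is assumed to split the total RX/TX FIFO evenly across the active DMA channels before programming the threshold or Store-And-Forward mode; the division itself is elided from this listing. A one-line model with illustrative sizes:

	#include <stdio.h>

	static int per_queue_fifo(int total_fifo, int nr_channels)
	{
		return nr_channels ? total_fifo / nr_channels : total_fifo;
	}

	int main(void)
	{
		/* e.g. a 16 KiB RX FIFO shared by four RX channels -> 4 KiB each */
		printf("%d bytes of FIFO per queue\n", per_queue_fifo(16384, 4));
		return 0;
	}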
2860 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2861 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2862 if (ret && (ret != -EINVAL)) { in stmmac_safety_feat_interrupt()
2872 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2873 &priv->xstats, chan, dir); in stmmac_napi_check()
2874 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_napi_check()
2875 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_napi_check()
2876 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2881 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; in stmmac_napi_check()
2882 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_napi_check()
2884 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2886 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2887 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2888 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2893 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2895 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2896 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2897 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2906 * stmmac_dma_interrupt - DMA ISR
2914 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
2915 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
2949 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
2951 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
2952 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
2953 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
2955 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
2959 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2969 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
2973 * stmmac_check_ether_addr - check if the MAC addr is valid
2983 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
2984 stmmac_get_umac_addr(priv, priv->hw, addr, 0); in stmmac_check_ether_addr()
2986 eth_hw_addr_set(priv->dev, addr); in stmmac_check_ether_addr()
2988 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
2989 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
2990 priv->dev->dev_addr); in stmmac_check_ether_addr()
2995 * stmmac_init_dma_engine - DMA init.
3004 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
3005 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
3013 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
3014 dev_err(priv->device, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
3015 return -EINVAL; in stmmac_init_dma_engine()
3018 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
3021 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
3023 dev_err(priv->device, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
3028 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); in stmmac_init_dma_engine()
3030 if (priv->plat->axi) in stmmac_init_dma_engine()
3031 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
3035 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
3036 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_init_dma_engine()
3039 /* DMA RX Channel Configuration */ in stmmac_init_dma_engine()
3041 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_init_dma_engine()
3043 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3044 rx_q->dma_rx_phy, chan); in stmmac_init_dma_engine()
3046 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_init_dma_engine()
3047 (rx_q->buf_alloc_num * in stmmac_init_dma_engine()
3049 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3050 rx_q->rx_tail_addr, chan); in stmmac_init_dma_engine()
3055 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_dma_engine()
3057 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3058 tx_q->dma_tx_phy, chan); in stmmac_init_dma_engine()
3060 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_init_dma_engine()
3061 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3062 tx_q->tx_tail_addr, chan); in stmmac_init_dma_engine()
3068 static void stmmac_tx_timer_arm(struct stmmac_priv *priv, u32 queue) in stmmac_tx_timer_arm() argument
3070 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_timer_arm()
3071 u32 tx_coal_timer = priv->tx_coal_timer[queue]; in stmmac_tx_timer_arm()
3078 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer_arm()
3079 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer_arm()
3086 hrtimer_start(&tx_q->txtimer, in stmmac_tx_timer_arm()
3090 hrtimer_try_to_cancel(&tx_q->txtimer); in stmmac_tx_timer_arm()
3094 * stmmac_tx_timer - mitigation sw timer for tx.
3102 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer()
3106 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
3107 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer()
3112 spin_lock_irqsave(&ch->lock, flags); in stmmac_tx_timer()
3113 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
3114 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_tx_timer()
3122 * stmmac_init_coalesce - init mitigation options.
3131 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
3132 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_init_coalesce()
3136 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_coalesce()
3138 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
3139 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
3141 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_init_coalesce()
3142 tx_q->txtimer.function = stmmac_tx_timer; in stmmac_init_coalesce()
3146 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
3151 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
3152 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
3157 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3158 (priv->dma_conf.dma_tx_size - 1), chan); in stmmac_set_rings_length()
3160 /* set RX ring length */ in stmmac_set_rings_length()
3162 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3163 (priv->dma_conf.dma_rx_size - 1), chan); in stmmac_set_rings_length()
3167 * stmmac_set_tx_queue_weight - Set TX queue weight
3173 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
3175 u32 queue; in stmmac_set_tx_queue_weight() local
3177 for (queue = 0; queue < tx_queues_count; queue++) { in stmmac_set_tx_queue_weight()
3178 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
3179 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
3184 * stmmac_configure_cbs - Configure CBS in TX queue
3190 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
3192 u32 queue; in stmmac_configure_cbs() local
3194 /* queue 0 is reserved for legacy traffic */ in stmmac_configure_cbs()
3195 for (queue = 1; queue < tx_queues_count; queue++) { in stmmac_configure_cbs()
3196 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
3200 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
3201 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
3202 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
3203 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
3204 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
3205 queue); in stmmac_configure_cbs()
3210 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3212 * Description: It is used for mapping RX queues to RX dma channels
3216 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
3217 u32 queue; in stmmac_rx_queue_dma_chan_map() local
3220 for (queue = 0; queue < rx_queues_count; queue++) { in stmmac_rx_queue_dma_chan_map()
3221 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
3222 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
3227 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3229 * Description: It is used for configuring the RX Queue Priority
3233 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
3234 u32 queue; in stmmac_mac_config_rx_queues_prio() local
3237 for (queue = 0; queue < rx_queues_count; queue++) { in stmmac_mac_config_rx_queues_prio()
3238 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
3241 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
3242 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
3247 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3249 * Description: It is used for configuring the TX Queue Priority
3253 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
3254 u32 queue; in stmmac_mac_config_tx_queues_prio() local
3257 for (queue = 0; queue < tx_queues_count; queue++) { in stmmac_mac_config_tx_queues_prio()
3258 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
3261 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
3262 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
3267 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3269 * Description: It is used for configuring the RX queue routing
3273 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
3274 u32 queue; in stmmac_mac_config_rx_queues_routing() local
3277 for (queue = 0; queue < rx_queues_count; queue++) { in stmmac_mac_config_rx_queues_routing()
3278 /* no specific packet type routing specified for the queue */ in stmmac_mac_config_rx_queues_routing()
3279 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
3282 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
3283 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
3289 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
3290 priv->rss.enable = false; in stmmac_mac_config_rss()
3294 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
3295 priv->rss.enable = true; in stmmac_mac_config_rss()
3297 priv->rss.enable = false; in stmmac_mac_config_rss()
3299 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
3300 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
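The decision above only enables RSS when the hardware advertises it, the platform allows it, and the netdev has the RXHASH feature set; everything else leaves it off. A minimal stand-alone model of that predicate (not driver code):

	#include <stdbool.h>
	#include <stdio.h>

	static bool rss_enabled(bool hw_rssen, bool plat_rss_en, bool rxhash_feature)
	{
		return hw_rssen && plat_rss_en && rxhash_feature;
	}

	int main(void)
	{
		/* hardware and platform support RSS, but RXHASH is off -> disabled */
		printf("RSS on: %d\n", rss_enabled(true, true, false));
		return 0;
	}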
3304 * stmmac_mtl_configuration - Configure MTL
3310 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
3311 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
3316 /* Configure MTL RX algorithms */ in stmmac_mtl_configuration()
3318 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3319 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
3323 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3324 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
3330 /* Map RX MTL to DMA channels */ in stmmac_mtl_configuration()
3333 /* Enable MAC RX Queues */ in stmmac_mtl_configuration()
3336 /* Set RX priorities */ in stmmac_mtl_configuration()
3344 /* Set RX routing */ in stmmac_mtl_configuration()
3355 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
3356 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
3357 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, in stmmac_safety_feat_configuration()
3358 priv->plat->safety_feat_cfg); in stmmac_safety_feat_configuration()
3360 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
3368 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); in stmmac_fpe_start_wq()
3369 clear_bit(__FPE_REMOVING, &priv->fpe_task_state); in stmmac_fpe_start_wq()
3371 name = priv->wq_name; in stmmac_fpe_start_wq()
3372 sprintf(name, "%s-fpe", priv->dev->name); in stmmac_fpe_start_wq()
3374 priv->fpe_wq = create_singlethread_workqueue(name); in stmmac_fpe_start_wq()
3375 if (!priv->fpe_wq) { in stmmac_fpe_start_wq()
3376 netdev_err(priv->dev, "%s: Failed to create workqueue\n", name); in stmmac_fpe_start_wq()
3378 return -ENOMEM; in stmmac_fpe_start_wq()
3380 netdev_info(priv->dev, "FPE workqueue start\n"); in stmmac_fpe_start_wq()

3386 * stmmac_hw_setup - setup mac in a usable state.
3395 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3401 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
3402 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
3410 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
3416 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
3419 if (priv->hw->pcs) { in stmmac_hw_setup()
3420 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
3424 priv->hw->ps = speed; in stmmac_hw_setup()
3426 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
3427 priv->hw->ps = 0; in stmmac_hw_setup()
3432 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
3440 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
3442 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
3443 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
3444 priv->hw->rx_csum = 0; in stmmac_hw_setup()
3447 /* Enable the MAC Rx/Tx */ in stmmac_hw_setup()
3448 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
3456 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_hw_setup()
3458 netdev_warn(priv->dev, in stmmac_hw_setup()
3464 if (ret == -EOPNOTSUPP) in stmmac_hw_setup()
3465 netdev_info(priv->dev, "PTP not supported by HW\n"); in stmmac_hw_setup()
3467 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_hw_setup()
3471 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; in stmmac_hw_setup()
3474 if (!priv->tx_lpi_timer) in stmmac_hw_setup()
3475 priv->tx_lpi_timer = eee_timer * 1000; in stmmac_hw_setup()
3477 if (priv->use_riwt) { in stmmac_hw_setup()
3478 u32 queue; in stmmac_hw_setup() local
3480 for (queue = 0; queue < rx_cnt; queue++) { in stmmac_hw_setup()
3481 if (!priv->rx_riwt[queue]) in stmmac_hw_setup()
3482 priv->rx_riwt[queue] = DEF_DMA_RIWT; in stmmac_hw_setup()
3484 stmmac_rx_watchdog(priv, priv->ioaddr, in stmmac_hw_setup()
3485 priv->rx_riwt[queue], queue); in stmmac_hw_setup()
3489 if (priv->hw->pcs) in stmmac_hw_setup()
3490 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); in stmmac_hw_setup()
3492 /* set TX and RX rings length */ in stmmac_hw_setup()
3496 if (priv->tso) { in stmmac_hw_setup()
3498 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3500 /* TSO and TBS cannot co-exist */ in stmmac_hw_setup()
3501 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_hw_setup()
3504 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
3509 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_hw_setup()
3511 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_hw_setup()
3515 if (priv->dma_cap.vlins) in stmmac_hw_setup()
3516 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
3520 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3521 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; in stmmac_hw_setup()
3523 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
3526 /* Configure real RX and TX queues */ in stmmac_hw_setup()
3527 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
3528 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
3533 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_hw_setup()
3535 if (priv->dma_cap.fpesel) { in stmmac_hw_setup()
3538 if (priv->plat->fpe_cfg->enable) in stmmac_hw_setup()
3549 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_hw_teardown()
3560 irq_idx = priv->plat->tx_queues_to_use; in stmmac_free_irq()
3563 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3564 if (priv->tx_irq[j] > 0) { in stmmac_free_irq()
3565 irq_set_affinity_hint(priv->tx_irq[j], NULL); in stmmac_free_irq()
3566 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); in stmmac_free_irq()
3569 irq_idx = priv->plat->rx_queues_to_use; in stmmac_free_irq()
3572 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3573 if (priv->rx_irq[j] > 0) { in stmmac_free_irq()
3574 irq_set_affinity_hint(priv->rx_irq[j], NULL); in stmmac_free_irq()
3575 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); in stmmac_free_irq()
3579 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) in stmmac_free_irq()
3580 free_irq(priv->sfty_ue_irq, dev); in stmmac_free_irq()
3583 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) in stmmac_free_irq()
3584 free_irq(priv->sfty_ce_irq, dev); in stmmac_free_irq()
3587 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) in stmmac_free_irq()
3588 free_irq(priv->lpi_irq, dev); in stmmac_free_irq()
3591 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) in stmmac_free_irq()
3592 free_irq(priv->wol_irq, dev); in stmmac_free_irq()
3595 free_irq(dev->irq, dev); in stmmac_free_irq()
3615 int_name = priv->int_name_mac; in stmmac_request_irq_multi_msi()
3616 sprintf(int_name, "%s:%s", dev->name, "mac"); in stmmac_request_irq_multi_msi()
3617 ret = request_irq(dev->irq, stmmac_mac_interrupt, in stmmac_request_irq_multi_msi()
3620 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3622 __func__, dev->irq, ret); in stmmac_request_irq_multi_msi()
3630 priv->wol_irq_disabled = true; in stmmac_request_irq_multi_msi()
3631 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3632 int_name = priv->int_name_wol; in stmmac_request_irq_multi_msi()
3633 sprintf(int_name, "%s:%s", dev->name, "wol"); in stmmac_request_irq_multi_msi()
3634 ret = request_irq(priv->wol_irq, in stmmac_request_irq_multi_msi()
3638 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3640 __func__, priv->wol_irq, ret); in stmmac_request_irq_multi_msi()
3649 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3650 int_name = priv->int_name_lpi; in stmmac_request_irq_multi_msi()
3651 sprintf(int_name, "%s:%s", dev->name, "lpi"); in stmmac_request_irq_multi_msi()
3652 ret = request_irq(priv->lpi_irq, in stmmac_request_irq_multi_msi()
3656 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3658 __func__, priv->lpi_irq, ret); in stmmac_request_irq_multi_msi()
3667 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3668 int_name = priv->int_name_sfty_ce; in stmmac_request_irq_multi_msi()
3669 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); in stmmac_request_irq_multi_msi()
3670 ret = request_irq(priv->sfty_ce_irq, in stmmac_request_irq_multi_msi()
3674 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3676 __func__, priv->sfty_ce_irq, ret); in stmmac_request_irq_multi_msi()
3685 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3686 int_name = priv->int_name_sfty_ue; in stmmac_request_irq_multi_msi()
3687 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); in stmmac_request_irq_multi_msi()
3688 ret = request_irq(priv->sfty_ue_irq, in stmmac_request_irq_multi_msi()
3692 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3694 __func__, priv->sfty_ue_irq, ret); in stmmac_request_irq_multi_msi()
3700 /* Request Rx MSI irq */ in stmmac_request_irq_multi_msi()
3701 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3704 if (priv->rx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3707 int_name = priv->int_name_rx_irq[i]; in stmmac_request_irq_multi_msi()
3708 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); in stmmac_request_irq_multi_msi()
3709 ret = request_irq(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3711 0, int_name, &priv->dma_conf.rx_queue[i]); in stmmac_request_irq_multi_msi()
3713 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3714 "%s: alloc rx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3715 __func__, i, priv->rx_irq[i], ret); in stmmac_request_irq_multi_msi()
3722 irq_set_affinity_hint(priv->rx_irq[i], &cpu_mask); in stmmac_request_irq_multi_msi()
3726 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3729 if (priv->tx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3732 int_name = priv->int_name_tx_irq[i]; in stmmac_request_irq_multi_msi()
3733 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); in stmmac_request_irq_multi_msi()
3734 ret = request_irq(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3736 0, int_name, &priv->dma_conf.tx_queue[i]); in stmmac_request_irq_multi_msi()
3738 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3739 "%s: alloc tx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3740 __func__, i, priv->tx_irq[i], ret); in stmmac_request_irq_multi_msi()
3747 irq_set_affinity_hint(priv->tx_irq[i], &cpu_mask); in stmmac_request_irq_multi_msi()
3763 ret = request_irq(dev->irq, stmmac_interrupt, in stmmac_request_irq_single()
3764 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3766 netdev_err(priv->dev, in stmmac_request_irq_single()
3768 __func__, dev->irq, ret); in stmmac_request_irq_single()
3776 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_single()
3777 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_request_irq_single()
3778 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3780 netdev_err(priv->dev, in stmmac_request_irq_single()
3782 __func__, priv->wol_irq, ret); in stmmac_request_irq_single()
3789 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_single()
3790 ret = request_irq(priv->lpi_irq, stmmac_interrupt, in stmmac_request_irq_single()
3791 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3793 netdev_err(priv->dev, in stmmac_request_irq_single()
3795 __func__, priv->lpi_irq, ret); in stmmac_request_irq_single()
3814 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) in stmmac_request_irq()
3823 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3825 * @mtu: MTU to setup the dma queue and buf with
3827 * Allocate the Tx/Rx DMA queues and initialize them. in stmmac_setup_dma_desc()
3839 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", in stmmac_setup_dma_desc()
3841 return ERR_PTR(-ENOMEM); in stmmac_setup_dma_desc()
3851 dma_conf->dma_buf_sz = bfsize; in stmmac_setup_dma_desc()
3852 /* Choose the tx/rx size from the already defined one in the in stmmac_setup_dma_desc()
3855 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; in stmmac_setup_dma_desc()
3856 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; in stmmac_setup_dma_desc()
3858 if (!dma_conf->dma_tx_size) in stmmac_setup_dma_desc()
3859 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; in stmmac_setup_dma_desc()
3860 if (!dma_conf->dma_rx_size) in stmmac_setup_dma_desc()
3861 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; in stmmac_setup_dma_desc()
3864 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_setup_dma_desc()
3865 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; in stmmac_setup_dma_desc()
3866 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_setup_dma_desc()
3868 /* Setup per-TXQ tbs flag before TX descriptor alloc */ in stmmac_setup_dma_desc()
3869 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; in stmmac_setup_dma_desc()
3874 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_setup_dma_desc()
3879 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); in stmmac_setup_dma_desc()
3881 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_setup_dma_desc()
3896 * __stmmac_open - open entry point of the driver
3902 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3909 int mode = priv->plat->phy_interface; in __stmmac_open()
3913 ret = pm_runtime_resume_and_get(priv->device); in __stmmac_open()
3917 if (priv->hw->pcs != STMMAC_PCS_TBI && in __stmmac_open()
3918 priv->hw->pcs != STMMAC_PCS_RTBI && in __stmmac_open()
3919 (!priv->hw->xpcs || in __stmmac_open()
3920 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73) && in __stmmac_open()
3921 !priv->hw->lynx_pcs) { in __stmmac_open()
3924 netdev_err(priv->dev, in __stmmac_open()
3931 priv->rx_copybreak = STMMAC_RX_COPYBREAK; in __stmmac_open()
3933 buf_sz = dma_conf->dma_buf_sz; in __stmmac_open()
3935 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) in __stmmac_open()
3936 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; in __stmmac_open()
3937 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); in __stmmac_open()
3941 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in __stmmac_open()
3942 priv->plat->serdes_powerup) { in __stmmac_open()
3943 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); in __stmmac_open()
3945 netdev_err(priv->dev, "%s: Serdes powerup failed\n", in __stmmac_open()
3953 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in __stmmac_open()
3959 phylink_start(priv->phylink); in __stmmac_open()
3961 phylink_speed_up(priv->phylink); in __stmmac_open()
3968 netif_tx_start_all_queues(priv->dev); in __stmmac_open()
3974 phylink_stop(priv->phylink); in __stmmac_open()
3976 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_open()
3977 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_open()
3981 phylink_disconnect_phy(priv->phylink); in __stmmac_open()
3983 pm_runtime_put(priv->device); in __stmmac_open()
3993 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); in stmmac_open()
4007 set_bit(__FPE_REMOVING, &priv->fpe_task_state); in stmmac_fpe_stop_wq()
4009 if (priv->fpe_wq) { in stmmac_fpe_stop_wq()
4010 destroy_workqueue(priv->fpe_wq); in stmmac_fpe_stop_wq()
4011 priv->fpe_wq = NULL; in stmmac_fpe_stop_wq()
4014 netdev_info(priv->dev, "FPE workqueue stop\n"); in stmmac_fpe_stop_wq()
4018 * stmmac_release - close entry point of the driver
4028 if (device_may_wakeup(priv->device)) in stmmac_release()
4029 phylink_speed_down(priv->phylink, false); in stmmac_release()
4031 phylink_stop(priv->phylink); in stmmac_release()
4032 phylink_disconnect_phy(priv->phylink); in stmmac_release()
4036 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_release()
4037 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_release()
4044 if (priv->eee_enabled) { in stmmac_release()
4045 priv->tx_path_in_lpi_mode = false; in stmmac_release()
4046 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_release()
4049 /* Stop TX/RX DMA and clear the descriptors */ in stmmac_release()
4052 /* Release and free the Rx/Tx resources */ in stmmac_release()
4053 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_release()
4055 /* Disable the MAC Rx/Tx */ in stmmac_release()
4056 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_release()
4059 if (priv->plat->serdes_powerdown) in stmmac_release()
4060 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); in stmmac_release()
4066 pm_runtime_put(priv->device); in stmmac_release()
4068 if (priv->dma_cap.fpesel) in stmmac_release()
4081 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
4085 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in stmmac_vlan_insert()
4092 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_vlan_insert()
4093 p = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_vlan_insert()
4095 p = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_vlan_insert()
4101 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_vlan_insert()
4106 * stmmac_tso_allocator - fill TSO descriptors for a mapped buffer
4111 * @queue: TX queue index
4117 int total_len, bool last_segment, u32 queue) in stmmac_tso_allocator() argument
4119 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_allocator()
4129 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_allocator()
4130 priv->dma_conf.dma_tx_size); in stmmac_tso_allocator()
4131 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_allocator()
4133 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_allocator()
4134 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_allocator()
4136 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_allocator()
4138 curr_addr = des + (total_len - tmp_len); in stmmac_tso_allocator()
4139 if (priv->dma_cap.addr64 <= 32) in stmmac_tso_allocator()
4140 desc->des0 = cpu_to_le32(curr_addr); in stmmac_tso_allocator()
4152 tmp_len -= TSO_MAX_BUFF_SIZE; in stmmac_tso_allocator()
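stmmac_tso_allocator() above carves the remaining payload into TSO_MAX_BUFF_SIZE pieces (16 KiB - 1 in this driver), consuming one descriptor per piece. A stand-alone model of the descriptor count that loop produces, with an example payload length:

	#include <stdio.h>

	#define TSO_MAX_BUFF_SIZE	(16384 - 1)

	int main(void)
	{
		int total_len = 60000;			/* example TCP payload length */
		int tmp_len = total_len, descs = 0;

		while (tmp_len > 0) {
			int buf_len = tmp_len < TSO_MAX_BUFF_SIZE ?
				      tmp_len : TSO_MAX_BUFF_SIZE;

			descs++;			/* one descriptor per chunk */
			tmp_len -= buf_len;
		}
		printf("%d payload bytes -> %d descriptors\n", total_len, descs);
		return 0;
	}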
4156 static void stmmac_flush_tx_descriptors(struct stmmac_priv *priv, int queue) in stmmac_flush_tx_descriptors() argument
4158 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_flush_tx_descriptors()
4161 if (likely(priv->extend_desc)) in stmmac_flush_tx_descriptors()
4163 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_flush_tx_descriptors()
4174 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_flush_tx_descriptors()
4175 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_flush_tx_descriptors()
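stmmac_flush_tx_descriptors() above, like the ring setup in stmmac_init_dma_engine(), derives the hardware tail pointer from the ring's DMA base plus the byte offset of the next free descriptor. A small illustration with made-up addresses and a 16-byte basic descriptor (the size is an assumption for the non-TBS, non-extended case):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t dma_tx_phy = 0x80000000ULL;	/* ring base (example only) */
		unsigned int cur_tx = 17;		/* next free entry */
		size_t desc_size = 16;			/* plain descriptor */
		uint64_t tail = dma_tx_phy + (uint64_t)cur_tx * desc_size;

		printf("tail pointer = 0x%llx\n", (unsigned long long)tail);
		return 0;
	}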
4179 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4187 * --------
4188 * | DES0 |---> buffer1 = L2/L3/L4 header
4189 * | DES1 |---> TCP Payload (can continue on next descr...)
4190 * | DES2 |---> buffer 1 and 2 len
4191 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4192 * --------
4196 * --------
4197 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
4198 * | DES1 | --|
4199 * | DES2 | --> buffer 1 and 2 len
4201 * --------
4209 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_tso_xmit()
4210 u32 queue = skb_get_queue_mapping(skb); in stmmac_tso_xmit() local
4221 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_xmit()
4222 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tso_xmit()
4223 first_tx = tx_q->cur_tx; in stmmac_tso_xmit()
4226 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in stmmac_tso_xmit()
4235 if (unlikely(stmmac_tx_avail(priv, queue) < in stmmac_tso_xmit()
4236 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { in stmmac_tso_xmit()
4237 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { in stmmac_tso_xmit()
4238 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
4239 queue)); in stmmac_tso_xmit()
4241 netdev_err(priv->dev, in stmmac_tso_xmit()
4242 "%s: Tx Ring full when queue awake\n", in stmmac_tso_xmit()
4248 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ in stmmac_tso_xmit()
4250 mss = skb_shinfo(skb)->gso_size; in stmmac_tso_xmit()
4253 if (mss != tx_q->mss) { in stmmac_tso_xmit()
4254 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4255 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4257 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4260 tx_q->mss = mss; in stmmac_tso_xmit()
4261 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_xmit()
4262 priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4263 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_xmit()
4269 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, in stmmac_tso_xmit()
4270 skb->data_len); in stmmac_tso_xmit()
4276 first_entry = tx_q->cur_tx; in stmmac_tso_xmit()
4277 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_tso_xmit()
4279 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4280 desc = &tx_q->dma_entx[first_entry].basic; in stmmac_tso_xmit()
4282 desc = &tx_q->dma_tx[first_entry]; in stmmac_tso_xmit()
4289 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
4291 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4294 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_tso_xmit()
4295 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); in stmmac_tso_xmit()
4296 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; in stmmac_tso_xmit()
4297 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4299 if (priv->dma_cap.addr64 <= 32) { in stmmac_tso_xmit()
4300 first->des0 = cpu_to_le32(des); in stmmac_tso_xmit()
4304 first->des1 = cpu_to_le32(des + proto_hdr_len); in stmmac_tso_xmit()
4307 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; in stmmac_tso_xmit()
4315 stmmac_tso_allocator(priv, des, tmp_pay_len, (nfrags == 0), queue); in stmmac_tso_xmit()
4319 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_tso_xmit()
4321 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
4324 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4328 (i == nfrags - 1), queue); in stmmac_tso_xmit()
4330 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4331 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); in stmmac_tso_xmit()
4332 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; in stmmac_tso_xmit()
4333 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4336 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; in stmmac_tso_xmit()
4339 tx_q->tx_skbuff[tx_q->cur_tx] = skb; in stmmac_tso_xmit()
4340 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4343 tx_packets = (tx_q->cur_tx + 1) - first_tx; in stmmac_tso_xmit()
4344 tx_q->tx_count_frames += tx_packets; in stmmac_tso_xmit()
4346 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
4348 else if (!priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4350 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4352 else if ((tx_q->tx_count_frames % in stmmac_tso_xmit()
4353 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_tso_xmit()
4359 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4360 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4362 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4364 tx_q->tx_count_frames = 0; in stmmac_tso_xmit()
4373 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4375 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { in stmmac_tso_xmit()
4376 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_tso_xmit()
4378 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
4381 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_tso_xmit()
4382 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_tso_xmit()
4383 u64_stats_inc(&txq_stats->q.tx_tso_frames); in stmmac_tso_xmit()
4384 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); in stmmac_tso_xmit()
4386 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_tso_xmit()
4387 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_tso_xmit()
4389 if (priv->sarc_type) in stmmac_tso_xmit()
4390 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
4394 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_tso_xmit()
4395 priv->hwts_tx_en)) { in stmmac_tso_xmit()
4397 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_tso_xmit()
4405 1, tx_q->tx_skbuff_dma[first_entry].last_segment, in stmmac_tso_xmit()
4406 hdr / 4, (skb->len - proto_hdr_len)); in stmmac_tso_xmit()
4421 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_tso_xmit()
4422 tx_q->cur_tx, first, nfrags); in stmmac_tso_xmit()
4424 print_pkt(skb->data, skb_headlen(skb)); in stmmac_tso_xmit()
4427 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_tso_xmit()
4429 stmmac_flush_tx_descriptors(priv, queue); in stmmac_tso_xmit()
4430 stmmac_tx_timer_arm(priv, queue); in stmmac_tso_xmit()
4435 dev_err(priv->device, "Tx dma map failed\n"); in stmmac_tso_xmit()
4437 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4442 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4464 * stmmac_xmit - Tx entry point of the driver
4477 u32 queue = skb_get_queue_mapping(skb); in stmmac_xmit() local
4478 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_xmit()
4479 int gso = skb_shinfo(skb)->gso_type; in stmmac_xmit()
4488 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xmit()
4489 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xmit()
4490 first_tx = tx_q->cur_tx; in stmmac_xmit()
4492 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) in stmmac_xmit()
4496 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
4499 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
4503 if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) { in stmmac_xmit()
4504 if (!netif_tx_queue_stopped(netdev_get_tx_queue(dev, queue))) { in stmmac_xmit()
4505 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
4506 queue)); in stmmac_xmit()
4508 netdev_err(priv->dev, in stmmac_xmit()
4509 "%s: Tx Ring full when queue awake\n", in stmmac_xmit()
4518 entry = tx_q->cur_tx; in stmmac_xmit()
4520 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_xmit()
4522 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); in stmmac_xmit()
4527 * Packets that won't trigger the COE e.g. most DSA-tagged packets will in stmmac_xmit()
4531 (priv->plat->tx_queues_cfg[queue].coe_unsupported || in stmmac_xmit()
4538 if (likely(priv->extend_desc)) in stmmac_xmit()
4539 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4540 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4541 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4543 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4550 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
4553 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
4557 if (unlikely(entry < 0) && (entry != -EINVAL)) in stmmac_xmit()
4562 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_xmit()
4564 bool last_segment = (i == (nfrags - 1)); in stmmac_xmit()
4566 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4567 WARN_ON(tx_q->tx_skbuff[entry]); in stmmac_xmit()
4569 if (likely(priv->extend_desc)) in stmmac_xmit()
4570 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4571 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4572 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4574 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4576 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
4578 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4581 tx_q->tx_skbuff_dma[entry].buf = des; in stmmac_xmit()
4585 tx_q->tx_skbuff_dma[entry].map_as_page = true; in stmmac_xmit()
4586 tx_q->tx_skbuff_dma[entry].len = len; in stmmac_xmit()
4587 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; in stmmac_xmit()
4588 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4592 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
4596 tx_q->tx_skbuff[entry] = skb; in stmmac_xmit()
4597 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4600 * segment is reset and the timer re-started to clean the tx status. in stmmac_xmit()
4604 tx_packets = (entry + 1) - first_tx; in stmmac_xmit()
4605 tx_q->tx_count_frames += tx_packets; in stmmac_xmit()
4607 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
4609 else if (!priv->tx_coal_frames[queue]) in stmmac_xmit()
4611 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_xmit()
4613 else if ((tx_q->tx_count_frames % in stmmac_xmit()
4614 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_xmit()
4620 if (likely(priv->extend_desc)) in stmmac_xmit()
4621 desc = &tx_q->dma_etx[entry].basic; in stmmac_xmit()
4622 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4623 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4625 desc = &tx_q->dma_tx[entry]; in stmmac_xmit()
4627 tx_q->tx_count_frames = 0; in stmmac_xmit()
4636 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4637 tx_q->cur_tx = entry; in stmmac_xmit()
4640 netdev_dbg(priv->dev, in stmmac_xmit()
4642 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_xmit()
4645 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
4646 print_pkt(skb->data, skb->len); in stmmac_xmit()
4649 if (unlikely(stmmac_tx_avail(priv, queue) <= (MAX_SKB_FRAGS + 1))) { in stmmac_xmit()
4650 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_xmit()
4652 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
4655 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xmit()
4656 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_xmit()
4658 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xmit()
4659 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xmit()
4661 if (priv->sarc_type) in stmmac_xmit()
4662 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
4673 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
4675 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4678 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_xmit()
4679 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4680 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; in stmmac_xmit()
4684 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; in stmmac_xmit()
4685 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; in stmmac_xmit()
4687 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_xmit()
4688 priv->hwts_tx_en)) { in stmmac_xmit()
4690 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_xmit()
4696 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
4697 skb->len); in stmmac_xmit()
4700 if (tx_q->tbs & STMMAC_TBS_EN) { in stmmac_xmit()
4701 struct timespec64 ts = ns_to_timespec64(skb->tstamp); in stmmac_xmit()
4703 tbs_desc = &tx_q->dma_entx[first_entry]; in stmmac_xmit()
4709 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_xmit()
4711 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xmit()
4713 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xmit()
4714 stmmac_tx_timer_arm(priv, queue); in stmmac_xmit()
4719 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
4721 priv->xstats.tx_dropped++; in stmmac_xmit()
4728 __be16 vlan_proto = veth->h_vlan_proto; in stmmac_rx_vlan()
4732 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || in stmmac_rx_vlan()
4734 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { in stmmac_rx_vlan()
4736 vlanid = ntohs(veth->h_vlan_TCI); in stmmac_rx_vlan()
4737 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); in stmmac_rx_vlan()
4744 * stmmac_rx_refill - refill used skb preallocated buffers
4746 * @queue: RX queue index
4748 * that is based on zero-copy.
4750 static inline void stmmac_rx_refill(struct stmmac_priv *priv, u32 queue) in stmmac_rx_refill() argument
4752 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill()
4753 int dirty = stmmac_rx_dirty(priv, queue); in stmmac_rx_refill()
4754 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill()
4757 if (priv->dma_cap.host_dma_width <= 32) in stmmac_rx_refill()
4760 while (dirty-- > 0) { in stmmac_rx_refill()
4761 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill()
4765 if (priv->extend_desc) in stmmac_rx_refill()
4766 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill()
4768 p = rx_q->dma_rx + entry; in stmmac_rx_refill()
4770 if (!buf->page) { in stmmac_rx_refill()
4771 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4772 if (!buf->page) in stmmac_rx_refill()
4776 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
4777 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4778 if (!buf->sec_page) in stmmac_rx_refill()
4781 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_rx_refill()
4784 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_rx_refill()
4786 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
4787 if (priv->sph) in stmmac_rx_refill()
4788 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_rx_refill()
4790 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_rx_refill()
4793 rx_q->rx_count_frames++; in stmmac_rx_refill()
4794 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4795 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill()
4796 rx_q->rx_count_frames = 0; in stmmac_rx_refill()
4798 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4799 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill()
4800 if (!priv->use_riwt) in stmmac_rx_refill()
4806 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill()
4808 rx_q->dirty_rx = entry; in stmmac_rx_refill()
4809 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill()
4810 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill()
4811 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
4819 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len()
4822 if (priv->sph && len) in stmmac_rx_buf1_len()
4827 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
4828 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
4834 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf1_len()
4839 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); in stmmac_rx_buf1_len()
4846 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len()
4850 if (!priv->sph) in stmmac_rx_buf2_len()
4855 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf2_len()
4860 return plen - len; in stmmac_rx_buf2_len()
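The two length helpers above decide how many bytes of a received frame live in buffer 1 and buffer 2 of each descriptor. Ignoring the split-header special cases, the pattern is that every descriptor but the last holds a full DMA buffer and the last one holds the remainder. A simplified stand-alone model:

	#include <stdio.h>

	int main(void)
	{
		unsigned int dma_buf_sz = 2048;		/* example per-buffer size */
		unsigned int plen = 5000;		/* total frame length */
		unsigned int got = 0, descs = 0;

		while (got < plen) {
			unsigned int remain = plen - got;
			unsigned int len = remain < dma_buf_sz ? remain : dma_buf_sz;

			got += len;
			descs++;
		}
		printf("%u-byte frame spans %u descriptors\n", plen, descs);
		return 0;
	}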
4863 static int stmmac_xdp_xmit_xdpf(struct stmmac_priv *priv, int queue, in stmmac_xdp_xmit_xdpf() argument
4866 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_xdpf()
4867 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_xdpf()
4868 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_xdpf()
4873 if (stmmac_tx_avail(priv, queue) < STMMAC_TX_THRESH(priv)) in stmmac_xdp_xmit_xdpf()
4876 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_xdpf()
4877 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_xdpf()
4878 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_xdpf()
4879 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_xdpf()
4881 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_xdpf()
4884 dma_addr = dma_map_single(priv->device, xdpf->data, in stmmac_xdp_xmit_xdpf()
4885 xdpf->len, DMA_TO_DEVICE); in stmmac_xdp_xmit_xdpf()
4886 if (dma_mapping_error(priv->device, dma_addr)) in stmmac_xdp_xmit_xdpf()
4889 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; in stmmac_xdp_xmit_xdpf()
4891 struct page *page = virt_to_page(xdpf->data); in stmmac_xdp_xmit_xdpf()
4894 xdpf->headroom; in stmmac_xdp_xmit_xdpf()
4895 dma_sync_single_for_device(priv->device, dma_addr, in stmmac_xdp_xmit_xdpf()
4896 xdpf->len, DMA_BIDIRECTIONAL); in stmmac_xdp_xmit_xdpf()
4898 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; in stmmac_xdp_xmit_xdpf()
4901 tx_q->tx_skbuff_dma[entry].buf = dma_addr; in stmmac_xdp_xmit_xdpf()
4902 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_xdpf()
4903 tx_q->tx_skbuff_dma[entry].len = xdpf->len; in stmmac_xdp_xmit_xdpf()
4904 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_xdpf()
4905 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_xdpf()
4907 tx_q->xdpf[entry] = xdpf; in stmmac_xdp_xmit_xdpf()
4911 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, in stmmac_xdp_xmit_xdpf()
4912 true, priv->mode, true, true, in stmmac_xdp_xmit_xdpf()
4913 xdpf->len); in stmmac_xdp_xmit_xdpf()
4915 tx_q->tx_count_frames++; in stmmac_xdp_xmit_xdpf()
4917 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_xdpf()
4923 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_xdpf()
4925 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4926 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xdp_xmit_xdpf()
4927 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
4930 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xdp_xmit_xdpf()
4932 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_xdpf()
4933 tx_q->cur_tx = entry; in stmmac_xdp_xmit_xdpf()
4946 while (index >= priv->plat->tx_queues_to_use) in stmmac_xdp_get_tx_queue()
4947 index -= priv->plat->tx_queues_to_use; in stmmac_xdp_get_tx_queue()
4958 int queue; in stmmac_xdp_xmit_back() local
4964 queue = stmmac_xdp_get_tx_queue(priv, cpu); in stmmac_xdp_xmit_back()
4965 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_back()
4968 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_back()
4971 res = stmmac_xdp_xmit_xdpf(priv, queue, xdpf, false); in stmmac_xdp_xmit_back()
4973 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xdp_xmit_back()
4996 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) in __stmmac_xdp_run_prog()
5002 bpf_warn_invalid_xdp_action(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5005 trace_xdp_exception(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5021 prog = READ_ONCE(priv->xdp_prog); in stmmac_xdp_run_prog()
5029 return ERR_PTR(-res); in stmmac_xdp_run_prog()
5036 int queue; in stmmac_finalize_xdp_rx() local
5038 queue = stmmac_xdp_get_tx_queue(priv, cpu); in stmmac_finalize_xdp_rx()
5041 stmmac_tx_timer_arm(priv, queue); in stmmac_finalize_xdp_rx()
5050 unsigned int metasize = xdp->data - xdp->data_meta; in stmmac_construct_skb_zc()
5051 unsigned int datasize = xdp->data_end - xdp->data; in stmmac_construct_skb_zc()
5054 skb = __napi_alloc_skb(&ch->rxtx_napi, in stmmac_construct_skb_zc()
5055 xdp->data_end - xdp->data_hard_start, in stmmac_construct_skb_zc()
5060 skb_reserve(skb, xdp->data - xdp->data_hard_start); in stmmac_construct_skb_zc()
5061 memcpy(__skb_put(skb, datasize), xdp->data, datasize); in stmmac_construct_skb_zc()
5068 static void stmmac_dispatch_skb_zc(struct stmmac_priv *priv, u32 queue, in stmmac_dispatch_skb_zc() argument
5072 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_dispatch_skb_zc()
5073 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_dispatch_skb_zc()
5074 unsigned int len = xdp->data_end - xdp->data; in stmmac_dispatch_skb_zc()
5076 int coe = priv->hw->rx_csum; in stmmac_dispatch_skb_zc()
5082 priv->xstats.rx_dropped++; in stmmac_dispatch_skb_zc()
5087 if (priv->hw->hw_vlan_en) in stmmac_dispatch_skb_zc()
5089 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_dispatch_skb_zc()
5092 stmmac_rx_vlan(priv->dev, skb); in stmmac_dispatch_skb_zc()
5093 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_dispatch_skb_zc()
5098 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_dispatch_skb_zc()
5103 skb_record_rx_queue(skb, queue); in stmmac_dispatch_skb_zc()
5104 napi_gro_receive(&ch->rxtx_napi, skb); in stmmac_dispatch_skb_zc()
5106 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5107 u64_stats_inc(&rxq_stats->napi.rx_pkt_n); in stmmac_dispatch_skb_zc()
5108 u64_stats_add(&rxq_stats->napi.rx_bytes, len); in stmmac_dispatch_skb_zc()
5109 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5112 static bool stmmac_rx_refill_zc(struct stmmac_priv *priv, u32 queue, u32 budget) in stmmac_rx_refill_zc() argument
5114 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill_zc()
5115 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill_zc()
5119 budget = min(budget, stmmac_rx_dirty(priv, queue)); in stmmac_rx_refill_zc()
5121 while (budget-- > 0 && entry != rx_q->cur_rx) { in stmmac_rx_refill_zc()
5122 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill_zc()
5126 if (!buf->xdp) { in stmmac_rx_refill_zc()
5127 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_rx_refill_zc()
5128 if (!buf->xdp) { in stmmac_rx_refill_zc()
5134 if (priv->extend_desc) in stmmac_rx_refill_zc()
5135 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill_zc()
5137 rx_desc = rx_q->dma_rx + entry; in stmmac_rx_refill_zc()
5139 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_rx_refill_zc()
5144 rx_q->rx_count_frames++; in stmmac_rx_refill_zc()
5145 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5146 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill_zc()
5147 rx_q->rx_count_frames = 0; in stmmac_rx_refill_zc()
5149 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5150 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill_zc()
5151 if (!priv->use_riwt) in stmmac_rx_refill_zc()
5157 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill_zc()
5161 rx_q->dirty_rx = entry; in stmmac_rx_refill_zc()
5162 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill_zc()
5163 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill_zc()
5164 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill_zc()
5180 static int stmmac_rx_zc(struct stmmac_priv *priv, int limit, u32 queue) in stmmac_rx_zc() argument
5182 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx_zc()
5183 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_zc()
5185 int dirty = stmmac_rx_dirty(priv, queue); in stmmac_rx_zc()
5186 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5197 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx_zc()
5198 if (priv->extend_desc) { in stmmac_rx_zc()
5199 rx_head = (void *)rx_q->dma_erx; in stmmac_rx_zc()
5202 rx_head = (void *)rx_q->dma_rx; in stmmac_rx_zc()
5206 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx_zc()
5207 rx_q->dma_rx_phy, desc_size); in stmmac_rx_zc()
5217 if (!count && rx_q->state_saved) { in stmmac_rx_zc()
5218 error = rx_q->state.error; in stmmac_rx_zc()
5219 len = rx_q->state.len; in stmmac_rx_zc()
5221 rx_q->state_saved = false; in stmmac_rx_zc()
5232 buf = &rx_q->buf_pool[entry]; in stmmac_rx_zc()
5236 !stmmac_rx_refill_zc(priv, queue, dirty); in stmmac_rx_zc()
5240 if (priv->extend_desc) in stmmac_rx_zc()
5241 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_zc()
5243 p = rx_q->dma_rx + entry; in stmmac_rx_zc()
5246 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx_zc()
5251 /* Prefetch the next RX descriptor */ in stmmac_rx_zc()
5252 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx_zc()
5253 priv->dma_conf.dma_rx_size); in stmmac_rx_zc()
5254 next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5256 if (priv->extend_desc) in stmmac_rx_zc()
5257 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx_zc()
5259 np = rx_q->dma_rx + next_entry; in stmmac_rx_zc()
5264 if (!buf->xdp) in stmmac_rx_zc()
5267 if (priv->extend_desc) in stmmac_rx_zc()
5268 stmmac_rx_extended_status(priv, &priv->xstats, in stmmac_rx_zc()
5269 rx_q->dma_erx + entry); in stmmac_rx_zc()
5271 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5272 buf->xdp = NULL; in stmmac_rx_zc()
5275 if (!priv->hwts_rx_en) in stmmac_rx_zc()
5286 /* XSK pool expects RX frame 1:1 mapped to XSK buffer */ in stmmac_rx_zc()
5288 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5289 buf->xdp = NULL; in stmmac_rx_zc()
5295 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); in stmmac_rx_zc()
5296 ctx->priv = priv; in stmmac_rx_zc()
5297 ctx->desc = p; in stmmac_rx_zc()
5298 ctx->ndesc = np; in stmmac_rx_zc()
5306 buf1_len -= ETH_FCS_LEN; in stmmac_rx_zc()
5307 len -= ETH_FCS_LEN; in stmmac_rx_zc()
5310 /* RX buffer is good and fits into an XSK pool buffer */ in stmmac_rx_zc()
5311 buf->xdp->data_end = buf->xdp->data + buf1_len; in stmmac_rx_zc()
5312 xsk_buff_dma_sync_for_cpu(buf->xdp, rx_q->xsk_pool); in stmmac_rx_zc()
5314 prog = READ_ONCE(priv->xdp_prog); in stmmac_rx_zc()
5315 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); in stmmac_rx_zc()
5319 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); in stmmac_rx_zc()
5320 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5323 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5332 buf->xdp = NULL; in stmmac_rx_zc()
5338 rx_q->state_saved = true; in stmmac_rx_zc()
5339 rx_q->state.error = error; in stmmac_rx_zc()
5340 rx_q->state.len = len; in stmmac_rx_zc()
5345 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5346 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx_zc()
5347 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5349 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx_zc()
5350 priv->xstats.rx_errors += rx_errors; in stmmac_rx_zc()
5352 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { in stmmac_rx_zc()
5353 if (failure || stmmac_rx_dirty(priv, queue) > 0) in stmmac_rx_zc()
5354 xsk_set_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5356 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5365 * stmmac_rx - manage the receive process
5368 * @queue: RX queue index.
5372 static int stmmac_rx(struct stmmac_priv *priv, int limit, u32 queue) in stmmac_rx() argument
5375 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx()
5376 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx()
5377 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
5379 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx()
5380 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx()
5388 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); in stmmac_rx()
5389 buf_sz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx()
5390 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); in stmmac_rx()
5395 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
5396 if (priv->extend_desc) { in stmmac_rx()
5397 rx_head = (void *)rx_q->dma_erx; in stmmac_rx()
5400 rx_head = (void *)rx_q->dma_rx; in stmmac_rx()
5404 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx()
5405 rx_q->dma_rx_phy, desc_size); in stmmac_rx()
5415 if (!count && rx_q->state_saved) { in stmmac_rx()
5416 skb = rx_q->state.skb; in stmmac_rx()
5417 error = rx_q->state.error; in stmmac_rx()
5418 len = rx_q->state.len; in stmmac_rx()
5420 rx_q->state_saved = false; in stmmac_rx()
5433 buf = &rx_q->buf_pool[entry]; in stmmac_rx()
5435 if (priv->extend_desc) in stmmac_rx()
5436 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx()
5438 p = rx_q->dma_rx + entry; in stmmac_rx()
5441 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx()
5446 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx()
5447 priv->dma_conf.dma_rx_size); in stmmac_rx()
5448 next_entry = rx_q->cur_rx; in stmmac_rx()
5450 if (priv->extend_desc) in stmmac_rx()
5451 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx()
5453 np = rx_q->dma_rx + next_entry; in stmmac_rx()
5457 if (priv->extend_desc) in stmmac_rx()
5458 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
5460 page_pool_recycle_direct(rx_q->page_pool, buf->page); in stmmac_rx()
5461 buf->page = NULL; in stmmac_rx()
5463 if (!priv->hwts_rx_en) in stmmac_rx()
5478 prefetch(page_address(buf->page) + buf->page_offset); in stmmac_rx()
5479 if (buf->sec_page) in stmmac_rx()
5480 prefetch(page_address(buf->sec_page)); in stmmac_rx()
5490 buf2_len -= ETH_FCS_LEN; in stmmac_rx()
5491 len -= ETH_FCS_LEN; in stmmac_rx()
5493 buf1_len -= ETH_FCS_LEN; in stmmac_rx()
5494 len -= ETH_FCS_LEN; in stmmac_rx()
5501 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5504 xdp_init_buff(&ctx.xdp, buf_sz, &rx_q->xdp_rxq); in stmmac_rx()
5505 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), in stmmac_rx()
5506 buf->page_offset, buf1_len, true); in stmmac_rx()
5508 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5509 buf->page_offset; in stmmac_rx()
5519 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5520 buf->page_offset; in stmmac_rx()
5525 unsigned int xdp_res = -PTR_ERR(skb); in stmmac_rx()
5528 page_pool_put_page(rx_q->page_pool, in stmmac_rx()
5531 buf->page = NULL; in stmmac_rx()
5547 buf->page = NULL; in stmmac_rx()
5557 buf1_len = ctx.xdp.data_end - ctx.xdp.data; in stmmac_rx()
5559 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); in stmmac_rx()
5571 page_pool_recycle_direct(rx_q->page_pool, buf->page); in stmmac_rx()
5572 buf->page = NULL; in stmmac_rx()
5574 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5576 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5577 buf->page, buf->page_offset, buf1_len, in stmmac_rx()
5578 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5582 buf->page = NULL; in stmmac_rx()
5586 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
5588 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5589 buf->sec_page, 0, buf2_len, in stmmac_rx()
5590 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5594 buf->sec_page = NULL; in stmmac_rx()
5607 if (priv->hw->hw_vlan_en) in stmmac_rx()
5609 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_rx()
5612 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
5614 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
5619 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_rx()
5624 skb_record_rx_queue(skb, queue); in stmmac_rx()
5625 napi_gro_receive(&ch->rx_napi, skb); in stmmac_rx()
5634 rx_q->state_saved = true; in stmmac_rx()
5635 rx_q->state.skb = skb; in stmmac_rx()
5636 rx_q->state.error = error; in stmmac_rx()
5637 rx_q->state.len = len; in stmmac_rx()
5642 stmmac_rx_refill(priv, queue); in stmmac_rx()
5644 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx()
5645 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); in stmmac_rx()
5646 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); in stmmac_rx()
5647 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx()
5648 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx()
5650 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx()
5651 priv->xstats.rx_errors += rx_errors; in stmmac_rx()
5660 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx()
5662 u32 chan = ch->index; in stmmac_napi_poll_rx()
5665 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rx()
5666 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5667 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rx()
5668 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5674 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rx()
5675 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
5676 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rx()
5686 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx()
5689 u32 chan = ch->index; in stmmac_napi_poll_tx()
5692 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_tx()
5693 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5694 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_tx()
5695 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5703 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_tx()
5704 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
5705 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_tx()
5719 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rxtx()
5724 u32 chan = ch->index; in stmmac_napi_poll_rxtx()
5726 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rxtx()
5727 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5728 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5729 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5731 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_rxtx()
5732 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5733 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5734 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5743 /* If either TX or RX work is not complete, return budget in stmmac_napi_poll_rxtx()
5753 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5754 /* Both RX and TX work are complete, in stmmac_napi_poll_rxtx()
5755 * so enable both RX & TX IRQs. in stmmac_napi_poll_rxtx()
5757 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_napi_poll_rxtx()
5758 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5765 return min(rxtx_done, budget - 1); in stmmac_napi_poll_rxtx()
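The comments above capture the standard NAPI completion contract: return the full budget while either direction still has work, and only re-arm the DMA interrupts once napi_complete_done() has taken the instance out of polled mode. Below is a minimal, hedged sketch of that pattern (not stmmac code): the example_channel structure and example_* helpers are hypothetical stand-ins, while napi_complete_done(), the spinlock calls and the budget rules are real kernel API.

#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/minmax.h>

/* Hypothetical per-channel context; stmmac uses struct stmmac_channel. */
struct example_channel {
	struct napi_struct napi;
	spinlock_t lock;
};

/* Hypothetical stubs standing in for the real RX/TX clean-up routines. */
static inline int example_poll_rx(struct example_channel *ch, int budget) { return 0; }
static inline int example_poll_tx(struct example_channel *ch, int budget) { return 0; }
static inline void example_enable_irq(struct example_channel *ch) { }

static int example_napi_poll_rxtx(struct napi_struct *napi, int budget)
{
	struct example_channel *ch = container_of(napi, struct example_channel, napi);
	int rx_done, tx_done, done;
	unsigned long flags;

	tx_done = example_poll_tx(ch, budget);	/* reclaim completed TX descriptors */
	rx_done = example_poll_rx(ch, budget);	/* receive at most 'budget' frames */

	/* Work left in either direction: stay in polled mode, keep IRQs masked. */
	if (rx_done >= budget || tx_done >= budget)
		return budget;

	done = max(rx_done, tx_done);

	/* Both directions idle: leave polled mode, then re-arm the DMA interrupts. */
	if (napi_complete_done(napi, done)) {
		spin_lock_irqsave(&ch->lock, flags);
		example_enable_irq(ch);
		spin_unlock_irqrestore(&ch->lock, flags);
	}

	/* Never report the full budget once napi_complete_done() has run. */
	return min(done, budget - 1);
}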
5771 * @txqueue: the index of the hanging transmit queue
5785 * stmmac_set_rx_mode - entry point for multicast addressing
5797 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
5801 * stmmac_change_mtu - entry point to change MTU size for the device.
5808 * 0 on success and an appropriate negative error code as defined in errno.h. in stmmac_change_mtu()
5814 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
5820 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
5822 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
5825 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); in stmmac_change_mtu()
5826 return -EINVAL; in stmmac_change_mtu()
5833 return -EINVAL; in stmmac_change_mtu()
5836 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); in stmmac_change_mtu()
5840 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", in stmmac_change_mtu()
5851 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); in stmmac_change_mtu()
5860 dev->mtu = mtu; in stmmac_change_mtu()
5871 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
5874 if (!priv->plat->tx_coe) in stmmac_fix_features()
5882 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
5886 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
5888 priv->tso = true; in stmmac_fix_features()
5890 priv->tso = false; in stmmac_fix_features()
5903 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
5905 priv->hw->rx_csum = 0; in stmmac_set_features()
5909 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
5911 if (priv->sph_cap) { in stmmac_set_features()
5912 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
5915 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
5916 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
5920 priv->hw->hw_vlan_en = true; in stmmac_set_features()
5922 priv->hw->hw_vlan_en = false; in stmmac_set_features()
5924 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_set_features()
5931 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; in stmmac_fpe_event_status()
5932 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; in stmmac_fpe_event_status()
5933 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; in stmmac_fpe_event_status()
5934 bool *hs_enable = &fpe_cfg->hs_enable; in stmmac_fpe_event_status()
5946 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_event_status()
5965 if (!test_bit(__FPE_REMOVING, &priv->fpe_task_state) && in stmmac_fpe_event_status()
5966 !test_and_set_bit(__FPE_TASK_SCHED, &priv->fpe_task_state) && in stmmac_fpe_event_status()
5967 priv->fpe_wq) { in stmmac_fpe_event_status()
5968 queue_work(priv->fpe_wq, &priv->fpe_task); in stmmac_fpe_event_status()
5974 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_common_interrupt()
5975 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_common_interrupt()
5977 u32 queue; in stmmac_common_interrupt() local
5980 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_common_interrupt()
5983 if (priv->irq_wake) in stmmac_common_interrupt()
5984 pm_wakeup_event(priv->device, 0); in stmmac_common_interrupt()
5986 if (priv->dma_cap.estsel) in stmmac_common_interrupt()
5987 stmmac_est_irq_status(priv, priv, priv->dev, in stmmac_common_interrupt()
5988 &priv->xstats, tx_cnt); in stmmac_common_interrupt()
5990 if (priv->dma_cap.fpesel) { in stmmac_common_interrupt()
5991 int status = stmmac_fpe_irq_status(priv, priv->ioaddr, in stmmac_common_interrupt()
5992 priv->dev); in stmmac_common_interrupt()
5998 if ((priv->plat->has_gmac) || xmac) { in stmmac_common_interrupt()
5999 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_common_interrupt()
6004 priv->tx_path_in_lpi_mode = true; in stmmac_common_interrupt()
6006 priv->tx_path_in_lpi_mode = false; in stmmac_common_interrupt()
6009 for (queue = 0; queue < queues_count; queue++) { in stmmac_common_interrupt()
6010 status = stmmac_host_mtl_irq_status(priv, priv->hw, in stmmac_common_interrupt()
6011 queue); in stmmac_common_interrupt()
6015 if (priv->hw->pcs && in stmmac_common_interrupt()
6016 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { in stmmac_common_interrupt()
6017 if (priv->xstats.pcs_link) in stmmac_common_interrupt()
6018 netif_carrier_on(priv->dev); in stmmac_common_interrupt()
6020 netif_carrier_off(priv->dev); in stmmac_common_interrupt()
6028 * stmmac_interrupt - main ISR
6035 * o Core interrupts to manage: remote wake-up, management counter, LPI
6044 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
6066 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_mac_interrupt()
6081 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_safety_interrupt()
6094 int chan = tx_q->queue_index; in stmmac_msi_intr_tx()
6102 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_tx()
6121 int chan = rx_q->queue_index; in stmmac_msi_intr_rx()
6128 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_rx()
6137 * stmmac_ioctl - Entry point for the Ioctl
6148 int ret = -EOPNOTSUPP; in stmmac_ioctl()
6151 return -EINVAL; in stmmac_ioctl()
6157 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
6176 int ret = -EOPNOTSUPP; in stmmac_setup_tc_block_cb()
6178 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
6220 return -EOPNOTSUPP; in stmmac_setup_tc()
6227 int gso = skb_shinfo(skb)->gso_type; in stmmac_select_queue()
6232 * capable Queues. Let's always use Queue 0 in stmmac_select_queue()
6239 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in stmmac_select_queue()
6247 ret = pm_runtime_resume_and_get(priv->device); in stmmac_set_mac_address()
6255 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
6258 pm_runtime_put(priv->device); in stmmac_set_mac_address()
6280 le32_to_cpu(p->des0), le32_to_cpu(p->des1), in sysfs_display_ring()
6281 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); in sysfs_display_ring()
6283 p = &(++ep)->basic; in sysfs_display_ring()
6291 struct net_device *dev = seq->private; in stmmac_rings_status_show()
6293 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
6294 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
6295 u32 queue; in stmmac_rings_status_show() local
6297 if ((dev->flags & IFF_UP) == 0) in stmmac_rings_status_show()
6300 for (queue = 0; queue < rx_count; queue++) { in stmmac_rings_status_show()
6301 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rings_status_show()
6303 seq_printf(seq, "RX Queue %d:\n", queue); in stmmac_rings_status_show()
6305 if (priv->extend_desc) { in stmmac_rings_status_show()
6307 sysfs_display_ring((void *)rx_q->dma_erx, in stmmac_rings_status_show()
6308 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6311 sysfs_display_ring((void *)rx_q->dma_rx, in stmmac_rings_status_show()
6312 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6316 for (queue = 0; queue < tx_count; queue++) { in stmmac_rings_status_show()
6317 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_rings_status_show()
6319 seq_printf(seq, "TX Queue %d:\n", queue); in stmmac_rings_status_show()
6321 if (priv->extend_desc) { in stmmac_rings_status_show()
6323 sysfs_display_ring((void *)tx_q->dma_etx, in stmmac_rings_status_show()
6324 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6325 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { in stmmac_rings_status_show()
6327 sysfs_display_ring((void *)tx_q->dma_tx, in stmmac_rings_status_show()
6328 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6354 struct net_device *dev = seq->private; in stmmac_dma_cap_show()
6357 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
6367 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
6369 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
6371 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
6372 if (priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6375 priv->dma_cap.multi_addr); in stmmac_dma_cap_show()
6378 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
6380 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
6383 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
6385 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
6387 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
6389 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
6391 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
6392 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", in stmmac_dma_cap_show()
6393 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6394 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", in stmmac_dma_cap_show()
6395 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6396 if (priv->plat->has_xgmac) in stmmac_dma_cap_show()
6398 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); in stmmac_dma_cap_show()
6399 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", in stmmac_dma_cap_show()
6400 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
6401 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
6403 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6404 if (priv->synopsys_id >= DWMAC_CORE_4_00 || in stmmac_dma_cap_show()
6405 priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6406 seq_printf(seq, "\tIP Checksum Offload in RX: %s\n", in stmmac_dma_cap_show()
6407 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6409 seq_printf(seq, "\tIP Checksum Offload (type1) in RX: %s\n", in stmmac_dma_cap_show()
6410 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
6411 seq_printf(seq, "\tIP Checksum Offload (type2) in RX: %s\n", in stmmac_dma_cap_show()
6412 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
6414 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
6416 seq_printf(seq, "\tNumber of Additional RX channel: %d\n", in stmmac_dma_cap_show()
6417 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
6419 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
6420 seq_printf(seq, "\tNumber of Additional RX queues: %d\n", in stmmac_dma_cap_show()
6421 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
6423 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
6425 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
6426 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
6427 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
6428 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? in stmmac_dma_cap_show()
6429 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); in stmmac_dma_cap_show()
6430 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
6432 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
6434 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); in stmmac_dma_cap_show()
6435 seq_printf(seq, "\tFlexible RX Parser: %s\n", in stmmac_dma_cap_show()
6436 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
6438 priv->dma_cap.host_dma_width); in stmmac_dma_cap_show()
6440 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
6442 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
6444 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
6446 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
6448 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
6450 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
6452 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
6454 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
6456 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
6457 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", in stmmac_dma_cap_show()
6458 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
6460 priv->dma_cap.tbs_ch_num); in stmmac_dma_cap_show()
6461 seq_printf(seq, "\tPer-Stream Filtering: %s\n", in stmmac_dma_cap_show()
6462 priv->dma_cap.sgfsel ? "Y" : "N"); in stmmac_dma_cap_show()
6464 BIT(priv->dma_cap.ttsfd) >> 1); in stmmac_dma_cap_show()
6466 priv->dma_cap.numtc); in stmmac_dma_cap_show()
6468 priv->dma_cap.dcben ? "Y" : "N"); in stmmac_dma_cap_show()
6470 priv->dma_cap.advthword ? "Y" : "N"); in stmmac_dma_cap_show()
6472 priv->dma_cap.ptoen ? "Y" : "N"); in stmmac_dma_cap_show()
6473 seq_printf(seq, "\tOne-Step Timestamping: %s\n", in stmmac_dma_cap_show()
6474 priv->dma_cap.osten ? "Y" : "N"); in stmmac_dma_cap_show()
6475 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", in stmmac_dma_cap_show()
6476 priv->dma_cap.pfcen ? "Y" : "N"); in stmmac_dma_cap_show()
6477 seq_printf(seq, "\tNumber of Flexible RX Parser Instructions: %lu\n", in stmmac_dma_cap_show()
6478 BIT(priv->dma_cap.frpes) << 6); in stmmac_dma_cap_show()
6479 seq_printf(seq, "\tNumber of Flexible RX Parser Parsable Bytes: %lu\n", in stmmac_dma_cap_show()
6480 BIT(priv->dma_cap.frpbs) << 6); in stmmac_dma_cap_show()
6482 priv->dma_cap.frppipe_num); in stmmac_dma_cap_show()
6484 priv->dma_cap.nrvf_num ? in stmmac_dma_cap_show()
6485 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); in stmmac_dma_cap_show()
6487 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); in stmmac_dma_cap_show()
6489 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); in stmmac_dma_cap_show()
6490 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", in stmmac_dma_cap_show()
6491 priv->dma_cap.cbtisel ? "Y" : "N"); in stmmac_dma_cap_show()
6493 priv->dma_cap.aux_snapshot_n); in stmmac_dma_cap_show()
6494 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", in stmmac_dma_cap_show()
6495 priv->dma_cap.pou_ost_en ? "Y" : "N"); in stmmac_dma_cap_show()
6497 priv->dma_cap.edma ? "Y" : "N"); in stmmac_dma_cap_show()
6499 priv->dma_cap.ediffc ? "Y" : "N"); in stmmac_dma_cap_show()
6501 priv->dma_cap.vxn ? "Y" : "N"); in stmmac_dma_cap_show()
6503 priv->dma_cap.dbgmem ? "Y" : "N"); in stmmac_dma_cap_show()
6505 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); in stmmac_dma_cap_show()
6518 if (dev->netdev_ops != &stmmac_netdev_ops) in stmmac_device_event()
6523 if (priv->dbgfs_dir) in stmmac_device_event()
6524 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, in stmmac_device_event()
6525 priv->dbgfs_dir, in stmmac_device_event()
6527 dev->name); in stmmac_device_event()
6545 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
6547 /* Entry to report DMA RX/TX rings */ in stmmac_init_fs()
6548 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6552 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6562 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
6597 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
6604 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
6606 return -EOPNOTSUPP; in stmmac_vlan_update()
6612 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
6621 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_add_vid()
6628 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6631 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6635 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
6636 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
6641 pm_runtime_put(priv->device); in stmmac_vlan_rx_add_vid()
6652 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_kill_vid()
6659 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
6661 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
6662 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
6670 pm_runtime_put(priv->device); in stmmac_vlan_rx_kill_vid()
6679 switch (bpf->command) { in stmmac_bpf()
6681 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); in stmmac_bpf()
6683 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, in stmmac_bpf()
6684 bpf->xsk.queue_id); in stmmac_bpf()
6686 return -EOPNOTSUPP; in stmmac_bpf()
6697 int queue; in stmmac_xdp_xmit() local
6699 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) in stmmac_xdp_xmit()
6700 return -ENETDOWN; in stmmac_xdp_xmit()
6703 return -EINVAL; in stmmac_xdp_xmit()
6705 queue = stmmac_xdp_get_tx_queue(priv, cpu); in stmmac_xdp_xmit()
6706 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit()
6709 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit()
6715 res = stmmac_xdp_xmit_xdpf(priv, queue, frames[i], true); in stmmac_xdp_xmit()
6723 stmmac_flush_tx_descriptors(priv, queue); in stmmac_xdp_xmit()
6724 stmmac_tx_timer_arm(priv, queue); in stmmac_xdp_xmit()
6732 void stmmac_disable_rx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_disable_rx_queue() argument
6734 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_rx_queue()
6737 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_rx_queue()
6738 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_disable_rx_queue()
6739 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_rx_queue()
6741 stmmac_stop_rx_dma(priv, queue); in stmmac_disable_rx_queue()
6742 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_rx_queue()
6745 void stmmac_enable_rx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_enable_rx_queue() argument
6747 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_enable_rx_queue()
6748 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_rx_queue()
6753 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6755 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); in stmmac_enable_rx_queue()
6759 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); in stmmac_enable_rx_queue()
6761 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6762 netdev_err(priv->dev, "Failed to init RX desc.\n"); in stmmac_enable_rx_queue()
6766 stmmac_reset_rx_queue(priv, queue); in stmmac_enable_rx_queue()
6767 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6769 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_rx_queue()
6770 rx_q->dma_rx_phy, rx_q->queue_index); in stmmac_enable_rx_queue()
6772 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * in stmmac_enable_rx_queue()
6774 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6775 rx_q->rx_tail_addr, rx_q->queue_index); in stmmac_enable_rx_queue()
6777 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_enable_rx_queue()
6778 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_enable_rx_queue()
6779 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6781 rx_q->queue_index); in stmmac_enable_rx_queue()
6783 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6784 priv->dma_conf.dma_buf_sz, in stmmac_enable_rx_queue()
6785 rx_q->queue_index); in stmmac_enable_rx_queue()
6788 stmmac_start_rx_dma(priv, queue); in stmmac_enable_rx_queue()
6790 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_rx_queue()
6791 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_enable_rx_queue()
6792 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_rx_queue()
6795 void stmmac_disable_tx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_disable_tx_queue() argument
6797 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_tx_queue()
6800 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_tx_queue()
6801 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_disable_tx_queue()
6802 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_tx_queue()
6804 stmmac_stop_tx_dma(priv, queue); in stmmac_disable_tx_queue()
6805 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_tx_queue()
6808 void stmmac_enable_tx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_enable_tx_queue() argument
6810 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_tx_queue()
6811 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_tx_queue()
6815 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6817 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); in stmmac_enable_tx_queue()
6821 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6823 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6824 netdev_err(priv->dev, "Failed to init TX desc.\n"); in stmmac_enable_tx_queue()
6828 stmmac_reset_tx_queue(priv, queue); in stmmac_enable_tx_queue()
6829 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6831 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_tx_queue()
6832 tx_q->dma_tx_phy, tx_q->queue_index); in stmmac_enable_tx_queue()
6834 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_enable_tx_queue()
6835 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); in stmmac_enable_tx_queue()
6837 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_enable_tx_queue()
6838 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_tx_queue()
6839 tx_q->tx_tail_addr, tx_q->queue_index); in stmmac_enable_tx_queue()
6841 stmmac_start_tx_dma(priv, queue); in stmmac_enable_tx_queue()
6843 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_tx_queue()
6844 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_enable_tx_queue()
6845 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_tx_queue()
6859 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_release()
6860 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_release()
6865 /* Stop TX/RX DMA channels */ in stmmac_xdp_release()
6868 /* Release and free the Rx/Tx resources */ in stmmac_xdp_release()
6869 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_release()
6871 /* Disable the MAC Rx/Tx */ in stmmac_xdp_release()
6872 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_xdp_release()
6884 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_xdp_open()
6885 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_xdp_open()
6894 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6901 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); in stmmac_xdp_open()
6912 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_xdp_open()
6913 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_xdp_open()
6917 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_xdp_open()
6919 /* DMA RX Channel Configuration */ in stmmac_xdp_open()
6921 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_xdp_open()
6923 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6924 rx_q->dma_rx_phy, chan); in stmmac_xdp_open()
6926 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_xdp_open()
6927 (rx_q->buf_alloc_num * in stmmac_xdp_open()
6929 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6930 rx_q->rx_tail_addr, chan); in stmmac_xdp_open()
6932 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_xdp_open()
6933 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_xdp_open()
6934 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6936 rx_q->queue_index); in stmmac_xdp_open()
6938 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
6939 priv->dma_conf.dma_buf_sz, in stmmac_xdp_open()
6940 rx_q->queue_index); in stmmac_xdp_open()
6943 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_xdp_open()
6948 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_xdp_open()
6950 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6951 tx_q->dma_tx_phy, chan); in stmmac_xdp_open()
6953 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_xdp_open()
6954 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6955 tx_q->tx_tail_addr, chan); in stmmac_xdp_open()
6957 hrtimer_init(&tx_q->txtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_xdp_open()
6958 tx_q->txtimer.function = stmmac_tx_timer; in stmmac_xdp_open()
6961 /* Enable the MAC Rx/Tx */ in stmmac_xdp_open()
6962 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_xdp_open()
6964 /* Start Rx & Tx DMA Channels */ in stmmac_xdp_open()
6980 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_open()
6981 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_open()
6985 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6990 int stmmac_xsk_wakeup(struct net_device *dev, u32 queue, u32 flags) in stmmac_xsk_wakeup() argument
6997 if (test_bit(STMMAC_DOWN, &priv->state) || in stmmac_xsk_wakeup()
6998 !netif_carrier_ok(priv->dev)) in stmmac_xsk_wakeup()
6999 return -ENETDOWN; in stmmac_xsk_wakeup()
7002 return -EINVAL; in stmmac_xsk_wakeup()
7004 if (queue >= priv->plat->rx_queues_to_use || in stmmac_xsk_wakeup()
7005 queue >= priv->plat->tx_queues_to_use) in stmmac_xsk_wakeup()
7006 return -EINVAL; in stmmac_xsk_wakeup()
7008 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_xsk_wakeup()
7009 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xsk_wakeup()
7010 ch = &priv->channel[queue]; in stmmac_xsk_wakeup()
7012 if (!rx_q->xsk_pool && !tx_q->xsk_pool) in stmmac_xsk_wakeup()
7013 return -EINVAL; in stmmac_xsk_wakeup()
7015 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { in stmmac_xsk_wakeup()
7016 /* EQoS does not have per-DMA channel SW interrupt, in stmmac_xsk_wakeup()
7017 * so we schedule RX Napi straight-away. in stmmac_xsk_wakeup()
7019 if (likely(napi_schedule_prep(&ch->rxtx_napi))) in stmmac_xsk_wakeup()
7020 __napi_schedule(&ch->rxtx_napi); in stmmac_xsk_wakeup()
7029 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_get_stats64()
7030 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_get_stats64()
7035 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; in stmmac_get_stats64()
7040 start = u64_stats_fetch_begin(&txq_stats->q_syncp); in stmmac_get_stats64()
7041 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); in stmmac_get_stats64()
7042 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); in stmmac_get_stats64()
7044 start = u64_stats_fetch_begin(&txq_stats->napi_syncp); in stmmac_get_stats64()
7045 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); in stmmac_get_stats64()
7046 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); in stmmac_get_stats64()
7048 stats->tx_packets += tx_packets; in stmmac_get_stats64()
7049 stats->tx_bytes += tx_bytes; in stmmac_get_stats64()
7053 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; in stmmac_get_stats64()
7058 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); in stmmac_get_stats64()
7059 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); in stmmac_get_stats64()
7060 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); in stmmac_get_stats64()
7061 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); in stmmac_get_stats64()
7063 stats->rx_packets += rx_packets; in stmmac_get_stats64()
7064 stats->rx_bytes += rx_bytes; in stmmac_get_stats64()
7067 stats->rx_dropped = priv->xstats.rx_dropped; in stmmac_get_stats64()
7068 stats->rx_errors = priv->xstats.rx_errors; in stmmac_get_stats64()
7069 stats->tx_dropped = priv->xstats.tx_dropped; in stmmac_get_stats64()
7070 stats->tx_errors = priv->xstats.tx_errors; in stmmac_get_stats64()
7071 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; in stmmac_get_stats64()
7072 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; in stmmac_get_stats64()
7073 stats->rx_length_errors = priv->xstats.rx_length; in stmmac_get_stats64()
7074 stats->rx_crc_errors = priv->xstats.rx_crc_errors; in stmmac_get_stats64()
7075 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; in stmmac_get_stats64()
7076 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; in stmmac_get_stats64()
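The per-queue loops above rely on the u64_stats_sync seqcount so that the 64-bit counters read from ndo_get_stats64() stay consistent even on 32-bit machines, where the hot path updates them without a lock. A hedged, self-contained sketch of that writer/reader pairing follows; the example_counters structure and helpers are hypothetical, the u64_stats_* calls are the real <linux/u64_stats_sync.h> API used throughout this file.

#include <linux/u64_stats_sync.h>

/* Hypothetical counter block; stmmac keeps one per RX/TX queue. */
struct example_counters {
	u64_stats_t packets;
	u64_stats_t bytes;
	struct u64_stats_sync syncp;	/* must be u64_stats_init()ed once */
};

/* Writer side (hot path): counters are bumped inside update_begin/end. */
static void example_record(struct example_counters *c, unsigned int len)
{
	u64_stats_update_begin(&c->syncp);
	u64_stats_inc(&c->packets);
	u64_stats_add(&c->bytes, len);
	u64_stats_update_end(&c->syncp);
}

/* Reader side (ndo_get_stats64): retry until a consistent snapshot is seen. */
static void example_read(struct example_counters *c, u64 *packets, u64 *bytes)
{
	unsigned int start;

	do {
		start = u64_stats_fetch_begin(&c->syncp);
		*packets = u64_stats_read(&c->packets);
		*bytes = u64_stats_read(&c->bytes);
	} while (u64_stats_fetch_retry(&c->syncp, start));
}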
7102 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
7104 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
7107 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
7110 netif_trans_update(priv->dev); in stmmac_reset_subtask()
7111 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
7114 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7115 dev_close(priv->dev); in stmmac_reset_subtask()
7116 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
7117 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7118 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
7128 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
7132 * stmmac_hw_init - Init the MAC device
7143 /* dwmac-sun8i only works in chain mode */ in stmmac_hw_init()
7144 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) in stmmac_hw_init()
7146 priv->chain_mode = chain_mode; in stmmac_hw_init()
7154 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
7155 if (priv->hw_cap_support) { in stmmac_hw_init()
7156 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
7163 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
7164 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && in stmmac_hw_init()
7165 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); in stmmac_hw_init()
7166 priv->hw->pmt = priv->plat->pmt; in stmmac_hw_init()
7167 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
7168 priv->hw->multicast_filter_bins = in stmmac_hw_init()
7169 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
7170 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
7171 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
7175 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
7176 priv->plat->tx_coe = 0; in stmmac_hw_init()
7178 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
7181 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
7183 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
7184 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
7185 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
7186 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
7189 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
7192 if (priv->plat->rx_coe) { in stmmac_hw_init()
7193 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
7194 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
7195 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
7196 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
7198 if (priv->plat->tx_coe) in stmmac_hw_init()
7199 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
7201 if (priv->plat->pmt) { in stmmac_hw_init()
7202 dev_info(priv->device, "Wake-Up On Lan supported\n"); in stmmac_hw_init()
7203 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
7206 if (priv->dma_cap.tsoen) in stmmac_hw_init()
7207 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
7209 priv->hw->vlan_fail_q_en = in stmmac_hw_init()
7210 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); in stmmac_hw_init()
7211 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
7214 if (priv->hwif_quirks) { in stmmac_hw_init()
7215 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
7220 /* Rx Watchdog is available in the COREs newer than the 3.40. in stmmac_hw_init()
7225 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
7226 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
7227 priv->use_riwt = 1; in stmmac_hw_init()
7228 dev_info(priv->device, in stmmac_hw_init()
7229 "Enable RX Mitigation via HW Watchdog Timer\n"); in stmmac_hw_init()
7238 u32 queue, maxq; in stmmac_napi_add() local
7240 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
7242 for (queue = 0; queue < maxq; queue++) { in stmmac_napi_add()
7243 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
7245 ch->priv_data = priv; in stmmac_napi_add()
7246 ch->index = queue; in stmmac_napi_add()
7247 spin_lock_init(&ch->lock); in stmmac_napi_add()
7249 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
7250 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); in stmmac_napi_add()
7252 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7253 netif_napi_add_tx(dev, &ch->tx_napi, in stmmac_napi_add()
7256 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_add()
7257 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7258 netif_napi_add(dev, &ch->rxtx_napi, in stmmac_napi_add()
7267 u32 queue, maxq; in stmmac_napi_del() local
7269 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
7271 for (queue = 0; queue < maxq; queue++) { in stmmac_napi_del()
7272 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
7274 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
7275 netif_napi_del(&ch->rx_napi); in stmmac_napi_del()
7276 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
7277 netif_napi_del(&ch->tx_napi); in stmmac_napi_del()
7278 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_del()
7279 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_del()
7280 netif_napi_del(&ch->rxtx_napi); in stmmac_napi_del()
7295 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
7296 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
7298 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_reinit_queues()
7299 priv->rss.table[i] = ethtool_rxfh_indir_default(i, in stmmac_reinit_queues()
7319 priv->dma_conf.dma_rx_size = rx_size; in stmmac_reinit_ringparam()
7320 priv->dma_conf.dma_tx_size = tx_size; in stmmac_reinit_ringparam()
7333 struct stmmac_fpe_cfg *fpe_cfg = priv->plat->fpe_cfg; in stmmac_fpe_lp_task()
7334 enum stmmac_fpe_state *lo_state = &fpe_cfg->lo_fpe_state; in stmmac_fpe_lp_task()
7335 enum stmmac_fpe_state *lp_state = &fpe_cfg->lp_fpe_state; in stmmac_fpe_lp_task()
7336 bool *hs_enable = &fpe_cfg->hs_enable; in stmmac_fpe_lp_task()
7337 bool *enable = &fpe_cfg->enable; in stmmac_fpe_lp_task()
7340 while (retries-- > 0) { in stmmac_fpe_lp_task()
7347 stmmac_fpe_configure(priv, priv->ioaddr, in stmmac_fpe_lp_task()
7349 priv->plat->tx_queues_to_use, in stmmac_fpe_lp_task()
7350 priv->plat->rx_queues_to_use, in stmmac_fpe_lp_task()
7353 netdev_info(priv->dev, "configured FPE\n"); in stmmac_fpe_lp_task()
7357 netdev_info(priv->dev, "!!! BOTH FPE stations ON\n"); in stmmac_fpe_lp_task()
7364 netdev_info(priv->dev, SEND_VERIFY_MPAKCET_FMT, in stmmac_fpe_lp_task()
7366 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_lp_task()
7374 clear_bit(__FPE_TASK_SCHED, &priv->fpe_task_state); in stmmac_fpe_lp_task()
7379 if (priv->plat->fpe_cfg->hs_enable != enable) { in stmmac_fpe_handshake()
7381 stmmac_fpe_send_mpacket(priv, priv->ioaddr, in stmmac_fpe_handshake()
7382 priv->plat->fpe_cfg, in stmmac_fpe_handshake()
7385 priv->plat->fpe_cfg->lo_fpe_state = FPE_STATE_OFF; in stmmac_fpe_handshake()
7386 priv->plat->fpe_cfg->lp_fpe_state = FPE_STATE_OFF; in stmmac_fpe_handshake()
7389 priv->plat->fpe_cfg->hs_enable = enable; in stmmac_fpe_handshake()
7396 struct dma_desc *desc_contains_ts = ctx->desc; in stmmac_xdp_rx_timestamp()
7397 struct stmmac_priv *priv = ctx->priv; in stmmac_xdp_rx_timestamp()
7398 struct dma_desc *ndesc = ctx->ndesc; in stmmac_xdp_rx_timestamp()
7399 struct dma_desc *desc = ctx->desc; in stmmac_xdp_rx_timestamp()
7402 if (!priv->hwts_rx_en) in stmmac_xdp_rx_timestamp()
7403 return -ENODATA; in stmmac_xdp_rx_timestamp()
7406 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_xdp_rx_timestamp()
7410 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { in stmmac_xdp_rx_timestamp()
7411 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); in stmmac_xdp_rx_timestamp()
7412 ns -= priv->plat->cdc_error_adj; in stmmac_xdp_rx_timestamp()
7417 return -ENODATA; in stmmac_xdp_rx_timestamp()
7446 return -ENOMEM; in stmmac_dvr_probe()
7451 priv->device = device; in stmmac_dvr_probe()
7452 priv->dev = ndev; in stmmac_dvr_probe()
7455 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); in stmmac_dvr_probe()
7457 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); in stmmac_dvr_probe()
7458 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); in stmmac_dvr_probe()
7461 priv->xstats.pcpu_stats = in stmmac_dvr_probe()
7463 if (!priv->xstats.pcpu_stats) in stmmac_dvr_probe()
7464 return -ENOMEM; in stmmac_dvr_probe()
7467 priv->pause = pause; in stmmac_dvr_probe()
7468 priv->plat = plat_dat; in stmmac_dvr_probe()
7469 priv->ioaddr = res->addr; in stmmac_dvr_probe()
7470 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
7471 priv->plat->dma_cfg->multi_msi_en = in stmmac_dvr_probe()
7472 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); in stmmac_dvr_probe()
7474 priv->dev->irq = res->irq; in stmmac_dvr_probe()
7475 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
7476 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
7477 priv->sfty_ce_irq = res->sfty_ce_irq; in stmmac_dvr_probe()
7478 priv->sfty_ue_irq = res->sfty_ue_irq; in stmmac_dvr_probe()
7480 priv->rx_irq[i] = res->rx_irq[i]; in stmmac_dvr_probe()
7482 priv->tx_irq[i] = res->tx_irq[i]; in stmmac_dvr_probe()
7484 if (!is_zero_ether_addr(res->mac)) in stmmac_dvr_probe()
7485 eth_hw_addr_set(priv->dev, res->mac); in stmmac_dvr_probe()
7487 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
7492 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); in stmmac_dvr_probe()
7493 if (!priv->af_xdp_zc_qps) in stmmac_dvr_probe()
7494 return -ENOMEM; in stmmac_dvr_probe()
7497 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
7498 if (!priv->wq) { in stmmac_dvr_probe()
7499 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
7500 ret = -ENOMEM; in stmmac_dvr_probe()
7504 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
7507 INIT_WORK(&priv->fpe_task, stmmac_fpe_lp_task); in stmmac_dvr_probe()
7513 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
7515 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
7516 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7517 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7521 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7522 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7525 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_probe()
7526 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7527 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", in stmmac_dvr_probe()
7540 if (priv->synopsys_id < DWMAC_CORE_5_20) in stmmac_dvr_probe()
7541 priv->plat->dma_cfg->dche = false; in stmmac_dvr_probe()
7545 ndev->netdev_ops = &stmmac_netdev_ops; in stmmac_dvr_probe()
7547 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; in stmmac_dvr_probe()
7548 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; in stmmac_dvr_probe()
7550 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in stmmac_dvr_probe()
7552 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in stmmac_dvr_probe()
7557 ndev->hw_features |= NETIF_F_HW_TC; in stmmac_dvr_probe()
7560 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
7561 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in stmmac_dvr_probe()
7562 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
7563 ndev->hw_features |= NETIF_F_GSO_UDP_L4; in stmmac_dvr_probe()
7564 priv->tso = true; in stmmac_dvr_probe()
7565 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
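/* Split Header (SPH) lets the DMA place L3/L4 headers and payload in
 * separate buffers, which benefits GRO; it is enabled only when the
 * hardware reports the capability and the platform has not disabled it.
 */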
7568 if (priv->dma_cap.sphen && in stmmac_dvr_probe()
7569 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { in stmmac_dvr_probe()
7570 ndev->hw_features |= NETIF_F_GRO; in stmmac_dvr_probe()
7571 priv->sph_cap = true; in stmmac_dvr_probe()
7572 priv->sph = priv->sph_cap; in stmmac_dvr_probe()
7573 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
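/* Pick the host DMA addressing width: a platform override wins, otherwise
 * the addr64 value reported by the hardware is used; the DMA mask is set
 * accordingly, falling back to 32 bits if that fails.
 */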
7581 if (priv->plat->host_dma_width) in stmmac_dvr_probe()
7582 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; in stmmac_dvr_probe()
7584 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; in stmmac_dvr_probe()
7586 if (priv->dma_cap.host_dma_width) { in stmmac_dvr_probe()
7588 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); in stmmac_dvr_probe()
7590 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", in stmmac_dvr_probe()
7591 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); in stmmac_dvr_probe()
7598 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
7602 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
7606 priv->dma_cap.host_dma_width = 32; in stmmac_dvr_probe()
7610 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in stmmac_dvr_probe()
7611 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); in stmmac_dvr_probe()
7614 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; in stmmac_dvr_probe()
7615 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in stmmac_dvr_probe()
7616 priv->hw->hw_vlan_en = true; in stmmac_dvr_probe()
7618 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
7619 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in stmmac_dvr_probe()
7620 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; in stmmac_dvr_probe()
7622 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
7623 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; in stmmac_dvr_probe()
7624 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
7625 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; in stmmac_dvr_probe()
7628 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
7630 priv->xstats.threshold = tc; in stmmac_dvr_probe()
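/* Seed the RSS hash key and build a default indirection table spread over
 * the RX queues in use; NETIF_F_RXHASH is only advertised when both the
 * hardware capability and the platform enable RSS.
 */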
7633 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
7634 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
7635 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
7636 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
7638 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
7639 ndev->features |= NETIF_F_RXHASH; in stmmac_dvr_probe()
7641 ndev->vlan_features |= ndev->features; in stmmac_dvr_probe()
7643 ndev->vlan_features &= ~NETIF_F_TSO; in stmmac_dvr_probe()
7645 /* MTU range: 46 - hw-specific max */ in stmmac_dvr_probe()
7646 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; in stmmac_dvr_probe()
7647 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
7648 ndev->max_mtu = XGMAC_JUMBO_LEN; in stmmac_dvr_probe()
7649 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
7650 ndev->max_mtu = JUMBO_LEN; in stmmac_dvr_probe()
7652 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); in stmmac_dvr_probe()
7653 /* Do not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu, in stmmac_dvr_probe()
7654 * nor when plat->maxmtu < ndev->min_mtu, which is an invalid range. in stmmac_dvr_probe()
7656 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
7657 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
7658 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
7659 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
7660 dev_warn(priv->device, in stmmac_dvr_probe()
7662 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
7665 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ in stmmac_dvr_probe()
7667 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in stmmac_dvr_probe()
7672 mutex_init(&priv->lock); in stmmac_dvr_probe()
7676 * changed at run-time and is fixed; otherwise the driver will try to in stmmac_dvr_probe()
7680 if (priv->plat->clk_csr >= 0) in stmmac_dvr_probe()
7681 priv->clk_csr = priv->plat->clk_csr; in stmmac_dvr_probe()
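/* Unless a TBI/RTBI PCS is in use, register the MDIO bus here so the PHY
 * can be probed before phylink setup and netdev registration.
 */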
7692 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_probe()
7693 priv->hw->pcs != STMMAC_PCS_RTBI) { in stmmac_dvr_probe()
7697 dev_err_probe(priv->device, ret, in stmmac_dvr_probe()
7699 __func__, priv->plat->bus_id); in stmmac_dvr_probe()
7704 if (priv->plat->speed_mode_2500) in stmmac_dvr_probe()
7705 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); in stmmac_dvr_probe()
7707 if (priv->plat->mdio_bus_data && priv->plat->mdio_bus_data->has_xpcs) { in stmmac_dvr_probe()
7708 ret = stmmac_xpcs_setup(priv->mii); in stmmac_dvr_probe()
7721 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
7730 if (priv->plat->dump_debug_regs) in stmmac_dvr_probe()
7731 priv->plat->dump_debug_regs(priv->plat->bsp_priv); in stmmac_dvr_probe()
7741 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
7744 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_probe()
7745 priv->hw->pcs != STMMAC_PCS_RTBI) in stmmac_dvr_probe()
7750 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
7752 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_probe()
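/* For reference, a minimal sketch of how a platform glue driver typically
 * hands its resources to stmmac_dvr_probe(). The function name
 * dwmac_example_probe() is hypothetical, and the helpers
 * stmmac_get_platform_resources()/devm_stmmac_probe_config_dt() are
 * assumed from the stmmac platform layer of recent kernels.
 */
#include <linux/platform_device.h>
#include "stmmac.h"
#include "stmmac_platform.h"

static int dwmac_example_probe(struct platform_device *pdev)
{
	struct plat_stmmacenet_data *plat_dat;
	struct stmmac_resources stmmac_res;
	int ret;

	/* Collect the MMIO base, interrupts and MAC address */
	ret = stmmac_get_platform_resources(pdev, &stmmac_res);
	if (ret)
		return ret;

	/* Parse the common devicetree properties into plat_stmmacenet_data */
	plat_dat = devm_stmmac_probe_config_dt(pdev, stmmac_res.mac);
	if (IS_ERR(plat_dat))
		return PTR_ERR(plat_dat);

	/* Hand everything to the core probe shown above */
	return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
}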
7761 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7769 netdev_info(priv->dev, "%s: removing driver", __func__); in stmmac_dvr_remove()
7774 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_dvr_remove()
7781 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
7782 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
7783 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
7784 reset_control_assert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_remove()
7785 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_remove()
7786 priv->hw->pcs != STMMAC_PCS_RTBI) in stmmac_dvr_remove()
7788 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
7789 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
7790 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_remove()
7798 * stmmac_suspend - suspend callback
7801 * by the platform driver to stop the network queue, release the resources,
7813 mutex_lock(&priv->lock); in stmmac_suspend()
7819 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
7820 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_suspend()
7822 if (priv->eee_enabled) { in stmmac_suspend()
7823 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
7824 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
7827 /* Stop TX/RX DMA */ in stmmac_suspend()
7830 if (priv->plat->serdes_powerdown) in stmmac_suspend()
7831 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
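/* When Wake-on-LAN is handled by the PMT block, arm it with the configured
 * wolopts and note that the IRQ may wake the system; otherwise switch the
 * MAC off and move the pins to their sleep state.
 */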
7834 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7835 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
7836 priv->irq_wake = 1; in stmmac_suspend()
7838 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
7839 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
7842 mutex_unlock(&priv->lock); in stmmac_suspend()
7845 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7846 phylink_suspend(priv->phylink, true); in stmmac_suspend()
7848 if (device_may_wakeup(priv->device)) in stmmac_suspend()
7849 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
7850 phylink_suspend(priv->phylink, false); in stmmac_suspend()
7854 if (priv->dma_cap.fpesel) { in stmmac_suspend()
7856 stmmac_fpe_configure(priv, priv->ioaddr, in stmmac_suspend()
7857 priv->plat->fpe_cfg, in stmmac_suspend()
7858 priv->plat->tx_queues_to_use, in stmmac_suspend()
7859 priv->plat->rx_queues_to_use, false); in stmmac_suspend()
7865 priv->speed = SPEED_UNKNOWN; in stmmac_suspend()
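/* The helpers below reset the software ring state: RX/TX indices return to
 * zero, the cached TSO MSS is cleared and the netdev TX queue byte/packet
 * accounting is reset.
 */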
7870 static void stmmac_reset_rx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_reset_rx_queue() argument
7872 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_reset_rx_queue()
7874 rx_q->cur_rx = 0; in stmmac_reset_rx_queue()
7875 rx_q->dirty_rx = 0; in stmmac_reset_rx_queue()
7878 static void stmmac_reset_tx_queue(struct stmmac_priv *priv, u32 queue) in stmmac_reset_tx_queue() argument
7880 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_reset_tx_queue()
7882 tx_q->cur_tx = 0; in stmmac_reset_tx_queue()
7883 tx_q->dirty_tx = 0; in stmmac_reset_tx_queue()
7884 tx_q->mss = 0; in stmmac_reset_tx_queue()
7886 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_reset_tx_queue()
7890 * stmmac_reset_queues_param - reset queue parameters
7895 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
7896 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
7897 u32 queue; in stmmac_reset_queues_param() local
7899 for (queue = 0; queue < rx_cnt; queue++) in stmmac_reset_queues_param()
7900 stmmac_reset_rx_queue(priv, queue); in stmmac_reset_queues_param()
7902 for (queue = 0; queue < tx_cnt; queue++) in stmmac_reset_queues_param()
7903 stmmac_reset_tx_queue(priv, queue); in stmmac_reset_queues_param()
7907 * stmmac_resume - resume callback
7922 * automatically as soon as a magic packet or a Wake-up frame in stmmac_resume()
7927 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7928 mutex_lock(&priv->lock); in stmmac_resume()
7929 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
7930 mutex_unlock(&priv->lock); in stmmac_resume()
7931 priv->irq_wake = 0; in stmmac_resume()
7933 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
7935 if (priv->mii) in stmmac_resume()
7936 stmmac_mdio_reset(priv->mii); in stmmac_resume()
7939 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_resume()
7940 priv->plat->serdes_powerup) { in stmmac_resume()
7941 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
7942 priv->plat->bsp_priv); in stmmac_resume()
7949 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7950 phylink_resume(priv->phylink); in stmmac_resume()
7952 phylink_resume(priv->phylink); in stmmac_resume()
7953 if (device_may_wakeup(priv->device)) in stmmac_resume()
7954 phylink_speed_up(priv->phylink); in stmmac_resume()
7959 mutex_lock(&priv->lock); in stmmac_resume()
7964 stmmac_clear_descriptors(priv, &priv->dma_conf); in stmmac_resume()
7970 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
7975 mutex_unlock(&priv->lock); in stmmac_resume()