Lines Matching +full:eee +full:- +full:broken +full:- +full:100 +full:tx

1 // SPDX-License-Identifier: GPL-2.0-only
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
29 #include <linux/dma-mapping.h>
56 * with fine resolution and binary rollover. This avoids non-monotonic behavior
63 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
71 static int debug = -1;
73 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
75 static int phyaddr = -1;
79 #define STMMAC_TX_THRESH(x) ((x)->dma_conf.dma_tx_size / 4)
81 /* Limit to make sure XDP TX and slow path can coexist */
117 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
120 /* By default the driver will use the ring mode to manage tx and rx descriptors,
154 ret = clk_prepare_enable(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
157 ret = clk_prepare_enable(priv->plat->pclk); in stmmac_bus_clks_config()
159 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
162 if (priv->plat->clks_config) { in stmmac_bus_clks_config()
163 ret = priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
165 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
166 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
171 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_bus_clks_config()
172 clk_disable_unprepare(priv->plat->pclk); in stmmac_bus_clks_config()
173 if (priv->plat->clks_config) in stmmac_bus_clks_config()
174 priv->plat->clks_config(priv->plat->bsp_priv, enabled); in stmmac_bus_clks_config()
182 * stmmac_set_clk_tx_rate() - set the clock rate for the MAC transmit clock
189 * 25MHz for 100Mbps and 125MHz for 1Gbps. This is suitable for at least
191 * the plat_data->set_clk_tx_rate method directly, call it via their own
195 * plat_data->clk_tx_i must be filled in.
203 * supports 10, 100 and 1000Mbps. We do not want to spit in stmmac_set_clk_tx_rate()
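
The comment lines above describe the conventional MII/RGMII transmit-clock rates. As a rough sketch only, a helper along these lines could map link speed to clock rate; the helper name and the direct clk_set_rate() call are assumptions (the driver routes this through plat_data->set_clk_tx_rate), and the 2.5 MHz figure for 10Mbps is the usual MII value, not shown in the matched lines:

        #include <linux/clk.h>

        /* hypothetical helper: pick the conventional transmit clock rate for a
         * given link speed and apply it; other speeds are silently ignored
         */
        static int example_set_clk_tx_rate(struct clk *clk_tx_i, int speed)
        {
                unsigned long rate;

                switch (speed) {
                case 1000:
                        rate = 125000000;       /* 125 MHz for 1Gbps */
                        break;
                case 100:
                        rate = 25000000;        /* 25 MHz for 100Mbps */
                        break;
                case 10:
                        rate = 2500000;         /* 2.5 MHz for 10Mbps */
                        break;
                default:
                        return 0;
                }

                return clk_set_rate(clk_tx_i, rate);
        }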
214 * stmmac_verify_args - verify the driver parameters.
226 …pr_warn("stmmac: module parameter 'flow_ctrl' is obsolete - please remove from your module configu… in stmmac_verify_args()
231 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in __stmmac_disable_all_queues()
232 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in __stmmac_disable_all_queues()
237 struct stmmac_channel *ch = &priv->channel[queue]; in __stmmac_disable_all_queues()
240 test_bit(queue, priv->af_xdp_zc_qps)) { in __stmmac_disable_all_queues()
241 napi_disable(&ch->rxtx_napi); in __stmmac_disable_all_queues()
246 napi_disable(&ch->rx_napi); in __stmmac_disable_all_queues()
248 napi_disable(&ch->tx_napi); in __stmmac_disable_all_queues()
253 * stmmac_disable_all_queues - Disable all queues
258 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
264 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_disable_all_queues()
265 if (rx_q->xsk_pool) { in stmmac_disable_all_queues()
275 * stmmac_enable_all_queues - Enable all queues
280 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
281 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
286 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
289 test_bit(queue, priv->af_xdp_zc_qps)) { in stmmac_enable_all_queues()
290 napi_enable(&ch->rxtx_napi); in stmmac_enable_all_queues()
295 napi_enable(&ch->rx_napi); in stmmac_enable_all_queues()
297 napi_enable(&ch->tx_napi); in stmmac_enable_all_queues()
303 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
304 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
305 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
310 netif_carrier_off(priv->dev); in stmmac_global_err()
311 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
316 * stmmac_clk_csr_set - dynamically set the MDC clock
323 * changed at run-time and it is fixed (as reported in the driver
331 clk_rate = clk_get_rate(priv->plat->stmmac_clk); in stmmac_clk_csr_set()
340 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { in stmmac_clk_csr_set()
342 priv->clk_csr = STMMAC_CSR_20_35M; in stmmac_clk_csr_set()
344 priv->clk_csr = STMMAC_CSR_35_60M; in stmmac_clk_csr_set()
346 priv->clk_csr = STMMAC_CSR_60_100M; in stmmac_clk_csr_set()
348 priv->clk_csr = STMMAC_CSR_100_150M; in stmmac_clk_csr_set()
350 priv->clk_csr = STMMAC_CSR_150_250M; in stmmac_clk_csr_set()
352 priv->clk_csr = STMMAC_CSR_250_300M; in stmmac_clk_csr_set()
354 priv->clk_csr = STMMAC_CSR_300_500M; in stmmac_clk_csr_set()
356 priv->clk_csr = STMMAC_CSR_500_800M; in stmmac_clk_csr_set()
359 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) { in stmmac_clk_csr_set()
361 priv->clk_csr = 0x03; in stmmac_clk_csr_set()
363 priv->clk_csr = 0x02; in stmmac_clk_csr_set()
365 priv->clk_csr = 0x01; in stmmac_clk_csr_set()
367 priv->clk_csr = 0; in stmmac_clk_csr_set()
370 if (priv->plat->has_xgmac) { in stmmac_clk_csr_set()
372 priv->clk_csr = 0x5; in stmmac_clk_csr_set()
374 priv->clk_csr = 0x4; in stmmac_clk_csr_set()
376 priv->clk_csr = 0x3; in stmmac_clk_csr_set()
378 priv->clk_csr = 0x2; in stmmac_clk_csr_set()
380 priv->clk_csr = 0x1; in stmmac_clk_csr_set()
382 priv->clk_csr = 0x0; in stmmac_clk_csr_set()
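
The range checks feeding priv->clk_csr above amount to a simple rate-to-setting lookup. A minimal sketch, assuming the driver's STMMAC_CSR_* definitions are in scope and the range boundaries implied by the macro names (the exact comparison constants are not in the matched lines):

        static u32 example_pick_csr(unsigned long clk_rate)
        {
                if (clk_rate < 35000000)
                        return STMMAC_CSR_20_35M;
                else if (clk_rate < 60000000)
                        return STMMAC_CSR_35_60M;
                else if (clk_rate < 100000000)
                        return STMMAC_CSR_60_100M;
                else if (clk_rate < 150000000)
                        return STMMAC_CSR_100_150M;
                else if (clk_rate < 250000000)
                        return STMMAC_CSR_150_250M;
                else if (clk_rate < 300000000)
                        return STMMAC_CSR_250_300M;
                else if (clk_rate < 500000000)
                        return STMMAC_CSR_300_500M;
                else
                        return STMMAC_CSR_500_800M;
        }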
394 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_avail()
397 if (tx_q->dirty_tx > tx_q->cur_tx) in stmmac_tx_avail()
398 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; in stmmac_tx_avail()
400 avail = priv->dma_conf.dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
406 * stmmac_rx_dirty - Get RX queue dirty
412 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_dirty()
415 if (rx_q->dirty_rx <= rx_q->cur_rx) in stmmac_rx_dirty()
416 dirty = rx_q->cur_rx - rx_q->dirty_rx; in stmmac_rx_dirty()
418 dirty = priv->dma_conf.dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
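
stmmac_tx_avail() and stmmac_rx_dirty() above are the two halves of the usual circular-ring bookkeeping: free TX slots keep one entry in reserve so that cur == dirty can only mean "empty", and the RX dirty count is the complementary number of consumed slots awaiting refill. A standalone illustration of the arithmetic:

        /* free TX slots between consumer (dirty) and producer (cur),
         * always leaving one slot unused
         */
        static unsigned int ring_tx_avail(unsigned int dirty, unsigned int cur,
                                          unsigned int size)
        {
                if (dirty > cur)
                        return dirty - cur - 1;
                return size - cur + dirty - 1;
        }

        /* e.g. size = 512, cur = 510, dirty = 2  ->  3 free slots */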
425 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_eee_tx_busy()
428 /* check if all TX queues have the work finished */ in stmmac_eee_tx_busy()
430 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_eee_tx_busy()
432 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_eee_tx_busy()
441 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_restart_sw_lpi_timer()
445 * stmmac_try_to_start_sw_lpi - check and enter in LPI mode
448 * EEE.
458 if (!priv->tx_path_in_lpi_mode) in stmmac_try_to_start_sw_lpi()
459 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_FORCED, in stmmac_try_to_start_sw_lpi()
460 priv->tx_lpi_clk_stop, 0); in stmmac_try_to_start_sw_lpi()
464 * stmmac_stop_sw_lpi - stop transmitting LPI
466 * Description: When using software-controlled LPI, stop transmitting LPI state.
470 timer_delete_sync(&priv->eee_ctrl_timer); in stmmac_stop_sw_lpi()
471 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); in stmmac_stop_sw_lpi()
472 priv->tx_path_in_lpi_mode = false; in stmmac_stop_sw_lpi()
476 * stmmac_eee_ctrl_timer - EEE TX SW timer.
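
The eee_ctrl_timer lines above follow the standard kernel timer pattern: the TX path re-arms the timer while traffic is flowing, only the expiry callback forces LPI, and timer_delete_sync() tears it down when EEE is disabled. A minimal sketch of that pattern (names are hypothetical; the timeout is taken as milliseconds per the eee_timer module parameter description):

        #include <linux/timer.h>
        #include <linux/jiffies.h>

        static struct timer_list example_lpi_timer;

        static void example_lpi_expired(struct timer_list *t)
        {
                /* no TX activity for the whole timeout: enter LPI here */
        }

        static void example_lpi_init_and_arm(unsigned int timeout_ms)
        {
                timer_setup(&example_lpi_timer, example_lpi_expired, 0);
                mod_timer(&example_lpi_timer,
                          jiffies + msecs_to_jiffies(timeout_ms));
        }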
489 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
504 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
508 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) in stmmac_get_tx_hwtstamp()
511 /* check tx tstamp status */ in stmmac_get_tx_hwtstamp()
513 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
515 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
520 ns -= priv->plat->cdc_error_adj; in stmmac_get_tx_hwtstamp()
525 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
531 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
547 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
550 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
554 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
555 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
557 ns -= priv->plat->cdc_error_adj; in stmmac_get_rx_hwtstamp()
559 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
562 shhwtstamp->hwtstamp = ns_to_ktime(ns); in stmmac_get_rx_hwtstamp()
564 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
569 * stmmac_hwtstamp_set - control hardware timestamping.
574 * This function configures the MAC to enable/disable both outgoing (TX) in stmmac_hwtstamp_set()
577 * 0 on success and an appropriate -ve integer on failure.
592 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
593 netdev_alert(priv->dev, "No support for HW time stamping\n"); in stmmac_hwtstamp_set()
594 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
595 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
597 return -EOPNOTSUPP; in stmmac_hwtstamp_set()
600 if (copy_from_user(&config, ifr->ifr_data, in stmmac_hwtstamp_set()
602 return -EFAULT; in stmmac_hwtstamp_set()
604 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
609 return -ERANGE; in stmmac_hwtstamp_set()
611 if (priv->adv_ts) { in stmmac_hwtstamp_set()
692 if (priv->synopsys_id < DWMAC_CORE_4_10) in stmmac_hwtstamp_set()
732 return -ERANGE; in stmmac_hwtstamp_set()
745 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); in stmmac_hwtstamp_set()
746 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
748 priv->systime_flags = STMMAC_HWTS_ACTIVE; in stmmac_hwtstamp_set()
750 if (priv->hwts_tx_en || priv->hwts_rx_en) { in stmmac_hwtstamp_set()
751 priv->systime_flags |= tstamp_all | ptp_v2 | in stmmac_hwtstamp_set()
757 stmmac_config_hw_tstamping(priv, priv->ptpaddr, priv->systime_flags); in stmmac_hwtstamp_set()
759 memcpy(&priv->tstamp_config, &config, sizeof(config)); in stmmac_hwtstamp_set()
761 return copy_to_user(ifr->ifr_data, &config, in stmmac_hwtstamp_set()
762 sizeof(config)) ? -EFAULT : 0; in stmmac_hwtstamp_set()
766 * stmmac_hwtstamp_get - read hardware timestamping.
777 struct hwtstamp_config *config = &priv->tstamp_config; in stmmac_hwtstamp_get()
779 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
780 return -EOPNOTSUPP; in stmmac_hwtstamp_get()
782 return copy_to_user(ifr->ifr_data, config, in stmmac_hwtstamp_get()
783 sizeof(*config)) ? -EFAULT : 0; in stmmac_hwtstamp_get()
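
From userspace, the handlers above are reached through the SIOCSHWTSTAMP/SIOCGHWTSTAMP ioctls carrying a struct hwtstamp_config, matching the copy_from_user()/copy_to_user() calls in the matched lines. A small, compilable sketch of the set side (error handling trimmed; any datagram socket can carry the ioctl):

        #include <string.h>
        #include <sys/ioctl.h>
        #include <net/if.h>
        #include <linux/net_tstamp.h>
        #include <linux/sockios.h>

        static int enable_hw_timestamping(int sock, const char *ifname)
        {
                struct hwtstamp_config cfg = {
                        .tx_type   = HWTSTAMP_TX_ON,
                        .rx_filter = HWTSTAMP_FILTER_ALL,
                };
                struct ifreq ifr;

                memset(&ifr, 0, sizeof(ifr));
                strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
                ifr.ifr_data = (void *)&cfg;

                /* the driver validates the request and may write back a
                 * downgraded rx_filter before returning 0
                 */
                return ioctl(sock, SIOCSHWTSTAMP, &ifr);
        }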
787 * stmmac_init_tstamp_counter - init hardware timestamping counter
798 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_tstamp_counter()
803 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_init_tstamp_counter()
804 return -EOPNOTSUPP; in stmmac_init_tstamp_counter()
806 stmmac_config_hw_tstamping(priv, priv->ptpaddr, systime_flags); in stmmac_init_tstamp_counter()
807 priv->systime_flags = systime_flags; in stmmac_init_tstamp_counter()
810 stmmac_config_sub_second_increment(priv, priv->ptpaddr, in stmmac_init_tstamp_counter()
811 priv->plat->clk_ptp_rate, in stmmac_init_tstamp_counter()
816 priv->sub_second_inc = sec_inc; in stmmac_init_tstamp_counter()
824 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_init_tstamp_counter()
825 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_init_tstamp_counter()
831 stmmac_init_systime(priv, priv->ptpaddr, (u32)now.tv_sec, now.tv_nsec); in stmmac_init_tstamp_counter()
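
The default_addend computed above follows the usual fine-adjustment scheme for these MACs: the free-running system-time counter advances by sub_second_inc nanoseconds each time a 32-bit accumulator, bumped by the addend once per PTP clock cycle, overflows. A standalone illustration of the arithmetic (the accumulator model is stated here as an assumption; only the div_u64() by clk_ptp_rate appears in the matched lines):

        #include <stdint.h>

        static uint32_t example_default_addend(uint32_t sub_second_inc_ns,
                                               uint32_t clk_ptp_rate)
        {
                /* effective counter frequency implied by the chosen resolution */
                uint64_t counter_hz = 1000000000ULL / sub_second_inc_ns;

                /* addend = 2^32 * counter_hz / clk_ptp_rate */
                return (counter_hz << 32) / clk_ptp_rate;
        }

        /* e.g. sub_second_inc_ns = 20 (50 MHz resolution), clk_ptp_rate = 100 MHz
         *      -> addend = 0x80000000, i.e. the accumulator overflows every
         *         second PTP clock cycle
         */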
838 * stmmac_init_ptp - init PTP
846 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_ptp()
849 if (priv->plat->ptp_clk_freq_config) in stmmac_init_ptp()
850 priv->plat->ptp_clk_freq_config(priv); in stmmac_init_ptp()
856 priv->adv_ts = 0; in stmmac_init_ptp()
858 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
859 priv->adv_ts = 1; in stmmac_init_ptp()
861 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
862 priv->adv_ts = 1; in stmmac_init_ptp()
864 if (priv->dma_cap.time_stamp) in stmmac_init_ptp()
865 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_ptp()
867 if (priv->adv_ts) in stmmac_init_ptp()
868 netdev_info(priv->dev, in stmmac_init_ptp()
869 "IEEE 1588-2008 Advanced Timestamp supported\n"); in stmmac_init_ptp()
871 priv->hwts_tx_en = 0; in stmmac_init_ptp()
872 priv->hwts_rx_en = 0; in stmmac_init_ptp()
874 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_init_ptp()
882 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
887 * stmmac_mac_flow_ctrl - Configure flow control in all queues
896 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
898 stmmac_flow_ctrl(priv, priv->hw, duplex, flow_ctrl, priv->pause_time, in stmmac_mac_flow_ctrl()
905 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_get_caps()
907 /* Refresh the MAC-specific capabilities */ in stmmac_mac_get_caps()
910 config->mac_capabilities = priv->hw->link.caps; in stmmac_mac_get_caps()
912 if (priv->plat->max_speed) in stmmac_mac_get_caps()
913 phylink_limit_mac_speed(config, priv->plat->max_speed); in stmmac_mac_get_caps()
915 return config->mac_capabilities; in stmmac_mac_get_caps()
921 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_select_pcs()
924 if (priv->plat->select_pcs) { in stmmac_mac_select_pcs()
925 pcs = priv->plat->select_pcs(priv, interface); in stmmac_mac_select_pcs()
942 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down()
944 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
945 if (priv->dma_cap.eee) in stmmac_mac_link_down()
946 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
958 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up()
963 if ((priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_mac_link_up()
964 priv->plat->serdes_powerup) in stmmac_mac_link_up()
965 priv->plat->serdes_powerup(priv->dev, priv->plat->bsp_priv); in stmmac_mac_link_up()
967 old_ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
968 ctrl = old_ctrl & ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
973 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
976 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
979 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
987 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
990 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
993 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
996 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
999 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
1002 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1005 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1013 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
1016 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
1019 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
1022 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
1029 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
1030 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed, mode); in stmmac_mac_link_up()
1033 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
1035 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
1050 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1052 if (priv->plat->set_clk_tx_rate) { in stmmac_mac_link_up()
1053 ret = priv->plat->set_clk_tx_rate(priv->plat->bsp_priv, in stmmac_mac_link_up()
1054 priv->plat->clk_tx_i, in stmmac_mac_link_up()
1057 netdev_err(priv->dev, in stmmac_mac_link_up()
1062 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
1063 if (priv->dma_cap.eee) in stmmac_mac_link_up()
1064 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
1069 if (priv->plat->flags & STMMAC_FLAG_HWTSTAMP_CORRECT_LATENCY) in stmmac_mac_link_up()
1075 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_disable_tx_lpi()
1077 priv->eee_active = false; in stmmac_mac_disable_tx_lpi()
1079 mutex_lock(&priv->lock); in stmmac_mac_disable_tx_lpi()
1081 priv->eee_enabled = false; in stmmac_mac_disable_tx_lpi()
1083 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_mac_disable_tx_lpi()
1084 priv->eee_sw_timer_en = false; in stmmac_mac_disable_tx_lpi()
1085 timer_delete_sync(&priv->eee_ctrl_timer); in stmmac_mac_disable_tx_lpi()
1086 stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_DISABLE, false, 0); in stmmac_mac_disable_tx_lpi()
1087 priv->tx_path_in_lpi_mode = false; in stmmac_mac_disable_tx_lpi()
1089 stmmac_set_eee_timer(priv, priv->hw, 0, STMMAC_DEFAULT_TWT_LS); in stmmac_mac_disable_tx_lpi()
1090 mutex_unlock(&priv->lock); in stmmac_mac_disable_tx_lpi()
1096 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_enable_tx_lpi()
1099 priv->tx_lpi_timer = timer; in stmmac_mac_enable_tx_lpi()
1100 priv->eee_active = true; in stmmac_mac_enable_tx_lpi()
1102 mutex_lock(&priv->lock); in stmmac_mac_enable_tx_lpi()
1104 priv->eee_enabled = true; in stmmac_mac_enable_tx_lpi()
1109 if (priv->plat->flags & STMMAC_FLAG_EN_TX_LPI_CLK_PHY_CAP) in stmmac_mac_enable_tx_lpi()
1110 priv->tx_lpi_clk_stop = tx_clk_stop; in stmmac_mac_enable_tx_lpi()
1112 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_mac_enable_tx_lpi()
1116 ret = stmmac_set_lpi_mode(priv, priv->hw, STMMAC_LPI_TIMER, in stmmac_mac_enable_tx_lpi()
1117 priv->tx_lpi_clk_stop, priv->tx_lpi_timer); in stmmac_mac_enable_tx_lpi()
1123 priv->eee_sw_timer_en = true; in stmmac_mac_enable_tx_lpi()
1127 mutex_unlock(&priv->lock); in stmmac_mac_enable_tx_lpi()
1128 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_mac_enable_tx_lpi()
1136 struct net_device *ndev = to_net_dev(config->dev); in stmmac_mac_finish()
1139 if (priv->plat->mac_finish) in stmmac_mac_finish()
1140 priv->plat->mac_finish(ndev, priv->plat->bsp_priv, mode, interface); in stmmac_mac_finish()
1157 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1165 int interface = priv->plat->mac_interface; in stmmac_check_pcs_mode()
1167 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1172 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1173 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1175 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1176 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1182 * stmmac_init_phy - PHY initialization
1196 if (!phylink_expects_phy(priv->phylink)) in stmmac_init_phy()
1199 fwnode = priv->plat->port_node; in stmmac_init_phy()
1201 fwnode = dev_fwnode(priv->device); in stmmac_init_phy()
1208 /* Some DT bindings do not set up the PHY handle. Let's try to in stmmac_init_phy()
1212 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1216 netdev_err(priv->dev, "no phy found\n"); in stmmac_init_phy()
1217 return -ENODEV; in stmmac_init_phy()
1220 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1222 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1223 return -ENODEV; in stmmac_init_phy()
1226 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1229 ret = phylink_fwnode_phy_connect(priv->phylink, fwnode, 0); in stmmac_init_phy()
1233 struct ethtool_keee eee; in stmmac_init_phy() local
1241 if (!phylink_ethtool_get_eee(priv->phylink, &eee)) { in stmmac_init_phy()
1242 eee.tx_lpi_timer = priv->tx_lpi_timer; in stmmac_init_phy()
1243 phylink_ethtool_set_eee(priv->phylink, &eee); in stmmac_init_phy()
1247 if (!priv->plat->pmt) { in stmmac_init_phy()
1250 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1251 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1252 device_set_wakeup_enable(priv->device, !!wol.wolopts); in stmmac_init_phy()
1261 int mode = priv->plat->phy_interface; in stmmac_phy_setup()
1266 priv->phylink_config.dev = &priv->dev->dev; in stmmac_phy_setup()
1267 priv->phylink_config.type = PHYLINK_NETDEV; in stmmac_phy_setup()
1268 priv->phylink_config.mac_managed_pm = true; in stmmac_phy_setup()
1271 priv->phylink_config.mac_requires_rxc = true; in stmmac_phy_setup()
1273 if (!(priv->plat->flags & STMMAC_FLAG_RX_CLK_RUNS_IN_LPI)) in stmmac_phy_setup()
1274 priv->phylink_config.eee_rx_clk_stop_enable = true; in stmmac_phy_setup()
1277 priv->tx_lpi_clk_stop = priv->plat->flags & in stmmac_phy_setup()
1280 mdio_bus_data = priv->plat->mdio_bus_data; in stmmac_phy_setup()
1282 priv->phylink_config.default_an_inband = in stmmac_phy_setup()
1283 mdio_bus_data->default_an_inband; in stmmac_phy_setup()
1288 __set_bit(mode, priv->phylink_config.supported_interfaces); in stmmac_phy_setup()
1291 if (priv->hw->xpcs) in stmmac_phy_setup()
1292 pcs = xpcs_to_phylink_pcs(priv->hw->xpcs); in stmmac_phy_setup()
1294 pcs = priv->hw->phylink_pcs; in stmmac_phy_setup()
1297 phy_interface_or(priv->phylink_config.supported_interfaces, in stmmac_phy_setup()
1298 priv->phylink_config.supported_interfaces, in stmmac_phy_setup()
1299 pcs->supported_interfaces); in stmmac_phy_setup()
1301 if (priv->dma_cap.eee) { in stmmac_phy_setup()
1303 memcpy(priv->phylink_config.lpi_interfaces, in stmmac_phy_setup()
1304 priv->phylink_config.supported_interfaces, in stmmac_phy_setup()
1305 sizeof(priv->phylink_config.lpi_interfaces)); in stmmac_phy_setup()
1307 /* All full duplex speeds above 100Mbps are supported */ in stmmac_phy_setup()
1308 priv->phylink_config.lpi_capabilities = ~(MAC_1000FD - 1) | in stmmac_phy_setup()
1310 priv->phylink_config.lpi_timer_default = eee_timer * 1000; in stmmac_phy_setup()
1311 priv->phylink_config.eee_enabled_default = true; in stmmac_phy_setup()
1314 fwnode = priv->plat->port_node; in stmmac_phy_setup()
1316 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1318 phylink = phylink_create(&priv->phylink_config, fwnode, in stmmac_phy_setup()
1323 priv->phylink = phylink; in stmmac_phy_setup()
1330 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1337 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_display_rx_rings()
1341 if (priv->extend_desc) { in stmmac_display_rx_rings()
1342 head_rx = (void *)rx_q->dma_erx; in stmmac_display_rx_rings()
1345 head_rx = (void *)rx_q->dma_rx; in stmmac_display_rx_rings()
1350 stmmac_display_ring(priv, head_rx, dma_conf->dma_rx_size, true, in stmmac_display_rx_rings()
1351 rx_q->dma_rx_phy, desc_size); in stmmac_display_rx_rings()
1358 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1363 /* Display TX rings */ in stmmac_display_tx_rings()
1365 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_display_tx_rings()
1369 if (priv->extend_desc) { in stmmac_display_tx_rings()
1370 head_tx = (void *)tx_q->dma_etx; in stmmac_display_tx_rings()
1372 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in stmmac_display_tx_rings()
1373 head_tx = (void *)tx_q->dma_entx; in stmmac_display_tx_rings()
1376 head_tx = (void *)tx_q->dma_tx; in stmmac_display_tx_rings()
1380 stmmac_display_ring(priv, head_tx, dma_conf->dma_tx_size, false, in stmmac_display_tx_rings()
1381 tx_q->dma_tx_phy, desc_size); in stmmac_display_tx_rings()
1391 /* Display TX ring */ in stmmac_display_rings()
1422 * stmmac_clear_rx_descriptors - clear RX descriptors
1433 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_clear_rx_descriptors()
1437 for (i = 0; i < dma_conf->dma_rx_size; i++) in stmmac_clear_rx_descriptors()
1438 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1439 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1440 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1441 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1442 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1444 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1445 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1446 (i == dma_conf->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1447 dma_conf->dma_buf_sz); in stmmac_clear_rx_descriptors()
1451 * stmmac_clear_tx_descriptors - clear tx descriptors
1454 * @queue: TX queue index.
1455 * Description: this function is called to clear the TX descriptors
1462 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_clear_tx_descriptors()
1465 /* Clear the TX descriptors */ in stmmac_clear_tx_descriptors()
1466 for (i = 0; i < dma_conf->dma_tx_size; i++) { in stmmac_clear_tx_descriptors()
1467 int last = (i == (dma_conf->dma_tx_size - 1)); in stmmac_clear_tx_descriptors()
1470 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1471 p = &tx_q->dma_etx[i].basic; in stmmac_clear_tx_descriptors()
1472 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_clear_tx_descriptors()
1473 p = &tx_q->dma_entx[i].basic; in stmmac_clear_tx_descriptors()
1475 p = &tx_q->dma_tx[i]; in stmmac_clear_tx_descriptors()
1477 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
1482 * stmmac_clear_descriptors - clear descriptors
1485 * Description: this function is called to clear the TX and RX descriptors
1491 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1492 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1499 /* Clear the TX descriptors */ in stmmac_clear_descriptors()
1505 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1520 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_init_rx_buffers()
1521 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_init_rx_buffers()
1524 if (priv->dma_cap.host_dma_width <= 32) in stmmac_init_rx_buffers()
1527 if (!buf->page) { in stmmac_init_rx_buffers()
1528 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1529 if (!buf->page) in stmmac_init_rx_buffers()
1530 return -ENOMEM; in stmmac_init_rx_buffers()
1531 buf->page_offset = stmmac_rx_offset(priv); in stmmac_init_rx_buffers()
1534 if (priv->sph && !buf->sec_page) { in stmmac_init_rx_buffers()
1535 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_init_rx_buffers()
1536 if (!buf->sec_page) in stmmac_init_rx_buffers()
1537 return -ENOMEM; in stmmac_init_rx_buffers()
1539 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_init_rx_buffers()
1540 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_init_rx_buffers()
1542 buf->sec_page = NULL; in stmmac_init_rx_buffers()
1543 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_init_rx_buffers()
1546 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_init_rx_buffers()
1548 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1549 if (dma_conf->dma_buf_sz == BUF_SIZE_16KiB) in stmmac_init_rx_buffers()
1556 * stmmac_free_rx_buffer - free RX dma buffers
1565 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_free_rx_buffer()
1567 if (buf->page) in stmmac_free_rx_buffer()
1568 page_pool_put_full_page(rx_q->page_pool, buf->page, false); in stmmac_free_rx_buffer()
1569 buf->page = NULL; in stmmac_free_rx_buffer()
1571 if (buf->sec_page) in stmmac_free_rx_buffer()
1572 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); in stmmac_free_rx_buffer()
1573 buf->sec_page = NULL; in stmmac_free_rx_buffer()
1577 * stmmac_free_tx_buffer - free TX dma buffers
1587 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in stmmac_free_tx_buffer()
1589 if (tx_q->tx_skbuff_dma[i].buf && in stmmac_free_tx_buffer()
1590 tx_q->tx_skbuff_dma[i].buf_type != STMMAC_TXBUF_T_XDP_TX) { in stmmac_free_tx_buffer()
1591 if (tx_q->tx_skbuff_dma[i].map_as_page) in stmmac_free_tx_buffer()
1592 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1593 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1594 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1597 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1598 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1599 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1603 if (tx_q->xdpf[i] && in stmmac_free_tx_buffer()
1604 (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_free_tx_buffer()
1605 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XDP_NDO)) { in stmmac_free_tx_buffer()
1606 xdp_return_frame(tx_q->xdpf[i]); in stmmac_free_tx_buffer()
1607 tx_q->xdpf[i] = NULL; in stmmac_free_tx_buffer()
1610 if (tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_free_tx_buffer()
1611 tx_q->xsk_frames_done++; in stmmac_free_tx_buffer()
1613 if (tx_q->tx_skbuff[i] && in stmmac_free_tx_buffer()
1614 tx_q->tx_skbuff_dma[i].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_free_tx_buffer()
1615 dev_kfree_skb_any(tx_q->tx_skbuff[i]); in stmmac_free_tx_buffer()
1616 tx_q->tx_skbuff[i] = NULL; in stmmac_free_tx_buffer()
1619 tx_q->tx_skbuff_dma[i].buf = 0; in stmmac_free_tx_buffer()
1620 tx_q->tx_skbuff_dma[i].map_as_page = false; in stmmac_free_tx_buffer()
1624 * dma_free_rx_skbufs - free RX dma buffers
1633 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_skbufs()
1636 for (i = 0; i < dma_conf->dma_rx_size; i++) in dma_free_rx_skbufs()
1644 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers()
1647 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers()
1651 if (priv->extend_desc) in stmmac_alloc_rx_buffers()
1652 p = &((rx_q->dma_erx + i)->basic); in stmmac_alloc_rx_buffers()
1654 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers()
1661 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers()
1668 * dma_free_rx_xskbufs - free RX dma buffers from XSK pool
1677 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in dma_free_rx_xskbufs()
1680 for (i = 0; i < dma_conf->dma_rx_size; i++) { in dma_free_rx_xskbufs()
1681 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in dma_free_rx_xskbufs()
1683 if (!buf->xdp) in dma_free_rx_xskbufs()
1686 xsk_buff_free(buf->xdp); in dma_free_rx_xskbufs()
1687 buf->xdp = NULL; in dma_free_rx_xskbufs()
1695 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in stmmac_alloc_rx_buffers_zc()
1704 for (i = 0; i < dma_conf->dma_rx_size; i++) { in stmmac_alloc_rx_buffers_zc()
1709 if (priv->extend_desc) in stmmac_alloc_rx_buffers_zc()
1710 p = (struct dma_desc *)(rx_q->dma_erx + i); in stmmac_alloc_rx_buffers_zc()
1712 p = rx_q->dma_rx + i; in stmmac_alloc_rx_buffers_zc()
1714 buf = &rx_q->buf_pool[i]; in stmmac_alloc_rx_buffers_zc()
1716 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_alloc_rx_buffers_zc()
1717 if (!buf->xdp) in stmmac_alloc_rx_buffers_zc()
1718 return -ENOMEM; in stmmac_alloc_rx_buffers_zc()
1720 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_alloc_rx_buffers_zc()
1722 rx_q->buf_alloc_num++; in stmmac_alloc_rx_buffers_zc()
1730 if (!stmmac_xdp_is_enabled(priv) || !test_bit(queue, priv->af_xdp_zc_qps)) in stmmac_get_xsk_pool()
1733 return xsk_get_pool_from_qid(priv->dev, queue); in stmmac_get_xsk_pool()
1737 * __init_dma_rx_desc_rings - init the RX descriptor ring (per queue)
1750 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __init_dma_rx_desc_rings()
1753 netif_dbg(priv, probe, priv->dev, in __init_dma_rx_desc_rings()
1755 (u32)rx_q->dma_rx_phy); in __init_dma_rx_desc_rings()
1759 xdp_rxq_info_unreg_mem_model(&rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1761 rx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_rx_desc_rings()
1763 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1764 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1767 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1768 "Register MEM_TYPE_XSK_BUFF_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1769 rx_q->queue_index); in __init_dma_rx_desc_rings()
1770 xsk_pool_set_rxq_info(rx_q->xsk_pool, &rx_q->xdp_rxq); in __init_dma_rx_desc_rings()
1772 WARN_ON(xdp_rxq_info_reg_mem_model(&rx_q->xdp_rxq, in __init_dma_rx_desc_rings()
1774 rx_q->page_pool)); in __init_dma_rx_desc_rings()
1775 netdev_info(priv->dev, in __init_dma_rx_desc_rings()
1776 "Register MEM_TYPE_PAGE_POOL RxQ-%d\n", in __init_dma_rx_desc_rings()
1777 rx_q->queue_index); in __init_dma_rx_desc_rings()
1780 if (rx_q->xsk_pool) { in __init_dma_rx_desc_rings()
1782 * xdpsock TX-only. in __init_dma_rx_desc_rings()
1788 return -ENOMEM; in __init_dma_rx_desc_rings()
1792 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_rx_desc_rings()
1793 if (priv->extend_desc) in __init_dma_rx_desc_rings()
1794 stmmac_mode_init(priv, rx_q->dma_erx, in __init_dma_rx_desc_rings()
1795 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1796 dma_conf->dma_rx_size, 1); in __init_dma_rx_desc_rings()
1798 stmmac_mode_init(priv, rx_q->dma_rx, in __init_dma_rx_desc_rings()
1799 rx_q->dma_rx_phy, in __init_dma_rx_desc_rings()
1800 dma_conf->dma_rx_size, 0); in __init_dma_rx_desc_rings()
1811 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1816 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1829 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in init_dma_rx_desc_rings()
1831 if (rx_q->xsk_pool) in init_dma_rx_desc_rings()
1836 rx_q->buf_alloc_num = 0; in init_dma_rx_desc_rings()
1837 rx_q->xsk_pool = NULL; in init_dma_rx_desc_rings()
1839 queue--; in init_dma_rx_desc_rings()
1846 * __init_dma_tx_desc_rings - init the TX descriptor ring (per queue)
1849 * @queue: TX queue index
1850 * Description: this function initializes the DMA TX descriptors
1858 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __init_dma_tx_desc_rings()
1861 netif_dbg(priv, probe, priv->dev, in __init_dma_tx_desc_rings()
1863 (u32)tx_q->dma_tx_phy); in __init_dma_tx_desc_rings()
1866 if (priv->mode == STMMAC_CHAIN_MODE) { in __init_dma_tx_desc_rings()
1867 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1868 stmmac_mode_init(priv, tx_q->dma_etx, in __init_dma_tx_desc_rings()
1869 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1870 dma_conf->dma_tx_size, 1); in __init_dma_tx_desc_rings()
1871 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) in __init_dma_tx_desc_rings()
1872 stmmac_mode_init(priv, tx_q->dma_tx, in __init_dma_tx_desc_rings()
1873 tx_q->dma_tx_phy, in __init_dma_tx_desc_rings()
1874 dma_conf->dma_tx_size, 0); in __init_dma_tx_desc_rings()
1877 tx_q->xsk_pool = stmmac_get_xsk_pool(priv, queue); in __init_dma_tx_desc_rings()
1879 for (i = 0; i < dma_conf->dma_tx_size; i++) { in __init_dma_tx_desc_rings()
1882 if (priv->extend_desc) in __init_dma_tx_desc_rings()
1883 p = &((tx_q->dma_etx + i)->basic); in __init_dma_tx_desc_rings()
1884 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __init_dma_tx_desc_rings()
1885 p = &((tx_q->dma_entx + i)->basic); in __init_dma_tx_desc_rings()
1887 p = tx_q->dma_tx + i; in __init_dma_tx_desc_rings()
1891 tx_q->tx_skbuff_dma[i].buf = 0; in __init_dma_tx_desc_rings()
1892 tx_q->tx_skbuff_dma[i].map_as_page = false; in __init_dma_tx_desc_rings()
1893 tx_q->tx_skbuff_dma[i].len = 0; in __init_dma_tx_desc_rings()
1894 tx_q->tx_skbuff_dma[i].last_segment = false; in __init_dma_tx_desc_rings()
1895 tx_q->tx_skbuff[i] = NULL; in __init_dma_tx_desc_rings()
1908 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1917 * init_dma_desc_rings - init the RX/TX descriptor rings
1921 * Description: this function initializes the DMA RX/TX descriptors
1947 * dma_free_tx_skbufs - free TX dma buffers
1950 * @queue: TX queue index
1956 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in dma_free_tx_skbufs()
1959 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1961 for (i = 0; i < dma_conf->dma_tx_size; i++) in dma_free_tx_skbufs()
1964 if (tx_q->xsk_pool && tx_q->xsk_frames_done) { in dma_free_tx_skbufs()
1965 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in dma_free_tx_skbufs()
1966 tx_q->xsk_frames_done = 0; in dma_free_tx_skbufs()
1967 tx_q->xsk_pool = NULL; in dma_free_tx_skbufs()
1972 * stmmac_free_tx_skbufs - free TX skb buffers
1977 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1981 dma_free_tx_skbufs(priv, &priv->dma_conf, queue); in stmmac_free_tx_skbufs()
1985 * __free_dma_rx_desc_resources - free RX dma desc resources (per queue)
1994 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __free_dma_rx_desc_resources()
1997 if (rx_q->xsk_pool) in __free_dma_rx_desc_resources()
2002 rx_q->buf_alloc_num = 0; in __free_dma_rx_desc_resources()
2003 rx_q->xsk_pool = NULL; in __free_dma_rx_desc_resources()
2006 if (!priv->extend_desc) in __free_dma_rx_desc_resources()
2007 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
2009 rx_q->dma_rx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
2011 dma_free_coherent(priv->device, dma_conf->dma_rx_size * in __free_dma_rx_desc_resources()
2013 rx_q->dma_erx, rx_q->dma_rx_phy); in __free_dma_rx_desc_resources()
2015 if (xdp_rxq_info_is_reg(&rx_q->xdp_rxq)) in __free_dma_rx_desc_resources()
2016 xdp_rxq_info_unreg(&rx_q->xdp_rxq); in __free_dma_rx_desc_resources()
2018 kfree(rx_q->buf_pool); in __free_dma_rx_desc_resources()
2019 if (rx_q->page_pool) in __free_dma_rx_desc_resources()
2020 page_pool_destroy(rx_q->page_pool); in __free_dma_rx_desc_resources()
2026 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
2035 * __free_dma_tx_desc_resources - free TX dma desc resources (per queue)
2038 * @queue: TX queue index
2044 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __free_dma_tx_desc_resources()
2048 /* Release the DMA TX socket buffers */ in __free_dma_tx_desc_resources()
2051 if (priv->extend_desc) { in __free_dma_tx_desc_resources()
2053 addr = tx_q->dma_etx; in __free_dma_tx_desc_resources()
2054 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in __free_dma_tx_desc_resources()
2056 addr = tx_q->dma_entx; in __free_dma_tx_desc_resources()
2059 addr = tx_q->dma_tx; in __free_dma_tx_desc_resources()
2062 size *= dma_conf->dma_tx_size; in __free_dma_tx_desc_resources()
2064 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in __free_dma_tx_desc_resources()
2066 kfree(tx_q->tx_skbuff_dma); in __free_dma_tx_desc_resources()
2067 kfree(tx_q->tx_skbuff); in __free_dma_tx_desc_resources()
2073 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
2076 /* Free TX queue resources */ in free_dma_tx_desc_resources()
2082 * __alloc_dma_rx_desc_resources - alloc RX resources (per queue).
2087 * this function allocates the resources for TX and RX paths. In case of
2088 * reception, for example, it pre-allocates the RX socket buffer in order to in __alloc_dma_rx_desc_resources()
2089 * allow the zero-copy mechanism. in __alloc_dma_rx_desc_resources()
2095 struct stmmac_rx_queue *rx_q = &dma_conf->rx_queue[queue]; in __alloc_dma_rx_desc_resources()
2096 struct stmmac_channel *ch = &priv->channel[queue]; in __alloc_dma_rx_desc_resources()
2103 dma_buf_sz_pad = stmmac_rx_offset(priv) + dma_conf->dma_buf_sz + in __alloc_dma_rx_desc_resources()
2107 rx_q->queue_index = queue; in __alloc_dma_rx_desc_resources()
2108 rx_q->priv_data = priv; in __alloc_dma_rx_desc_resources()
2109 rx_q->napi_skb_frag_size = num_pages * PAGE_SIZE; in __alloc_dma_rx_desc_resources()
2112 pp_params.pool_size = dma_conf->dma_rx_size; in __alloc_dma_rx_desc_resources()
2114 pp_params.nid = dev_to_node(priv->device); in __alloc_dma_rx_desc_resources()
2115 pp_params.dev = priv->device; in __alloc_dma_rx_desc_resources()
2118 pp_params.max_len = dma_conf->dma_buf_sz; in __alloc_dma_rx_desc_resources()
2120 if (priv->sph) { in __alloc_dma_rx_desc_resources()
2125 rx_q->page_pool = page_pool_create(&pp_params); in __alloc_dma_rx_desc_resources()
2126 if (IS_ERR(rx_q->page_pool)) { in __alloc_dma_rx_desc_resources()
2127 ret = PTR_ERR(rx_q->page_pool); in __alloc_dma_rx_desc_resources()
2128 rx_q->page_pool = NULL; in __alloc_dma_rx_desc_resources()
2132 rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size, in __alloc_dma_rx_desc_resources()
2133 sizeof(*rx_q->buf_pool), in __alloc_dma_rx_desc_resources()
2135 if (!rx_q->buf_pool) in __alloc_dma_rx_desc_resources()
2136 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2138 if (priv->extend_desc) { in __alloc_dma_rx_desc_resources()
2139 rx_q->dma_erx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2140 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2142 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2144 if (!rx_q->dma_erx) in __alloc_dma_rx_desc_resources()
2145 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2148 rx_q->dma_rx = dma_alloc_coherent(priv->device, in __alloc_dma_rx_desc_resources()
2149 dma_conf->dma_rx_size * in __alloc_dma_rx_desc_resources()
2151 &rx_q->dma_rx_phy, in __alloc_dma_rx_desc_resources()
2153 if (!rx_q->dma_rx) in __alloc_dma_rx_desc_resources()
2154 return -ENOMEM; in __alloc_dma_rx_desc_resources()
2158 test_bit(queue, priv->af_xdp_zc_qps)) in __alloc_dma_rx_desc_resources()
2159 napi_id = ch->rxtx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2161 napi_id = ch->rx_napi.napi_id; in __alloc_dma_rx_desc_resources()
2163 ret = xdp_rxq_info_reg(&rx_q->xdp_rxq, priv->dev, in __alloc_dma_rx_desc_resources()
2164 rx_q->queue_index, in __alloc_dma_rx_desc_resources()
2167 netdev_err(priv->dev, "Failed to register xdp rxq info\n"); in __alloc_dma_rx_desc_resources()
2168 return -EINVAL; in __alloc_dma_rx_desc_resources()
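
Pulled together, the per-queue RX allocation above boils down to three steps: a page_pool sized to the ring, a buf_pool bookkeeping array, and a coherent block of DMA descriptors. A condensed sketch using only the names visible in the matched lines (pool flags and DMA direction are left out; the driver derives them from its mapping needs):

        struct page_pool_params pp_params = {
                .pool_size = dma_conf->dma_rx_size,     /* one buffer per descriptor */
                .nid       = dev_to_node(priv->device),
                .dev       = priv->device,
                .max_len   = dma_conf->dma_buf_sz,
        };

        rx_q->page_pool = page_pool_create(&pp_params);
        if (IS_ERR(rx_q->page_pool))
                return PTR_ERR(rx_q->page_pool);

        rx_q->buf_pool = kcalloc(dma_conf->dma_rx_size,
                                 sizeof(*rx_q->buf_pool), GFP_KERNEL);
        if (!rx_q->buf_pool)
                return -ENOMEM;

        rx_q->dma_rx = dma_alloc_coherent(priv->device,
                                          dma_conf->dma_rx_size * sizeof(struct dma_desc),
                                          &rx_q->dma_rx_phy, GFP_KERNEL);
        if (!rx_q->dma_rx)
                return -ENOMEM;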
2177 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
2197 * __alloc_dma_tx_desc_resources - alloc TX resources (per queue).
2200 * @queue: TX queue index
2202 * this function allocates the resources for TX and RX paths. In case of
2203 * reception, for example, it pre-allocates the RX socket buffer in order to in __alloc_dma_tx_desc_resources()
2204 * allow the zero-copy mechanism. in __alloc_dma_tx_desc_resources()
2210 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[queue]; in __alloc_dma_tx_desc_resources()
2214 tx_q->queue_index = queue; in __alloc_dma_tx_desc_resources()
2215 tx_q->priv_data = priv; in __alloc_dma_tx_desc_resources()
2217 tx_q->tx_skbuff_dma = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2218 sizeof(*tx_q->tx_skbuff_dma), in __alloc_dma_tx_desc_resources()
2220 if (!tx_q->tx_skbuff_dma) in __alloc_dma_tx_desc_resources()
2221 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2223 tx_q->tx_skbuff = kcalloc(dma_conf->dma_tx_size, in __alloc_dma_tx_desc_resources()
2226 if (!tx_q->tx_skbuff) in __alloc_dma_tx_desc_resources()
2227 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2229 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2231 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2236 size *= dma_conf->dma_tx_size; in __alloc_dma_tx_desc_resources()
2238 addr = dma_alloc_coherent(priv->device, size, in __alloc_dma_tx_desc_resources()
2239 &tx_q->dma_tx_phy, GFP_KERNEL); in __alloc_dma_tx_desc_resources()
2241 return -ENOMEM; in __alloc_dma_tx_desc_resources()
2243 if (priv->extend_desc) in __alloc_dma_tx_desc_resources()
2244 tx_q->dma_etx = addr; in __alloc_dma_tx_desc_resources()
2245 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in __alloc_dma_tx_desc_resources()
2246 tx_q->dma_entx = addr; in __alloc_dma_tx_desc_resources()
2248 tx_q->dma_tx = addr; in __alloc_dma_tx_desc_resources()
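
Likewise, each TX queue above ends up with two kcalloc'd bookkeeping arrays plus one coherent descriptor ring whose element type depends on the descriptor flavour (extended, enhanced/TBS, or basic). A minimal sketch of the basic-descriptor case, reusing only names from the matched lines:

        size_t size = sizeof(struct dma_desc) * dma_conf->dma_tx_size;
        void *addr;

        addr = dma_alloc_coherent(priv->device, size, &tx_q->dma_tx_phy,
                                  GFP_KERNEL);
        if (!addr)
                return -ENOMEM;
        tx_q->dma_tx = addr;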
2256 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
2260 /* TX queues buffers and DMA */ in alloc_dma_tx_desc_resources()
2275 * alloc_dma_desc_resources - alloc TX/RX resources.
2279 * this function allocates the resources for TX and RX paths. In case of
2280 * reception, for example, it pre-allocates the RX socket buffer in order to in alloc_dma_desc_resources()
2281 * allow the zero-copy mechanism. in alloc_dma_desc_resources()
2298 * free_dma_desc_resources - free dma desc resources
2305 /* Release the DMA TX socket buffers */ in free_dma_desc_resources()
2315 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
2321 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
2326 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
2327 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
2332 * stmmac_start_rx_dma - start RX DMA channel
2340 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
2341 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
2345 * stmmac_start_tx_dma - start TX DMA channel
2347 * @chan: TX channel index
2349 * This starts a TX DMA channel
2353 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
2354 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
2358 * stmmac_stop_rx_dma - stop RX DMA channel
2366 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
2367 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
2371 * stmmac_stop_tx_dma - stop TX DMA channel
2373 * @chan: TX channel index
2375 * This stops a TX DMA channel
2379 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
2380 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
2385 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_enable_all_dma_irq()
2386 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_enable_all_dma_irq()
2391 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_enable_all_dma_irq()
2394 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2395 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_enable_all_dma_irq()
2396 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_all_dma_irq()
2401 * stmmac_start_all_dma - start all RX and TX DMA channels
2404 * This starts all the RX and TX DMA channels
2408 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
2409 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
2420 * stmmac_stop_all_dma - stop all RX and TX DMA channels
2423 * This stops the RX and TX DMA channels
2427 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
2428 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
2439 * stmmac_dma_operation_mode - HW DMA operation mode
2442 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
2446 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
2447 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
2448 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
2449 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
2456 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
2458 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
2460 /* Split up the shared Tx/Rx FIFO memory on DW QoS Eth and DW XGMAC */ in stmmac_dma_operation_mode()
2461 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) { in stmmac_dma_operation_mode()
2466 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
2469 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
2472 * to perform the TX COE in HW. This depends on: in stmmac_dma_operation_mode()
2473 * 1) TX COE if actually supported in stmmac_dma_operation_mode()
2479 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
2487 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_dma_operation_mode()
2490 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2492 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
2495 if (rx_q->xsk_pool) { in stmmac_dma_operation_mode()
2496 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_dma_operation_mode()
2497 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2501 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_dma_operation_mode()
2502 priv->dma_conf.dma_buf_sz, in stmmac_dma_operation_mode()
2508 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
2510 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
2519 stmmac_enable_tx_timestamp(meta_req->priv, meta_req->tx_desc); in stmmac_xsk_request_timestamp()
2520 *meta_req->set_ic = true; in stmmac_xsk_request_timestamp()
2526 struct stmmac_priv *priv = tx_compl->priv; in stmmac_xsk_fill_timestamp()
2527 struct dma_desc *desc = tx_compl->desc; in stmmac_xsk_fill_timestamp()
2531 if (!priv->hwts_tx_en) in stmmac_xsk_fill_timestamp()
2534 /* check tx tstamp status */ in stmmac_xsk_fill_timestamp()
2536 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_xsk_fill_timestamp()
2538 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_xsk_fill_timestamp()
2543 ns -= priv->plat->cdc_error_adj; in stmmac_xsk_fill_timestamp()
2555 if (meta_req->tbs & STMMAC_TBS_EN) in stmmac_xsk_request_launch_time()
2556 stmmac_set_desc_tbs(meta_req->priv, meta_req->edesc, ts.tv_sec, in stmmac_xsk_request_launch_time()
2568 struct netdev_queue *nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_zc()
2569 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_zc()
2570 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_zc()
2571 struct xsk_buff_pool *pool = tx_q->xsk_pool; in stmmac_xdp_xmit_zc()
2572 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2578 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_zc()
2583 while (budget-- > 0) { in stmmac_xdp_xmit_zc()
2589 /* We are sharing with slow path and stop XSK TX desc submission when in stmmac_xdp_xmit_zc()
2590 * available TX ring is less than threshold. in stmmac_xdp_xmit_zc()
2593 !netif_carrier_ok(priv->dev)) { in stmmac_xdp_xmit_zc()
2601 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_zc()
2602 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_zc()
2603 xdp_desc.len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_zc()
2604 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_zc()
2608 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_zc()
2609 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_zc()
2610 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_zc()
2611 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_zc()
2613 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_zc()
2619 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XSK_TX; in stmmac_xdp_xmit_zc()
2625 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_xdp_xmit_zc()
2626 tx_q->xdpf[entry] = NULL; in stmmac_xdp_xmit_zc()
2628 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_zc()
2629 tx_q->tx_skbuff_dma[entry].len = xdp_desc.len; in stmmac_xdp_xmit_zc()
2630 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_zc()
2631 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_zc()
2635 tx_q->tx_count_frames++; in stmmac_xdp_xmit_zc()
2637 if (!priv->tx_coal_frames[queue]) in stmmac_xdp_xmit_zc()
2639 else if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_zc()
2647 meta_req.tbs = tx_q->tbs; in stmmac_xdp_xmit_zc()
2648 meta_req.edesc = &tx_q->dma_entx[entry]; in stmmac_xdp_xmit_zc()
2652 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_zc()
2658 true, priv->mode, true, true, in stmmac_xdp_xmit_zc()
2661 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_zc()
2664 &tx_q->tx_skbuff_dma[entry].xsk_meta); in stmmac_xdp_xmit_zc()
2666 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_zc()
2667 entry = tx_q->cur_tx; in stmmac_xdp_xmit_zc()
2669 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2670 u64_stats_add(&txq_stats->napi.tx_set_ic_bit, tx_set_ic_bit); in stmmac_xdp_xmit_zc()
2671 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_xdp_xmit_zc()
2679 * a) TX Budget is still available in stmmac_xdp_xmit_zc()
2680 * b) work_done = true when XSK TX desc peek is empty (no more in stmmac_xdp_xmit_zc()
2681 * pending XSK TX for transmission) in stmmac_xdp_xmit_zc()
2688 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && tc <= 256) { in stmmac_bump_dma_threshold()
2691 if (priv->plat->force_thresh_dma_mode) in stmmac_bump_dma_threshold()
2697 priv->xstats.threshold = tc; in stmmac_bump_dma_threshold()
2702 * stmmac_tx_clean - to manage the transmission completion
2705 * @queue: TX queue index
2706 * @pending_packets: signal to arm the TX coal timer
2708 * If some packets still need to be handled, due to TX coalescing, set
2709 * pending_packets to true to make NAPI arm the TX coal timer.
2714 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_clean()
2715 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tx_clean()
2720 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2722 tx_q->xsk_frames_done = 0; in stmmac_tx_clean()
2724 entry = tx_q->dirty_tx; in stmmac_tx_clean()
2726 /* Try to clean all TX complete frame in 1 shot */ in stmmac_tx_clean()
2727 while ((entry != tx_q->cur_tx) && count < priv->dma_conf.dma_tx_size) { in stmmac_tx_clean()
2733 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX || in stmmac_tx_clean()
2734 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2735 xdpf = tx_q->xdpf[entry]; in stmmac_tx_clean()
2737 } else if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2739 skb = tx_q->tx_skbuff[entry]; in stmmac_tx_clean()
2745 if (priv->extend_desc) in stmmac_tx_clean()
2746 p = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_tx_clean()
2747 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tx_clean()
2748 p = &tx_q->dma_entx[entry].basic; in stmmac_tx_clean()
2750 p = tx_q->dma_tx + entry; in stmmac_tx_clean()
2752 status = stmmac_tx_status(priv, &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2776 } else if (tx_q->xsk_pool && in stmmac_tx_clean()
2777 xp_tx_metadata_enabled(tx_q->xsk_pool)) { in stmmac_tx_clean()
2783 xsk_tx_metadata_complete(&tx_q->tx_skbuff_dma[entry].xsk_meta, in stmmac_tx_clean()
2789 if (likely(tx_q->tx_skbuff_dma[entry].buf && in stmmac_tx_clean()
2790 tx_q->tx_skbuff_dma[entry].buf_type != STMMAC_TXBUF_T_XDP_TX)) { in stmmac_tx_clean()
2791 if (tx_q->tx_skbuff_dma[entry].map_as_page) in stmmac_tx_clean()
2792 dma_unmap_page(priv->device, in stmmac_tx_clean()
2793 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2794 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2797 dma_unmap_single(priv->device, in stmmac_tx_clean()
2798 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2799 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2801 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_tx_clean()
2802 tx_q->tx_skbuff_dma[entry].len = 0; in stmmac_tx_clean()
2803 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_tx_clean()
2808 tx_q->tx_skbuff_dma[entry].last_segment = false; in stmmac_tx_clean()
2809 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_tx_clean()
2812 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_TX) { in stmmac_tx_clean()
2814 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2818 tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XDP_NDO) { in stmmac_tx_clean()
2820 tx_q->xdpf[entry] = NULL; in stmmac_tx_clean()
2823 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_XSK_TX) in stmmac_tx_clean()
2824 tx_q->xsk_frames_done++; in stmmac_tx_clean()
2826 if (tx_q->tx_skbuff_dma[entry].buf_type == STMMAC_TXBUF_T_SKB) { in stmmac_tx_clean()
2829 bytes_compl += skb->len; in stmmac_tx_clean()
2831 tx_q->tx_skbuff[entry] = NULL; in stmmac_tx_clean()
2835 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2837 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_tx_clean()
2839 tx_q->dirty_tx = entry; in stmmac_tx_clean()
2841 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2844 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2848 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2850 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2853 if (tx_q->xsk_pool) { in stmmac_tx_clean()
2856 if (tx_q->xsk_frames_done) in stmmac_tx_clean()
2857 xsk_tx_completed(tx_q->xsk_pool, tx_q->xsk_frames_done); in stmmac_tx_clean()
2859 if (xsk_uses_need_wakeup(tx_q->xsk_pool)) in stmmac_tx_clean()
2860 xsk_set_tx_need_wakeup(tx_q->xsk_pool); in stmmac_tx_clean()
2862 /* For XSK TX, we try to send as many as possible. in stmmac_tx_clean()
2863 * If XSK work is done (XSK TX desc empty and budget still in stmmac_tx_clean()
2864 * available), return "budget - 1" to reenable TX IRQ. in stmmac_tx_clean()
2870 xmits = budget - 1; in stmmac_tx_clean()
2875 if (priv->eee_sw_timer_en && !priv->tx_path_in_lpi_mode) in stmmac_tx_clean()
2879 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_tx_clean()
2882 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_tx_clean()
2883 u64_stats_add(&txq_stats->napi.tx_packets, tx_packets); in stmmac_tx_clean()
2884 u64_stats_add(&txq_stats->napi.tx_pkt_n, tx_packets); in stmmac_tx_clean()
2885 u64_stats_inc(&txq_stats->napi.tx_clean); in stmmac_tx_clean()
2886 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_tx_clean()
2888 priv->xstats.tx_errors += tx_errors; in stmmac_tx_clean()
2890 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2892 /* Combine decisions from TX clean and XSK TX */ in stmmac_tx_clean()
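/*
 * Illustrative sketch (not driver code) of the NAPI budget convention used in
 * the XSK branch above: if the TX work finished within the budget, "budget - 1"
 * is reported so the caller may stop polling and re-enable the TX interrupt;
 * otherwise the full budget is reported so NAPI keeps polling. The helper name
 * and ring state below are hypothetical.
 */
#include <stdio.h>

static int poll_tx_ring(int pending, int budget)
{
	int sent = pending < budget ? pending : budget;

	/* Ring drained and budget not exhausted: signal "done" with budget - 1 */
	if (sent < budget)
		return budget - 1;

	/* Budget exhausted: report it all so polling continues */
	return budget;
}

int main(void)
{
	printf("drained:   %d\n", poll_tx_ring(3, 8));	/* prints 7 (budget - 1) */
	printf("more work: %d\n", poll_tx_ring(16, 8));	/* prints 8 (budget)     */
	return 0;
}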
2897 * stmmac_tx_err - manage TX errors in stmmac_tx_err()
2905 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_tx_err()
2907 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2910 dma_free_tx_skbufs(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2911 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, chan); in stmmac_tx_err()
2913 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2914 tx_q->dma_tx_phy, chan); in stmmac_tx_err()
2917 priv->xstats.tx_errors++; in stmmac_tx_err()
2918 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2922 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2924 * @txmode: TX operating mode
2928 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2934 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2935 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2936 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2937 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2938 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2939 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2942 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2944 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2950 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2951 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
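/*
 * A minimal sketch of the per-channel FIFO budgeting this path depends on:
 * when the platform does not provide explicit FIFO sizes, the totals from
 * dma_cap are used and shared between the active channels. The even split
 * below is an assumption for illustration; the helper name is not the
 * driver's.
 */
#include <stdio.h>

static unsigned int per_chan_fifo(unsigned int total_fifo, unsigned int channels)
{
	return channels ? total_fifo / channels : total_fifo;
}

int main(void)
{
	/* e.g. a 16 KiB RX FIFO shared by 4 RX DMA channels */
	printf("per-channel RX FIFO: %u bytes\n", per_chan_fifo(16384, 4));
	return 0;
}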
2958 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2959 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2960 if (ret && (ret != -EINVAL)) { in stmmac_safety_feat_interrupt()
2970 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2971 &priv->xstats, chan, dir); in stmmac_napi_check()
2972 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_napi_check()
2973 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_napi_check()
2974 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2979 rx_napi = rx_q->xsk_pool ? &ch->rxtx_napi : &ch->rx_napi; in stmmac_napi_check()
2980 tx_napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_napi_check()
2982 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2984 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2985 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2986 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2991 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2993 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2994 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2995 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
3004 * stmmac_dma_interrupt - DMA ISR
3012 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
3013 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
3047 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
3049 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
3050 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
3051 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
3053 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
3057 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
3063 * platform and necessary for old MAC10/100 and GMAC chips.
3067 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
3071 * stmmac_check_ether_addr - check if the MAC addr is valid
3081 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
3082 stmmac_get_umac_addr(priv, priv->hw, addr, 0); in stmmac_check_ether_addr()
3084 eth_hw_addr_set(priv->dev, addr); in stmmac_check_ether_addr()
3086 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
3087 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
3088 priv->dev->dev_addr); in stmmac_check_ether_addr()
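/*
 * Hedged sketch of the address-selection fallback in stmmac_check_ether_addr()
 * above: keep a valid address if one is already set, otherwise take the one
 * read back from the MAC, otherwise fall back to a random locally administered
 * address. The helpers below (addr_is_valid, the hard-coded arrays) are
 * illustrative stand-ins, not kernel APIs.
 */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool addr_is_valid(const unsigned char *a)
{
	static const unsigned char zero[6];

	/* valid = not all-zero and not multicast (I/G bit of first octet clear) */
	return memcmp(a, zero, 6) != 0 && !(a[0] & 1);
}

int main(void)
{
	unsigned char dev_addr[6] = { 0 };	/* nothing configured yet        */
	unsigned char hw_addr[6]  = { 0 };	/* pretend the MAC fuse is empty */

	if (!addr_is_valid(dev_addr)) {
		if (addr_is_valid(hw_addr))
			memcpy(dev_addr, hw_addr, 6);
		else
			dev_addr[0] = 0x02;	/* locally administered bit, random stand-in */
	}

	printf("device MAC address %02x:%02x:%02x:%02x:%02x:%02x\n",
	       dev_addr[0], dev_addr[1], dev_addr[2],
	       dev_addr[3], dev_addr[4], dev_addr[5]);
	return 0;
}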
3093 * stmmac_init_dma_engine - DMA init.
3102 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
3103 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
3110 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
3111 netdev_err(priv->dev, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
3112 return -EINVAL; in stmmac_init_dma_engine()
3115 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
3116 priv->plat->dma_cfg->atds = 1; in stmmac_init_dma_engine()
3118 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
3120 netdev_err(priv->dev, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
3125 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg); in stmmac_init_dma_engine()
3127 if (priv->plat->axi) in stmmac_init_dma_engine()
3128 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
3132 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
3133 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_init_dma_engine()
3138 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_init_dma_engine()
3140 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3141 rx_q->dma_rx_phy, chan); in stmmac_init_dma_engine()
3143 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_init_dma_engine()
3144 (rx_q->buf_alloc_num * in stmmac_init_dma_engine()
3146 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3147 rx_q->rx_tail_addr, chan); in stmmac_init_dma_engine()
3150 /* DMA TX Channel Configuration */ in stmmac_init_dma_engine()
3152 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_dma_engine()
3154 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
3155 tx_q->dma_tx_phy, chan); in stmmac_init_dma_engine()
3157 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_init_dma_engine()
3158 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
3159 tx_q->tx_tail_addr, chan); in stmmac_init_dma_engine()
3167 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tx_timer_arm()
3168 u32 tx_coal_timer = priv->tx_coal_timer[queue]; in stmmac_tx_timer_arm()
3175 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer_arm()
3176 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer_arm()
3183 hrtimer_start(&tx_q->txtimer, in stmmac_tx_timer_arm()
3187 hrtimer_try_to_cancel(&tx_q->txtimer); in stmmac_tx_timer_arm()
3191 * stmmac_tx_timer - mitigation sw timer for tx.
3199 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer()
3203 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
3204 napi = tx_q->xsk_pool ? &ch->rxtx_napi : &ch->tx_napi; in stmmac_tx_timer()
3209 spin_lock_irqsave(&ch->lock, flags); in stmmac_tx_timer()
3210 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
3211 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_tx_timer()
3219 * stmmac_init_coalesce - init mitigation options.
3228 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
3229 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_init_coalesce()
3233 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_init_coalesce()
3235 priv->tx_coal_frames[chan] = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
3236 priv->tx_coal_timer[chan] = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
3238 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_init_coalesce()
3242 priv->rx_coal_frames[chan] = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
3247 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
3248 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
3251 /* set TX ring length */ in stmmac_set_rings_length()
3253 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3254 (priv->dma_conf.dma_tx_size - 1), chan); in stmmac_set_rings_length()
3258 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
3259 (priv->dma_conf.dma_rx_size - 1), chan); in stmmac_set_rings_length()
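/*
 * Sketch of two conventions visible above and throughout the TX/RX paths:
 * ring-length registers are programmed as "number of descriptors - 1", and
 * ring indices advance with a mask-based wrap of the kind STMMAC_GET_ENTRY
 * performs (this assumes a power-of-two ring size). Illustration only.
 */
#include <stdio.h>

#define RING_SIZE	512u			/* hypothetical power-of-two ring size */
#define NEXT_ENTRY(x)	(((x) + 1) & (RING_SIZE - 1))

int main(void)
{
	printf("ring length register value: %u\n", RING_SIZE - 1);
	printf("entry after %u wraps to %u\n", RING_SIZE - 1, NEXT_ENTRY(RING_SIZE - 1));
	return 0;
}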
3263 * stmmac_set_tx_queue_weight - Set TX queue weight
3265 * Description: It is used for setting the TX queue weights in stmmac_set_tx_queue_weight()
3269 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
3274 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
3275 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
3280 * stmmac_configure_cbs - Configure CBS in TX queue
3282 * Description: It is used for configuring CBS in AVB TX queues
3286 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
3292 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
3296 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
3297 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
3298 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
3299 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
3300 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
3306 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
3312 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
3317 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
3318 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
3323 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
3329 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
3334 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
3337 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
3338 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
3343 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
3345 * Description: It is used for configuring the TX Queue Priority
3349 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
3354 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
3357 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
3358 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
3363 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
3369 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
3375 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
3378 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
3379 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
3385 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
3386 priv->rss.enable = false; in stmmac_mac_config_rss()
3390 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
3391 priv->rss.enable = true; in stmmac_mac_config_rss()
3393 priv->rss.enable = false; in stmmac_mac_config_rss()
3395 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
3396 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
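/*
 * Generic sketch of what enabling RSS (gated on NETIF_F_RXHASH above) boils
 * down to: a hash of the flow indexes an indirection table whose entries name
 * RX queues. The round-robin table fill below is an illustration, not the
 * driver's exact table setup, and the table size is assumed.
 */
#include <stdio.h>

#define RSS_TABLE_SIZE 256

int main(void)
{
	unsigned int table[RSS_TABLE_SIZE];
	unsigned int rx_queues = 4, hash = 0x9e3779b9u;
	int i;

	for (i = 0; i < RSS_TABLE_SIZE; i++)
		table[i] = i % rx_queues;	/* spread table entries across queues */

	printf("hash 0x%x -> RX queue %u\n", hash, table[hash % RSS_TABLE_SIZE]);
	return 0;
}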
3400 * stmmac_mtl_configuration - Configure MTL
3406 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
3407 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
3414 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3415 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
3417 /* Configure MTL TX algorithms */ in stmmac_mtl_configuration()
3419 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
3420 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
3422 /* Configure CBS in AVB TX queues */ in stmmac_mtl_configuration()
3436 /* Set TX priorities */ in stmmac_mtl_configuration()
3451 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
3452 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
3453 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp, in stmmac_safety_feat_configuration()
3454 priv->plat->safety_feat_cfg); in stmmac_safety_feat_configuration()
3456 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
3461 * stmmac_hw_setup - setup mac in a usable state.
3470 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3476 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
3477 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
3483 if (priv->hw->phylink_pcs) in stmmac_hw_setup()
3484 phylink_pcs_pre_init(priv->phylink, priv->hw->phylink_pcs); in stmmac_hw_setup()
3490 * the link is established with EEE mode active. in stmmac_hw_setup()
3492 phylink_rx_clk_stop_block(priv->phylink); in stmmac_hw_setup()
3497 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_hw_setup()
3498 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
3504 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
3505 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_hw_setup()
3508 if (priv->hw->pcs) { in stmmac_hw_setup()
3509 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
3513 priv->hw->ps = speed; in stmmac_hw_setup()
3515 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
3516 priv->hw->ps = 0; in stmmac_hw_setup()
3521 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
3529 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
3531 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
3532 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
3533 priv->hw->rx_csum = 0; in stmmac_hw_setup()
3536 /* Enable the MAC Rx/Tx */ in stmmac_hw_setup()
3537 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
3545 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_hw_setup()
3547 netdev_warn(priv->dev, in stmmac_hw_setup()
3553 if (ret == -EOPNOTSUPP) in stmmac_hw_setup()
3554 netdev_info(priv->dev, "PTP not supported by HW\n"); in stmmac_hw_setup()
3556 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_hw_setup()
3560 if (priv->use_riwt) { in stmmac_hw_setup()
3564 if (!priv->rx_riwt[queue]) in stmmac_hw_setup()
3565 priv->rx_riwt[queue] = DEF_DMA_RIWT; in stmmac_hw_setup()
3567 stmmac_rx_watchdog(priv, priv->ioaddr, in stmmac_hw_setup()
3568 priv->rx_riwt[queue], queue); in stmmac_hw_setup()
3572 if (priv->hw->pcs) in stmmac_hw_setup()
3573 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); in stmmac_hw_setup()
3575 /* set TX and RX rings length */ in stmmac_hw_setup()
3579 if (priv->tso) { in stmmac_hw_setup()
3581 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3583 /* TSO and TBS cannot co-exist */ in stmmac_hw_setup()
3584 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_hw_setup()
3587 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
3592 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_hw_setup()
3594 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_hw_setup()
3598 if (priv->dma_cap.vlins) in stmmac_hw_setup()
3599 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
3603 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_hw_setup()
3604 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; in stmmac_hw_setup()
3606 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
3609 /* Configure real RX and TX queues */ in stmmac_hw_setup()
3610 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
3611 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
3616 phylink_rx_clk_stop_block(priv->phylink); in stmmac_hw_setup()
3617 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_hw_setup()
3618 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_hw_setup()
3627 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_hw_teardown()
3638 irq_idx = priv->plat->tx_queues_to_use; in stmmac_free_irq()
3641 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3642 if (priv->tx_irq[j] > 0) { in stmmac_free_irq()
3643 irq_set_affinity_hint(priv->tx_irq[j], NULL); in stmmac_free_irq()
3644 free_irq(priv->tx_irq[j], &priv->dma_conf.tx_queue[j]); in stmmac_free_irq()
3647 irq_idx = priv->plat->rx_queues_to_use; in stmmac_free_irq()
3650 for (j = irq_idx - 1; j >= 0; j--) { in stmmac_free_irq()
3651 if (priv->rx_irq[j] > 0) { in stmmac_free_irq()
3652 irq_set_affinity_hint(priv->rx_irq[j], NULL); in stmmac_free_irq()
3653 free_irq(priv->rx_irq[j], &priv->dma_conf.rx_queue[j]); in stmmac_free_irq()
3657 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) in stmmac_free_irq()
3658 free_irq(priv->sfty_ue_irq, dev); in stmmac_free_irq()
3661 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) in stmmac_free_irq()
3662 free_irq(priv->sfty_ce_irq, dev); in stmmac_free_irq()
3665 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) in stmmac_free_irq()
3666 free_irq(priv->lpi_irq, dev); in stmmac_free_irq()
3669 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) in stmmac_free_irq()
3670 free_irq(priv->wol_irq, dev); in stmmac_free_irq()
3673 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) in stmmac_free_irq()
3674 free_irq(priv->sfty_irq, dev); in stmmac_free_irq()
3677 free_irq(dev->irq, dev); in stmmac_free_irq()
3696 int_name = priv->int_name_mac; in stmmac_request_irq_multi_msi()
3697 sprintf(int_name, "%s:%s", dev->name, "mac"); in stmmac_request_irq_multi_msi()
3698 ret = request_irq(dev->irq, stmmac_mac_interrupt, in stmmac_request_irq_multi_msi()
3701 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3703 __func__, dev->irq, ret); in stmmac_request_irq_multi_msi()
3711 priv->wol_irq_disabled = true; in stmmac_request_irq_multi_msi()
3712 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3713 int_name = priv->int_name_wol; in stmmac_request_irq_multi_msi()
3714 sprintf(int_name, "%s:%s", dev->name, "wol"); in stmmac_request_irq_multi_msi()
3715 ret = request_irq(priv->wol_irq, in stmmac_request_irq_multi_msi()
3719 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3721 __func__, priv->wol_irq, ret); in stmmac_request_irq_multi_msi()
3730 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3731 int_name = priv->int_name_lpi; in stmmac_request_irq_multi_msi()
3732 sprintf(int_name, "%s:%s", dev->name, "lpi"); in stmmac_request_irq_multi_msi()
3733 ret = request_irq(priv->lpi_irq, in stmmac_request_irq_multi_msi()
3737 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3739 __func__, priv->lpi_irq, ret); in stmmac_request_irq_multi_msi()
3748 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3749 int_name = priv->int_name_sfty; in stmmac_request_irq_multi_msi()
3750 sprintf(int_name, "%s:%s", dev->name, "safety"); in stmmac_request_irq_multi_msi()
3751 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_multi_msi()
3754 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3756 __func__, priv->sfty_irq, ret); in stmmac_request_irq_multi_msi()
3765 if (priv->sfty_ce_irq > 0 && priv->sfty_ce_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3766 int_name = priv->int_name_sfty_ce; in stmmac_request_irq_multi_msi()
3767 sprintf(int_name, "%s:%s", dev->name, "safety-ce"); in stmmac_request_irq_multi_msi()
3768 ret = request_irq(priv->sfty_ce_irq, in stmmac_request_irq_multi_msi()
3772 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3774 __func__, priv->sfty_ce_irq, ret); in stmmac_request_irq_multi_msi()
3783 if (priv->sfty_ue_irq > 0 && priv->sfty_ue_irq != dev->irq) { in stmmac_request_irq_multi_msi()
3784 int_name = priv->int_name_sfty_ue; in stmmac_request_irq_multi_msi()
3785 sprintf(int_name, "%s:%s", dev->name, "safety-ue"); in stmmac_request_irq_multi_msi()
3786 ret = request_irq(priv->sfty_ue_irq, in stmmac_request_irq_multi_msi()
3790 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3792 __func__, priv->sfty_ue_irq, ret); in stmmac_request_irq_multi_msi()
3799 for (i = 0; i < priv->plat->rx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3802 if (priv->rx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3805 int_name = priv->int_name_rx_irq[i]; in stmmac_request_irq_multi_msi()
3806 sprintf(int_name, "%s:%s-%d", dev->name, "rx", i); in stmmac_request_irq_multi_msi()
3807 ret = request_irq(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3809 0, int_name, &priv->dma_conf.rx_queue[i]); in stmmac_request_irq_multi_msi()
3811 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3812 "%s: alloc rx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3813 __func__, i, priv->rx_irq[i], ret); in stmmac_request_irq_multi_msi()
3818 irq_set_affinity_hint(priv->rx_irq[i], in stmmac_request_irq_multi_msi()
3822 /* Request Tx MSI irq */ in stmmac_request_irq_multi_msi()
3823 for (i = 0; i < priv->plat->tx_queues_to_use; i++) { in stmmac_request_irq_multi_msi()
3826 if (priv->tx_irq[i] == 0) in stmmac_request_irq_multi_msi()
3829 int_name = priv->int_name_tx_irq[i]; in stmmac_request_irq_multi_msi()
3830 sprintf(int_name, "%s:%s-%d", dev->name, "tx", i); in stmmac_request_irq_multi_msi()
3831 ret = request_irq(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3833 0, int_name, &priv->dma_conf.tx_queue[i]); in stmmac_request_irq_multi_msi()
3835 netdev_err(priv->dev, in stmmac_request_irq_multi_msi()
3836 "%s: alloc tx-%d MSI %d (error: %d)\n", in stmmac_request_irq_multi_msi()
3837 __func__, i, priv->tx_irq[i], ret); in stmmac_request_irq_multi_msi()
3842 irq_set_affinity_hint(priv->tx_irq[i], in stmmac_request_irq_multi_msi()
3859 ret = request_irq(dev->irq, stmmac_interrupt, in stmmac_request_irq_single()
3860 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3862 netdev_err(priv->dev, in stmmac_request_irq_single()
3864 __func__, dev->irq, ret); in stmmac_request_irq_single()
3872 priv->wol_irq_disabled = true; in stmmac_request_irq_single()
3873 if (priv->wol_irq > 0 && priv->wol_irq != dev->irq) { in stmmac_request_irq_single()
3874 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_request_irq_single()
3875 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3877 netdev_err(priv->dev, in stmmac_request_irq_single()
3879 __func__, priv->wol_irq, ret); in stmmac_request_irq_single()
3886 if (priv->lpi_irq > 0 && priv->lpi_irq != dev->irq) { in stmmac_request_irq_single()
3887 ret = request_irq(priv->lpi_irq, stmmac_interrupt, in stmmac_request_irq_single()
3888 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3890 netdev_err(priv->dev, in stmmac_request_irq_single()
3892 __func__, priv->lpi_irq, ret); in stmmac_request_irq_single()
3901 if (priv->sfty_irq > 0 && priv->sfty_irq != dev->irq) { in stmmac_request_irq_single()
3902 ret = request_irq(priv->sfty_irq, stmmac_safety_interrupt, in stmmac_request_irq_single()
3903 IRQF_SHARED, dev->name, dev); in stmmac_request_irq_single()
3905 netdev_err(priv->dev, in stmmac_request_irq_single()
3907 __func__, priv->sfty_irq, ret); in stmmac_request_irq_single()
3926 if (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN) in stmmac_request_irq()
3935 * stmmac_setup_dma_desc - Generate a dma_conf and allocate DMA queue
3939 * Allocate the Tx/Rx DMA queue and init them.
3951 netdev_err(priv->dev, "%s: DMA conf allocation failed\n", in stmmac_setup_dma_desc()
3953 return ERR_PTR(-ENOMEM); in stmmac_setup_dma_desc()
3963 dma_conf->dma_buf_sz = bfsize; in stmmac_setup_dma_desc()
3964 /* Choose the tx/rx size from the already defined one in the in stmmac_setup_dma_desc()
3967 dma_conf->dma_tx_size = priv->dma_conf.dma_tx_size; in stmmac_setup_dma_desc()
3968 dma_conf->dma_rx_size = priv->dma_conf.dma_rx_size; in stmmac_setup_dma_desc()
3970 if (!dma_conf->dma_tx_size) in stmmac_setup_dma_desc()
3971 dma_conf->dma_tx_size = DMA_DEFAULT_TX_SIZE; in stmmac_setup_dma_desc()
3972 if (!dma_conf->dma_rx_size) in stmmac_setup_dma_desc()
3973 dma_conf->dma_rx_size = DMA_DEFAULT_RX_SIZE; in stmmac_setup_dma_desc()
3976 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_setup_dma_desc()
3977 struct stmmac_tx_queue *tx_q = &dma_conf->tx_queue[chan]; in stmmac_setup_dma_desc()
3978 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_setup_dma_desc()
3980 /* Setup per-TXQ tbs flag before TX descriptor alloc */ in stmmac_setup_dma_desc()
3981 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; in stmmac_setup_dma_desc()
3986 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_setup_dma_desc()
3991 ret = init_dma_desc_rings(priv->dev, dma_conf, GFP_KERNEL); in stmmac_setup_dma_desc()
3993 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_setup_dma_desc()
4008 * __stmmac_open - open entry point of the driver
4014 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4021 int mode = priv->plat->phy_interface; in __stmmac_open()
4025 /* Initialise the tx lpi timer, converting from msec to usec */ in __stmmac_open()
4026 if (!priv->tx_lpi_timer) in __stmmac_open()
4027 priv->tx_lpi_timer = eee_timer * 1000; in __stmmac_open()
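/*
 * Quick arithmetic check for the conversion noted above: eee_timer is given in
 * milliseconds while tx_lpi_timer is kept in microseconds, hence the factor of
 * 1000. The value used below is hypothetical.
 */
#include <stdio.h>

int main(void)
{
	int eee_timer_msec = 1000;	/* hypothetical module-parameter value */

	printf("tx_lpi_timer = %d usec\n", eee_timer_msec * 1000);
	return 0;
}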
4029 ret = pm_runtime_resume_and_get(priv->device); in __stmmac_open()
4033 if ((!priv->hw->xpcs || in __stmmac_open()
4034 xpcs_get_an_mode(priv->hw->xpcs, mode) != DW_AN_C73)) { in __stmmac_open()
4037 netdev_err(priv->dev, in __stmmac_open()
4045 if (priv->dma_conf.tx_queue[i].tbs & STMMAC_TBS_EN) in __stmmac_open()
4046 dma_conf->tx_queue[i].tbs = priv->dma_conf.tx_queue[i].tbs; in __stmmac_open()
4047 memcpy(&priv->dma_conf, dma_conf, sizeof(*dma_conf)); in __stmmac_open()
4051 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in __stmmac_open()
4052 priv->plat->serdes_powerup) { in __stmmac_open()
4053 ret = priv->plat->serdes_powerup(dev, priv->plat->bsp_priv); in __stmmac_open()
4055 netdev_err(priv->dev, "%s: Serdes powerup failed\n", in __stmmac_open()
4063 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in __stmmac_open()
4069 phylink_start(priv->phylink); in __stmmac_open()
4071 phylink_speed_up(priv->phylink); in __stmmac_open()
4078 netif_tx_start_all_queues(priv->dev); in __stmmac_open()
4084 phylink_stop(priv->phylink); in __stmmac_open()
4086 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in __stmmac_open()
4087 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in __stmmac_open()
4091 phylink_disconnect_phy(priv->phylink); in __stmmac_open()
4093 pm_runtime_put(priv->device); in __stmmac_open()
4103 dma_conf = stmmac_setup_dma_desc(priv, dev->mtu); in stmmac_open()
4116 * stmmac_release - close entry point of the driver
4126 if (device_may_wakeup(priv->device)) in stmmac_release()
4127 phylink_speed_down(priv->phylink, false); in stmmac_release()
4129 phylink_stop(priv->phylink); in stmmac_release()
4130 phylink_disconnect_phy(priv->phylink); in stmmac_release()
4134 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_release()
4135 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_release()
4142 /* Stop TX/RX DMA and clear the descriptors */ in stmmac_release()
4145 /* Release and free the Rx/Tx resources */ in stmmac_release()
4146 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_release()
4149 if (priv->plat->serdes_powerdown) in stmmac_release()
4150 priv->plat->serdes_powerdown(dev, priv->plat->bsp_priv); in stmmac_release()
4155 timer_shutdown_sync(&priv->fpe_cfg.verify_timer); in stmmac_release()
4157 pm_runtime_put(priv->device); in stmmac_release()
4169 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
4173 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in stmmac_vlan_insert()
4180 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_vlan_insert()
4181 p = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_vlan_insert()
4183 p = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_vlan_insert()
4189 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_vlan_insert()
4194 * stmmac_tso_allocator - allocate and fill TX descriptors for a TSO payload in stmmac_tso_allocator()
4199 * @queue: TX queue index
4207 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_allocator()
4217 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_allocator()
4218 priv->dma_conf.dma_tx_size); in stmmac_tso_allocator()
4219 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_allocator()
4221 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_allocator()
4222 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_allocator()
4224 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_allocator()
4226 curr_addr = des + (total_len - tmp_len); in stmmac_tso_allocator()
4236 tmp_len -= TSO_MAX_BUFF_SIZE; in stmmac_tso_allocator()
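/*
 * Userspace illustration of the allocator loop above: each descriptor carries
 * at most TSO_MAX_BUFF_SIZE bytes, so the payload is chopped into chunks and
 * the buffer address advances by what has already been consumed
 * (des + (total_len - tmp_len)). Addresses and lengths below are made up.
 */
#include <stdio.h>

#define TSO_MAX_BUFF_SIZE (16384 - 1)

int main(void)
{
	unsigned long des = 0x1000;		/* hypothetical DMA address of the payload */
	int total_len = 40000, tmp_len = 40000;

	while (tmp_len > 0) {
		unsigned long curr_addr = des + (total_len - tmp_len);
		int buff_size = tmp_len > TSO_MAX_BUFF_SIZE ? TSO_MAX_BUFF_SIZE : tmp_len;

		printf("desc: addr=0x%lx len=%d\n", curr_addr, buff_size);
		tmp_len -= TSO_MAX_BUFF_SIZE;
	}
	return 0;
}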
4242 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_flush_tx_descriptors()
4245 if (likely(priv->extend_desc)) in stmmac_flush_tx_descriptors()
4247 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_flush_tx_descriptors()
4258 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_flush_tx_descriptors()
4259 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_flush_tx_descriptors()
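/*
 * Sketch of the tail-pointer arithmetic just above: the device is told how far
 * the ring has been filled by writing the bus address of the descriptor one
 * past the last prepared entry; desc_size depends on whether normal, enhanced
 * or extended descriptors are in use. Values below are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t dma_tx_phy = 0x80000000ULL;	/* ring base (bus address)        */
	unsigned int cur_tx = 5;		/* next descriptor to be prepared */
	size_t desc_size = 16;			/* e.g. a 16-byte descriptor      */

	printf("tx tail pointer = 0x%llx\n",
	       (unsigned long long)(dma_tx_phy + cur_tx * desc_size));
	return 0;
}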
4263 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
4271 * --------
4272 * | DES0 |---> buffer1 = L2/L3/L4 header
4273 * | DES1 |---> can be used as buffer2 for TCP Payload if the DMA AXI address
4274 * | | width is 32-bit, but we never use it.
4275 * | | Also can be used as the most-significant 8-bits or 16-bits of
4276 * | | buffer1 address pointer if the DMA AXI address width is 40-bit
4277 * | | or 48-bit, and we always use it.
4278 * | DES2 |---> buffer1 len
4279 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
4280 * --------
4281 * --------
4282 * | DES0 |---> buffer1 = TCP Payload (can continue on next descr...)
4283 * | DES1 |---> same as the First Descriptor
4284 * | DES2 |---> buffer1 len
4286 * --------
4290 * --------
4291 * | DES0 |---> buffer1 = Split TCP Payload
4292 * | DES1 |---> same as the First Descriptor
4293 * | DES2 |---> buffer1 len
4295 * --------
4315 * TSO engine will be un-tagged by mistake. in stmmac_tso_xmit()
4320 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4325 nfrags = skb_shinfo(skb)->nr_frags; in stmmac_tso_xmit()
4328 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_tso_xmit()
4329 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_tso_xmit()
4330 first_tx = tx_q->cur_tx; in stmmac_tso_xmit()
4333 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in stmmac_tso_xmit()
4343 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { in stmmac_tso_xmit()
4345 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
4348 netdev_err(priv->dev, in stmmac_tso_xmit()
4349 "%s: Tx Ring full when queue awake\n", in stmmac_tso_xmit()
4355 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ in stmmac_tso_xmit()
4357 mss = skb_shinfo(skb)->gso_size; in stmmac_tso_xmit()
4360 if (mss != tx_q->mss) { in stmmac_tso_xmit()
4361 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4362 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4364 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4367 tx_q->mss = mss; in stmmac_tso_xmit()
4368 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_xmit()
4369 priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4370 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_xmit()
4376 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, in stmmac_tso_xmit()
4377 skb->data_len); in stmmac_tso_xmit()
4380 first_entry = tx_q->cur_tx; in stmmac_tso_xmit()
4381 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_tso_xmit()
4383 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4384 desc = &tx_q->dma_entx[first_entry].basic; in stmmac_tso_xmit()
4386 desc = &tx_q->dma_tx[first_entry]; in stmmac_tso_xmit()
4390 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
4392 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4400 * non-paged SKB data, the DMA buffer address should be saved to in stmmac_tso_xmit()
4401 * tx_q->tx_skbuff_dma[].buf corresponding to the last descriptor, in stmmac_tso_xmit()
4402 * and leave the other tx_q->tx_skbuff_dma[].buf as NULL to guarantee in stmmac_tso_xmit()
4406 * By saving the DMA buffer address to tx_q->tx_skbuff_dma[].buf in stmmac_tso_xmit()
4411 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4412 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_headlen(skb); in stmmac_tso_xmit()
4413 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = false; in stmmac_tso_xmit()
4414 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4418 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_tso_xmit()
4420 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
4423 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
4427 (i == nfrags - 1), queue); in stmmac_tso_xmit()
4429 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
4430 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); in stmmac_tso_xmit()
4431 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; in stmmac_tso_xmit()
4432 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4435 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; in stmmac_tso_xmit()
4438 tx_q->tx_skbuff[tx_q->cur_tx] = skb; in stmmac_tso_xmit()
4439 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_tso_xmit()
4441 /* Manage tx mitigation */ in stmmac_tso_xmit()
4442 tx_packets = (tx_q->cur_tx + 1) - first_tx; in stmmac_tso_xmit()
4443 tx_q->tx_count_frames += tx_packets; in stmmac_tso_xmit()
4445 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
4447 else if (!priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4449 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_tso_xmit()
4451 else if ((tx_q->tx_count_frames % in stmmac_tso_xmit()
4452 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_tso_xmit()
4458 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
4459 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
4461 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
4463 tx_q->tx_count_frames = 0; in stmmac_tso_xmit()
4472 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_conf.dma_tx_size); in stmmac_tso_xmit()
4475 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_tso_xmit()
4477 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
4480 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_tso_xmit()
4481 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_tso_xmit()
4482 u64_stats_inc(&txq_stats->q.tx_tso_frames); in stmmac_tso_xmit()
4483 u64_stats_add(&txq_stats->q.tx_tso_nfrags, nfrags); in stmmac_tso_xmit()
4485 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_tso_xmit()
4486 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_tso_xmit()
4488 if (priv->sarc_type) in stmmac_tso_xmit()
4489 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
4493 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_tso_xmit()
4494 priv->hwts_tx_en)) { in stmmac_tso_xmit()
4496 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_tso_xmit()
4502 tx_q->tx_skbuff_dma[first_entry].last_segment, in stmmac_tso_xmit()
4503 hdr / 4, (skb->len - proto_hdr_len)); in stmmac_tso_xmit()
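/*
 * Small worked example for the last two arguments above, assuming "hdr" is
 * the L4 (TCP) header length in bytes and proto_hdr_len covers every header
 * up to and including it: a 20-byte TCP header becomes 5 32-bit words, and
 * the TSO payload length is whatever remains after the headers.
 */
#include <stdio.h>

int main(void)
{
	unsigned int eth = 14, ip = 20, tcp = 20;	/* assumed header sizes */
	unsigned int skb_len = 4000;			/* assumed frame length */
	unsigned int proto_hdr_len = eth + ip + tcp;

	printf("hdr words: %u, payload len: %u\n", tcp / 4, skb_len - proto_hdr_len);
	return 0;
}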
4518 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_tso_xmit()
4519 tx_q->cur_tx, first, nfrags); in stmmac_tso_xmit()
4521 print_pkt(skb->data, skb_headlen(skb)); in stmmac_tso_xmit()
4524 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_tso_xmit()
4532 dev_err(priv->device, "Tx dma map failed\n"); in stmmac_tso_xmit()
4534 priv->xstats.tx_dropped++; in stmmac_tso_xmit()
4539 * stmmac_has_ip_ethertype() - Check if packet has IP ethertype
4561 * stmmac_xmit - Tx entry point of the driver
4564 * Description : this is the tx entry point of the driver.
4575 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_xmit()
4576 int gso = skb_shinfo(skb)->gso_type; in stmmac_xmit()
4585 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xmit()
4586 txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xmit()
4587 first_tx = tx_q->cur_tx; in stmmac_xmit()
4589 if (priv->tx_path_in_lpi_mode && priv->eee_sw_timer_en) in stmmac_xmit()
4593 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
4596 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
4600 if (priv->est && priv->est->enable && in stmmac_xmit()
4601 priv->est->max_sdu[queue] && in stmmac_xmit()
4602 skb->len > priv->est->max_sdu[queue]) { in stmmac_xmit()
4603 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xmit()
4609 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
4612 netdev_err(priv->dev, in stmmac_xmit()
4613 "%s: Tx Ring full when queue awake\n", in stmmac_xmit()
4622 entry = tx_q->cur_tx; in stmmac_xmit()
4624 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_xmit()
4626 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); in stmmac_xmit()
4627 /* DWMAC IPs can be synthesized to support tx coe only for a few tx in stmmac_xmit()
4629 * support tx coe needs to fallback to software checksum calculation. in stmmac_xmit()
4631 * Packets that won't trigger the COE e.g. most DSA-tagged packets will in stmmac_xmit()
4635 (priv->plat->tx_queues_cfg[queue].coe_unsupported || in stmmac_xmit()
4642 if (likely(priv->extend_desc)) in stmmac_xmit()
4643 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4644 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4645 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4647 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4654 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
4657 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
4661 if (unlikely(entry < 0) && (entry != -EINVAL)) in stmmac_xmit()
4666 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_xmit()
4668 bool last_segment = (i == (nfrags - 1)); in stmmac_xmit()
4670 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4671 WARN_ON(tx_q->tx_skbuff[entry]); in stmmac_xmit()
4673 if (likely(priv->extend_desc)) in stmmac_xmit()
4674 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
4675 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4676 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4678 desc = tx_q->dma_tx + entry; in stmmac_xmit()
4680 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
4682 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4685 tx_q->tx_skbuff_dma[entry].buf = des; in stmmac_xmit()
4689 tx_q->tx_skbuff_dma[entry].map_as_page = true; in stmmac_xmit()
4690 tx_q->tx_skbuff_dma[entry].len = len; in stmmac_xmit()
4691 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; in stmmac_xmit()
4692 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4696 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
4700 tx_q->tx_skbuff[entry] = skb; in stmmac_xmit()
4701 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4704 * segment is reset and the timer re-started to clean the tx status. in stmmac_xmit()
4708 tx_packets = (entry + 1) - first_tx; in stmmac_xmit()
4709 tx_q->tx_count_frames += tx_packets; in stmmac_xmit()
4711 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
4713 else if (!priv->tx_coal_frames[queue]) in stmmac_xmit()
4715 else if (tx_packets > priv->tx_coal_frames[queue]) in stmmac_xmit()
4717 else if ((tx_q->tx_count_frames % in stmmac_xmit()
4718 priv->tx_coal_frames[queue]) < tx_packets) in stmmac_xmit()
4724 if (likely(priv->extend_desc)) in stmmac_xmit()
4725 desc = &tx_q->dma_etx[entry].basic; in stmmac_xmit()
4726 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
4727 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
4729 desc = &tx_q->dma_tx[entry]; in stmmac_xmit()
4731 tx_q->tx_count_frames = 0; in stmmac_xmit()
4740 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xmit()
4741 tx_q->cur_tx = entry; in stmmac_xmit()
4744 netdev_dbg(priv->dev, in stmmac_xmit()
4746 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_xmit()
4749 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
4750 print_pkt(skb->data, skb->len); in stmmac_xmit()
4754 netif_dbg(priv, hw, priv->dev, "%s: stop transmitted packets\n", in stmmac_xmit()
4756 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
4759 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xmit()
4760 u64_stats_add(&txq_stats->q.tx_bytes, skb->len); in stmmac_xmit()
4762 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xmit()
4763 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xmit()
4765 if (priv->sarc_type) in stmmac_xmit()
4766 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
4777 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
4779 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
4782 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_xmit()
4783 tx_q->tx_skbuff_dma[first_entry].buf_type = STMMAC_TXBUF_T_SKB; in stmmac_xmit()
4784 tx_q->tx_skbuff_dma[first_entry].map_as_page = false; in stmmac_xmit()
4788 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; in stmmac_xmit()
4789 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; in stmmac_xmit()
4791 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_xmit()
4792 priv->hwts_tx_en)) { in stmmac_xmit()
4794 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_xmit()
4800 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
4801 skb->len); in stmmac_xmit()
4804 if (tx_q->tbs & STMMAC_TBS_EN) { in stmmac_xmit()
4805 struct timespec64 ts = ns_to_timespec64(skb->tstamp); in stmmac_xmit()
4807 tbs_desc = &tx_q->dma_entx[first_entry]; in stmmac_xmit()
4813 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_xmit()
4815 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xmit()
4823 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
4826 priv->xstats.tx_dropped++; in stmmac_xmit()
4833 __be16 vlan_proto = veth->h_vlan_proto; in stmmac_rx_vlan()
4837 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || in stmmac_rx_vlan()
4839 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { in stmmac_rx_vlan()
4841 vlanid = ntohs(veth->h_vlan_TCI); in stmmac_rx_vlan()
4842 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); in stmmac_rx_vlan()
4849 * stmmac_rx_refill - refill used skb preallocated buffers
4853 * that is based on zero-copy.
4857 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill()
4859 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill()
4862 if (priv->dma_cap.host_dma_width <= 32) in stmmac_rx_refill()
4865 while (dirty-- > 0) { in stmmac_rx_refill()
4866 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill()
4870 if (priv->extend_desc) in stmmac_rx_refill()
4871 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill()
4873 p = rx_q->dma_rx + entry; in stmmac_rx_refill()
4875 if (!buf->page) { in stmmac_rx_refill()
4876 buf->page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4877 if (!buf->page) in stmmac_rx_refill()
4881 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
4882 buf->sec_page = page_pool_alloc_pages(rx_q->page_pool, gfp); in stmmac_rx_refill()
4883 if (!buf->sec_page) in stmmac_rx_refill()
4886 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_rx_refill()
4889 buf->addr = page_pool_get_dma_addr(buf->page) + buf->page_offset; in stmmac_rx_refill()
4891 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
4892 if (priv->sph) in stmmac_rx_refill()
4893 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, true); in stmmac_rx_refill()
4895 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr, false); in stmmac_rx_refill()
4898 rx_q->rx_count_frames++; in stmmac_rx_refill()
4899 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4900 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill()
4901 rx_q->rx_count_frames = 0; in stmmac_rx_refill()
4903 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill()
4904 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill()
4905 if (!priv->use_riwt) in stmmac_rx_refill()
4911 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill()
4913 rx_q->dirty_rx = entry; in stmmac_rx_refill()
4914 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill()
4915 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill()
4916 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
4924 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len()
4927 if (priv->sph && len) in stmmac_rx_buf1_len()
4932 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
4933 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
4939 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf1_len()
4944 return min_t(unsigned int, priv->dma_conf.dma_buf_sz, plen); in stmmac_rx_buf1_len()
4951 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len()
4955 if (!priv->sph) in stmmac_rx_buf2_len()
4960 return priv->dma_conf.dma_buf_sz; in stmmac_rx_buf2_len()
4965 return plen - len; in stmmac_rx_buf2_len()
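/*
 * Worked example of the "plen - len" return above: for the last descriptor of
 * a frame, plen is the total frame length reported by the descriptor and len
 * is what earlier buffers already account for, so the second buffer holds the
 * remainder. Numbers are illustrative.
 */
#include <stdio.h>

int main(void)
{
	unsigned int plen = 2500;	/* total frame length from the descriptor */
	unsigned int len = 1536;	/* bytes already attributed to buffer 1   */

	printf("bytes left for buffer 2: %u\n", plen - len);
	return 0;
}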
4971 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[queue]; in stmmac_xdp_xmit_xdpf()
4972 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xdp_xmit_xdpf()
4973 unsigned int entry = tx_q->cur_tx; in stmmac_xdp_xmit_xdpf()
4981 if (priv->est && priv->est->enable && in stmmac_xdp_xmit_xdpf()
4982 priv->est->max_sdu[queue] && in stmmac_xdp_xmit_xdpf()
4983 xdpf->len > priv->est->max_sdu[queue]) { in stmmac_xdp_xmit_xdpf()
4984 priv->xstats.max_sdu_txq_drop[queue]++; in stmmac_xdp_xmit_xdpf()
4988 if (likely(priv->extend_desc)) in stmmac_xdp_xmit_xdpf()
4989 tx_desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xdp_xmit_xdpf()
4990 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xdp_xmit_xdpf()
4991 tx_desc = &tx_q->dma_entx[entry].basic; in stmmac_xdp_xmit_xdpf()
4993 tx_desc = tx_q->dma_tx + entry; in stmmac_xdp_xmit_xdpf()
4996 dma_addr = dma_map_single(priv->device, xdpf->data, in stmmac_xdp_xmit_xdpf()
4997 xdpf->len, DMA_TO_DEVICE); in stmmac_xdp_xmit_xdpf()
4998 if (dma_mapping_error(priv->device, dma_addr)) in stmmac_xdp_xmit_xdpf()
5001 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_NDO; in stmmac_xdp_xmit_xdpf()
5003 struct page *page = virt_to_page(xdpf->data); in stmmac_xdp_xmit_xdpf()
5006 xdpf->headroom; in stmmac_xdp_xmit_xdpf()
5007 dma_sync_single_for_device(priv->device, dma_addr, in stmmac_xdp_xmit_xdpf()
5008 xdpf->len, DMA_BIDIRECTIONAL); in stmmac_xdp_xmit_xdpf()
5010 tx_q->tx_skbuff_dma[entry].buf_type = STMMAC_TXBUF_T_XDP_TX; in stmmac_xdp_xmit_xdpf()
5013 tx_q->tx_skbuff_dma[entry].buf = dma_addr; in stmmac_xdp_xmit_xdpf()
5014 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_xdp_xmit_xdpf()
5015 tx_q->tx_skbuff_dma[entry].len = xdpf->len; in stmmac_xdp_xmit_xdpf()
5016 tx_q->tx_skbuff_dma[entry].last_segment = true; in stmmac_xdp_xmit_xdpf()
5017 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_xdp_xmit_xdpf()
5019 tx_q->xdpf[entry] = xdpf; in stmmac_xdp_xmit_xdpf()
5023 stmmac_prepare_tx_desc(priv, tx_desc, 1, xdpf->len, in stmmac_xdp_xmit_xdpf()
5024 true, priv->mode, true, true, in stmmac_xdp_xmit_xdpf()
5025 xdpf->len); in stmmac_xdp_xmit_xdpf()
5027 tx_q->tx_count_frames++; in stmmac_xdp_xmit_xdpf()
5029 if (tx_q->tx_count_frames % priv->tx_coal_frames[queue] == 0) in stmmac_xdp_xmit_xdpf()
5035 tx_q->tx_count_frames = 0; in stmmac_xdp_xmit_xdpf()
5037 u64_stats_update_begin(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
5038 u64_stats_inc(&txq_stats->q.tx_set_ic_bit); in stmmac_xdp_xmit_xdpf()
5039 u64_stats_update_end(&txq_stats->q_syncp); in stmmac_xdp_xmit_xdpf()
5042 stmmac_enable_dma_transmission(priv, priv->ioaddr, queue); in stmmac_xdp_xmit_xdpf()
5044 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_tx_size); in stmmac_xdp_xmit_xdpf()
5045 tx_q->cur_tx = entry; in stmmac_xdp_xmit_xdpf()
5058 while (index >= priv->plat->tx_queues_to_use) in stmmac_xdp_get_tx_queue()
5059 index -= priv->plat->tx_queues_to_use; in stmmac_xdp_get_tx_queue()
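/*
 * Sketch of the queue-selection wrap above: an arbitrary index (for example
 * the current CPU id) is folded into [0, tx_queues_to_use) by repeated
 * subtraction, which for non-negative input is just index % tx_queues_to_use.
 */
#include <stdio.h>

static unsigned int pick_tx_queue(unsigned int index, unsigned int tx_queues)
{
	while (index >= tx_queues)
		index -= tx_queues;
	return index;
}

int main(void)
{
	printf("index 6 with 4 TX queues -> queue %u\n", pick_tx_queue(6, 4));
	return 0;
}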
5077 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit_back()
5080 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit_back()
5108 if (xdp_do_redirect(priv->dev, xdp, prog) < 0) in __stmmac_xdp_run_prog()
5114 bpf_warn_invalid_xdp_action(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5117 trace_xdp_exception(priv->dev, prog, act); in __stmmac_xdp_run_prog()
5133 prog = READ_ONCE(priv->xdp_prog); in stmmac_xdp_run_prog()
5141 return ERR_PTR(-res); in stmmac_xdp_run_prog()
5162 unsigned int metasize = xdp->data - xdp->data_meta; in stmmac_construct_skb_zc()
5163 unsigned int datasize = xdp->data_end - xdp->data; in stmmac_construct_skb_zc()
5166 skb = napi_alloc_skb(&ch->rxtx_napi, in stmmac_construct_skb_zc()
5167 xdp->data_end - xdp->data_hard_start); in stmmac_construct_skb_zc()
5171 skb_reserve(skb, xdp->data - xdp->data_hard_start); in stmmac_construct_skb_zc()
5172 memcpy(__skb_put(skb, datasize), xdp->data, datasize); in stmmac_construct_skb_zc()
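/*
 * Byte-accounting sketch for the copy above: the skb is sized for the whole
 * XDP buffer, the headroom in front of the payload is reserved, and only the
 * payload bytes are copied in. Plain offsets stand in for the real
 * xdp_buff/sk_buff pointers; the numbers are made up.
 */
#include <stdio.h>

int main(void)
{
	unsigned int data_hard_start = 0, data = 256, data_end = 256 + 1000;
	unsigned int alloc_len = data_end - data_hard_start;	/* 1256 */
	unsigned int headroom = data - data_hard_start;		/* 256  */
	unsigned int datasize = data_end - data;		/* 1000 */

	printf("alloc %u, reserve %u, put %u\n", alloc_len, headroom, datasize);
	return 0;
}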
5183 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_dispatch_skb_zc()
5184 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_dispatch_skb_zc()
5185 unsigned int len = xdp->data_end - xdp->data; in stmmac_dispatch_skb_zc()
5187 int coe = priv->hw->rx_csum; in stmmac_dispatch_skb_zc()
5193 priv->xstats.rx_dropped++; in stmmac_dispatch_skb_zc()
5198 if (priv->hw->hw_vlan_en) in stmmac_dispatch_skb_zc()
5200 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_dispatch_skb_zc()
5203 stmmac_rx_vlan(priv->dev, skb); in stmmac_dispatch_skb_zc()
5204 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_dispatch_skb_zc()
5209 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_dispatch_skb_zc()
5215 napi_gro_receive(&ch->rxtx_napi, skb); in stmmac_dispatch_skb_zc()
5217 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5218 u64_stats_inc(&rxq_stats->napi.rx_pkt_n); in stmmac_dispatch_skb_zc()
5219 u64_stats_add(&rxq_stats->napi.rx_bytes, len); in stmmac_dispatch_skb_zc()
5220 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_dispatch_skb_zc()
5225 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_refill_zc()
5226 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill_zc()
5232 while (budget-- > 0 && entry != rx_q->cur_rx) { in stmmac_rx_refill_zc()
5233 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill_zc()
5237 if (!buf->xdp) { in stmmac_rx_refill_zc()
5238 buf->xdp = xsk_buff_alloc(rx_q->xsk_pool); in stmmac_rx_refill_zc()
5239 if (!buf->xdp) { in stmmac_rx_refill_zc()
5245 if (priv->extend_desc) in stmmac_rx_refill_zc()
5246 rx_desc = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill_zc()
5248 rx_desc = rx_q->dma_rx + entry; in stmmac_rx_refill_zc()
5250 dma_addr = xsk_buff_xdp_get_dma(buf->xdp); in stmmac_rx_refill_zc()
5255 rx_q->rx_count_frames++; in stmmac_rx_refill_zc()
5256 rx_q->rx_count_frames += priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5257 if (rx_q->rx_count_frames > priv->rx_coal_frames[queue]) in stmmac_rx_refill_zc()
5258 rx_q->rx_count_frames = 0; in stmmac_rx_refill_zc()
5260 use_rx_wd = !priv->rx_coal_frames[queue]; in stmmac_rx_refill_zc()
5261 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill_zc()
5262 if (!priv->use_riwt) in stmmac_rx_refill_zc()
5268 entry = STMMAC_GET_ENTRY(entry, priv->dma_conf.dma_rx_size); in stmmac_rx_refill_zc()
5272 rx_q->dirty_rx = entry; in stmmac_rx_refill_zc()
5273 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill_zc()
5274 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill_zc()
5275 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill_zc()
5293 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx_zc()
5294 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx_zc()
5297 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5308 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx_zc()
5309 if (priv->extend_desc) { in stmmac_rx_zc()
5310 rx_head = (void *)rx_q->dma_erx; in stmmac_rx_zc()
5313 rx_head = (void *)rx_q->dma_rx; in stmmac_rx_zc()
5317 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx_zc()
5318 rx_q->dma_rx_phy, desc_size); in stmmac_rx_zc()
5328 if (!count && rx_q->state_saved) { in stmmac_rx_zc()
5329 error = rx_q->state.error; in stmmac_rx_zc()
5330 len = rx_q->state.len; in stmmac_rx_zc()
5332 rx_q->state_saved = false; in stmmac_rx_zc()
5343 buf = &rx_q->buf_pool[entry]; in stmmac_rx_zc()
5351 if (priv->extend_desc) in stmmac_rx_zc()
5352 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_zc()
5354 p = rx_q->dma_rx + entry; in stmmac_rx_zc()
5357 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx_zc()
5363 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx_zc()
5364 priv->dma_conf.dma_rx_size); in stmmac_rx_zc()
5365 next_entry = rx_q->cur_rx; in stmmac_rx_zc()
5367 if (priv->extend_desc) in stmmac_rx_zc()
5368 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx_zc()
5370 np = rx_q->dma_rx + next_entry; in stmmac_rx_zc()
5375 if (!buf->xdp) in stmmac_rx_zc()
5378 if (priv->extend_desc) in stmmac_rx_zc()
5379 stmmac_rx_extended_status(priv, &priv->xstats, in stmmac_rx_zc()
5380 rx_q->dma_erx + entry); in stmmac_rx_zc()
5382 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5383 buf->xdp = NULL; in stmmac_rx_zc()
5386 if (!priv->hwts_rx_en) in stmmac_rx_zc()
5399 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5400 buf->xdp = NULL; in stmmac_rx_zc()
5406 ctx = xsk_buff_to_stmmac_ctx(buf->xdp); in stmmac_rx_zc()
5407 ctx->priv = priv; in stmmac_rx_zc()
5408 ctx->desc = p; in stmmac_rx_zc()
5409 ctx->ndesc = np; in stmmac_rx_zc()
5417 buf1_len -= ETH_FCS_LEN; in stmmac_rx_zc()
5418 len -= ETH_FCS_LEN; in stmmac_rx_zc()
5422 buf->xdp->data_end = buf->xdp->data + buf1_len; in stmmac_rx_zc()
5423 xsk_buff_dma_sync_for_cpu(buf->xdp); in stmmac_rx_zc()
5425 prog = READ_ONCE(priv->xdp_prog); in stmmac_rx_zc()
5426 res = __stmmac_xdp_run_prog(priv, prog, buf->xdp); in stmmac_rx_zc()
5430 stmmac_dispatch_skb_zc(priv, queue, p, np, buf->xdp); in stmmac_rx_zc()
5431 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5434 xsk_buff_free(buf->xdp); in stmmac_rx_zc()
5443 buf->xdp = NULL; in stmmac_rx_zc()
5449 rx_q->state_saved = true; in stmmac_rx_zc()
5450 rx_q->state.error = error; in stmmac_rx_zc()
5451 rx_q->state.len = len; in stmmac_rx_zc()
5456 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5457 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx_zc()
5458 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx_zc()
5460 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx_zc()
5461 priv->xstats.rx_errors += rx_errors; in stmmac_rx_zc()
5463 if (xsk_uses_need_wakeup(rx_q->xsk_pool)) { in stmmac_rx_zc()
5465 xsk_set_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5467 xsk_clear_rx_need_wakeup(rx_q->xsk_pool); in stmmac_rx_zc()
5476 * stmmac_rx - manage the receive process
5486 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[queue]; in stmmac_rx()
5487 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rx()
5488 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
5490 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx()
5491 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx()
5499 dma_dir = page_pool_get_dma_dir(rx_q->page_pool); in stmmac_rx()
5500 bufsz = DIV_ROUND_UP(priv->dma_conf.dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx()
5501 limit = min(priv->dma_conf.dma_rx_size - 1, (unsigned int)limit); in stmmac_rx()
5506 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
5507 if (priv->extend_desc) { in stmmac_rx()
5508 rx_head = (void *)rx_q->dma_erx; in stmmac_rx()
5511 rx_head = (void *)rx_q->dma_rx; in stmmac_rx()
5515 stmmac_display_ring(priv, rx_head, priv->dma_conf.dma_rx_size, true, in stmmac_rx()
5516 rx_q->dma_rx_phy, desc_size); in stmmac_rx()
5526 if (!count && rx_q->state_saved) { in stmmac_rx()
5527 skb = rx_q->state.skb; in stmmac_rx()
5528 error = rx_q->state.error; in stmmac_rx()
5529 len = rx_q->state.len; in stmmac_rx()
5531 rx_q->state_saved = false; in stmmac_rx()
5544 buf = &rx_q->buf_pool[entry]; in stmmac_rx()
5546 if (priv->extend_desc) in stmmac_rx()
5547 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx()
5549 p = rx_q->dma_rx + entry; in stmmac_rx()
5552 status = stmmac_rx_status(priv, &priv->xstats, p); in stmmac_rx()
5557 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx()
5558 priv->dma_conf.dma_rx_size); in stmmac_rx()
5559 next_entry = rx_q->cur_rx; in stmmac_rx()
5561 if (priv->extend_desc) in stmmac_rx()
5562 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx()
5564 np = rx_q->dma_rx + next_entry; in stmmac_rx()
5568 if (priv->extend_desc) in stmmac_rx()
5569 stmmac_rx_extended_status(priv, &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
5571 page_pool_put_page(rx_q->page_pool, buf->page, 0, true); in stmmac_rx()
5572 buf->page = NULL; in stmmac_rx()
5574 if (!priv->hwts_rx_en) in stmmac_rx()
5597 buf2_len -= ETH_FCS_LEN; in stmmac_rx()
5598 len -= ETH_FCS_LEN; in stmmac_rx()
5600 buf1_len -= ETH_FCS_LEN; in stmmac_rx()
5601 len -= ETH_FCS_LEN; in stmmac_rx()
5608 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5610 net_prefetch(page_address(buf->page) + in stmmac_rx()
5611 buf->page_offset); in stmmac_rx()
5613 xdp_init_buff(&ctx.xdp, bufsz, &rx_q->xdp_rxq); in stmmac_rx()
5614 xdp_prepare_buff(&ctx.xdp, page_address(buf->page), in stmmac_rx()
5615 buf->page_offset, buf1_len, true); in stmmac_rx()
5617 pre_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5618 buf->page_offset; in stmmac_rx()
5628 sync_len = ctx.xdp.data_end - ctx.xdp.data_hard_start - in stmmac_rx()
5629 buf->page_offset; in stmmac_rx()
5634 unsigned int xdp_res = -PTR_ERR(skb); in stmmac_rx()
5637 page_pool_put_page(rx_q->page_pool, in stmmac_rx()
5640 buf->page = NULL; in stmmac_rx()
5656 buf->page = NULL; in stmmac_rx()
5668 buf1_len = ctx.xdp.data_end - ctx.xdp.data; in stmmac_rx()
5670 skb = napi_build_skb(page_address(buf->page), in stmmac_rx()
5671 rx_q->napi_skb_frag_size); in stmmac_rx()
5673 page_pool_recycle_direct(rx_q->page_pool, in stmmac_rx()
5674 buf->page); in stmmac_rx()
5681 head_pad_len = ctx.xdp.data - ctx.xdp.data_hard_start; in stmmac_rx()
5685 buf->page = NULL; in stmmac_rx()
5687 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
5689 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5690 buf->page, buf->page_offset, buf1_len, in stmmac_rx()
5691 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5692 buf->page = NULL; in stmmac_rx()
5696 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
5698 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
5699 buf->sec_page, 0, buf2_len, in stmmac_rx()
5700 priv->dma_conf.dma_buf_sz); in stmmac_rx()
5701 buf->sec_page = NULL; in stmmac_rx()
5714 if (priv->hw->hw_vlan_en) in stmmac_rx()
5716 stmmac_rx_hw_vlan(priv, priv->hw, p, skb); in stmmac_rx()
5719 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
5721 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
5726 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_rx()
5732 napi_gro_receive(&ch->rx_napi, skb); in stmmac_rx()
5741 rx_q->state_saved = true; in stmmac_rx()
5742 rx_q->state.skb = skb; in stmmac_rx()
5743 rx_q->state.error = error; in stmmac_rx()
5744 rx_q->state.len = len; in stmmac_rx()
5751 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_rx()
5752 u64_stats_add(&rxq_stats->napi.rx_packets, rx_packets); in stmmac_rx()
5753 u64_stats_add(&rxq_stats->napi.rx_bytes, rx_bytes); in stmmac_rx()
5754 u64_stats_add(&rxq_stats->napi.rx_pkt_n, count); in stmmac_rx()
5755 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_rx()
5757 priv->xstats.rx_dropped += rx_dropped; in stmmac_rx()
5758 priv->xstats.rx_errors += rx_errors; in stmmac_rx()
5767 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx()
5769 u32 chan = ch->index; in stmmac_napi_poll_rx()
5772 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rx()
5773 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5774 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rx()
5775 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rx()
5781 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rx()
5782 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
5783 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rx()
5793 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx()
5796 u32 chan = ch->index; in stmmac_napi_poll_tx()
5799 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_tx()
5800 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5801 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_tx()
5802 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_tx()
5810 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_tx()
5811 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
5812 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_tx()
5815 /* TX still has packets to handle; check if we need to arm the tx timer */ in stmmac_napi_poll_tx()
5826 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rxtx()
5831 u32 chan = ch->index; in stmmac_napi_poll_rxtx()
5833 rxq_stats = &priv->xstats.rxq_stats[chan]; in stmmac_napi_poll_rxtx()
5834 u64_stats_update_begin(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5835 u64_stats_inc(&rxq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5836 u64_stats_update_end(&rxq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5838 txq_stats = &priv->xstats.txq_stats[chan]; in stmmac_napi_poll_rxtx()
5839 u64_stats_update_begin(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5840 u64_stats_inc(&txq_stats->napi.poll); in stmmac_napi_poll_rxtx()
5841 u64_stats_update_end(&txq_stats->napi_syncp); in stmmac_napi_poll_rxtx()
5850 /* If either TX or RX work is not complete, return budget in stmmac_napi_poll_rxtx()
5860 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5861 /* Both RX and TX work are complete, in stmmac_napi_poll_rxtx()
5862 * so enable both RX & TX IRQs. in stmmac_napi_poll_rxtx()
5864 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_napi_poll_rxtx()
5865 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rxtx()
5868 /* TX still has packets to handle; check if we need to arm the tx timer */ in stmmac_napi_poll_rxtx()
5872 return min(rxtx_done, budget - 1); in stmmac_napi_poll_rxtx()
5892 * stmmac_set_rx_mode - entry point for multicast addressing
5907 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
5911 * stmmac_change_mtu - entry point to change MTU size for the device.
5918 * 0 on success and an appropriate negative integer as defined in errno.h
5924 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
5930 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
5932 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
5935 netdev_dbg(priv->dev, "Jumbo frames not supported for XDP\n"); in stmmac_change_mtu()
5936 return -EINVAL; in stmmac_change_mtu()
5943 return -EINVAL; in stmmac_change_mtu()
5946 netdev_dbg(priv->dev, "restarting interface to change its MTU\n"); in stmmac_change_mtu()
5950 netdev_err(priv->dev, "failed allocating new dma conf for new MTU %d\n", in stmmac_change_mtu()
5961 netdev_err(priv->dev, "failed reopening the interface after MTU change\n"); in stmmac_change_mtu()
5970 WRITE_ONCE(dev->mtu, mtu); in stmmac_change_mtu()
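A minimal userspace sketch of how this entry point is exercised (the interface name "eth0" and the MTU value are placeholders): a SIOCSIFMTU ioctl on any socket is routed by the core to the driver's .ndo_change_mtu, i.e. stmmac_change_mtu() above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>

int main(void)
{
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0)
		return 1;
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder ifname */
	ifr.ifr_mtu = 1500;				/* requested MTU */
	if (ioctl(fd, SIOCSIFMTU, &ifr) < 0)		/* ends up in ndo_change_mtu */
		perror("SIOCSIFMTU");
	close(fd);
	return 0;
}

The same request can be issued with "ip link set dev eth0 mtu 1500"; either way the driver reallocates its DMA configuration and reopens the interface, as the messages above show.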
5981 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
5984 if (!priv->plat->tx_coe) in stmmac_fix_features()
5988 * needs to have the Tx COE disabled for oversized frames in stmmac_fix_features()
5990 * the TX csum insertion in the TDES and not use SF. in stmmac_fix_features()
5992 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
5996 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
5998 priv->tso = true; in stmmac_fix_features()
6000 priv->tso = false; in stmmac_fix_features()
6013 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
6015 priv->hw->rx_csum = 0; in stmmac_set_features()
6019 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
6021 if (priv->sph_cap) { in stmmac_set_features()
6022 bool sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
6025 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
6026 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
6030 priv->hw->hw_vlan_en = true; in stmmac_set_features()
6032 priv->hw->hw_vlan_en = false; in stmmac_set_features()
6034 phylink_rx_clk_stop_block(priv->phylink); in stmmac_set_features()
6035 stmmac_set_hw_vlan_mode(priv, priv->hw); in stmmac_set_features()
6036 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_set_features()
6043 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_common_interrupt()
6044 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_common_interrupt()
6049 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_common_interrupt()
6052 if (priv->irq_wake) in stmmac_common_interrupt()
6053 pm_wakeup_event(priv->device, 0); in stmmac_common_interrupt()
6055 if (priv->dma_cap.estsel) in stmmac_common_interrupt()
6056 stmmac_est_irq_status(priv, priv, priv->dev, in stmmac_common_interrupt()
6057 &priv->xstats, tx_cnt); in stmmac_common_interrupt()
6063 if ((priv->plat->has_gmac) || xmac) { in stmmac_common_interrupt()
6064 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_common_interrupt()
6067 /* For LPI we need to save the tx status */ in stmmac_common_interrupt()
6069 priv->tx_path_in_lpi_mode = true; in stmmac_common_interrupt()
6071 priv->tx_path_in_lpi_mode = false; in stmmac_common_interrupt()
6075 stmmac_host_mtl_irq_status(priv, priv->hw, queue); in stmmac_common_interrupt()
6078 if (priv->hw->pcs && in stmmac_common_interrupt()
6079 !(priv->plat->flags & STMMAC_FLAG_HAS_INTEGRATED_PCS)) { in stmmac_common_interrupt()
6080 if (priv->xstats.pcs_link) in stmmac_common_interrupt()
6081 netif_carrier_on(priv->dev); in stmmac_common_interrupt()
6083 netif_carrier_off(priv->dev); in stmmac_common_interrupt()
6091 * stmmac_interrupt - main ISR
6098 * o Core interrupts to manage: remote wake-up, management counter, LPI
6107 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
6111 if (priv->sfty_irq <= 0 && stmmac_safety_feat_interrupt(priv)) in stmmac_interrupt()
6129 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_mac_interrupt()
6144 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_safety_interrupt()
6157 int chan = tx_q->queue_index; in stmmac_msi_intr_tx()
6165 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_tx()
6184 int chan = rx_q->queue_index; in stmmac_msi_intr_rx()
6191 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_msi_intr_rx()
6200 * stmmac_ioctl - Entry point for the Ioctl
6211 int ret = -EOPNOTSUPP; in stmmac_ioctl()
6214 return -EINVAL; in stmmac_ioctl()
6220 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
6239 int ret = -EOPNOTSUPP; in stmmac_setup_tc_block_cb()
6241 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
6285 return -EOPNOTSUPP; in stmmac_setup_tc()
6292 int gso = skb_shinfo(skb)->gso_type; in stmmac_select_queue()
6304 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in stmmac_select_queue()
6312 ret = pm_runtime_resume_and_get(priv->device); in stmmac_set_mac_address()
6320 phylink_rx_clk_stop_block(priv->phylink); in stmmac_set_mac_address()
6321 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
6322 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_set_mac_address()
6325 pm_runtime_put(priv->device); in stmmac_set_mac_address()
6347 le32_to_cpu(p->des0), le32_to_cpu(p->des1), in sysfs_display_ring()
6348 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); in sysfs_display_ring()
6350 p = &(++ep)->basic; in sysfs_display_ring()
6358 struct net_device *dev = seq->private; in stmmac_rings_status_show()
6360 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
6361 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
6364 if ((dev->flags & IFF_UP) == 0) in stmmac_rings_status_show()
6368 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_rings_status_show()
6372 if (priv->extend_desc) { in stmmac_rings_status_show()
6374 sysfs_display_ring((void *)rx_q->dma_erx, in stmmac_rings_status_show()
6375 priv->dma_conf.dma_rx_size, 1, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6378 sysfs_display_ring((void *)rx_q->dma_rx, in stmmac_rings_status_show()
6379 priv->dma_conf.dma_rx_size, 0, seq, rx_q->dma_rx_phy); in stmmac_rings_status_show()
6384 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_rings_status_show()
6386 seq_printf(seq, "TX Queue %d:\n", queue); in stmmac_rings_status_show()
6388 if (priv->extend_desc) { in stmmac_rings_status_show()
6390 sysfs_display_ring((void *)tx_q->dma_etx, in stmmac_rings_status_show()
6391 priv->dma_conf.dma_tx_size, 1, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6392 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { in stmmac_rings_status_show()
6394 sysfs_display_ring((void *)tx_q->dma_tx, in stmmac_rings_status_show()
6395 priv->dma_conf.dma_tx_size, 0, seq, tx_q->dma_tx_phy); in stmmac_rings_status_show()
6421 struct net_device *dev = seq->private; in stmmac_dma_cap_show()
6424 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
6433 seq_printf(seq, "\t10/100 Mbps: %s\n", in stmmac_dma_cap_show()
6434 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
6436 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
6438 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
6439 if (priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6442 priv->dma_cap.multi_addr); in stmmac_dma_cap_show()
6445 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
6447 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
6450 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
6452 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
6454 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
6456 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
6458 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
6459 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", in stmmac_dma_cap_show()
6460 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6461 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", in stmmac_dma_cap_show()
6462 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
6463 if (priv->plat->has_xgmac) in stmmac_dma_cap_show()
6465 dwxgmac_timestamp_source[priv->dma_cap.tssrc]); in stmmac_dma_cap_show()
6466 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", in stmmac_dma_cap_show()
6467 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
6468 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
6469 seq_printf(seq, "\tChecksum Offload in TX: %s\n", in stmmac_dma_cap_show()
6470 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6471 if (priv->synopsys_id >= DWMAC_CORE_4_00 || in stmmac_dma_cap_show()
6472 priv->plat->has_xgmac) { in stmmac_dma_cap_show()
6474 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
6477 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
6479 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
6481 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
6484 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
6485 seq_printf(seq, "\tNumber of Additional TX channel: %d\n", in stmmac_dma_cap_show()
6486 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
6488 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
6489 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", in stmmac_dma_cap_show()
6490 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
6492 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
6493 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
6494 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
6495 seq_printf(seq, "\tHash Table Size: %lu\n", priv->dma_cap.hash_tb_sz ? in stmmac_dma_cap_show()
6496 (BIT(priv->dma_cap.hash_tb_sz) << 5) : 0); in stmmac_dma_cap_show()
6497 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
6499 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
6501 dwxgmac_safety_feature_desc[priv->dma_cap.asp]); in stmmac_dma_cap_show()
6503 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
6505 priv->dma_cap.host_dma_width); in stmmac_dma_cap_show()
6507 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
6509 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
6511 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
6512 seq_printf(seq, "\tVLAN TX Insertion: %s\n", in stmmac_dma_cap_show()
6513 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
6515 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
6517 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
6519 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
6521 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
6523 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
6524 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", in stmmac_dma_cap_show()
6525 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
6527 priv->dma_cap.tbs_ch_num); in stmmac_dma_cap_show()
6528 seq_printf(seq, "\tPer-Stream Filtering: %s\n", in stmmac_dma_cap_show()
6529 priv->dma_cap.sgfsel ? "Y" : "N"); in stmmac_dma_cap_show()
6531 BIT(priv->dma_cap.ttsfd) >> 1); in stmmac_dma_cap_show()
6533 priv->dma_cap.numtc); in stmmac_dma_cap_show()
6535 priv->dma_cap.dcben ? "Y" : "N"); in stmmac_dma_cap_show()
6537 priv->dma_cap.advthword ? "Y" : "N"); in stmmac_dma_cap_show()
6539 priv->dma_cap.ptoen ? "Y" : "N"); in stmmac_dma_cap_show()
6540 seq_printf(seq, "\tOne-Step Timestamping: %s\n", in stmmac_dma_cap_show()
6541 priv->dma_cap.osten ? "Y" : "N"); in stmmac_dma_cap_show()
6542 seq_printf(seq, "\tPriority-Based Flow Control: %s\n", in stmmac_dma_cap_show()
6543 priv->dma_cap.pfcen ? "Y" : "N"); in stmmac_dma_cap_show()
6545 BIT(priv->dma_cap.frpes) << 6); in stmmac_dma_cap_show()
6547 BIT(priv->dma_cap.frpbs) << 6); in stmmac_dma_cap_show()
6549 priv->dma_cap.frppipe_num); in stmmac_dma_cap_show()
6551 priv->dma_cap.nrvf_num ? in stmmac_dma_cap_show()
6552 (BIT(priv->dma_cap.nrvf_num) << 1) : 0); in stmmac_dma_cap_show()
6554 priv->dma_cap.estwid ? 4 * priv->dma_cap.estwid + 12 : 0); in stmmac_dma_cap_show()
6556 priv->dma_cap.estdep ? (BIT(priv->dma_cap.estdep) << 5) : 0); in stmmac_dma_cap_show()
6557 seq_printf(seq, "\tQueue/Channel-Based VLAN Tag Insertion on TX: %s\n", in stmmac_dma_cap_show()
6558 priv->dma_cap.cbtisel ? "Y" : "N"); in stmmac_dma_cap_show()
6560 priv->dma_cap.aux_snapshot_n); in stmmac_dma_cap_show()
6561 seq_printf(seq, "\tOne-Step Timestamping for PTP over UDP/IP: %s\n", in stmmac_dma_cap_show()
6562 priv->dma_cap.pou_ost_en ? "Y" : "N"); in stmmac_dma_cap_show()
6564 priv->dma_cap.edma ? "Y" : "N"); in stmmac_dma_cap_show()
6566 priv->dma_cap.ediffc ? "Y" : "N"); in stmmac_dma_cap_show()
6568 priv->dma_cap.vxn ? "Y" : "N"); in stmmac_dma_cap_show()
6570 priv->dma_cap.dbgmem ? "Y" : "N"); in stmmac_dma_cap_show()
6572 priv->dma_cap.pcsel ? BIT(priv->dma_cap.pcsel + 3) : 0); in stmmac_dma_cap_show()
6585 if (dev->netdev_ops != &stmmac_netdev_ops) in stmmac_device_event()
6590 debugfs_change_name(priv->dbgfs_dir, "%s", dev->name); in stmmac_device_event()
6608 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
6610 /* Entry to report DMA RX/TX rings */ in stmmac_init_fs()
6611 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6615 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
6625 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
6660 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
6667 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
6669 return -EOPNOTSUPP; in stmmac_vlan_update()
6675 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
6687 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_add_vid()
6694 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6697 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
6701 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
6702 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
6707 pm_runtime_put(priv->device); in stmmac_vlan_rx_add_vid()
6721 ret = pm_runtime_resume_and_get(priv->device); in stmmac_vlan_rx_kill_vid()
6728 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
6730 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
6731 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
6739 pm_runtime_put(priv->device); in stmmac_vlan_rx_kill_vid()
6748 switch (bpf->command) { in stmmac_bpf()
6750 return stmmac_xdp_set_prog(priv, bpf->prog, bpf->extack); in stmmac_bpf()
6752 return stmmac_xdp_setup_pool(priv, bpf->xsk.pool, in stmmac_bpf()
6753 bpf->xsk.queue_id); in stmmac_bpf()
6755 return -EOPNOTSUPP; in stmmac_bpf()
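For context, a sketch of how an XDP program usually reaches this .ndo_bpf handler from userspace (assumes the libbpf >= 1.0 API; the object path and interface name are placeholders). Installing a program this way makes the core call stmmac_bpf() with XDP_SETUP_PROG:

#include <bpf/libbpf.h>
#include <net/if.h>
#include <linux/if_link.h>

int attach_xdp(const char *ifname, const char *obj_path)
{
	struct bpf_object *obj = bpf_object__open_file(obj_path, NULL);
	struct bpf_program *prog;
	int ifindex = if_nametoindex(ifname);

	if (!obj || !ifindex)
		return -1;
	if (bpf_object__load(obj))
		return -1;
	/* take the first program in the object; a real tool would look it
	 * up by section or name */
	prog = bpf_object__next_program(obj, NULL);
	if (!prog)
		return -1;
	/* native/driver mode, so stmmac's own XDP hooks are used */
	return bpf_xdp_attach(ifindex, bpf_program__fd(prog),
			      XDP_FLAGS_DRV_MODE, NULL);
}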
6768 if (unlikely(test_bit(STMMAC_DOWN, &priv->state))) in stmmac_xdp_xmit()
6769 return -ENETDOWN; in stmmac_xdp_xmit()
6772 return -EINVAL; in stmmac_xdp_xmit()
6775 nq = netdev_get_tx_queue(priv->dev, queue); in stmmac_xdp_xmit()
6778 /* Avoids TX time-out as we are sharing with slow path */ in stmmac_xdp_xmit()
6803 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_rx_queue()
6806 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_rx_queue()
6807 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_disable_rx_queue()
6808 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_rx_queue()
6811 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_rx_queue()
6816 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_enable_rx_queue()
6817 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_rx_queue()
6822 ret = __alloc_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6824 netdev_err(priv->dev, "Failed to alloc RX desc.\n"); in stmmac_enable_rx_queue()
6828 ret = __init_dma_rx_desc_rings(priv, &priv->dma_conf, queue, GFP_KERNEL); in stmmac_enable_rx_queue()
6830 __free_dma_rx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6831 netdev_err(priv->dev, "Failed to init RX desc.\n"); in stmmac_enable_rx_queue()
6836 stmmac_clear_rx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_rx_queue()
6838 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_rx_queue()
6839 rx_q->dma_rx_phy, rx_q->queue_index); in stmmac_enable_rx_queue()
6841 rx_q->rx_tail_addr = rx_q->dma_rx_phy + (rx_q->buf_alloc_num * in stmmac_enable_rx_queue()
6843 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6844 rx_q->rx_tail_addr, rx_q->queue_index); in stmmac_enable_rx_queue()
6846 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_enable_rx_queue()
6847 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_enable_rx_queue()
6848 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6850 rx_q->queue_index); in stmmac_enable_rx_queue()
6852 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_enable_rx_queue()
6853 priv->dma_conf.dma_buf_sz, in stmmac_enable_rx_queue()
6854 rx_q->queue_index); in stmmac_enable_rx_queue()
6859 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_rx_queue()
6860 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 1, 0); in stmmac_enable_rx_queue()
6861 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_rx_queue()
6866 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_tx_queue()
6869 spin_lock_irqsave(&ch->lock, flags); in stmmac_disable_tx_queue()
6870 stmmac_disable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_disable_tx_queue()
6871 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_disable_tx_queue()
6874 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_disable_tx_queue()
6879 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_enable_tx_queue()
6880 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_tx_queue()
6884 ret = __alloc_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6886 netdev_err(priv->dev, "Failed to alloc TX desc.\n"); in stmmac_enable_tx_queue()
6890 ret = __init_dma_tx_desc_rings(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6892 __free_dma_tx_desc_resources(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6893 netdev_err(priv->dev, "Failed to init TX desc.\n"); in stmmac_enable_tx_queue()
6898 stmmac_clear_tx_descriptors(priv, &priv->dma_conf, queue); in stmmac_enable_tx_queue()
6900 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_enable_tx_queue()
6901 tx_q->dma_tx_phy, tx_q->queue_index); in stmmac_enable_tx_queue()
6903 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_enable_tx_queue()
6904 stmmac_enable_tbs(priv, priv->ioaddr, 1, tx_q->queue_index); in stmmac_enable_tx_queue()
6906 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_enable_tx_queue()
6907 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_enable_tx_queue()
6908 tx_q->tx_tail_addr, tx_q->queue_index); in stmmac_enable_tx_queue()
6912 spin_lock_irqsave(&ch->lock, flags); in stmmac_enable_tx_queue()
6913 stmmac_enable_dma_irq(priv, priv->ioaddr, queue, 0, 1); in stmmac_enable_tx_queue()
6914 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_enable_tx_queue()
6922 /* Ensure tx function is not running */ in stmmac_xdp_release()
6928 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_release()
6929 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_release()
6934 /* Stop TX/RX DMA channels */ in stmmac_xdp_release()
6937 /* Release and free the Rx/Tx resources */ in stmmac_xdp_release()
6938 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_release()
6940 /* Disable the MAC Rx/Tx */ in stmmac_xdp_release()
6941 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_xdp_release()
6953 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_xdp_open()
6954 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_xdp_open()
6963 ret = alloc_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
6970 ret = init_dma_desc_rings(dev, &priv->dma_conf, GFP_KERNEL); in stmmac_xdp_open()
6981 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_xdp_open()
6982 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 1); in stmmac_xdp_open()
6986 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_xdp_open()
6990 rx_q = &priv->dma_conf.rx_queue[chan]; in stmmac_xdp_open()
6992 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
6993 rx_q->dma_rx_phy, chan); in stmmac_xdp_open()
6995 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_xdp_open()
6996 (rx_q->buf_alloc_num * in stmmac_xdp_open()
6998 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
6999 rx_q->rx_tail_addr, chan); in stmmac_xdp_open()
7001 if (rx_q->xsk_pool && rx_q->buf_alloc_num) { in stmmac_xdp_open()
7002 buf_size = xsk_pool_get_rx_frame_size(rx_q->xsk_pool); in stmmac_xdp_open()
7003 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
7005 rx_q->queue_index); in stmmac_xdp_open()
7007 stmmac_set_dma_bfsize(priv, priv->ioaddr, in stmmac_xdp_open()
7008 priv->dma_conf.dma_buf_sz, in stmmac_xdp_open()
7009 rx_q->queue_index); in stmmac_xdp_open()
7012 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_xdp_open()
7015 /* DMA TX Channel Configuration */ in stmmac_xdp_open()
7017 tx_q = &priv->dma_conf.tx_queue[chan]; in stmmac_xdp_open()
7019 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_xdp_open()
7020 tx_q->dma_tx_phy, chan); in stmmac_xdp_open()
7022 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_xdp_open()
7023 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_xdp_open()
7024 tx_q->tx_tail_addr, chan); in stmmac_xdp_open()
7026 hrtimer_setup(&tx_q->txtimer, stmmac_tx_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); in stmmac_xdp_open()
7029 /* Enable the MAC Rx/Tx */ in stmmac_xdp_open()
7030 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_xdp_open()
7032 /* Start Rx & Tx DMA Channels */ in stmmac_xdp_open()
7048 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_xdp_open()
7049 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_xdp_open()
7053 free_dma_desc_resources(priv, &priv->dma_conf); in stmmac_xdp_open()
7065 if (test_bit(STMMAC_DOWN, &priv->state) || in stmmac_xsk_wakeup()
7066 !netif_carrier_ok(priv->dev)) in stmmac_xsk_wakeup()
7067 return -ENETDOWN; in stmmac_xsk_wakeup()
7070 return -EINVAL; in stmmac_xsk_wakeup()
7072 if (queue >= priv->plat->rx_queues_to_use || in stmmac_xsk_wakeup()
7073 queue >= priv->plat->tx_queues_to_use) in stmmac_xsk_wakeup()
7074 return -EINVAL; in stmmac_xsk_wakeup()
7076 rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_xsk_wakeup()
7077 tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_xsk_wakeup()
7078 ch = &priv->channel[queue]; in stmmac_xsk_wakeup()
7080 if (!rx_q->xsk_pool && !tx_q->xsk_pool) in stmmac_xsk_wakeup()
7081 return -EINVAL; in stmmac_xsk_wakeup()
7083 if (!napi_if_scheduled_mark_missed(&ch->rxtx_napi)) { in stmmac_xsk_wakeup()
7084 /* EQoS does not have per-DMA channel SW interrupt, in stmmac_xsk_wakeup()
7085 * so we schedule the RX NAPI straight away. in stmmac_xsk_wakeup()
7087 if (likely(napi_schedule_prep(&ch->rxtx_napi))) in stmmac_xsk_wakeup()
7088 __napi_schedule(&ch->rxtx_napi); in stmmac_xsk_wakeup()
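In zero-copy AF_XDP mode the kernel sets the need_wakeup flags handled above, and userspace kicks the driver with a dummy syscall on the XSK socket, which lands in this .ndo_xsk_wakeup handler. A minimal sketch of the usual TX kick (the socket fd comes from the application's XSK setup, which is not shown here):

#include <errno.h>
#include <sys/socket.h>

/* Kick the kernel when the XSK TX ring's need_wakeup flag is set; in
 * zero-copy mode the syscall reaches the driver's .ndo_xsk_wakeup,
 * i.e. stmmac_xsk_wakeup() above.
 */
static int xsk_kick_tx(int xsk_fd)
{
	int ret = sendto(xsk_fd, NULL, 0, MSG_DONTWAIT, NULL, 0);

	if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN || errno == EBUSY)
		return 0;
	return -errno;
}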
7097 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_get_stats64()
7098 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_get_stats64()
7103 struct stmmac_txq_stats *txq_stats = &priv->xstats.txq_stats[q]; in stmmac_get_stats64()
7108 start = u64_stats_fetch_begin(&txq_stats->q_syncp); in stmmac_get_stats64()
7109 tx_bytes = u64_stats_read(&txq_stats->q.tx_bytes); in stmmac_get_stats64()
7110 } while (u64_stats_fetch_retry(&txq_stats->q_syncp, start)); in stmmac_get_stats64()
7112 start = u64_stats_fetch_begin(&txq_stats->napi_syncp); in stmmac_get_stats64()
7113 tx_packets = u64_stats_read(&txq_stats->napi.tx_packets); in stmmac_get_stats64()
7114 } while (u64_stats_fetch_retry(&txq_stats->napi_syncp, start)); in stmmac_get_stats64()
7116 stats->tx_packets += tx_packets; in stmmac_get_stats64()
7117 stats->tx_bytes += tx_bytes; in stmmac_get_stats64()
7121 struct stmmac_rxq_stats *rxq_stats = &priv->xstats.rxq_stats[q]; in stmmac_get_stats64()
7126 start = u64_stats_fetch_begin(&rxq_stats->napi_syncp); in stmmac_get_stats64()
7127 rx_packets = u64_stats_read(&rxq_stats->napi.rx_packets); in stmmac_get_stats64()
7128 rx_bytes = u64_stats_read(&rxq_stats->napi.rx_bytes); in stmmac_get_stats64()
7129 } while (u64_stats_fetch_retry(&rxq_stats->napi_syncp, start)); in stmmac_get_stats64()
7131 stats->rx_packets += rx_packets; in stmmac_get_stats64()
7132 stats->rx_bytes += rx_bytes; in stmmac_get_stats64()
7135 stats->rx_dropped = priv->xstats.rx_dropped; in stmmac_get_stats64()
7136 stats->rx_errors = priv->xstats.rx_errors; in stmmac_get_stats64()
7137 stats->tx_dropped = priv->xstats.tx_dropped; in stmmac_get_stats64()
7138 stats->tx_errors = priv->xstats.tx_errors; in stmmac_get_stats64()
7139 stats->tx_carrier_errors = priv->xstats.tx_losscarrier + priv->xstats.tx_carrier; in stmmac_get_stats64()
7140 stats->collisions = priv->xstats.tx_collision + priv->xstats.rx_collision; in stmmac_get_stats64()
7141 stats->rx_length_errors = priv->xstats.rx_length; in stmmac_get_stats64()
7142 stats->rx_crc_errors = priv->xstats.rx_crc_errors; in stmmac_get_stats64()
7143 stats->rx_over_errors = priv->xstats.rx_overflow_cntr; in stmmac_get_stats64()
7144 stats->rx_missed_errors = priv->xstats.rx_missed_cntr; in stmmac_get_stats64()
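The totals gathered here are what the core reports through the standard statistics interfaces (procfs, rtnetlink, sysfs). A minimal sketch, with "eth0" as a placeholder name, reading one counter that is ultimately backed by this .ndo_get_stats64 callback:

#include <stdio.h>

int main(void)
{
	unsigned long long rx_packets = 0;
	FILE *f = fopen("/sys/class/net/eth0/statistics/rx_packets", "r");

	if (!f)
		return 1;
	if (fscanf(f, "%llu", &rx_packets) == 1)
		printf("rx_packets: %llu\n", rx_packets);
	fclose(f);
	return 0;
}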
7170 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
7172 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
7175 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
7178 netif_trans_update(priv->dev); in stmmac_reset_subtask()
7179 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
7182 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7183 dev_close(priv->dev); in stmmac_reset_subtask()
7184 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
7185 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
7186 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
7196 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
7200 * stmmac_hw_init - Init the MAC device
7211 /* dwmac-sun8i only works in chain mode */ in stmmac_hw_init()
7212 if (priv->plat->flags & STMMAC_FLAG_HAS_SUN8I) in stmmac_hw_init()
7214 priv->chain_mode = chain_mode; in stmmac_hw_init()
7222 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
7223 if (priv->hw_cap_support) { in stmmac_hw_init()
7224 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
7231 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
7232 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up && in stmmac_hw_init()
7233 !(priv->plat->flags & STMMAC_FLAG_USE_PHY_WOL); in stmmac_hw_init()
7234 priv->hw->pmt = priv->plat->pmt; in stmmac_hw_init()
7235 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
7236 priv->hw->multicast_filter_bins = in stmmac_hw_init()
7237 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
7238 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
7239 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
7243 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
7244 priv->plat->tx_coe = 0; in stmmac_hw_init()
7246 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
7249 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
7251 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
7252 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
7253 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
7254 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
7257 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
7260 if (priv->plat->rx_coe) { in stmmac_hw_init()
7261 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
7262 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
7263 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
7264 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
7266 if (priv->plat->tx_coe) in stmmac_hw_init()
7267 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
7269 if (priv->plat->pmt) { in stmmac_hw_init()
7270 dev_info(priv->device, "Wake-Up On Lan supported\n"); in stmmac_hw_init()
7271 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
7274 if (priv->dma_cap.tsoen) in stmmac_hw_init()
7275 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
7277 if (priv->dma_cap.number_rx_queues && in stmmac_hw_init()
7278 priv->plat->rx_queues_to_use > priv->dma_cap.number_rx_queues) { in stmmac_hw_init()
7279 dev_warn(priv->device, in stmmac_hw_init()
7281 priv->plat->rx_queues_to_use); in stmmac_hw_init()
7282 priv->plat->rx_queues_to_use = priv->dma_cap.number_rx_queues; in stmmac_hw_init()
7284 if (priv->dma_cap.number_tx_queues && in stmmac_hw_init()
7285 priv->plat->tx_queues_to_use > priv->dma_cap.number_tx_queues) { in stmmac_hw_init()
7286 dev_warn(priv->device, in stmmac_hw_init()
7287 "Number of Tx queues (%u) exceeds dma capability\n", in stmmac_hw_init()
7288 priv->plat->tx_queues_to_use); in stmmac_hw_init()
7289 priv->plat->tx_queues_to_use = priv->dma_cap.number_tx_queues; in stmmac_hw_init()
7292 if (priv->dma_cap.rx_fifo_size && in stmmac_hw_init()
7293 priv->plat->rx_fifo_size > priv->dma_cap.rx_fifo_size) { in stmmac_hw_init()
7294 dev_warn(priv->device, in stmmac_hw_init()
7296 priv->plat->rx_fifo_size); in stmmac_hw_init()
7297 priv->plat->rx_fifo_size = priv->dma_cap.rx_fifo_size; in stmmac_hw_init()
7299 if (priv->dma_cap.tx_fifo_size && in stmmac_hw_init()
7300 priv->plat->tx_fifo_size > priv->dma_cap.tx_fifo_size) { in stmmac_hw_init()
7301 dev_warn(priv->device, in stmmac_hw_init()
7302 "Tx FIFO size (%u) exceeds dma capability\n", in stmmac_hw_init()
7303 priv->plat->tx_fifo_size); in stmmac_hw_init()
7304 priv->plat->tx_fifo_size = priv->dma_cap.tx_fifo_size; in stmmac_hw_init()
7307 priv->hw->vlan_fail_q_en = in stmmac_hw_init()
7308 (priv->plat->flags & STMMAC_FLAG_VLAN_FAIL_Q_EN); in stmmac_hw_init()
7309 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
7312 if (priv->hwif_quirks) { in stmmac_hw_init()
7313 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
7323 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
7324 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
7325 priv->use_riwt = 1; in stmmac_hw_init()
7326 dev_info(priv->device, in stmmac_hw_init()
7338 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
7341 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
7343 ch->priv_data = priv; in stmmac_napi_add()
7344 ch->index = queue; in stmmac_napi_add()
7345 spin_lock_init(&ch->lock); in stmmac_napi_add()
7347 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
7348 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx); in stmmac_napi_add()
7350 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7351 netif_napi_add_tx(dev, &ch->tx_napi, in stmmac_napi_add()
7354 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_add()
7355 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
7356 netif_napi_add(dev, &ch->rxtx_napi, in stmmac_napi_add()
7367 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
7370 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
7372 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
7373 netif_napi_del(&ch->rx_napi); in stmmac_napi_del()
7374 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
7375 netif_napi_del(&ch->tx_napi); in stmmac_napi_del()
7376 if (queue < priv->plat->rx_queues_to_use && in stmmac_napi_del()
7377 queue < priv->plat->tx_queues_to_use) { in stmmac_napi_del()
7378 netif_napi_del(&ch->rxtx_napi); in stmmac_napi_del()
7393 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
7394 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
7396 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_reinit_queues()
7397 priv->rss.table[i] = ethtool_rxfh_indir_default(i, in stmmac_reinit_queues()
7416 priv->dma_conf.dma_rx_size = rx_size; in stmmac_reinit_ringparam()
7417 priv->dma_conf.dma_tx_size = tx_size; in stmmac_reinit_ringparam()
7428 struct dma_desc *desc_contains_ts = ctx->desc; in stmmac_xdp_rx_timestamp()
7429 struct stmmac_priv *priv = ctx->priv; in stmmac_xdp_rx_timestamp()
7430 struct dma_desc *ndesc = ctx->ndesc; in stmmac_xdp_rx_timestamp()
7431 struct dma_desc *desc = ctx->desc; in stmmac_xdp_rx_timestamp()
7434 if (!priv->hwts_rx_en) in stmmac_xdp_rx_timestamp()
7435 return -ENODATA; in stmmac_xdp_rx_timestamp()
7438 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_xdp_rx_timestamp()
7442 if (stmmac_get_rx_timestamp_status(priv, desc, ndesc, priv->adv_ts)) { in stmmac_xdp_rx_timestamp()
7443 stmmac_get_timestamp(priv, desc_contains_ts, priv->adv_ts, &ns); in stmmac_xdp_rx_timestamp()
7444 ns -= priv->plat->cdc_error_adj; in stmmac_xdp_rx_timestamp()
7449 return -ENODATA; in stmmac_xdp_rx_timestamp()
7478 return -ENOMEM; in stmmac_dvr_probe()
7483 priv->device = device; in stmmac_dvr_probe()
7484 priv->dev = ndev; in stmmac_dvr_probe()
7487 u64_stats_init(&priv->xstats.rxq_stats[i].napi_syncp); in stmmac_dvr_probe()
7489 u64_stats_init(&priv->xstats.txq_stats[i].q_syncp); in stmmac_dvr_probe()
7490 u64_stats_init(&priv->xstats.txq_stats[i].napi_syncp); in stmmac_dvr_probe()
7493 priv->xstats.pcpu_stats = in stmmac_dvr_probe()
7495 if (!priv->xstats.pcpu_stats) in stmmac_dvr_probe()
7496 return -ENOMEM; in stmmac_dvr_probe()
7499 priv->pause_time = pause; in stmmac_dvr_probe()
7500 priv->plat = plat_dat; in stmmac_dvr_probe()
7501 priv->ioaddr = res->addr; in stmmac_dvr_probe()
7502 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
7503 priv->plat->dma_cfg->multi_msi_en = in stmmac_dvr_probe()
7504 (priv->plat->flags & STMMAC_FLAG_MULTI_MSI_EN); in stmmac_dvr_probe()
7506 priv->dev->irq = res->irq; in stmmac_dvr_probe()
7507 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
7508 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
7509 priv->sfty_irq = res->sfty_irq; in stmmac_dvr_probe()
7510 priv->sfty_ce_irq = res->sfty_ce_irq; in stmmac_dvr_probe()
7511 priv->sfty_ue_irq = res->sfty_ue_irq; in stmmac_dvr_probe()
7513 priv->rx_irq[i] = res->rx_irq[i]; in stmmac_dvr_probe()
7515 priv->tx_irq[i] = res->tx_irq[i]; in stmmac_dvr_probe()
7517 if (!is_zero_ether_addr(res->mac)) in stmmac_dvr_probe()
7518 eth_hw_addr_set(priv->dev, res->mac); in stmmac_dvr_probe()
7520 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
7525 priv->af_xdp_zc_qps = bitmap_zalloc(MTL_MAX_TX_QUEUES, GFP_KERNEL); in stmmac_dvr_probe()
7526 if (!priv->af_xdp_zc_qps) in stmmac_dvr_probe()
7527 return -ENOMEM; in stmmac_dvr_probe()
7530 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
7531 if (!priv->wq) { in stmmac_dvr_probe()
7532 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
7533 ret = -ENOMEM; in stmmac_dvr_probe()
7537 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
7539 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_dvr_probe()
7545 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
7547 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
7548 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7549 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7553 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7554 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
7557 ret = reset_control_deassert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_probe()
7558 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
7559 dev_err(priv->device, "unable to bring out of ahb reset: %pe\n", in stmmac_dvr_probe()
7572 if (priv->synopsys_id < DWMAC_CORE_5_20) in stmmac_dvr_probe()
7573 priv->plat->dma_cfg->dche = false; in stmmac_dvr_probe()
7577 ndev->netdev_ops = &stmmac_netdev_ops; in stmmac_dvr_probe()
7579 ndev->xdp_metadata_ops = &stmmac_xdp_metadata_ops; in stmmac_dvr_probe()
7580 ndev->xsk_tx_metadata_ops = &stmmac_xsk_tx_metadata_ops; in stmmac_dvr_probe()
7582 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in stmmac_dvr_probe()
7584 ndev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | in stmmac_dvr_probe()
7589 ndev->hw_features |= NETIF_F_HW_TC; in stmmac_dvr_probe()
7592 if ((priv->plat->flags & STMMAC_FLAG_TSO_EN) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
7593 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in stmmac_dvr_probe()
7594 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
7595 ndev->hw_features |= NETIF_F_GSO_UDP_L4; in stmmac_dvr_probe()
7596 priv->tso = true; in stmmac_dvr_probe()
7597 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
7600 if (priv->dma_cap.sphen && in stmmac_dvr_probe()
7601 !(priv->plat->flags & STMMAC_FLAG_SPH_DISABLE)) { in stmmac_dvr_probe()
7602 ndev->hw_features |= NETIF_F_GRO; in stmmac_dvr_probe()
7603 priv->sph_cap = true; in stmmac_dvr_probe()
7604 priv->sph = priv->sph_cap; in stmmac_dvr_probe()
7605 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
7613 if (priv->plat->host_dma_width) in stmmac_dvr_probe()
7614 priv->dma_cap.host_dma_width = priv->plat->host_dma_width; in stmmac_dvr_probe()
7616 priv->dma_cap.host_dma_width = priv->dma_cap.addr64; in stmmac_dvr_probe()
7618 if (priv->dma_cap.host_dma_width) { in stmmac_dvr_probe()
7620 DMA_BIT_MASK(priv->dma_cap.host_dma_width)); in stmmac_dvr_probe()
7622 dev_info(priv->device, "Using %d/%d bits DMA host/device width\n", in stmmac_dvr_probe()
7623 priv->dma_cap.host_dma_width, priv->dma_cap.addr64); in stmmac_dvr_probe()
7630 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
7634 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
7638 priv->dma_cap.host_dma_width = 32; in stmmac_dvr_probe()
7642 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in stmmac_dvr_probe()
7643 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); in stmmac_dvr_probe()
7646 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; in stmmac_dvr_probe()
7647 if (priv->plat->has_gmac4) { in stmmac_dvr_probe()
7648 ndev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX; in stmmac_dvr_probe()
7649 priv->hw->hw_vlan_en = true; in stmmac_dvr_probe()
7651 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
7652 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in stmmac_dvr_probe()
7653 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; in stmmac_dvr_probe()
7655 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
7656 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; in stmmac_dvr_probe()
7657 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
7658 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; in stmmac_dvr_probe()
7661 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
7663 priv->xstats.threshold = tc; in stmmac_dvr_probe()
7666 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
7667 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
7668 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
7669 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
7671 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
7672 ndev->features |= NETIF_F_RXHASH; in stmmac_dvr_probe()
7674 ndev->vlan_features |= ndev->features; in stmmac_dvr_probe()
7676 /* MTU range: 46 - hw-specific max */ in stmmac_dvr_probe()
7677 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; in stmmac_dvr_probe()
7678 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
7679 ndev->max_mtu = XGMAC_JUMBO_LEN; in stmmac_dvr_probe()
7680 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
7681 ndev->max_mtu = JUMBO_LEN; in stmmac_dvr_probe()
7683 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); in stmmac_dvr_probe()
7684 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu in stmmac_dvr_probe()
7685 * or if plat->maxmtu < ndev->min_mtu, which is an invalid range. in stmmac_dvr_probe()
7687 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
7688 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
7689 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
7690 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
7691 dev_warn(priv->device, in stmmac_dvr_probe()
7693 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
7695 ndev->priv_flags |= IFF_LIVE_ADDR_CHANGE; in stmmac_dvr_probe()
7700 mutex_init(&priv->lock); in stmmac_dvr_probe()
7706 * changed at run-time and it is fixed. Conversely, the driver will try to in stmmac_dvr_probe()
7710 if (priv->plat->clk_csr >= 0) in stmmac_dvr_probe()
7711 priv->clk_csr = priv->plat->clk_csr; in stmmac_dvr_probe()
7724 dev_err_probe(priv->device, ret, in stmmac_dvr_probe()
7726 priv->plat->bus_id); in stmmac_dvr_probe()
7730 if (priv->plat->speed_mode_2500) in stmmac_dvr_probe()
7731 priv->plat->speed_mode_2500(ndev, priv->plat->bsp_priv); in stmmac_dvr_probe()
7745 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
7754 if (priv->plat->dump_debug_regs) in stmmac_dvr_probe()
7755 priv->plat->dump_debug_regs(priv->plat->bsp_priv); in stmmac_dvr_probe()
7765 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
7773 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
7775 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_probe()
7784 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
7792 netdev_info(priv->dev, "%s: removing driver", __func__); in stmmac_dvr_remove()
7801 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
7802 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
7803 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
7804 reset_control_assert(priv->plat->stmmac_ahb_rst); in stmmac_dvr_remove()
7809 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
7810 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
7811 bitmap_free(priv->af_xdp_zc_qps); in stmmac_dvr_remove()
7819 * stmmac_suspend - suspend callback
7834 mutex_lock(&priv->lock); in stmmac_suspend()
7840 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
7841 hrtimer_cancel(&priv->dma_conf.tx_queue[chan].txtimer); in stmmac_suspend()
7843 if (priv->eee_sw_timer_en) { in stmmac_suspend()
7844 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
7845 timer_delete_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
7848 /* Stop TX/RX DMA */ in stmmac_suspend()
7851 if (priv->plat->serdes_powerdown) in stmmac_suspend()
7852 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
7855 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
7856 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
7857 priv->irq_wake = 1; in stmmac_suspend()
7859 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
7860 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
7863 mutex_unlock(&priv->lock); in stmmac_suspend()
7866 if (device_may_wakeup(priv->device) && !priv->plat->pmt) in stmmac_suspend()
7867 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
7869 phylink_suspend(priv->phylink, in stmmac_suspend()
7870 device_may_wakeup(priv->device) && priv->plat->pmt); in stmmac_suspend()
7874 timer_shutdown_sync(&priv->fpe_cfg.verify_timer); in stmmac_suspend()
7882 struct stmmac_rx_queue *rx_q = &priv->dma_conf.rx_queue[queue]; in stmmac_reset_rx_queue()
7884 rx_q->cur_rx = 0; in stmmac_reset_rx_queue()
7885 rx_q->dirty_rx = 0; in stmmac_reset_rx_queue()
7890 struct stmmac_tx_queue *tx_q = &priv->dma_conf.tx_queue[queue]; in stmmac_reset_tx_queue()
7892 tx_q->cur_tx = 0; in stmmac_reset_tx_queue()
7893 tx_q->dirty_tx = 0; in stmmac_reset_tx_queue()
7894 tx_q->mss = 0; in stmmac_reset_tx_queue()
7896 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_reset_tx_queue()
7900 * stmmac_reset_queues_param - reset queue parameters
7905 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
7906 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
7917 * stmmac_resume - resume callback
7932 * automatically as soon as a magic packet or a Wake-up frame in stmmac_resume()
7937 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
7938 mutex_lock(&priv->lock); in stmmac_resume()
7939 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
7940 mutex_unlock(&priv->lock); in stmmac_resume()
7941 priv->irq_wake = 0; in stmmac_resume()
7943 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
7945 if (priv->mii) in stmmac_resume()
7946 stmmac_mdio_reset(priv->mii); in stmmac_resume()
7949 if (!(priv->plat->flags & STMMAC_FLAG_SERDES_UP_AFTER_PHY_LINKUP) && in stmmac_resume()
7950 priv->plat->serdes_powerup) { in stmmac_resume()
7951 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
7952 priv->plat->bsp_priv); in stmmac_resume()
7963 phylink_prepare_resume(priv->phylink); in stmmac_resume()
7965 mutex_lock(&priv->lock); in stmmac_resume()
7970 stmmac_clear_descriptors(priv, &priv->dma_conf); in stmmac_resume()
7974 phylink_rx_clk_stop_block(priv->phylink); in stmmac_resume()
7977 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
7978 phylink_rx_clk_stop_unblock(priv->phylink); in stmmac_resume()
7983 mutex_unlock(&priv->lock); in stmmac_resume()
7989 phylink_resume(priv->phylink); in stmmac_resume()
7990 if (device_may_wakeup(priv->device) && !priv->plat->pmt) in stmmac_resume()
7991 phylink_speed_up(priv->phylink); in stmmac_resume()
8038 pr_err("%s: ERROR broken module parameter conversion", __func__); in stmmac_cmdline_opt()
8068 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");