Lines Matching +full:eee +full:- +full:broken +full:- +full:100 +full:tx (i.e. the devicetree property "eee-broken-100tx")

1 // SPDX-License-Identifier: GPL-2.0-only
3 This is the driver for the ST MAC 10/100/1000 on-chip Ethernet controllers.
6 Copyright(C) 2007-2011 STMicroelectronics Ltd
29 #include <linux/dma-mapping.h>
50 #define TSO_MAX_BUFF_SIZE (SZ_16K - 1)
58 static int debug = -1;
60 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
62 static int phyaddr = -1;
66 #define STMMAC_TX_THRESH(x) ((x)->dma_tx_size / 4)
67 #define STMMAC_RX_THRESH(x) ((x)->dma_rx_size / 4)
96 MODULE_PARM_DESC(eee_timer, "LPI tx expiration time in msec");
99 /* By default the driver will use the ring mode to manage tx and rx descriptors,
117 * stmmac_verify_args - verify the driver parameters.
138 * stmmac_disable_all_queues - Disable all queues
143 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_disable_all_queues()
144 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_disable_all_queues()
149 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_disable_all_queues()
152 napi_disable(&ch->rx_napi); in stmmac_disable_all_queues()
154 napi_disable(&ch->tx_napi); in stmmac_disable_all_queues()
159 * stmmac_enable_all_queues - Enable all queues
164 u32 rx_queues_cnt = priv->plat->rx_queues_to_use; in stmmac_enable_all_queues()
165 u32 tx_queues_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_all_queues()
170 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_enable_all_queues()
173 napi_enable(&ch->rx_napi); in stmmac_enable_all_queues()
175 napi_enable(&ch->tx_napi); in stmmac_enable_all_queues()
181 if (!test_bit(STMMAC_DOWN, &priv->state) && in stmmac_service_event_schedule()
182 !test_and_set_bit(STMMAC_SERVICE_SCHED, &priv->state)) in stmmac_service_event_schedule()
183 queue_work(priv->wq, &priv->service_task); in stmmac_service_event_schedule()
188 netif_carrier_off(priv->dev); in stmmac_global_err()
189 set_bit(STMMAC_RESET_REQUESTED, &priv->state); in stmmac_global_err()
194 * stmmac_clk_csr_set - dynamically set the MDC clock
201 * changed at run-time and it is fixed (as reported in the driver
209 clk_rate = clk_get_rate(priv->plat->stmmac_clk); in stmmac_clk_csr_set()
218 if (!(priv->clk_csr & MAC_CSR_H_FRQ_MASK)) { in stmmac_clk_csr_set()
220 priv->clk_csr = STMMAC_CSR_20_35M; in stmmac_clk_csr_set()
222 priv->clk_csr = STMMAC_CSR_35_60M; in stmmac_clk_csr_set()
224 priv->clk_csr = STMMAC_CSR_60_100M; in stmmac_clk_csr_set()
226 priv->clk_csr = STMMAC_CSR_100_150M; in stmmac_clk_csr_set()
228 priv->clk_csr = STMMAC_CSR_150_250M; in stmmac_clk_csr_set()
230 priv->clk_csr = STMMAC_CSR_250_300M; in stmmac_clk_csr_set()
233 if (priv->plat->has_sun8i) { in stmmac_clk_csr_set()
235 priv->clk_csr = 0x03; in stmmac_clk_csr_set()
237 priv->clk_csr = 0x02; in stmmac_clk_csr_set()
239 priv->clk_csr = 0x01; in stmmac_clk_csr_set()
241 priv->clk_csr = 0; in stmmac_clk_csr_set()
244 if (priv->plat->has_xgmac) { in stmmac_clk_csr_set()
246 priv->clk_csr = 0x5; in stmmac_clk_csr_set()
248 priv->clk_csr = 0x4; in stmmac_clk_csr_set()
250 priv->clk_csr = 0x3; in stmmac_clk_csr_set()
252 priv->clk_csr = 0x2; in stmmac_clk_csr_set()
254 priv->clk_csr = 0x1; in stmmac_clk_csr_set()
256 priv->clk_csr = 0x0; in stmmac_clk_csr_set()
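The comparison bounds of the ladder above are elided by the line matcher. A sketch of the selection follows; the Hz thresholds are assumptions inferred from the STMMAC_CSR_* constant names, not copied from the file:

	static u32 csr_for_rate(unsigned long clk_rate)
	{
		if (clk_rate < 35000000)	/* 20-35 MHz band */
			return STMMAC_CSR_20_35M;
		if (clk_rate < 60000000)
			return STMMAC_CSR_35_60M;
		if (clk_rate < 100000000)
			return STMMAC_CSR_60_100M;
		if (clk_rate < 150000000)
			return STMMAC_CSR_100_150M;
		if (clk_rate < 250000000)
			return STMMAC_CSR_150_250M;
		return STMMAC_CSR_250_300M;	/* 250-300 MHz band */
	}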
268 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_tx_avail()
271 if (tx_q->dirty_tx > tx_q->cur_tx) in stmmac_tx_avail()
272 avail = tx_q->dirty_tx - tx_q->cur_tx - 1; in stmmac_tx_avail()
274 avail = priv->dma_tx_size - tx_q->cur_tx + tx_q->dirty_tx - 1; in stmmac_tx_avail()
280 * stmmac_rx_dirty - Get RX queue dirty
286 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_rx_dirty()
289 if (rx_q->dirty_rx <= rx_q->cur_rx) in stmmac_rx_dirty()
290 dirty = rx_q->cur_rx - rx_q->dirty_rx; in stmmac_rx_dirty()
292 dirty = priv->dma_rx_size - rx_q->dirty_rx + rx_q->cur_rx; in stmmac_rx_dirty()
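Both helpers above are standard circular-ring index arithmetic. A self-contained sketch of the two computations (the function names are illustrative, not from the driver); one TX slot is deliberately kept unused so that cur == dirty unambiguously means "empty":

	/* Free TX slots between the producer (cur) and consumer (dirty). */
	static unsigned int ring_avail(unsigned int cur, unsigned int dirty,
				       unsigned int size)
	{
		return (dirty > cur) ? dirty - cur - 1
				     : size - cur + dirty - 1;
	}

	/* Consumed RX slots waiting to be refilled (dirty chases cur). */
	static unsigned int ring_dirty(unsigned int cur, unsigned int dirty,
				       unsigned int size)
	{
		return (dirty <= cur) ? cur - dirty
				      : size - dirty + cur;
	}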
298 * stmmac_enable_eee_mode - check and enter LPI mode
301 * EEE.
305 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_enable_eee_mode()
308 /* check if all TX queues have the work finished */ in stmmac_enable_eee_mode()
310 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_enable_eee_mode()
312 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_enable_eee_mode()
317 if (!priv->tx_path_in_lpi_mode) in stmmac_enable_eee_mode()
318 stmmac_set_eee_mode(priv, priv->hw, in stmmac_enable_eee_mode()
319 priv->plat->en_tx_lpi_clockgating); in stmmac_enable_eee_mode()
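The loop header and early return around source lines 309-316 are elided. A minimal reconstruction of the control flow from the visible fragments (the exact elided statements are assumptions):

	/* Enter LPI only once every TX ring has been fully reclaimed. */
	for (queue = 0; queue < tx_cnt; queue++) {
		struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue];

		if (tx_q->dirty_tx != tx_q->cur_tx)
			return;	/* still transmitting: stay out of LPI */
	}

	if (!priv->tx_path_in_lpi_mode)
		stmmac_set_eee_mode(priv, priv->hw,
				    priv->plat->en_tx_lpi_clockgating);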
323 * stmmac_disable_eee_mode - disable and exit from LPI mode
325 * Description: this function is used to exit and disable EEE in case of
330 stmmac_reset_eee_mode(priv, priv->hw); in stmmac_disable_eee_mode()
331 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_disable_eee_mode()
332 priv->tx_path_in_lpi_mode = false; in stmmac_disable_eee_mode()
336 * stmmac_eee_ctrl_timer - EEE TX SW timer.
347 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_ctrl_timer()
351 * stmmac_eee_init - init EEE
354 * if the GMAC supports the EEE (from the HW cap reg) and the phy device
355 * can also manage EEE, this function enables the LPI state and starts the related
360 int eee_tw_timer = priv->eee_tw_timer; in stmmac_eee_init()
363 * so we do not support extra feature like EEE. in stmmac_eee_init()
365 if (priv->hw->pcs == STMMAC_PCS_TBI || in stmmac_eee_init()
366 priv->hw->pcs == STMMAC_PCS_RTBI) in stmmac_eee_init()
369 /* Check if MAC core supports the EEE feature. */ in stmmac_eee_init()
370 if (!priv->dma_cap.eee) in stmmac_eee_init()
373 mutex_lock(&priv->lock); in stmmac_eee_init()
376 if (!priv->eee_active) { in stmmac_eee_init()
377 if (priv->eee_enabled) { in stmmac_eee_init()
378 netdev_dbg(priv->dev, "disable EEE\n"); in stmmac_eee_init()
379 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_eee_init()
380 stmmac_set_eee_timer(priv, priv->hw, 0, eee_tw_timer); in stmmac_eee_init()
382 mutex_unlock(&priv->lock); in stmmac_eee_init()
386 if (priv->eee_active && !priv->eee_enabled) { in stmmac_eee_init()
387 timer_setup(&priv->eee_ctrl_timer, stmmac_eee_ctrl_timer, 0); in stmmac_eee_init()
388 stmmac_set_eee_timer(priv, priv->hw, STMMAC_DEFAULT_LIT_LS, in stmmac_eee_init()
392 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_eee_init()
394 mutex_unlock(&priv->lock); in stmmac_eee_init()
395 netdev_dbg(priv->dev, "Energy-Efficient Ethernet initialized\n"); in stmmac_eee_init()
399 /* stmmac_get_tx_hwtstamp - get HW TX timestamps
414 if (!priv->hwts_tx_en) in stmmac_get_tx_hwtstamp()
418 if (likely(!skb || !(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS))) in stmmac_get_tx_hwtstamp()
421 /* check tx tstamp status */ in stmmac_get_tx_hwtstamp()
423 stmmac_get_timestamp(priv, p, priv->adv_ts, &ns); in stmmac_get_tx_hwtstamp()
425 } else if (!stmmac_get_mac_tx_timestamp(priv, priv->hw, &ns)) { in stmmac_get_tx_hwtstamp()
433 netdev_dbg(priv->dev, "get valid TX hw timestamp %llu\n", ns); in stmmac_get_tx_hwtstamp()
439 /* stmmac_get_rx_hwtstamp - get HW RX timestamps
455 if (!priv->hwts_rx_en) in stmmac_get_rx_hwtstamp()
458 if (priv->plat->has_gmac4 || priv->plat->has_xgmac) in stmmac_get_rx_hwtstamp()
462 if (stmmac_get_rx_timestamp_status(priv, p, np, priv->adv_ts)) { in stmmac_get_rx_hwtstamp()
463 stmmac_get_timestamp(priv, desc, priv->adv_ts, &ns); in stmmac_get_rx_hwtstamp()
464 netdev_dbg(priv->dev, "get valid RX hw timestamp %llu\n", ns); in stmmac_get_rx_hwtstamp()
467 shhwtstamp->hwtstamp = ns_to_ktime(ns); in stmmac_get_rx_hwtstamp()
469 netdev_dbg(priv->dev, "cannot get RX hw timestamp\n"); in stmmac_get_rx_hwtstamp()
474 * stmmac_hwtstamp_set - control hardware timestamping.
479 * This function configures the MAC to enable/disable both outgoing (TX)
482 * 0 on success and an appropriate -ve integer on failure.
502 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_hwtstamp_set()
504 if (!(priv->dma_cap.time_stamp || priv->adv_ts)) { in stmmac_hwtstamp_set()
505 netdev_alert(priv->dev, "No support for HW time stamping\n"); in stmmac_hwtstamp_set()
506 priv->hwts_tx_en = 0; in stmmac_hwtstamp_set()
507 priv->hwts_rx_en = 0; in stmmac_hwtstamp_set()
509 return -EOPNOTSUPP; in stmmac_hwtstamp_set()
512 if (copy_from_user(&config, ifr->ifr_data, in stmmac_hwtstamp_set()
514 return -EFAULT; in stmmac_hwtstamp_set()
516 netdev_dbg(priv->dev, "%s config flags:0x%x, tx_type:0x%x, rx_filter:0x%x\n", in stmmac_hwtstamp_set()
521 return -EINVAL; in stmmac_hwtstamp_set()
525 return -ERANGE; in stmmac_hwtstamp_set()
527 if (priv->adv_ts) { in stmmac_hwtstamp_set()
608 if (priv->synopsys_id != DWMAC_CORE_5_10) in stmmac_hwtstamp_set()
648 return -ERANGE; in stmmac_hwtstamp_set()
661 priv->hwts_rx_en = ((config.rx_filter == HWTSTAMP_FILTER_NONE) ? 0 : 1); in stmmac_hwtstamp_set()
662 priv->hwts_tx_en = config.tx_type == HWTSTAMP_TX_ON; in stmmac_hwtstamp_set()
664 if (!priv->hwts_tx_en && !priv->hwts_rx_en) in stmmac_hwtstamp_set()
665 stmmac_config_hw_tstamping(priv, priv->ptpaddr, 0); in stmmac_hwtstamp_set()
671 stmmac_config_hw_tstamping(priv, priv->ptpaddr, value); in stmmac_hwtstamp_set()
675 priv->ptpaddr, priv->plat->clk_ptp_rate, in stmmac_hwtstamp_set()
680 priv->sub_second_inc = sec_inc; in stmmac_hwtstamp_set()
681 priv->systime_flags = value; in stmmac_hwtstamp_set()
689 priv->default_addend = div_u64(temp, priv->plat->clk_ptp_rate); in stmmac_hwtstamp_set()
690 stmmac_config_addend(priv, priv->ptpaddr, priv->default_addend); in stmmac_hwtstamp_set()
696 stmmac_init_systime(priv, priv->ptpaddr, in stmmac_hwtstamp_set()
700 memcpy(&priv->tstamp_config, &config, sizeof(config)); in stmmac_hwtstamp_set()
702 return copy_to_user(ifr->ifr_data, &config, in stmmac_hwtstamp_set()
703 sizeof(config)) ? -EFAULT : 0; in stmmac_hwtstamp_set()
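stmmac_hwtstamp_set() follows the standard SIOCSHWTSTAMP round-trip: copy the user's hwtstamp_config in, program the hardware, copy the (possibly adjusted) config back out. A condensed sketch of the pattern, with the validation and PTP-block programming between the two copies elided:

	struct hwtstamp_config config;

	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
		return -EFAULT;

	/* ... validate config.tx_type / config.rx_filter and program
	 * the MAC timestamping block; reject unsupported filters ... */

	memcpy(&priv->tstamp_config, &config, sizeof(config));
	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
	       -EFAULT : 0;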
707 * stmmac_hwtstamp_get - read hardware timestamping.
718 struct hwtstamp_config *config = &priv->tstamp_config; in stmmac_hwtstamp_get()
720 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_hwtstamp_get()
721 return -EOPNOTSUPP; in stmmac_hwtstamp_get()
723 return copy_to_user(ifr->ifr_data, config, in stmmac_hwtstamp_get()
724 sizeof(*config)) ? -EFAULT : 0; in stmmac_hwtstamp_get()
728 * stmmac_init_ptp - init PTP
736 bool xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_init_ptp()
738 if (!(priv->dma_cap.time_stamp || priv->dma_cap.atime_stamp)) in stmmac_init_ptp()
739 return -EOPNOTSUPP; in stmmac_init_ptp()
741 priv->adv_ts = 0; in stmmac_init_ptp()
743 if (xmac && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
744 priv->adv_ts = 1; in stmmac_init_ptp()
746 else if (priv->extend_desc && priv->dma_cap.atime_stamp) in stmmac_init_ptp()
747 priv->adv_ts = 1; in stmmac_init_ptp()
749 if (priv->dma_cap.time_stamp) in stmmac_init_ptp()
750 netdev_info(priv->dev, "IEEE 1588-2002 Timestamp supported\n"); in stmmac_init_ptp()
752 if (priv->adv_ts) in stmmac_init_ptp()
753 netdev_info(priv->dev, in stmmac_init_ptp()
754 "IEEE 1588-2008 Advanced Timestamp supported\n"); in stmmac_init_ptp()
756 priv->hwts_tx_en = 0; in stmmac_init_ptp()
757 priv->hwts_rx_en = 0; in stmmac_init_ptp()
766 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_release_ptp()
771 * stmmac_mac_flow_ctrl - Configure flow control in all queues
778 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_mac_flow_ctrl()
780 stmmac_flow_ctrl(priv, priv->hw, duplex, priv->flow_ctrl, in stmmac_mac_flow_ctrl()
781 priv->pause, tx_cnt); in stmmac_mac_flow_ctrl()
788 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_validate()
791 int tx_cnt = priv->plat->tx_queues_to_use; in stmmac_validate()
792 int max_speed = priv->plat->max_speed; in stmmac_validate()
796 phylink_set(mac_supported, 100baseT_Half); in stmmac_validate()
797 phylink_set(mac_supported, 100baseT_Full); in stmmac_validate()
811 } else if (priv->plat->has_xgmac) { in stmmac_validate()
862 /* Half-Duplex can only work with single queue */ in stmmac_validate()
865 phylink_set(mask, 100baseT_Half); in stmmac_validate()
872 linkmode_and(state->advertising, state->advertising, mac_supported); in stmmac_validate()
873 linkmode_andnot(state->advertising, state->advertising, mask); in stmmac_validate()
876 stmmac_xpcs_validate(priv, &priv->hw->xpcs_args, supported, state); in stmmac_validate()
882 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_pcs_get_state()
884 state->link = 0; in stmmac_mac_pcs_get_state()
885 stmmac_xpcs_get_state(priv, &priv->hw->xpcs_args, state); in stmmac_mac_pcs_get_state()
891 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_config()
893 stmmac_xpcs_config(priv, &priv->hw->xpcs_args, state); in stmmac_mac_config()
904 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_down()
906 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_mac_link_down()
907 priv->eee_active = false; in stmmac_mac_link_down()
908 priv->tx_lpi_enabled = false; in stmmac_mac_link_down()
910 stmmac_set_eee_pls(priv, priv->hw, false); in stmmac_mac_link_down()
919 struct stmmac_priv *priv = netdev_priv(to_net_dev(config->dev)); in stmmac_mac_link_up()
922 stmmac_xpcs_link_up(priv, &priv->hw->xpcs_args, speed, interface); in stmmac_mac_link_up()
924 ctrl = readl(priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
925 ctrl &= ~priv->hw->link.speed_mask; in stmmac_mac_link_up()
930 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
933 ctrl |= priv->hw->link.xgmii.speed5000; in stmmac_mac_link_up()
936 ctrl |= priv->hw->link.xgmii.speed2500; in stmmac_mac_link_up()
944 ctrl |= priv->hw->link.xlgmii.speed100000; in stmmac_mac_link_up()
947 ctrl |= priv->hw->link.xlgmii.speed50000; in stmmac_mac_link_up()
950 ctrl |= priv->hw->link.xlgmii.speed40000; in stmmac_mac_link_up()
953 ctrl |= priv->hw->link.xlgmii.speed25000; in stmmac_mac_link_up()
956 ctrl |= priv->hw->link.xgmii.speed10000; in stmmac_mac_link_up()
959 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
962 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
970 ctrl |= priv->hw->link.speed2500; in stmmac_mac_link_up()
973 ctrl |= priv->hw->link.speed1000; in stmmac_mac_link_up()
976 ctrl |= priv->hw->link.speed100; in stmmac_mac_link_up()
979 ctrl |= priv->hw->link.speed10; in stmmac_mac_link_up()
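The case labels of the speed switch are not part of the match and were dropped. A reconstruction of the final (non-XGMII) ladder, with the SPEED_* labels assumed from the field names:

	switch (speed) {
	case SPEED_2500:
		ctrl |= priv->hw->link.speed2500;
		break;
	case SPEED_1000:
		ctrl |= priv->hw->link.speed1000;
		break;
	case SPEED_100:
		ctrl |= priv->hw->link.speed100;
		break;
	case SPEED_10:
		ctrl |= priv->hw->link.speed10;
		break;
	default:
		return;
	}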
986 priv->speed = speed; in stmmac_mac_link_up()
988 if (priv->plat->fix_mac_speed) in stmmac_mac_link_up()
989 priv->plat->fix_mac_speed(priv->plat->bsp_priv, speed); in stmmac_mac_link_up()
992 ctrl &= ~priv->hw->link.duplex; in stmmac_mac_link_up()
994 ctrl |= priv->hw->link.duplex; in stmmac_mac_link_up()
1000 writel(ctrl, priv->ioaddr + MAC_CTRL_REG); in stmmac_mac_link_up()
1002 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_mac_link_up()
1003 if (phy && priv->dma_cap.eee) { in stmmac_mac_link_up()
1004 priv->eee_active = phy_init_eee(phy, 1) >= 0; in stmmac_mac_link_up()
1005 priv->eee_enabled = stmmac_eee_init(priv); in stmmac_mac_link_up()
1006 priv->tx_lpi_enabled = priv->eee_enabled; in stmmac_mac_link_up()
1007 stmmac_set_eee_pls(priv, priv->hw, true); in stmmac_mac_link_up()
1021 * stmmac_check_pcs_mode - verify if RGMII/SGMII is supported
1029 int interface = priv->plat->interface; in stmmac_check_pcs_mode()
1031 if (priv->dma_cap.pcs) { in stmmac_check_pcs_mode()
1036 netdev_dbg(priv->dev, "PCS RGMII support enabled\n"); in stmmac_check_pcs_mode()
1037 priv->hw->pcs = STMMAC_PCS_RGMII; in stmmac_check_pcs_mode()
1039 netdev_dbg(priv->dev, "PCS SGMII support enabled\n"); in stmmac_check_pcs_mode()
1040 priv->hw->pcs = STMMAC_PCS_SGMII; in stmmac_check_pcs_mode()
1046 * stmmac_init_phy - PHY initialization
1060 node = priv->plat->phylink_node; in stmmac_init_phy()
1063 ret = phylink_of_phy_connect(priv->phylink, node, 0); in stmmac_init_phy()
1065 /* Some DT bindings do not set up the PHY handle. Let's try to in stmmac_init_phy()
1069 int addr = priv->plat->phy_addr; in stmmac_init_phy()
1072 phydev = mdiobus_get_phy(priv->mii, addr); in stmmac_init_phy()
1074 netdev_err(priv->dev, "no phy at addr %d\n", addr); in stmmac_init_phy()
1075 return -ENODEV; in stmmac_init_phy()
1078 ret = phylink_connect_phy(priv->phylink, phydev); in stmmac_init_phy()
1081 phylink_ethtool_get_wol(priv->phylink, &wol); in stmmac_init_phy()
1082 device_set_wakeup_capable(priv->device, !!wol.supported); in stmmac_init_phy()
1089 struct fwnode_handle *fwnode = of_fwnode_handle(priv->plat->phylink_node); in stmmac_phy_setup()
1090 int mode = priv->plat->phy_interface; in stmmac_phy_setup()
1093 priv->phylink_config.dev = &priv->dev->dev; in stmmac_phy_setup()
1094 priv->phylink_config.type = PHYLINK_NETDEV; in stmmac_phy_setup()
1095 priv->phylink_config.pcs_poll = true; in stmmac_phy_setup()
1098 fwnode = dev_fwnode(priv->device); in stmmac_phy_setup()
1100 phylink = phylink_create(&priv->phylink_config, fwnode, in stmmac_phy_setup()
1105 priv->phylink = phylink; in stmmac_phy_setup()
1111 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_display_rx_rings()
1117 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_display_rx_rings()
1121 if (priv->extend_desc) in stmmac_display_rx_rings()
1122 head_rx = (void *)rx_q->dma_erx; in stmmac_display_rx_rings()
1124 head_rx = (void *)rx_q->dma_rx; in stmmac_display_rx_rings()
1127 stmmac_display_ring(priv, head_rx, priv->dma_rx_size, true); in stmmac_display_rx_rings()
1133 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_display_tx_rings()
1137 /* Display TX rings */ in stmmac_display_tx_rings()
1139 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_display_tx_rings()
1143 if (priv->extend_desc) in stmmac_display_tx_rings()
1144 head_tx = (void *)tx_q->dma_etx; in stmmac_display_tx_rings()
1145 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_display_tx_rings()
1146 head_tx = (void *)tx_q->dma_entx; in stmmac_display_tx_rings()
1148 head_tx = (void *)tx_q->dma_tx; in stmmac_display_tx_rings()
1150 stmmac_display_ring(priv, head_tx, priv->dma_tx_size, false); in stmmac_display_tx_rings()
1159 /* Display TX ring */ in stmmac_display_rings()
1182 * stmmac_clear_rx_descriptors - clear RX descriptors
1190 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_clear_rx_descriptors()
1194 for (i = 0; i < priv->dma_rx_size; i++) in stmmac_clear_rx_descriptors()
1195 if (priv->extend_desc) in stmmac_clear_rx_descriptors()
1196 stmmac_init_rx_desc(priv, &rx_q->dma_erx[i].basic, in stmmac_clear_rx_descriptors()
1197 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1198 (i == priv->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1199 priv->dma_buf_sz); in stmmac_clear_rx_descriptors()
1201 stmmac_init_rx_desc(priv, &rx_q->dma_rx[i], in stmmac_clear_rx_descriptors()
1202 priv->use_riwt, priv->mode, in stmmac_clear_rx_descriptors()
1203 (i == priv->dma_rx_size - 1), in stmmac_clear_rx_descriptors()
1204 priv->dma_buf_sz); in stmmac_clear_rx_descriptors()
1208 * stmmac_clear_tx_descriptors - clear tx descriptors
1210 * @queue: TX queue index.
1211 * Description: this function is called to clear the TX descriptors
1216 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_clear_tx_descriptors()
1219 /* Clear the TX descriptors */ in stmmac_clear_tx_descriptors()
1220 for (i = 0; i < priv->dma_tx_size; i++) { in stmmac_clear_tx_descriptors()
1221 int last = (i == (priv->dma_tx_size - 1)); in stmmac_clear_tx_descriptors()
1224 if (priv->extend_desc) in stmmac_clear_tx_descriptors()
1225 p = &tx_q->dma_etx[i].basic; in stmmac_clear_tx_descriptors()
1226 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_clear_tx_descriptors()
1227 p = &tx_q->dma_entx[i].basic; in stmmac_clear_tx_descriptors()
1229 p = &tx_q->dma_tx[i]; in stmmac_clear_tx_descriptors()
1231 stmmac_init_tx_desc(priv, p, priv->mode, last); in stmmac_clear_tx_descriptors()
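The three-way pick above recurs throughout the TX path: the extended and TBS ("enhanced") layouts both embed a plain struct dma_desc (the basic member), so after the selection the rest of the code operates on p uniformly. The idiom, isolated:

	struct dma_desc *p;

	if (priv->extend_desc)			/* extended (timestamp) layout */
		p = &tx_q->dma_etx[i].basic;
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)	/* time-based-scheduling layout */
		p = &tx_q->dma_entx[i].basic;
	else					/* plain descriptor ring */
		p = &tx_q->dma_tx[i];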
1236 * stmmac_clear_descriptors - clear descriptors
1238 * Description: this function is called to clear the TX and RX descriptors
1243 u32 rx_queue_cnt = priv->plat->rx_queues_to_use; in stmmac_clear_descriptors()
1244 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_clear_descriptors()
1251 /* Clear the TX descriptors */ in stmmac_clear_descriptors()
1257 * stmmac_init_rx_buffers - init the RX descriptor buffer.
1269 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_init_rx_buffers()
1270 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_init_rx_buffers()
1272 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); in stmmac_init_rx_buffers()
1273 if (!buf->page) in stmmac_init_rx_buffers()
1274 return -ENOMEM; in stmmac_init_rx_buffers()
1276 if (priv->sph) { in stmmac_init_rx_buffers()
1277 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); in stmmac_init_rx_buffers()
1278 if (!buf->sec_page) in stmmac_init_rx_buffers()
1279 return -ENOMEM; in stmmac_init_rx_buffers()
1281 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_init_rx_buffers()
1282 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr); in stmmac_init_rx_buffers()
1284 buf->sec_page = NULL; in stmmac_init_rx_buffers()
1287 buf->addr = page_pool_get_dma_addr(buf->page); in stmmac_init_rx_buffers()
1288 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_init_rx_buffers()
1289 if (priv->dma_buf_sz == BUF_SIZE_16KiB) in stmmac_init_rx_buffers()
1296 * stmmac_free_rx_buffer - free RX dma buffers
1303 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_free_rx_buffer()
1304 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[i]; in stmmac_free_rx_buffer()
1306 if (buf->page) in stmmac_free_rx_buffer()
1307 page_pool_put_full_page(rx_q->page_pool, buf->page, false); in stmmac_free_rx_buffer()
1308 buf->page = NULL; in stmmac_free_rx_buffer()
1310 if (buf->sec_page) in stmmac_free_rx_buffer()
1311 page_pool_put_full_page(rx_q->page_pool, buf->sec_page, false); in stmmac_free_rx_buffer()
1312 buf->sec_page = NULL; in stmmac_free_rx_buffer()
1316 * stmmac_free_tx_buffer - free TX dma buffers
1323 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_free_tx_buffer()
1325 if (tx_q->tx_skbuff_dma[i].buf) { in stmmac_free_tx_buffer()
1326 if (tx_q->tx_skbuff_dma[i].map_as_page) in stmmac_free_tx_buffer()
1327 dma_unmap_page(priv->device, in stmmac_free_tx_buffer()
1328 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1329 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1332 dma_unmap_single(priv->device, in stmmac_free_tx_buffer()
1333 tx_q->tx_skbuff_dma[i].buf, in stmmac_free_tx_buffer()
1334 tx_q->tx_skbuff_dma[i].len, in stmmac_free_tx_buffer()
1338 if (tx_q->tx_skbuff[i]) { in stmmac_free_tx_buffer()
1339 dev_kfree_skb_any(tx_q->tx_skbuff[i]); in stmmac_free_tx_buffer()
1340 tx_q->tx_skbuff[i] = NULL; in stmmac_free_tx_buffer()
1341 tx_q->tx_skbuff_dma[i].buf = 0; in stmmac_free_tx_buffer()
1342 tx_q->tx_skbuff_dma[i].map_as_page = false; in stmmac_free_tx_buffer()
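One detail worth noting in the teardown above: map_as_page records whether the buffer was created with dma_map_page() or dma_map_single(), and the matching unmap call must be used. The direction argument is elided by the matcher; for a TX buffer it is presumably DMA_TO_DEVICE:

	if (tx_q->tx_skbuff_dma[i].map_as_page)
		dma_unmap_page(priv->device, tx_q->tx_skbuff_dma[i].buf,
			       tx_q->tx_skbuff_dma[i].len, DMA_TO_DEVICE);
	else
		dma_unmap_single(priv->device, tx_q->tx_skbuff_dma[i].buf,
				 tx_q->tx_skbuff_dma[i].len, DMA_TO_DEVICE);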
1347 * init_dma_rx_desc_rings - init the RX descriptor rings
1357 u32 rx_count = priv->plat->rx_queues_to_use; in init_dma_rx_desc_rings()
1358 int ret = -ENOMEM; in init_dma_rx_desc_rings()
1363 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1367 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in init_dma_rx_desc_rings()
1369 netif_dbg(priv, probe, priv->dev, in init_dma_rx_desc_rings()
1371 (u32)rx_q->dma_rx_phy); in init_dma_rx_desc_rings()
1375 for (i = 0; i < priv->dma_rx_size; i++) { in init_dma_rx_desc_rings()
1378 if (priv->extend_desc) in init_dma_rx_desc_rings()
1379 p = &((rx_q->dma_erx + i)->basic); in init_dma_rx_desc_rings()
1381 p = rx_q->dma_rx + i; in init_dma_rx_desc_rings()
1389 rx_q->cur_rx = 0; in init_dma_rx_desc_rings()
1390 rx_q->dirty_rx = (unsigned int)(i - priv->dma_rx_size); in init_dma_rx_desc_rings()
1393 if (priv->mode == STMMAC_CHAIN_MODE) { in init_dma_rx_desc_rings()
1394 if (priv->extend_desc) in init_dma_rx_desc_rings()
1395 stmmac_mode_init(priv, rx_q->dma_erx, in init_dma_rx_desc_rings()
1396 rx_q->dma_rx_phy, in init_dma_rx_desc_rings()
1397 priv->dma_rx_size, 1); in init_dma_rx_desc_rings()
1399 stmmac_mode_init(priv, rx_q->dma_rx, in init_dma_rx_desc_rings()
1400 rx_q->dma_rx_phy, in init_dma_rx_desc_rings()
1401 priv->dma_rx_size, 0); in init_dma_rx_desc_rings()
1409 while (--i >= 0) in init_dma_rx_desc_rings()
1415 i = priv->dma_rx_size; in init_dma_rx_desc_rings()
1416 queue--; in init_dma_rx_desc_rings()
1423 * init_dma_tx_desc_rings - init the TX descriptor rings
1425 * Description: this function initializes the DMA TX descriptors
1432 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in init_dma_tx_desc_rings()
1437 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in init_dma_tx_desc_rings()
1439 netif_dbg(priv, probe, priv->dev, in init_dma_tx_desc_rings()
1441 (u32)tx_q->dma_tx_phy); in init_dma_tx_desc_rings()
1444 if (priv->mode == STMMAC_CHAIN_MODE) { in init_dma_tx_desc_rings()
1445 if (priv->extend_desc) in init_dma_tx_desc_rings()
1446 stmmac_mode_init(priv, tx_q->dma_etx, in init_dma_tx_desc_rings()
1447 tx_q->dma_tx_phy, in init_dma_tx_desc_rings()
1448 priv->dma_tx_size, 1); in init_dma_tx_desc_rings()
1449 else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) in init_dma_tx_desc_rings()
1450 stmmac_mode_init(priv, tx_q->dma_tx, in init_dma_tx_desc_rings()
1451 tx_q->dma_tx_phy, in init_dma_tx_desc_rings()
1452 priv->dma_tx_size, 0); in init_dma_tx_desc_rings()
1455 for (i = 0; i < priv->dma_tx_size; i++) { in init_dma_tx_desc_rings()
1457 if (priv->extend_desc) in init_dma_tx_desc_rings()
1458 p = &((tx_q->dma_etx + i)->basic); in init_dma_tx_desc_rings()
1459 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in init_dma_tx_desc_rings()
1460 p = &((tx_q->dma_entx + i)->basic); in init_dma_tx_desc_rings()
1462 p = tx_q->dma_tx + i; in init_dma_tx_desc_rings()
1466 tx_q->tx_skbuff_dma[i].buf = 0; in init_dma_tx_desc_rings()
1467 tx_q->tx_skbuff_dma[i].map_as_page = false; in init_dma_tx_desc_rings()
1468 tx_q->tx_skbuff_dma[i].len = 0; in init_dma_tx_desc_rings()
1469 tx_q->tx_skbuff_dma[i].last_segment = false; in init_dma_tx_desc_rings()
1470 tx_q->tx_skbuff[i] = NULL; in init_dma_tx_desc_rings()
1473 tx_q->dirty_tx = 0; in init_dma_tx_desc_rings()
1474 tx_q->cur_tx = 0; in init_dma_tx_desc_rings()
1475 tx_q->mss = 0; in init_dma_tx_desc_rings()
1477 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, queue)); in init_dma_tx_desc_rings()
1484 * init_dma_desc_rings - init the RX/TX descriptor rings
1487 * Description: this function initializes the DMA RX/TX descriptors
1511 * dma_free_rx_skbufs - free RX dma buffers
1519 for (i = 0; i < priv->dma_rx_size; i++) in dma_free_rx_skbufs()
1524 * dma_free_tx_skbufs - free TX dma buffers
1526 * @queue: TX queue index
1532 for (i = 0; i < priv->dma_tx_size; i++) in dma_free_tx_skbufs()
1537 * stmmac_free_tx_skbufs - free TX skb buffers
1542 u32 tx_queue_cnt = priv->plat->tx_queues_to_use; in stmmac_free_tx_skbufs()
1550 * free_dma_rx_desc_resources - free RX dma desc resources
1555 u32 rx_count = priv->plat->rx_queues_to_use; in free_dma_rx_desc_resources()
1560 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in free_dma_rx_desc_resources()
1566 if (!priv->extend_desc) in free_dma_rx_desc_resources()
1567 dma_free_coherent(priv->device, priv->dma_rx_size * in free_dma_rx_desc_resources()
1569 rx_q->dma_rx, rx_q->dma_rx_phy); in free_dma_rx_desc_resources()
1571 dma_free_coherent(priv->device, priv->dma_rx_size * in free_dma_rx_desc_resources()
1573 rx_q->dma_erx, rx_q->dma_rx_phy); in free_dma_rx_desc_resources()
1575 kfree(rx_q->buf_pool); in free_dma_rx_desc_resources()
1576 if (rx_q->page_pool) in free_dma_rx_desc_resources()
1577 page_pool_destroy(rx_q->page_pool); in free_dma_rx_desc_resources()
1582 * free_dma_tx_desc_resources - free TX dma desc resources
1587 u32 tx_count = priv->plat->tx_queues_to_use; in free_dma_tx_desc_resources()
1590 /* Free TX queue resources */ in free_dma_tx_desc_resources()
1592 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in free_dma_tx_desc_resources()
1596 /* Release the DMA TX socket buffers */ in free_dma_tx_desc_resources()
1599 if (priv->extend_desc) { in free_dma_tx_desc_resources()
1601 addr = tx_q->dma_etx; in free_dma_tx_desc_resources()
1602 } else if (tx_q->tbs & STMMAC_TBS_AVAIL) { in free_dma_tx_desc_resources()
1604 addr = tx_q->dma_entx; in free_dma_tx_desc_resources()
1607 addr = tx_q->dma_tx; in free_dma_tx_desc_resources()
1610 size *= priv->dma_tx_size; in free_dma_tx_desc_resources()
1612 dma_free_coherent(priv->device, size, addr, tx_q->dma_tx_phy); in free_dma_tx_desc_resources()
1614 kfree(tx_q->tx_skbuff_dma); in free_dma_tx_desc_resources()
1615 kfree(tx_q->tx_skbuff); in free_dma_tx_desc_resources()
1620 * alloc_dma_rx_desc_resources - alloc RX resources.
1623 * this function allocates the resources for the RX path. For
1624 * reception it pre-allocates the RX socket buffers in order to
1625 * allow the zero-copy mechanism.
1629 u32 rx_count = priv->plat->rx_queues_to_use; in alloc_dma_rx_desc_resources()
1630 int ret = -ENOMEM; in alloc_dma_rx_desc_resources()
1635 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in alloc_dma_rx_desc_resources()
1639 rx_q->queue_index = queue; in alloc_dma_rx_desc_resources()
1640 rx_q->priv_data = priv; in alloc_dma_rx_desc_resources()
1643 pp_params.pool_size = priv->dma_rx_size; in alloc_dma_rx_desc_resources()
1644 num_pages = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE); in alloc_dma_rx_desc_resources()
1646 pp_params.nid = dev_to_node(priv->device); in alloc_dma_rx_desc_resources()
1647 pp_params.dev = priv->device; in alloc_dma_rx_desc_resources()
1650 rx_q->page_pool = page_pool_create(&pp_params); in alloc_dma_rx_desc_resources()
1651 if (IS_ERR(rx_q->page_pool)) { in alloc_dma_rx_desc_resources()
1652 ret = PTR_ERR(rx_q->page_pool); in alloc_dma_rx_desc_resources()
1653 rx_q->page_pool = NULL; in alloc_dma_rx_desc_resources()
1657 rx_q->buf_pool = kcalloc(priv->dma_rx_size, in alloc_dma_rx_desc_resources()
1658 sizeof(*rx_q->buf_pool), in alloc_dma_rx_desc_resources()
1660 if (!rx_q->buf_pool) in alloc_dma_rx_desc_resources()
1663 if (priv->extend_desc) { in alloc_dma_rx_desc_resources()
1664 rx_q->dma_erx = dma_alloc_coherent(priv->device, in alloc_dma_rx_desc_resources()
1665 priv->dma_rx_size * in alloc_dma_rx_desc_resources()
1667 &rx_q->dma_rx_phy, in alloc_dma_rx_desc_resources()
1669 if (!rx_q->dma_erx) in alloc_dma_rx_desc_resources()
1673 rx_q->dma_rx = dma_alloc_coherent(priv->device, in alloc_dma_rx_desc_resources()
1674 priv->dma_rx_size * in alloc_dma_rx_desc_resources()
1676 &rx_q->dma_rx_phy, in alloc_dma_rx_desc_resources()
1678 if (!rx_q->dma_rx) in alloc_dma_rx_desc_resources()
1692 * alloc_dma_tx_desc_resources - alloc TX resources.
1695 * this function allocates the resources for the TX path: the
1696 * per-queue skbuff bookkeeping arrays and the DMA-coherent
1697 * descriptor rings.
1701 u32 tx_count = priv->plat->tx_queues_to_use; in alloc_dma_tx_desc_resources()
1702 int ret = -ENOMEM; in alloc_dma_tx_desc_resources()
1705 /* TX queues buffers and DMA */ in alloc_dma_tx_desc_resources()
1707 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in alloc_dma_tx_desc_resources()
1711 tx_q->queue_index = queue; in alloc_dma_tx_desc_resources()
1712 tx_q->priv_data = priv; in alloc_dma_tx_desc_resources()
1714 tx_q->tx_skbuff_dma = kcalloc(priv->dma_tx_size, in alloc_dma_tx_desc_resources()
1715 sizeof(*tx_q->tx_skbuff_dma), in alloc_dma_tx_desc_resources()
1717 if (!tx_q->tx_skbuff_dma) in alloc_dma_tx_desc_resources()
1720 tx_q->tx_skbuff = kcalloc(priv->dma_tx_size, in alloc_dma_tx_desc_resources()
1723 if (!tx_q->tx_skbuff) in alloc_dma_tx_desc_resources()
1726 if (priv->extend_desc) in alloc_dma_tx_desc_resources()
1728 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in alloc_dma_tx_desc_resources()
1733 size *= priv->dma_tx_size; in alloc_dma_tx_desc_resources()
1735 addr = dma_alloc_coherent(priv->device, size, in alloc_dma_tx_desc_resources()
1736 &tx_q->dma_tx_phy, GFP_KERNEL); in alloc_dma_tx_desc_resources()
1740 if (priv->extend_desc) in alloc_dma_tx_desc_resources()
1741 tx_q->dma_etx = addr; in alloc_dma_tx_desc_resources()
1742 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in alloc_dma_tx_desc_resources()
1743 tx_q->dma_entx = addr; in alloc_dma_tx_desc_resources()
1745 tx_q->dma_tx = addr; in alloc_dma_tx_desc_resources()
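The per-descriptor size assignments feeding the dma_alloc_coherent() call are elided. A reconstruction, under the assumption that the three ring layouts map to the usual stmmac descriptor types:

	size_t size;

	if (priv->extend_desc)
		size = sizeof(struct dma_extended_desc);
	else if (tx_q->tbs & STMMAC_TBS_AVAIL)
		size = sizeof(struct dma_edesc);
	else
		size = sizeof(struct dma_desc);

	size *= priv->dma_tx_size;	/* bytes for the whole ring */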
1756 * alloc_dma_desc_resources - alloc TX/RX resources.
1759 * this function allocates the resources for TX and RX paths. In case of
1760 * reception, for example, it pre-allocates the RX socket buffers in order to
1761 * allow the zero-copy mechanism.
1777 * free_dma_desc_resources - free dma desc resources
1785 /* Release the DMA TX socket buffers */ in free_dma_desc_resources()
1790 * stmmac_mac_enable_rx_queues - Enable MAC rx queues
1796 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_enable_rx_queues()
1801 mode = priv->plat->rx_queues_cfg[queue].mode_to_use; in stmmac_mac_enable_rx_queues()
1802 stmmac_rx_queue_enable(priv, priv->hw, mode, queue); in stmmac_mac_enable_rx_queues()
1807 * stmmac_start_rx_dma - start RX DMA channel
1815 netdev_dbg(priv->dev, "DMA RX processes started in channel %d\n", chan); in stmmac_start_rx_dma()
1816 stmmac_start_rx(priv, priv->ioaddr, chan); in stmmac_start_rx_dma()
1820 * stmmac_start_tx_dma - start TX DMA channel
1822 * @chan: TX channel index
1824 * This starts a TX DMA channel
1828 netdev_dbg(priv->dev, "DMA TX processes started in channel %d\n", chan); in stmmac_start_tx_dma()
1829 stmmac_start_tx(priv, priv->ioaddr, chan); in stmmac_start_tx_dma()
1833 * stmmac_stop_rx_dma - stop RX DMA channel
1841 netdev_dbg(priv->dev, "DMA RX processes stopped in channel %d\n", chan); in stmmac_stop_rx_dma()
1842 stmmac_stop_rx(priv, priv->ioaddr, chan); in stmmac_stop_rx_dma()
1846 * stmmac_stop_tx_dma - stop TX DMA channel
1848 * @chan: TX channel index
1850 * This stops a TX DMA channel
1854 netdev_dbg(priv->dev, "DMA TX processes stopped in channel %d\n", chan); in stmmac_stop_tx_dma()
1855 stmmac_stop_tx(priv, priv->ioaddr, chan); in stmmac_stop_tx_dma()
1859 * stmmac_start_all_dma - start all RX and TX DMA channels
1862 * This starts all the RX and TX DMA channels
1866 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_start_all_dma()
1867 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_start_all_dma()
1878 * stmmac_stop_all_dma - stop all RX and TX DMA channels
1881 * This stops the RX and TX DMA channels
1885 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_stop_all_dma()
1886 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_stop_all_dma()
1897 * stmmac_dma_operation_mode - HW DMA operation mode
1900 * order to program the tx/rx DMA thresholds or Store-And-Forward mode.
1904 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_dma_operation_mode()
1905 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_dma_operation_mode()
1906 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_dma_operation_mode()
1907 int txfifosz = priv->plat->tx_fifo_size; in stmmac_dma_operation_mode()
1914 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_dma_operation_mode()
1916 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_dma_operation_mode()
1922 if (priv->plat->force_thresh_dma_mode) { in stmmac_dma_operation_mode()
1925 } else if (priv->plat->force_sf_dma_mode || priv->plat->tx_coe) { in stmmac_dma_operation_mode()
1928 * to perform the TX COE in HW. This depends on: in stmmac_dma_operation_mode()
1929 * 1) TX COE if actually supported in stmmac_dma_operation_mode()
1935 priv->xstats.threshold = SF_DMA_MODE; in stmmac_dma_operation_mode()
1943 qmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
1945 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, in stmmac_dma_operation_mode()
1947 stmmac_set_dma_bfsize(priv, priv->ioaddr, priv->dma_buf_sz, in stmmac_dma_operation_mode()
1952 qmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_dma_operation_mode()
1954 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, in stmmac_dma_operation_mode()
1960 * stmmac_tx_clean - manage the transmission completion
1963 * @queue: TX queue index
1968 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_tx_clean()
1972 __netif_tx_lock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
1974 priv->xstats.tx_clean++; in stmmac_tx_clean()
1976 entry = tx_q->dirty_tx; in stmmac_tx_clean()
1977 while ((entry != tx_q->cur_tx) && (count < budget)) { in stmmac_tx_clean()
1978 struct sk_buff *skb = tx_q->tx_skbuff[entry]; in stmmac_tx_clean()
1982 if (priv->extend_desc) in stmmac_tx_clean()
1983 p = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_tx_clean()
1984 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tx_clean()
1985 p = &tx_q->dma_entx[entry].basic; in stmmac_tx_clean()
1987 p = tx_q->dma_tx + entry; in stmmac_tx_clean()
1989 status = stmmac_tx_status(priv, &priv->dev->stats, in stmmac_tx_clean()
1990 &priv->xstats, p, priv->ioaddr); in stmmac_tx_clean()
2006 priv->dev->stats.tx_errors++; in stmmac_tx_clean()
2008 priv->dev->stats.tx_packets++; in stmmac_tx_clean()
2009 priv->xstats.tx_pkt_n++; in stmmac_tx_clean()
2014 if (likely(tx_q->tx_skbuff_dma[entry].buf)) { in stmmac_tx_clean()
2015 if (tx_q->tx_skbuff_dma[entry].map_as_page) in stmmac_tx_clean()
2016 dma_unmap_page(priv->device, in stmmac_tx_clean()
2017 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2018 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2021 dma_unmap_single(priv->device, in stmmac_tx_clean()
2022 tx_q->tx_skbuff_dma[entry].buf, in stmmac_tx_clean()
2023 tx_q->tx_skbuff_dma[entry].len, in stmmac_tx_clean()
2025 tx_q->tx_skbuff_dma[entry].buf = 0; in stmmac_tx_clean()
2026 tx_q->tx_skbuff_dma[entry].len = 0; in stmmac_tx_clean()
2027 tx_q->tx_skbuff_dma[entry].map_as_page = false; in stmmac_tx_clean()
2032 tx_q->tx_skbuff_dma[entry].last_segment = false; in stmmac_tx_clean()
2033 tx_q->tx_skbuff_dma[entry].is_jumbo = false; in stmmac_tx_clean()
2037 bytes_compl += skb->len; in stmmac_tx_clean()
2039 tx_q->tx_skbuff[entry] = NULL; in stmmac_tx_clean()
2042 stmmac_release_tx_desc(priv, p, priv->mode); in stmmac_tx_clean()
2044 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); in stmmac_tx_clean()
2046 tx_q->dirty_tx = entry; in stmmac_tx_clean()
2048 netdev_tx_completed_queue(netdev_get_tx_queue(priv->dev, queue), in stmmac_tx_clean()
2051 if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, in stmmac_tx_clean()
2055 netif_dbg(priv, tx_done, priv->dev, in stmmac_tx_clean()
2057 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
2060 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { in stmmac_tx_clean()
2062 mod_timer(&priv->eee_ctrl_timer, STMMAC_LPI_T(priv->tx_lpi_timer)); in stmmac_tx_clean()
2066 if (tx_q->dirty_tx != tx_q->cur_tx) in stmmac_tx_clean()
2067 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); in stmmac_tx_clean()
2069 __netif_tx_unlock_bh(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tx_clean()
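The wake-up condition in the middle of stmmac_tx_clean() is only partially shown. A reconstruction, assuming the quarter-ring hysteresis from the STMMAC_TX_THRESH() macro shown near the top of the file guards the restart:

	if (unlikely(netif_tx_queue_stopped(netdev_get_tx_queue(priv->dev, queue))) &&
	    stmmac_tx_avail(priv, queue) > STMMAC_TX_THRESH(priv)) {
		netif_dbg(priv, tx_done, priv->dev,
			  "%s: restart transmit\n", __func__);
		netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, queue));
	}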
2075 * stmmac_tx_err - manage the TX error
2083 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; in stmmac_tx_err()
2085 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2090 tx_q->dirty_tx = 0; in stmmac_tx_err()
2091 tx_q->cur_tx = 0; in stmmac_tx_err()
2092 tx_q->mss = 0; in stmmac_tx_err()
2093 netdev_tx_reset_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2094 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_tx_err()
2095 tx_q->dma_tx_phy, chan); in stmmac_tx_err()
2098 priv->dev->stats.tx_errors++; in stmmac_tx_err()
2099 netif_tx_wake_queue(netdev_get_tx_queue(priv->dev, chan)); in stmmac_tx_err()
2103 * stmmac_set_dma_operation_mode - Set DMA operation mode by channel
2105 * @txmode: TX operating mode
2109 * runtime in order to program the tx/rx DMA thresholds or Store-And-Forward
2115 u8 rxqmode = priv->plat->rx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2116 u8 txqmode = priv->plat->tx_queues_cfg[chan].mode_to_use; in stmmac_set_dma_operation_mode()
2117 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_dma_operation_mode()
2118 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_dma_operation_mode()
2119 int rxfifosz = priv->plat->rx_fifo_size; in stmmac_set_dma_operation_mode()
2120 int txfifosz = priv->plat->tx_fifo_size; in stmmac_set_dma_operation_mode()
2123 rxfifosz = priv->dma_cap.rx_fifo_size; in stmmac_set_dma_operation_mode()
2125 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_set_dma_operation_mode()
2131 stmmac_dma_rx_mode(priv, priv->ioaddr, rxmode, chan, rxfifosz, rxqmode); in stmmac_set_dma_operation_mode()
2132 stmmac_dma_tx_mode(priv, priv->ioaddr, txmode, chan, txfifosz, txqmode); in stmmac_set_dma_operation_mode()
2139 ret = stmmac_safety_feat_irq_status(priv, priv->dev, in stmmac_safety_feat_interrupt()
2140 priv->ioaddr, priv->dma_cap.asp, &priv->sstats); in stmmac_safety_feat_interrupt()
2141 if (ret && (ret != -EINVAL)) { in stmmac_safety_feat_interrupt()
2151 int status = stmmac_dma_interrupt_status(priv, priv->ioaddr, in stmmac_napi_check()
2152 &priv->xstats, chan); in stmmac_napi_check()
2153 struct stmmac_channel *ch = &priv->channel[chan]; in stmmac_napi_check()
2156 if ((status & handle_rx) && (chan < priv->plat->rx_queues_to_use)) { in stmmac_napi_check()
2157 if (napi_schedule_prep(&ch->rx_napi)) { in stmmac_napi_check()
2158 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2159 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_check()
2160 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2161 __napi_schedule_irqoff(&ch->rx_napi); in stmmac_napi_check()
2165 if ((status & handle_tx) && (chan < priv->plat->tx_queues_to_use)) { in stmmac_napi_check()
2166 if (napi_schedule_prep(&ch->tx_napi)) { in stmmac_napi_check()
2167 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_check()
2168 stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_check()
2169 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_check()
2170 __napi_schedule_irqoff(&ch->tx_napi); in stmmac_napi_check()
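Both branches use the same mask-then-schedule idiom: the per-channel DMA interrupt is disabled under the channel lock before NAPI is scheduled, so no further interrupts fire until the poll routine re-enables them. The RX direction, isolated (the trailing 1, 0 arguments select which direction to mask):

	if (napi_schedule_prep(&ch->rx_napi)) {
		spin_lock_irqsave(&ch->lock, flags);
		stmmac_disable_dma_irq(priv, priv->ioaddr, chan, 1, 0); /* mask RX */
		spin_unlock_irqrestore(&ch->lock, flags);
		__napi_schedule_irqoff(&ch->rx_napi);
	}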
2178 * stmmac_dma_interrupt - DMA ISR
2186 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_dma_interrupt()
2187 u32 rx_channel_count = priv->plat->rx_queues_to_use; in stmmac_dma_interrupt()
2203 if (unlikely(priv->xstats.threshold != SF_DMA_MODE) && in stmmac_dma_interrupt()
2206 if (priv->plat->force_thresh_dma_mode) in stmmac_dma_interrupt()
2216 priv->xstats.threshold = tc; in stmmac_dma_interrupt()
2234 stmmac_mmc_intr_all_mask(priv, priv->mmcaddr); in stmmac_mmc_setup()
2236 if (priv->dma_cap.rmon) { in stmmac_mmc_setup()
2237 stmmac_mmc_ctrl(priv, priv->mmcaddr, mode); in stmmac_mmc_setup()
2238 memset(&priv->mmc, 0, sizeof(struct stmmac_counters)); in stmmac_mmc_setup()
2240 netdev_info(priv->dev, "No MAC Management Counters available\n"); in stmmac_mmc_setup()
2244 * stmmac_get_hw_features - get MAC capabilities from the HW cap. register.
2250 * platform and necessary for old MAC10/100 and GMAC chips.
2254 return stmmac_get_hw_feature(priv, priv->ioaddr, &priv->dma_cap) == 0; in stmmac_get_hw_features()
2258 * stmmac_check_ether_addr - check if the MAC addr is valid
2266 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in stmmac_check_ether_addr()
2267 stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0); in stmmac_check_ether_addr()
2268 if (!is_valid_ether_addr(priv->dev->dev_addr)) in stmmac_check_ether_addr()
2269 eth_hw_addr_random(priv->dev); in stmmac_check_ether_addr()
2270 dev_info(priv->device, "device MAC address %pM\n", in stmmac_check_ether_addr()
2271 priv->dev->dev_addr); in stmmac_check_ether_addr()
2276 * stmmac_init_dma_engine - DMA init.
2285 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_init_dma_engine()
2286 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_init_dma_engine()
2294 if (!priv->plat->dma_cfg || !priv->plat->dma_cfg->pbl) { in stmmac_init_dma_engine()
2295 dev_err(priv->device, "Invalid DMA configuration\n"); in stmmac_init_dma_engine()
2296 return -EINVAL; in stmmac_init_dma_engine()
2299 if (priv->extend_desc && (priv->mode == STMMAC_RING_MODE)) in stmmac_init_dma_engine()
2302 ret = stmmac_reset(priv, priv->ioaddr); in stmmac_init_dma_engine()
2304 dev_err(priv->device, "Failed to reset the dma\n"); in stmmac_init_dma_engine()
2309 stmmac_dma_init(priv, priv->ioaddr, priv->plat->dma_cfg, atds); in stmmac_init_dma_engine()
2311 if (priv->plat->axi) in stmmac_init_dma_engine()
2312 stmmac_axi(priv, priv->ioaddr, priv->plat->axi); in stmmac_init_dma_engine()
2316 stmmac_init_chan(priv, priv->ioaddr, priv->plat->dma_cfg, chan); in stmmac_init_dma_engine()
2320 rx_q = &priv->rx_queue[chan]; in stmmac_init_dma_engine()
2322 stmmac_init_rx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
2323 rx_q->dma_rx_phy, chan); in stmmac_init_dma_engine()
2325 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_init_dma_engine()
2326 (priv->dma_rx_size * in stmmac_init_dma_engine()
2328 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
2329 rx_q->rx_tail_addr, chan); in stmmac_init_dma_engine()
2332 /* DMA TX Channel Configuration */ in stmmac_init_dma_engine()
2334 tx_q = &priv->tx_queue[chan]; in stmmac_init_dma_engine()
2336 stmmac_init_tx_chan(priv, priv->ioaddr, priv->plat->dma_cfg, in stmmac_init_dma_engine()
2337 tx_q->dma_tx_phy, chan); in stmmac_init_dma_engine()
2339 tx_q->tx_tail_addr = tx_q->dma_tx_phy; in stmmac_init_dma_engine()
2340 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, in stmmac_init_dma_engine()
2341 tx_q->tx_tail_addr, chan); in stmmac_init_dma_engine()
2349 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_tx_timer_arm()
2351 mod_timer(&tx_q->txtimer, STMMAC_COAL_TIMER(priv->tx_coal_timer)); in stmmac_tx_timer_arm()
2355 * stmmac_tx_timer - mitigation sw timer for tx.
2363 struct stmmac_priv *priv = tx_q->priv_data; in stmmac_tx_timer()
2366 ch = &priv->channel[tx_q->queue_index]; in stmmac_tx_timer()
2368 if (likely(napi_schedule_prep(&ch->tx_napi))) { in stmmac_tx_timer()
2371 spin_lock_irqsave(&ch->lock, flags); in stmmac_tx_timer()
2372 stmmac_disable_dma_irq(priv, priv->ioaddr, ch->index, 0, 1); in stmmac_tx_timer()
2373 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_tx_timer()
2374 __napi_schedule(&ch->tx_napi); in stmmac_tx_timer()
2379 * stmmac_init_coalesce - init mitigation options.
2388 u32 tx_channel_count = priv->plat->tx_queues_to_use; in stmmac_init_coalesce()
2391 priv->tx_coal_frames = STMMAC_TX_FRAMES; in stmmac_init_coalesce()
2392 priv->tx_coal_timer = STMMAC_COAL_TX_TIMER; in stmmac_init_coalesce()
2393 priv->rx_coal_frames = STMMAC_RX_FRAMES; in stmmac_init_coalesce()
2396 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; in stmmac_init_coalesce()
2398 timer_setup(&tx_q->txtimer, stmmac_tx_timer, 0); in stmmac_init_coalesce()
2404 u32 rx_channels_count = priv->plat->rx_queues_to_use; in stmmac_set_rings_length()
2405 u32 tx_channels_count = priv->plat->tx_queues_to_use; in stmmac_set_rings_length()
2408 /* set TX ring length */ in stmmac_set_rings_length()
2410 stmmac_set_tx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
2411 (priv->dma_tx_size - 1), chan); in stmmac_set_rings_length()
2415 stmmac_set_rx_ring_len(priv, priv->ioaddr, in stmmac_set_rings_length()
2416 (priv->dma_rx_size - 1), chan); in stmmac_set_rings_length()
2420 * stmmac_set_tx_queue_weight - Set TX queue weight
2422 * Description: It is used for setting the TX queue weights
2426 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_set_tx_queue_weight()
2431 weight = priv->plat->tx_queues_cfg[queue].weight; in stmmac_set_tx_queue_weight()
2432 stmmac_set_mtl_tx_queue_weight(priv, priv->hw, weight, queue); in stmmac_set_tx_queue_weight()
2437 * stmmac_configure_cbs - Configure CBS in TX queue
2439 * Description: It is used for configuring CBS in AVB TX queues
2443 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_configure_cbs()
2449 mode_to_use = priv->plat->tx_queues_cfg[queue].mode_to_use; in stmmac_configure_cbs()
2453 stmmac_config_cbs(priv, priv->hw, in stmmac_configure_cbs()
2454 priv->plat->tx_queues_cfg[queue].send_slope, in stmmac_configure_cbs()
2455 priv->plat->tx_queues_cfg[queue].idle_slope, in stmmac_configure_cbs()
2456 priv->plat->tx_queues_cfg[queue].high_credit, in stmmac_configure_cbs()
2457 priv->plat->tx_queues_cfg[queue].low_credit, in stmmac_configure_cbs()
2463 * stmmac_rx_queue_dma_chan_map - Map RX queue to RX dma channel
2469 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_rx_queue_dma_chan_map()
2474 chan = priv->plat->rx_queues_cfg[queue].chan; in stmmac_rx_queue_dma_chan_map()
2475 stmmac_map_mtl_to_dma(priv, priv->hw, queue, chan); in stmmac_rx_queue_dma_chan_map()
2480 * stmmac_mac_config_rx_queues_prio - Configure RX Queue priority
2486 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_prio()
2491 if (!priv->plat->rx_queues_cfg[queue].use_prio) in stmmac_mac_config_rx_queues_prio()
2494 prio = priv->plat->rx_queues_cfg[queue].prio; in stmmac_mac_config_rx_queues_prio()
2495 stmmac_rx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_rx_queues_prio()
2500 * stmmac_mac_config_tx_queues_prio - Configure TX Queue priority
2502 * Description: It is used for configuring the TX Queue Priority
2506 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mac_config_tx_queues_prio()
2511 if (!priv->plat->tx_queues_cfg[queue].use_prio) in stmmac_mac_config_tx_queues_prio()
2514 prio = priv->plat->tx_queues_cfg[queue].prio; in stmmac_mac_config_tx_queues_prio()
2515 stmmac_tx_queue_prio(priv, priv->hw, prio, queue); in stmmac_mac_config_tx_queues_prio()
2520 * stmmac_mac_config_rx_queues_routing - Configure RX Queue Routing
2526 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mac_config_rx_queues_routing()
2532 if (priv->plat->rx_queues_cfg[queue].pkt_route == 0x0) in stmmac_mac_config_rx_queues_routing()
2535 packet = priv->plat->rx_queues_cfg[queue].pkt_route; in stmmac_mac_config_rx_queues_routing()
2536 stmmac_rx_queue_routing(priv, priv->hw, packet, queue); in stmmac_mac_config_rx_queues_routing()
2542 if (!priv->dma_cap.rssen || !priv->plat->rss_en) { in stmmac_mac_config_rss()
2543 priv->rss.enable = false; in stmmac_mac_config_rss()
2547 if (priv->dev->features & NETIF_F_RXHASH) in stmmac_mac_config_rss()
2548 priv->rss.enable = true; in stmmac_mac_config_rss()
2550 priv->rss.enable = false; in stmmac_mac_config_rss()
2552 stmmac_rss_configure(priv, priv->hw, &priv->rss, in stmmac_mac_config_rss()
2553 priv->plat->rx_queues_to_use); in stmmac_mac_config_rss()
2557 * stmmac_mtl_configuration - Configure MTL
2563 u32 rx_queues_count = priv->plat->rx_queues_to_use; in stmmac_mtl_configuration()
2564 u32 tx_queues_count = priv->plat->tx_queues_to_use; in stmmac_mtl_configuration()
2571 stmmac_prog_mtl_rx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
2572 priv->plat->rx_sched_algorithm); in stmmac_mtl_configuration()
2574 /* Configure MTL TX algorithms */ in stmmac_mtl_configuration()
2576 stmmac_prog_mtl_tx_algorithms(priv, priv->hw, in stmmac_mtl_configuration()
2577 priv->plat->tx_sched_algorithm); in stmmac_mtl_configuration()
2579 /* Configure CBS in AVB TX queues */ in stmmac_mtl_configuration()
2593 /* Set TX priorities */ in stmmac_mtl_configuration()
2608 if (priv->dma_cap.asp) { in stmmac_safety_feat_configuration()
2609 netdev_info(priv->dev, "Enabling Safety Features\n"); in stmmac_safety_feat_configuration()
2610 stmmac_safety_feat_config(priv, priv->ioaddr, priv->dma_cap.asp); in stmmac_safety_feat_configuration()
2612 netdev_info(priv->dev, "No Safety Features support found\n"); in stmmac_safety_feat_configuration()
2617 * stmmac_hw_setup - set up the MAC in a usable state.
2626 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2632 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_hw_setup()
2633 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_hw_setup()
2640 netdev_err(priv->dev, "%s: DMA engine initialization failed\n", in stmmac_hw_setup()
2646 stmmac_set_umac_addr(priv, priv->hw, dev->dev_addr, 0); in stmmac_hw_setup()
2649 if (priv->hw->pcs) { in stmmac_hw_setup()
2650 int speed = priv->plat->mac_port_sel_speed; in stmmac_hw_setup()
2654 priv->hw->ps = speed; in stmmac_hw_setup()
2656 dev_warn(priv->device, "invalid port speed\n"); in stmmac_hw_setup()
2657 priv->hw->ps = 0; in stmmac_hw_setup()
2662 stmmac_core_init(priv, priv->hw, dev); in stmmac_hw_setup()
2670 ret = stmmac_rx_ipc(priv, priv->hw); in stmmac_hw_setup()
2672 netdev_warn(priv->dev, "RX IPC Checksum Offload disabled\n"); in stmmac_hw_setup()
2673 priv->plat->rx_coe = STMMAC_RX_COE_NONE; in stmmac_hw_setup()
2674 priv->hw->rx_csum = 0; in stmmac_hw_setup()
2677 /* Enable the MAC Rx/Tx */ in stmmac_hw_setup()
2678 stmmac_mac_set(priv, priv->ioaddr, true); in stmmac_hw_setup()
2686 ret = clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_hw_setup()
2688 netdev_warn(priv->dev, "failed to enable PTP reference clock: %d\n", ret); in stmmac_hw_setup()
2691 if (ret == -EOPNOTSUPP) in stmmac_hw_setup()
2692 netdev_warn(priv->dev, "PTP not supported by HW\n"); in stmmac_hw_setup()
2694 netdev_warn(priv->dev, "PTP init failed\n"); in stmmac_hw_setup()
2697 priv->eee_tw_timer = STMMAC_DEFAULT_TWT_LS; in stmmac_hw_setup()
2700 if (!priv->tx_lpi_timer) in stmmac_hw_setup()
2701 priv->tx_lpi_timer = eee_timer * 1000; in stmmac_hw_setup()
2703 if (priv->use_riwt) { in stmmac_hw_setup()
2704 if (!priv->rx_riwt) in stmmac_hw_setup()
2705 priv->rx_riwt = DEF_DMA_RIWT; in stmmac_hw_setup()
2707 ret = stmmac_rx_watchdog(priv, priv->ioaddr, priv->rx_riwt, rx_cnt); in stmmac_hw_setup()
2710 if (priv->hw->pcs) in stmmac_hw_setup()
2711 stmmac_pcs_ctrl_ane(priv, priv->ioaddr, 1, priv->hw->ps, 0); in stmmac_hw_setup()
2713 /* set TX and RX rings length */ in stmmac_hw_setup()
2717 if (priv->tso) { in stmmac_hw_setup()
2719 stmmac_enable_tso(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
2723 if (priv->sph && priv->hw->rx_csum) { in stmmac_hw_setup()
2725 stmmac_enable_sph(priv, priv->ioaddr, 1, chan); in stmmac_hw_setup()
2729 if (priv->dma_cap.vlins) in stmmac_hw_setup()
2730 stmmac_enable_vlan(priv, priv->hw, STMMAC_VLAN_INSERT); in stmmac_hw_setup()
2734 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; in stmmac_hw_setup()
2735 int enable = tx_q->tbs & STMMAC_TBS_AVAIL; in stmmac_hw_setup()
2737 stmmac_enable_tbs(priv, priv->ioaddr, enable, chan); in stmmac_hw_setup()
2740 /* Configure real RX and TX queues */ in stmmac_hw_setup()
2741 netif_set_real_num_rx_queues(dev, priv->plat->rx_queues_to_use); in stmmac_hw_setup()
2742 netif_set_real_num_tx_queues(dev, priv->plat->tx_queues_to_use); in stmmac_hw_setup()
2754 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_hw_teardown()
2758 * stmmac_open - open entry point of the driver
2763 * 0 on success and an appropriate (-)ve integer as defined in errno.h
2773 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_open()
2774 priv->hw->pcs != STMMAC_PCS_RTBI && in stmmac_open()
2775 priv->hw->xpcs == NULL) { in stmmac_open()
2778 netdev_err(priv->dev, in stmmac_open()
2786 memset(&priv->xstats, 0, sizeof(struct stmmac_extra_stats)); in stmmac_open()
2787 priv->xstats.threshold = tc; in stmmac_open()
2789 bfsize = stmmac_set_16kib_bfsize(priv, dev->mtu); in stmmac_open()
2794 bfsize = stmmac_set_bfsize(dev->mtu, priv->dma_buf_sz); in stmmac_open()
2796 priv->dma_buf_sz = bfsize; in stmmac_open()
2799 priv->rx_copybreak = STMMAC_RX_COPYBREAK; in stmmac_open()
2801 if (!priv->dma_tx_size) in stmmac_open()
2802 priv->dma_tx_size = DMA_DEFAULT_TX_SIZE; in stmmac_open()
2803 if (!priv->dma_rx_size) in stmmac_open()
2804 priv->dma_rx_size = DMA_DEFAULT_RX_SIZE; in stmmac_open()
2807 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) { in stmmac_open()
2808 struct stmmac_tx_queue *tx_q = &priv->tx_queue[chan]; in stmmac_open()
2809 int tbs_en = priv->plat->tx_queues_cfg[chan].tbs_en; in stmmac_open()
2811 tx_q->tbs |= tbs_en ? STMMAC_TBS_AVAIL : 0; in stmmac_open()
2812 if (stmmac_enable_tbs(priv, priv->ioaddr, tbs_en, chan)) in stmmac_open()
2813 tx_q->tbs &= ~STMMAC_TBS_AVAIL; in stmmac_open()
2818 netdev_err(priv->dev, "%s: DMA descriptors allocation failed\n", in stmmac_open()
2825 netdev_err(priv->dev, "%s: DMA descriptors initialization failed\n", in stmmac_open()
2832 netdev_err(priv->dev, "%s: Hw setup failed\n", __func__); in stmmac_open()
2838 phylink_start(priv->phylink); in stmmac_open()
2840 phylink_speed_up(priv->phylink); in stmmac_open()
2843 ret = request_irq(dev->irq, stmmac_interrupt, in stmmac_open()
2844 IRQF_SHARED, dev->name, dev); in stmmac_open()
2846 netdev_err(priv->dev, in stmmac_open()
2848 __func__, dev->irq, ret); in stmmac_open()
2853 if (priv->wol_irq != dev->irq) { in stmmac_open()
2854 ret = request_irq(priv->wol_irq, stmmac_interrupt, in stmmac_open()
2855 IRQF_SHARED, dev->name, dev); in stmmac_open()
2857 netdev_err(priv->dev, in stmmac_open()
2859 __func__, priv->wol_irq, ret); in stmmac_open()
2865 if (priv->lpi_irq > 0) { in stmmac_open()
2866 ret = request_irq(priv->lpi_irq, stmmac_interrupt, IRQF_SHARED, in stmmac_open()
2867 dev->name, dev); in stmmac_open()
2869 netdev_err(priv->dev, in stmmac_open()
2871 __func__, priv->lpi_irq, ret); in stmmac_open()
2877 netif_tx_start_all_queues(priv->dev); in stmmac_open()
2882 if (priv->wol_irq != dev->irq) in stmmac_open()
2883 free_irq(priv->wol_irq, dev); in stmmac_open()
2885 free_irq(dev->irq, dev); in stmmac_open()
2887 phylink_stop(priv->phylink); in stmmac_open()
2889 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_open()
2890 del_timer_sync(&priv->tx_queue[chan].txtimer); in stmmac_open()
2896 phylink_disconnect_phy(priv->phylink); in stmmac_open()
2901 * stmmac_release - close entry point of the driver
2911 if (device_may_wakeup(priv->device)) in stmmac_release()
2912 phylink_speed_down(priv->phylink, false); in stmmac_release()
2914 phylink_stop(priv->phylink); in stmmac_release()
2915 phylink_disconnect_phy(priv->phylink); in stmmac_release()
2919 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_release()
2920 del_timer_sync(&priv->tx_queue[chan].txtimer); in stmmac_release()
2923 free_irq(dev->irq, dev); in stmmac_release()
2924 if (priv->wol_irq != dev->irq) in stmmac_release()
2925 free_irq(priv->wol_irq, dev); in stmmac_release()
2926 if (priv->lpi_irq > 0) in stmmac_release()
2927 free_irq(priv->lpi_irq, dev); in stmmac_release()
2929 if (priv->eee_enabled) { in stmmac_release()
2930 priv->tx_path_in_lpi_mode = false; in stmmac_release()
2931 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_release()
2934 /* Stop TX/RX DMA and clear the descriptors */ in stmmac_release()
2937 /* Release and free the Rx/Tx resources */ in stmmac_release()
2940 /* Disable the MAC Rx/Tx */ in stmmac_release()
2941 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_release()
2957 if (!priv->dma_cap.vlins) in stmmac_vlan_insert()
2961 if (skb->vlan_proto == htons(ETH_P_8021AD)) { in stmmac_vlan_insert()
2968 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_vlan_insert()
2969 p = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_vlan_insert()
2971 p = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_vlan_insert()
2977 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); in stmmac_vlan_insert()
2982 * stmmac_tso_allocator - fill TX descriptors for a TSO buffer in stmmac_tso_allocator()
2987 * @queue: TX queue index
2995 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_tso_allocator()
3005 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_allocator()
3006 priv->dma_tx_size); in stmmac_tso_allocator()
3007 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_allocator()
3009 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_allocator()
3010 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_allocator()
3012 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_allocator()
3014 curr_addr = des + (total_len - tmp_len); in stmmac_tso_allocator()
3015 if (priv->dma_cap.addr64 <= 32) in stmmac_tso_allocator()
3016 desc->des0 = cpu_to_le32(curr_addr); in stmmac_tso_allocator()
3028 tmp_len -= TSO_MAX_BUFF_SIZE; in stmmac_tso_allocator()
3033 * stmmac_tso_xmit - Tx entry point of the driver for oversized frames (TSO)
3041 * --------
3042 * | DES0 |---> buffer1 = L2/L3/L4 header
3043 * | DES1 |---> TCP Payload (can continue on next descr...)
3044 * | DES2 |---> buffer 1 and 2 len
3045 * | DES3 |---> must set TSE, TCP hdr len-> [22:19]. TCP payload len [17:0]
3046 * --------
3050 * --------
3051 * | DES0 | --| Split TCP Payload on Buffers 1 and 2
3052 * | DES1 | --|
3053 * | DES2 | --> buffer 1 and 2 len
3055 * --------
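/*
 * A standalone sketch, not driver code: the allocator loop above splits a
 * TSO payload into chunks of at most TSO_MAX_BUFF_SIZE (16K - 1) bytes,
 * one descriptor per chunk. All names below are local assumptions.
 */
#include <stddef.h>

#define TSO_MAX_BUFF_SIZE_EX ((16 * 1024) - 1)

/* Number of descriptors a payload of total_len bytes consumes. */
static size_t tso_desc_count(size_t total_len)
{
	size_t n = 0;

	while (total_len > 0) {		/* mirrors "while (tmp_len > 0)" */
		size_t chunk = total_len > TSO_MAX_BUFF_SIZE_EX ?
			       TSO_MAX_BUFF_SIZE_EX : total_len;

		total_len -= chunk;
		n++;
	}
	return n;	/* e.g. a 64 KiB payload needs 5 descriptors */
}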
3064 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_tso_xmit()
3074 tx_q = &priv->tx_queue[queue]; in stmmac_tso_xmit()
3075 first_tx = tx_q->cur_tx; in stmmac_tso_xmit()
3078 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4) { in stmmac_tso_xmit()
3088 (((skb->len - proto_hdr_len) / TSO_MAX_BUFF_SIZE + 1)))) { in stmmac_tso_xmit()
3090 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_tso_xmit()
3093 netdev_err(priv->dev, in stmmac_tso_xmit()
3094 "%s: Tx Ring full when queue awake\n", in stmmac_tso_xmit()
3100 pay_len = skb_headlen(skb) - proto_hdr_len; /* no frags */ in stmmac_tso_xmit()
3102 mss = skb_shinfo(skb)->gso_size; in stmmac_tso_xmit()
3105 if (mss != tx_q->mss) { in stmmac_tso_xmit()
3106 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
3107 mss_desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
3109 mss_desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
3112 tx_q->mss = mss; in stmmac_tso_xmit()
3113 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, in stmmac_tso_xmit()
3114 priv->dma_tx_size); in stmmac_tso_xmit()
3115 WARN_ON(tx_q->tx_skbuff[tx_q->cur_tx]); in stmmac_tso_xmit()
3121 pr_info("\tskb->len %d, skb->data_len %d\n", skb->len, in stmmac_tso_xmit()
3122 skb->data_len); in stmmac_tso_xmit()
3128 first_entry = tx_q->cur_tx; in stmmac_tso_xmit()
3129 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_tso_xmit()
3131 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
3132 desc = &tx_q->dma_entx[first_entry].basic; in stmmac_tso_xmit()
3134 desc = &tx_q->dma_tx[first_entry]; in stmmac_tso_xmit()
3141 des = dma_map_single(priv->device, skb->data, skb_headlen(skb), in stmmac_tso_xmit()
3143 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
3146 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_tso_xmit()
3147 tx_q->tx_skbuff_dma[first_entry].len = skb_headlen(skb); in stmmac_tso_xmit()
3149 if (priv->dma_cap.addr64 <= 32) { in stmmac_tso_xmit()
3150 first->des0 = cpu_to_le32(des); in stmmac_tso_xmit()
3154 first->des1 = cpu_to_le32(des + proto_hdr_len); in stmmac_tso_xmit()
3157 tmp_pay_len = pay_len - TSO_MAX_BUFF_SIZE; in stmmac_tso_xmit()
3169 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_tso_xmit()
3171 des = skb_frag_dma_map(priv->device, frag, 0, in stmmac_tso_xmit()
3174 if (dma_mapping_error(priv->device, des)) in stmmac_tso_xmit()
3178 (i == nfrags - 1), queue); in stmmac_tso_xmit()
3180 tx_q->tx_skbuff_dma[tx_q->cur_tx].buf = des; in stmmac_tso_xmit()
3181 tx_q->tx_skbuff_dma[tx_q->cur_tx].len = skb_frag_size(frag); in stmmac_tso_xmit()
3182 tx_q->tx_skbuff_dma[tx_q->cur_tx].map_as_page = true; in stmmac_tso_xmit()
3185 tx_q->tx_skbuff_dma[tx_q->cur_tx].last_segment = true; in stmmac_tso_xmit()
3188 tx_q->tx_skbuff[tx_q->cur_tx] = skb; in stmmac_tso_xmit()
3190 /* Manage tx mitigation */ in stmmac_tso_xmit()
3191 tx_packets = (tx_q->cur_tx + 1) - first_tx; in stmmac_tso_xmit()
3192 tx_q->tx_count_frames += tx_packets; in stmmac_tso_xmit()
3194 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_tso_xmit()
3196 else if (!priv->tx_coal_frames) in stmmac_tso_xmit()
3198 else if (tx_packets > priv->tx_coal_frames) in stmmac_tso_xmit()
3200 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) in stmmac_tso_xmit()
3206 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
3207 desc = &tx_q->dma_entx[tx_q->cur_tx].basic; in stmmac_tso_xmit()
3209 desc = &tx_q->dma_tx[tx_q->cur_tx]; in stmmac_tso_xmit()
3211 tx_q->tx_count_frames = 0; in stmmac_tso_xmit()
3213 priv->xstats.tx_set_ic_bit++; in stmmac_tso_xmit()
3221 tx_q->cur_tx = STMMAC_GET_ENTRY(tx_q->cur_tx, priv->dma_tx_size); in stmmac_tso_xmit()
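/*
 * A standalone model, not driver code, of the interrupt-mitigation chain
 * just above (stmmac_xmit() repeats it): the IC bit is always requested
 * for frames needing a hardware TX timestamp, never when frame coalescing
 * is disabled, and otherwise roughly once every coal_frames frames.
 * count_frames is the queue counter already bumped by this burst.
 */
#include <stdbool.h>
#include <stdint.h>

static bool want_tx_completion_irq(uint32_t count_frames, uint32_t tx_packets,
				   uint32_t coal_frames, bool needs_tstamp)
{
	if (needs_tstamp)
		return true;	/* clean promptly to fetch the timestamp */
	if (!coal_frames)
		return false;	/* rely on the TX coalescing timer alone */
	if (tx_packets > coal_frames)
		return true;	/* burst larger than the coalescing window */
	return (count_frames % coal_frames) < tx_packets;
}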
3224 netif_dbg(priv, hw, priv->dev, "%s: stopping TX queue\n", in stmmac_tso_xmit()
3226 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_tso_xmit()
3229 dev->stats.tx_bytes += skb->len; in stmmac_tso_xmit()
3230 priv->xstats.tx_tso_frames++; in stmmac_tso_xmit()
3231 priv->xstats.tx_tso_nfrags += nfrags; in stmmac_tso_xmit()
3233 if (priv->sarc_type) in stmmac_tso_xmit()
3234 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_tso_xmit()
3238 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_tso_xmit()
3239 priv->hwts_tx_en)) { in stmmac_tso_xmit()
3241 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_tso_xmit()
3249 1, tx_q->tx_skbuff_dma[first_entry].last_segment, in stmmac_tso_xmit()
3250 hdr / 4, (skb->len - proto_hdr_len)); in stmmac_tso_xmit()
3271 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_tso_xmit()
3272 tx_q->cur_tx, first, nfrags); in stmmac_tso_xmit()
3274 print_pkt(skb->data, skb_headlen(skb)); in stmmac_tso_xmit()
3277 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_tso_xmit()
3279 if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_tso_xmit()
3284 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_tso_xmit()
3285 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_tso_xmit()
3291 dev_err(priv->device, "Tx DMA map failed\n"); in stmmac_tso_xmit()
3293 priv->dev->stats.tx_dropped++; in stmmac_tso_xmit()
3298 * stmmac_xmit - Tx entry point of the driver
3301 * Description : this is the tx entry point of the driver.
3312 int nfrags = skb_shinfo(skb)->nr_frags; in stmmac_xmit()
3313 int gso = skb_shinfo(skb)->gso_type; in stmmac_xmit()
3321 tx_q = &priv->tx_queue[queue]; in stmmac_xmit()
3322 first_tx = tx_q->cur_tx; in stmmac_xmit()
3324 if (priv->tx_path_in_lpi_mode) in stmmac_xmit()
3328 if (skb_is_gso(skb) && priv->tso) { in stmmac_xmit()
3331 if (priv->plat->has_gmac4 && (gso & SKB_GSO_UDP_L4)) in stmmac_xmit()
3337 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, in stmmac_xmit()
3340 netdev_err(priv->dev, in stmmac_xmit()
3341 "%s: Tx Ring full when queue awake\n", in stmmac_xmit()
3350 entry = tx_q->cur_tx; in stmmac_xmit()
3352 WARN_ON(tx_q->tx_skbuff[first_entry]); in stmmac_xmit()
3354 csum_insertion = (skb->ip_summed == CHECKSUM_PARTIAL); in stmmac_xmit()
3356 if (likely(priv->extend_desc)) in stmmac_xmit()
3357 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
3358 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
3359 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
3361 desc = tx_q->dma_tx + entry; in stmmac_xmit()
3368 enh_desc = priv->plat->enh_desc; in stmmac_xmit()
3371 is_jumbo = stmmac_is_jumbo_frm(priv, skb->len, enh_desc); in stmmac_xmit()
3375 if (unlikely(entry < 0) && (entry != -EINVAL)) in stmmac_xmit()
3380 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; in stmmac_xmit()
3382 bool last_segment = (i == (nfrags - 1)); in stmmac_xmit()
3384 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); in stmmac_xmit()
3385 WARN_ON(tx_q->tx_skbuff[entry]); in stmmac_xmit()
3387 if (likely(priv->extend_desc)) in stmmac_xmit()
3388 desc = (struct dma_desc *)(tx_q->dma_etx + entry); in stmmac_xmit()
3389 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
3390 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
3392 desc = tx_q->dma_tx + entry; in stmmac_xmit()
3394 des = skb_frag_dma_map(priv->device, frag, 0, len, in stmmac_xmit()
3396 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
3399 tx_q->tx_skbuff_dma[entry].buf = des; in stmmac_xmit()
3403 tx_q->tx_skbuff_dma[entry].map_as_page = true; in stmmac_xmit()
3404 tx_q->tx_skbuff_dma[entry].len = len; in stmmac_xmit()
3405 tx_q->tx_skbuff_dma[entry].last_segment = last_segment; in stmmac_xmit()
3409 priv->mode, 1, last_segment, skb->len); in stmmac_xmit()
3413 tx_q->tx_skbuff[entry] = skb; in stmmac_xmit()
3416 * segment is reset and the timer re-started to clean the tx status. in stmmac_xmit()
3420 tx_packets = (entry + 1) - first_tx; in stmmac_xmit()
3421 tx_q->tx_count_frames += tx_packets; in stmmac_xmit()
3423 if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && priv->hwts_tx_en) in stmmac_xmit()
3425 else if (!priv->tx_coal_frames) in stmmac_xmit()
3427 else if (tx_packets > priv->tx_coal_frames) in stmmac_xmit()
3429 else if ((tx_q->tx_count_frames % priv->tx_coal_frames) < tx_packets) in stmmac_xmit()
3435 if (likely(priv->extend_desc)) in stmmac_xmit()
3436 desc = &tx_q->dma_etx[entry].basic; in stmmac_xmit()
3437 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
3438 desc = &tx_q->dma_entx[entry].basic; in stmmac_xmit()
3440 desc = &tx_q->dma_tx[entry]; in stmmac_xmit()
3442 tx_q->tx_count_frames = 0; in stmmac_xmit()
3444 priv->xstats.tx_set_ic_bit++; in stmmac_xmit()
3452 entry = STMMAC_GET_ENTRY(entry, priv->dma_tx_size); in stmmac_xmit()
3453 tx_q->cur_tx = entry; in stmmac_xmit()
3456 netdev_dbg(priv->dev, in stmmac_xmit()
3458 __func__, tx_q->cur_tx, tx_q->dirty_tx, first_entry, in stmmac_xmit()
3461 netdev_dbg(priv->dev, ">>> frame to be transmitted: "); in stmmac_xmit()
3462 print_pkt(skb->data, skb->len); in stmmac_xmit()
3466 netif_dbg(priv, hw, priv->dev, "%s: stopping TX queue\n", in stmmac_xmit()
3468 netif_tx_stop_queue(netdev_get_tx_queue(priv->dev, queue)); in stmmac_xmit()
3471 dev->stats.tx_bytes += skb->len; in stmmac_xmit()
3473 if (priv->sarc_type) in stmmac_xmit()
3474 stmmac_set_desc_sarc(priv, first, priv->sarc_type); in stmmac_xmit()
3485 des = dma_map_single(priv->device, skb->data, in stmmac_xmit()
3487 if (dma_mapping_error(priv->device, des)) in stmmac_xmit()
3490 tx_q->tx_skbuff_dma[first_entry].buf = des; in stmmac_xmit()
3494 tx_q->tx_skbuff_dma[first_entry].len = nopaged_len; in stmmac_xmit()
3495 tx_q->tx_skbuff_dma[first_entry].last_segment = last_segment; in stmmac_xmit()
3497 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in stmmac_xmit()
3498 priv->hwts_tx_en)) { in stmmac_xmit()
3500 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in stmmac_xmit()
3506 csum_insertion, priv->mode, 0, last_segment, in stmmac_xmit()
3507 skb->len); in stmmac_xmit()
3510 if (tx_q->tbs & STMMAC_TBS_EN) { in stmmac_xmit()
3511 struct timespec64 ts = ns_to_timespec64(skb->tstamp); in stmmac_xmit()
3513 tbs_desc = &tx_q->dma_entx[first_entry]; in stmmac_xmit()
3525 netdev_tx_sent_queue(netdev_get_tx_queue(dev, queue), skb->len); in stmmac_xmit()
3527 stmmac_enable_dma_transmission(priv, priv->ioaddr); in stmmac_xmit()
3529 if (likely(priv->extend_desc)) in stmmac_xmit()
3531 else if (tx_q->tbs & STMMAC_TBS_AVAIL) in stmmac_xmit()
3536 tx_q->tx_tail_addr = tx_q->dma_tx_phy + (tx_q->cur_tx * desc_size); in stmmac_xmit()
3537 stmmac_set_tx_tail_ptr(priv, priv->ioaddr, tx_q->tx_tail_addr, queue); in stmmac_xmit()
3543 netdev_err(priv->dev, "Tx DMA map failed\n"); in stmmac_xmit()
3545 priv->dev->stats.tx_dropped++; in stmmac_xmit()
3555 veth = (struct vlan_ethhdr *)skb->data; in stmmac_rx_vlan()
3556 vlan_proto = veth->h_vlan_proto; in stmmac_rx_vlan()
3559 dev->features & NETIF_F_HW_VLAN_CTAG_RX) || in stmmac_rx_vlan()
3561 dev->features & NETIF_F_HW_VLAN_STAG_RX)) { in stmmac_rx_vlan()
3563 vlanid = ntohs(veth->h_vlan_TCI); in stmmac_rx_vlan()
3564 memmove(skb->data + VLAN_HLEN, veth, ETH_ALEN * 2); in stmmac_rx_vlan()
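/*
 * A byte-level sketch, not driver code, of the memmove above: the two
 * 6-byte MAC addresses slide forward by VLAN_HLEN (4) bytes, overwriting
 * the 802.1Q tag in place; the driver then pulls those 4 bytes off the
 * skb head, leaving an untagged frame plus the extracted TCI. The
 * suffixed constants below are assumptions for the example.
 */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define ETH_ALEN_EX	6	/* bytes per MAC address */
#define VLAN_HLEN_EX	4	/* TPID + TCI */

/* frame layout: dst[6] src[6] tpid[2] tci[2] type[2] payload... */
static uint8_t *strip_vlan_tag(uint8_t *frame, size_t *len)
{
	memmove(frame + VLAN_HLEN_EX, frame, 2 * ETH_ALEN_EX);
	*len -= VLAN_HLEN_EX;
	return frame + VLAN_HLEN_EX;	/* now: dst src type payload */
}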
3571 * stmmac_rx_refill - refill used skb preallocated buffers
3575 * that is based on zero-copy.
3579 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_rx_refill()
3581 unsigned int entry = rx_q->dirty_rx; in stmmac_rx_refill()
3583 len = DIV_ROUND_UP(priv->dma_buf_sz, PAGE_SIZE) * PAGE_SIZE; in stmmac_rx_refill()
3585 while (dirty-- > 0) { in stmmac_rx_refill()
3586 struct stmmac_rx_buffer *buf = &rx_q->buf_pool[entry]; in stmmac_rx_refill()
3590 if (priv->extend_desc) in stmmac_rx_refill()
3591 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx_refill()
3593 p = rx_q->dma_rx + entry; in stmmac_rx_refill()
3595 if (!buf->page) { in stmmac_rx_refill()
3596 buf->page = page_pool_dev_alloc_pages(rx_q->page_pool); in stmmac_rx_refill()
3597 if (!buf->page) in stmmac_rx_refill()
3601 if (priv->sph && !buf->sec_page) { in stmmac_rx_refill()
3602 buf->sec_page = page_pool_dev_alloc_pages(rx_q->page_pool); in stmmac_rx_refill()
3603 if (!buf->sec_page) in stmmac_rx_refill()
3606 buf->sec_addr = page_pool_get_dma_addr(buf->sec_page); in stmmac_rx_refill()
3608 dma_sync_single_for_device(priv->device, buf->sec_addr, in stmmac_rx_refill()
3612 buf->addr = page_pool_get_dma_addr(buf->page); in stmmac_rx_refill()
3617 dma_sync_single_for_device(priv->device, buf->addr, len, in stmmac_rx_refill()
3620 stmmac_set_desc_addr(priv, p, buf->addr); in stmmac_rx_refill()
3621 stmmac_set_desc_sec_addr(priv, p, buf->sec_addr); in stmmac_rx_refill()
3624 rx_q->rx_count_frames++; in stmmac_rx_refill()
3625 rx_q->rx_count_frames += priv->rx_coal_frames; in stmmac_rx_refill()
3626 if (rx_q->rx_count_frames > priv->rx_coal_frames) in stmmac_rx_refill()
3627 rx_q->rx_count_frames = 0; in stmmac_rx_refill()
3629 use_rx_wd = !priv->rx_coal_frames; in stmmac_rx_refill()
3630 use_rx_wd |= rx_q->rx_count_frames > 0; in stmmac_rx_refill()
3631 if (!priv->use_riwt) in stmmac_rx_refill()
3637 entry = STMMAC_GET_ENTRY(entry, priv->dma_rx_size); in stmmac_rx_refill()
3639 rx_q->dirty_rx = entry; in stmmac_rx_refill()
3640 rx_q->rx_tail_addr = rx_q->dma_rx_phy + in stmmac_rx_refill()
3641 (rx_q->dirty_rx * sizeof(struct dma_desc)); in stmmac_rx_refill()
3642 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, rx_q->rx_tail_addr, queue); in stmmac_rx_refill()
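/*
 * A sketch, assuming the ring size is a power of two: the
 * STMMAC_GET_ENTRY() advance used throughout the refill loop is a masked
 * increment, and the tail-pointer write above tells the DMA engine how
 * far it may fetch.
 */
#include <stdint.h>

static inline uint32_t ring_next(uint32_t idx, uint32_t size)
{
	return (idx + 1) & (size - 1);	/* requires power-of-two size */
}
/* e.g. ring_next(511, 512) == 0: the index wraps back to the start */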
3650 int coe = priv->hw->rx_csum; in stmmac_rx_buf1_len()
3653 if (priv->sph && len) in stmmac_rx_buf1_len()
3658 if (priv->sph && hlen) { in stmmac_rx_buf1_len()
3659 priv->xstats.rx_split_hdr_pkt_n++; in stmmac_rx_buf1_len()
3665 return priv->dma_buf_sz; in stmmac_rx_buf1_len()
3670 return min_t(unsigned int, priv->dma_buf_sz, plen); in stmmac_rx_buf1_len()
3677 int coe = priv->hw->rx_csum; in stmmac_rx_buf2_len()
3681 if (!priv->sph) in stmmac_rx_buf2_len()
3686 return priv->dma_buf_sz; in stmmac_rx_buf2_len()
3691 return plen - len; in stmmac_rx_buf2_len()
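/*
 * A worked example for the two length helpers above, under assumed
 * numbers: with split header (SPH) active, dma_buf_sz = 2048 and a frame
 * that fits one descriptor, plen = 1514 with a 54-byte L2/L3/L4 header
 * reported by the HW gives buf1_len = 54 (header only), the running
 * total becomes len = 54, and on the last descriptor buf2_len =
 * plen - len = 1460, so the header lands in the skb head and the
 * payload in a page fragment.
 */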
3695 * stmmac_rx - manage the receive process
3704 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_rx()
3705 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_rx()
3707 int status = 0, coe = priv->hw->rx_csum; in stmmac_rx()
3708 unsigned int next_entry = rx_q->cur_rx; in stmmac_rx()
3714 netdev_dbg(priv->dev, "%s: descriptor ring:\n", __func__); in stmmac_rx()
3715 if (priv->extend_desc) in stmmac_rx()
3716 rx_head = (void *)rx_q->dma_erx; in stmmac_rx()
3718 rx_head = (void *)rx_q->dma_rx; in stmmac_rx()
3720 stmmac_display_ring(priv, rx_head, priv->dma_rx_size, true); in stmmac_rx()
3730 if (!count && rx_q->state_saved) { in stmmac_rx()
3731 skb = rx_q->state.skb; in stmmac_rx()
3732 error = rx_q->state.error; in stmmac_rx()
3733 len = rx_q->state.len; in stmmac_rx()
3735 rx_q->state_saved = false; in stmmac_rx()
3748 buf = &rx_q->buf_pool[entry]; in stmmac_rx()
3750 if (priv->extend_desc) in stmmac_rx()
3751 p = (struct dma_desc *)(rx_q->dma_erx + entry); in stmmac_rx()
3753 p = rx_q->dma_rx + entry; in stmmac_rx()
3756 status = stmmac_rx_status(priv, &priv->dev->stats, in stmmac_rx()
3757 &priv->xstats, p); in stmmac_rx()
3762 rx_q->cur_rx = STMMAC_GET_ENTRY(rx_q->cur_rx, in stmmac_rx()
3763 priv->dma_rx_size); in stmmac_rx()
3764 next_entry = rx_q->cur_rx; in stmmac_rx()
3766 if (priv->extend_desc) in stmmac_rx()
3767 np = (struct dma_desc *)(rx_q->dma_erx + next_entry); in stmmac_rx()
3769 np = rx_q->dma_rx + next_entry; in stmmac_rx()
3773 if (priv->extend_desc) in stmmac_rx()
3774 stmmac_rx_extended_status(priv, &priv->dev->stats, in stmmac_rx()
3775 &priv->xstats, rx_q->dma_erx + entry); in stmmac_rx()
3777 page_pool_recycle_direct(rx_q->page_pool, buf->page); in stmmac_rx()
3778 buf->page = NULL; in stmmac_rx()
3780 if (!priv->hwts_rx_en) in stmmac_rx()
3781 priv->dev->stats.rx_errors++; in stmmac_rx()
3795 prefetch(page_address(buf->page)); in stmmac_rx()
3796 if (buf->sec_page) in stmmac_rx()
3797 prefetch(page_address(buf->sec_page)); in stmmac_rx()
3805 * Type frames (LLC/LLC-SNAP) in stmmac_rx()
3812 (likely(priv->synopsys_id >= DWMAC_CORE_4_00) || in stmmac_rx()
3815 buf2_len -= ETH_FCS_LEN; in stmmac_rx()
3817 buf1_len -= ETH_FCS_LEN; in stmmac_rx()
3819 len -= ETH_FCS_LEN; in stmmac_rx()
3823 skb = napi_alloc_skb(&ch->rx_napi, buf1_len); in stmmac_rx()
3825 priv->dev->stats.rx_dropped++; in stmmac_rx()
3830 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
3832 skb_copy_to_linear_data(skb, page_address(buf->page), in stmmac_rx()
3837 page_pool_recycle_direct(rx_q->page_pool, buf->page); in stmmac_rx()
3838 buf->page = NULL; in stmmac_rx()
3840 dma_sync_single_for_cpu(priv->device, buf->addr, in stmmac_rx()
3842 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
3843 buf->page, 0, buf1_len, in stmmac_rx()
3844 priv->dma_buf_sz); in stmmac_rx()
3847 page_pool_release_page(rx_q->page_pool, buf->page); in stmmac_rx()
3848 buf->page = NULL; in stmmac_rx()
3852 dma_sync_single_for_cpu(priv->device, buf->sec_addr, in stmmac_rx()
3854 skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, in stmmac_rx()
3855 buf->sec_page, 0, buf2_len, in stmmac_rx()
3856 priv->dma_buf_sz); in stmmac_rx()
3859 page_pool_release_page(rx_q->page_pool, buf->sec_page); in stmmac_rx()
3860 buf->sec_page = NULL; in stmmac_rx()
3872 stmmac_rx_vlan(priv->dev, skb); in stmmac_rx()
3873 skb->protocol = eth_type_trans(skb, priv->dev); in stmmac_rx()
3878 skb->ip_summed = CHECKSUM_UNNECESSARY; in stmmac_rx()
3884 napi_gro_receive(&ch->rx_napi, skb); in stmmac_rx()
3887 priv->dev->stats.rx_packets++; in stmmac_rx()
3888 priv->dev->stats.rx_bytes += len; in stmmac_rx()
3893 rx_q->state_saved = true; in stmmac_rx()
3894 rx_q->state.skb = skb; in stmmac_rx()
3895 rx_q->state.error = error; in stmmac_rx()
3896 rx_q->state.len = len; in stmmac_rx()
3901 priv->xstats.rx_pkt_n += count; in stmmac_rx()
3910 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_rx()
3911 u32 chan = ch->index; in stmmac_napi_poll_rx()
3914 priv->xstats.napi_poll++; in stmmac_napi_poll_rx()
3920 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_rx()
3921 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 1, 0); in stmmac_napi_poll_rx()
3922 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_rx()
3932 struct stmmac_priv *priv = ch->priv_data; in stmmac_napi_poll_tx()
3933 u32 chan = ch->index; in stmmac_napi_poll_tx()
3936 priv->xstats.napi_poll++; in stmmac_napi_poll_tx()
3938 work_done = stmmac_tx_clean(priv, priv->dma_tx_size, chan); in stmmac_napi_poll_tx()
3944 spin_lock_irqsave(&ch->lock, flags); in stmmac_napi_poll_tx()
3945 stmmac_enable_dma_irq(priv, priv->ioaddr, chan, 0, 1); in stmmac_napi_poll_tx()
3946 spin_unlock_irqrestore(&ch->lock, flags); in stmmac_napi_poll_tx()
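/*
 * A skeleton, not driver code, of the NAPI contract both pollers above
 * follow: complete and re-arm the channel's DMA interrupt only when less
 * than the budget was used, and do the re-arm under ch->lock so it
 * cannot race the ISR that disabled it. The two helpers are hypothetical.
 */
static int stmmac_napi_poll_skeleton(struct napi_struct *napi, int budget)
{
	int work_done = process_ring(budget);	/* hypothetical helper */

	if (work_done < budget && napi_complete_done(napi, work_done)) {
		/* take ch->lock, call stmmac_enable_dma_irq(), unlock */
		reenable_channel_irq();		/* hypothetical helper */
	}
	return work_done;
}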
3969 * stmmac_set_rx_mode - entry point for multicast addressing
3981 stmmac_set_filter(priv, priv->hw, dev); in stmmac_set_rx_mode()
3985 * stmmac_change_mtu - entry point to change MTU size for the device.
3992 * 0 on success and an appropriate (-)ve integer as defined in errno.h
3998 int txfifosz = priv->plat->tx_fifo_size; in stmmac_change_mtu()
4001 txfifosz = priv->dma_cap.tx_fifo_size; in stmmac_change_mtu()
4003 txfifosz /= priv->plat->tx_queues_to_use; in stmmac_change_mtu()
4006 netdev_err(priv->dev, "must be stopped to change its MTU\n"); in stmmac_change_mtu()
4007 return -EBUSY; in stmmac_change_mtu()
4014 return -EINVAL; in stmmac_change_mtu()
4016 dev->mtu = new_mtu; in stmmac_change_mtu()
4028 if (priv->plat->rx_coe == STMMAC_RX_COE_NONE) in stmmac_fix_features()
4031 if (!priv->plat->tx_coe) in stmmac_fix_features()
4035 * needs to have the Tx COE disabled for oversized frames in stmmac_fix_features()
4037 * the TX csum insertion in the TDES and not use SF. in stmmac_fix_features()
4039 if (priv->plat->bugged_jumbo && (dev->mtu > ETH_DATA_LEN)) in stmmac_fix_features()
4043 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { in stmmac_fix_features()
4045 priv->tso = true; in stmmac_fix_features()
4047 priv->tso = false; in stmmac_fix_features()
4062 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_set_features()
4064 priv->hw->rx_csum = 0; in stmmac_set_features()
4068 stmmac_rx_ipc(priv, priv->hw); in stmmac_set_features()
4070 sph_en = (priv->hw->rx_csum > 0) && priv->sph; in stmmac_set_features()
4071 for (chan = 0; chan < priv->plat->rx_queues_to_use; chan++) in stmmac_set_features()
4072 stmmac_enable_sph(priv, priv->ioaddr, sph_en, chan); in stmmac_set_features()
4078 * stmmac_interrupt - main ISR
4085 * o Core interrupts to manage: remote wake-up, management counter, LPI
4092 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_interrupt()
4093 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_interrupt()
4098 xmac = priv->plat->has_gmac4 || priv->plat->has_xgmac; in stmmac_interrupt()
4101 if (priv->irq_wake) in stmmac_interrupt()
4102 pm_wakeup_event(priv->device, 0); in stmmac_interrupt()
4105 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_interrupt()
4112 if ((priv->plat->has_gmac) || xmac) { in stmmac_interrupt()
4113 int status = stmmac_host_irq_status(priv, priv->hw, &priv->xstats); in stmmac_interrupt()
4117 /* For LPI we need to save the tx status */ in stmmac_interrupt()
4119 priv->tx_path_in_lpi_mode = true; in stmmac_interrupt()
4121 priv->tx_path_in_lpi_mode = false; in stmmac_interrupt()
4125 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_interrupt()
4127 mtl_status = stmmac_host_mtl_irq_status(priv, priv->hw, in stmmac_interrupt()
4129 if (mtl_status != -EINVAL) in stmmac_interrupt()
4133 stmmac_set_rx_tail_ptr(priv, priv->ioaddr, in stmmac_interrupt()
4134 rx_q->rx_tail_addr, in stmmac_interrupt()
4139 if (priv->hw->pcs) { in stmmac_interrupt()
4140 if (priv->xstats.pcs_link) in stmmac_interrupt()
4154 /* Polling receive - used by NETCONSOLE and other diagnostic tools
4159 disable_irq(dev->irq); in stmmac_poll_controller()
4160 stmmac_interrupt(dev->irq, dev); in stmmac_poll_controller()
4161 enable_irq(dev->irq); in stmmac_poll_controller()
4166 * stmmac_ioctl - Entry point for the Ioctl
4177 int ret = -EOPNOTSUPP; in stmmac_ioctl()
4180 return -EINVAL; in stmmac_ioctl()
4186 ret = phylink_mii_ioctl(priv->phylink, rq, cmd); in stmmac_ioctl()
4205 int ret = -EOPNOTSUPP; in stmmac_setup_tc_block_cb()
4207 if (!tc_cls_can_offload_and_chain0(priv->dev, type_data)) in stmmac_setup_tc_block_cb()
4247 return -EOPNOTSUPP; in stmmac_setup_tc()
4254 int gso = skb_shinfo(skb)->gso_type; in stmmac_select_queue()
4266 return netdev_pick_tx(dev, skb, NULL) % dev->real_num_tx_queues; in stmmac_select_queue()
4278 stmmac_set_umac_addr(priv, priv->hw, ndev->dev_addr, 0); in stmmac_set_mac_address()
4297 le32_to_cpu(ep->basic.des0), in sysfs_display_ring()
4298 le32_to_cpu(ep->basic.des1), in sysfs_display_ring()
4299 le32_to_cpu(ep->basic.des2), in sysfs_display_ring()
4300 le32_to_cpu(ep->basic.des3)); in sysfs_display_ring()
4305 le32_to_cpu(p->des0), le32_to_cpu(p->des1), in sysfs_display_ring()
4306 le32_to_cpu(p->des2), le32_to_cpu(p->des3)); in sysfs_display_ring()
4315 struct net_device *dev = seq->private; in stmmac_rings_status_show()
4317 u32 rx_count = priv->plat->rx_queues_to_use; in stmmac_rings_status_show()
4318 u32 tx_count = priv->plat->tx_queues_to_use; in stmmac_rings_status_show()
4321 if ((dev->flags & IFF_UP) == 0) in stmmac_rings_status_show()
4325 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_rings_status_show()
4329 if (priv->extend_desc) { in stmmac_rings_status_show()
4331 sysfs_display_ring((void *)rx_q->dma_erx, in stmmac_rings_status_show()
4332 priv->dma_rx_size, 1, seq); in stmmac_rings_status_show()
4335 sysfs_display_ring((void *)rx_q->dma_rx, in stmmac_rings_status_show()
4336 priv->dma_rx_size, 0, seq); in stmmac_rings_status_show()
4341 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_rings_status_show()
4343 seq_printf(seq, "TX Queue %d:\n", queue); in stmmac_rings_status_show()
4345 if (priv->extend_desc) { in stmmac_rings_status_show()
4347 sysfs_display_ring((void *)tx_q->dma_etx, in stmmac_rings_status_show()
4348 priv->dma_tx_size, 1, seq); in stmmac_rings_status_show()
4349 } else if (!(tx_q->tbs & STMMAC_TBS_AVAIL)) { in stmmac_rings_status_show()
4351 sysfs_display_ring((void *)tx_q->dma_tx, in stmmac_rings_status_show()
4352 priv->dma_tx_size, 0, seq); in stmmac_rings_status_show()
4362 struct net_device *dev = seq->private; in stmmac_dma_cap_show()
4365 if (!priv->hw_cap_support) { in stmmac_dma_cap_show()
4374 seq_printf(seq, "\t10/100 Mbps: %s\n", in stmmac_dma_cap_show()
4375 (priv->dma_cap.mbps_10_100) ? "Y" : "N"); in stmmac_dma_cap_show()
4377 (priv->dma_cap.mbps_1000) ? "Y" : "N"); in stmmac_dma_cap_show()
4379 (priv->dma_cap.half_duplex) ? "Y" : "N"); in stmmac_dma_cap_show()
4381 (priv->dma_cap.hash_filter) ? "Y" : "N"); in stmmac_dma_cap_show()
4383 (priv->dma_cap.multi_addr) ? "Y" : "N"); in stmmac_dma_cap_show()
4385 (priv->dma_cap.pcs) ? "Y" : "N"); in stmmac_dma_cap_show()
4387 (priv->dma_cap.sma_mdio) ? "Y" : "N"); in stmmac_dma_cap_show()
4389 (priv->dma_cap.pmt_remote_wake_up) ? "Y" : "N"); in stmmac_dma_cap_show()
4391 (priv->dma_cap.pmt_magic_frame) ? "Y" : "N"); in stmmac_dma_cap_show()
4393 (priv->dma_cap.rmon) ? "Y" : "N"); in stmmac_dma_cap_show()
4394 seq_printf(seq, "\tIEEE 1588-2002 Time Stamp: %s\n", in stmmac_dma_cap_show()
4395 (priv->dma_cap.time_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
4396 seq_printf(seq, "\tIEEE 1588-2008 Advanced Time Stamp: %s\n", in stmmac_dma_cap_show()
4397 (priv->dma_cap.atime_stamp) ? "Y" : "N"); in stmmac_dma_cap_show()
4398 seq_printf(seq, "\t802.3az - Energy-Efficient Ethernet (EEE): %s\n", in stmmac_dma_cap_show()
4399 (priv->dma_cap.eee) ? "Y" : "N"); in stmmac_dma_cap_show()
4400 seq_printf(seq, "\tAV features: %s\n", (priv->dma_cap.av) ? "Y" : "N"); in stmmac_dma_cap_show()
4401 seq_printf(seq, "\tChecksum Offload in TX: %s\n", in stmmac_dma_cap_show()
4402 (priv->dma_cap.tx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
4403 if (priv->synopsys_id >= DWMAC_CORE_4_00) { in stmmac_dma_cap_show()
4405 (priv->dma_cap.rx_coe) ? "Y" : "N"); in stmmac_dma_cap_show()
4408 (priv->dma_cap.rx_coe_type1) ? "Y" : "N"); in stmmac_dma_cap_show()
4410 (priv->dma_cap.rx_coe_type2) ? "Y" : "N"); in stmmac_dma_cap_show()
4413 (priv->dma_cap.rxfifo_over_2048) ? "Y" : "N"); in stmmac_dma_cap_show()
4415 priv->dma_cap.number_rx_channel); in stmmac_dma_cap_show()
4416 seq_printf(seq, "\tNumber of Additional TX channels: %d\n", in stmmac_dma_cap_show()
4417 priv->dma_cap.number_tx_channel); in stmmac_dma_cap_show()
4419 priv->dma_cap.number_rx_queues); in stmmac_dma_cap_show()
4420 seq_printf(seq, "\tNumber of Additional TX queues: %d\n", in stmmac_dma_cap_show()
4421 priv->dma_cap.number_tx_queues); in stmmac_dma_cap_show()
4423 (priv->dma_cap.enh_desc) ? "Y" : "N"); in stmmac_dma_cap_show()
4424 seq_printf(seq, "\tTX Fifo Size: %d\n", priv->dma_cap.tx_fifo_size); in stmmac_dma_cap_show()
4425 seq_printf(seq, "\tRX Fifo Size: %d\n", priv->dma_cap.rx_fifo_size); in stmmac_dma_cap_show()
4426 seq_printf(seq, "\tHash Table Size: %d\n", priv->dma_cap.hash_tb_sz); in stmmac_dma_cap_show()
4427 seq_printf(seq, "\tTSO: %s\n", priv->dma_cap.tsoen ? "Y" : "N"); in stmmac_dma_cap_show()
4429 priv->dma_cap.pps_out_num); in stmmac_dma_cap_show()
4431 priv->dma_cap.asp ? "Y" : "N"); in stmmac_dma_cap_show()
4433 priv->dma_cap.frpsel ? "Y" : "N"); in stmmac_dma_cap_show()
4435 priv->dma_cap.addr64); in stmmac_dma_cap_show()
4437 priv->dma_cap.rssen ? "Y" : "N"); in stmmac_dma_cap_show()
4439 priv->dma_cap.vlhash ? "Y" : "N"); in stmmac_dma_cap_show()
4441 priv->dma_cap.sphen ? "Y" : "N"); in stmmac_dma_cap_show()
4442 seq_printf(seq, "\tVLAN TX Insertion: %s\n", in stmmac_dma_cap_show()
4443 priv->dma_cap.vlins ? "Y" : "N"); in stmmac_dma_cap_show()
4445 priv->dma_cap.dvlan ? "Y" : "N"); in stmmac_dma_cap_show()
4447 priv->dma_cap.l3l4fnum); in stmmac_dma_cap_show()
4449 priv->dma_cap.arpoffsel ? "Y" : "N"); in stmmac_dma_cap_show()
4451 priv->dma_cap.estsel ? "Y" : "N"); in stmmac_dma_cap_show()
4453 priv->dma_cap.fpesel ? "Y" : "N"); in stmmac_dma_cap_show()
4454 seq_printf(seq, "\tTime-Based Scheduling (TBS): %s\n", in stmmac_dma_cap_show()
4455 priv->dma_cap.tbssel ? "Y" : "N"); in stmmac_dma_cap_show()
4468 if (dev->netdev_ops != &stmmac_netdev_ops) in stmmac_device_event()
4473 if (priv->dbgfs_dir) in stmmac_device_event()
4474 priv->dbgfs_dir = debugfs_rename(stmmac_fs_dir, in stmmac_device_event()
4475 priv->dbgfs_dir, in stmmac_device_event()
4477 dev->name); in stmmac_device_event()
4495 priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir); in stmmac_init_fs()
4497 /* Entry to report DMA RX/TX rings */ in stmmac_init_fs()
4498 debugfs_create_file("descriptors_status", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
4502 debugfs_create_file("dma_cap", 0444, priv->dbgfs_dir, dev, in stmmac_init_fs()
4512 debugfs_remove_recursive(priv->dbgfs_dir); in stmmac_exit_fs()
4547 for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) { in stmmac_vlan_update()
4554 if (!priv->dma_cap.vlhash) { in stmmac_vlan_update()
4556 return -EOPNOTSUPP; in stmmac_vlan_update()
4562 return stmmac_update_vlan_hash(priv, priv->hw, hash, pmatch, is_double); in stmmac_vlan_update()
4574 set_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
4577 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_add_vid()
4581 if (priv->hw->num_vlan) { in stmmac_vlan_rx_add_vid()
4582 ret = stmmac_add_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_add_vid()
4599 clear_bit(vid, priv->active_vlans); in stmmac_vlan_rx_kill_vid()
4601 if (priv->hw->num_vlan) { in stmmac_vlan_rx_kill_vid()
4602 ret = stmmac_del_hw_vlan_rx_fltr(priv, ndev, priv->hw, proto, vid); in stmmac_vlan_rx_kill_vid()
4632 if (!test_and_clear_bit(STMMAC_RESET_REQUESTED, &priv->state)) in stmmac_reset_subtask()
4634 if (test_bit(STMMAC_DOWN, &priv->state)) in stmmac_reset_subtask()
4637 netdev_err(priv->dev, "Reset adapter.\n"); in stmmac_reset_subtask()
4640 netif_trans_update(priv->dev); in stmmac_reset_subtask()
4641 while (test_and_set_bit(STMMAC_RESETING, &priv->state)) in stmmac_reset_subtask()
4644 set_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
4645 dev_close(priv->dev); in stmmac_reset_subtask()
4646 dev_open(priv->dev, NULL); in stmmac_reset_subtask()
4647 clear_bit(STMMAC_DOWN, &priv->state); in stmmac_reset_subtask()
4648 clear_bit(STMMAC_RESETING, &priv->state); in stmmac_reset_subtask()
4658 clear_bit(STMMAC_SERVICE_SCHED, &priv->state); in stmmac_service_task()
4662 * stmmac_hw_init - Init the MAC device
4673 /* dwmac-sun8i only works in chain mode */ in stmmac_hw_init()
4674 if (priv->plat->has_sun8i) in stmmac_hw_init()
4676 priv->chain_mode = chain_mode; in stmmac_hw_init()
4684 priv->hw_cap_support = stmmac_get_hw_features(priv); in stmmac_hw_init()
4685 if (priv->hw_cap_support) { in stmmac_hw_init()
4686 dev_info(priv->device, "DMA HW capability register supported\n"); in stmmac_hw_init()
4693 priv->plat->enh_desc = priv->dma_cap.enh_desc; in stmmac_hw_init()
4694 priv->plat->pmt = priv->dma_cap.pmt_remote_wake_up; in stmmac_hw_init()
4695 priv->hw->pmt = priv->plat->pmt; in stmmac_hw_init()
4696 if (priv->dma_cap.hash_tb_sz) { in stmmac_hw_init()
4697 priv->hw->multicast_filter_bins = in stmmac_hw_init()
4698 (BIT(priv->dma_cap.hash_tb_sz) << 5); in stmmac_hw_init()
4699 priv->hw->mcast_bits_log2 = in stmmac_hw_init()
4700 ilog2(priv->hw->multicast_filter_bins); in stmmac_hw_init()
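/*
 * A worked example for the bin computation above: a reported hash_tb_sz
 * of 2 gives multicast_filter_bins = BIT(2) << 5 = 128 and
 * mcast_bits_log2 = ilog2(128) = 7, i.e. 7 bits of the address hash
 * select one of 128 filter bins.
 */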
4704 if (priv->plat->force_thresh_dma_mode) in stmmac_hw_init()
4705 priv->plat->tx_coe = 0; in stmmac_hw_init()
4707 priv->plat->tx_coe = priv->dma_cap.tx_coe; in stmmac_hw_init()
4710 priv->plat->rx_coe = priv->dma_cap.rx_coe; in stmmac_hw_init()
4712 if (priv->dma_cap.rx_coe_type2) in stmmac_hw_init()
4713 priv->plat->rx_coe = STMMAC_RX_COE_TYPE2; in stmmac_hw_init()
4714 else if (priv->dma_cap.rx_coe_type1) in stmmac_hw_init()
4715 priv->plat->rx_coe = STMMAC_RX_COE_TYPE1; in stmmac_hw_init()
4718 dev_info(priv->device, "No HW DMA feature register supported\n"); in stmmac_hw_init()
4721 if (priv->plat->rx_coe) { in stmmac_hw_init()
4722 priv->hw->rx_csum = priv->plat->rx_coe; in stmmac_hw_init()
4723 dev_info(priv->device, "RX Checksum Offload Engine supported\n"); in stmmac_hw_init()
4724 if (priv->synopsys_id < DWMAC_CORE_4_00) in stmmac_hw_init()
4725 dev_info(priv->device, "COE Type %d\n", priv->hw->rx_csum); in stmmac_hw_init()
4727 if (priv->plat->tx_coe) in stmmac_hw_init()
4728 dev_info(priv->device, "TX Checksum insertion supported\n"); in stmmac_hw_init()
4730 if (priv->plat->pmt) { in stmmac_hw_init()
4731 dev_info(priv->device, "Wake-Up On LAN supported\n"); in stmmac_hw_init()
4732 device_set_wakeup_capable(priv->device, 1); in stmmac_hw_init()
4735 if (priv->dma_cap.tsoen) in stmmac_hw_init()
4736 dev_info(priv->device, "TSO supported\n"); in stmmac_hw_init()
4738 priv->hw->vlan_fail_q_en = priv->plat->vlan_fail_q_en; in stmmac_hw_init()
4739 priv->hw->vlan_fail_q = priv->plat->vlan_fail_q; in stmmac_hw_init()
4742 if (priv->hwif_quirks) { in stmmac_hw_init()
4743 ret = priv->hwif_quirks(priv); in stmmac_hw_init()
4753 if (((priv->synopsys_id >= DWMAC_CORE_3_50) || in stmmac_hw_init()
4754 (priv->plat->has_xgmac)) && (!priv->plat->riwt_off)) { in stmmac_hw_init()
4755 priv->use_riwt = 1; in stmmac_hw_init()
4756 dev_info(priv->device, in stmmac_hw_init()
4768 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_add()
4771 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_add()
4773 ch->priv_data = priv; in stmmac_napi_add()
4774 ch->index = queue; in stmmac_napi_add()
4775 spin_lock_init(&ch->lock); in stmmac_napi_add()
4777 if (queue < priv->plat->rx_queues_to_use) { in stmmac_napi_add()
4778 netif_napi_add(dev, &ch->rx_napi, stmmac_napi_poll_rx, in stmmac_napi_add()
4781 if (queue < priv->plat->tx_queues_to_use) { in stmmac_napi_add()
4782 netif_tx_napi_add(dev, &ch->tx_napi, in stmmac_napi_add()
4794 maxq = max(priv->plat->rx_queues_to_use, priv->plat->tx_queues_to_use); in stmmac_napi_del()
4797 struct stmmac_channel *ch = &priv->channel[queue]; in stmmac_napi_del()
4799 if (queue < priv->plat->rx_queues_to_use) in stmmac_napi_del()
4800 netif_napi_del(&ch->rx_napi); in stmmac_napi_del()
4801 if (queue < priv->plat->tx_queues_to_use) in stmmac_napi_del()
4802 netif_napi_del(&ch->tx_napi); in stmmac_napi_del()
4816 priv->plat->rx_queues_to_use = rx_cnt; in stmmac_reinit_queues()
4817 priv->plat->tx_queues_to_use = tx_cnt; in stmmac_reinit_queues()
4835 priv->dma_rx_size = rx_size; in stmmac_reinit_ringparam()
4836 priv->dma_tx_size = tx_size; in stmmac_reinit_ringparam()
4866 return -ENOMEM; in stmmac_dvr_probe()
4871 priv->device = device; in stmmac_dvr_probe()
4872 priv->dev = ndev; in stmmac_dvr_probe()
4875 priv->pause = pause; in stmmac_dvr_probe()
4876 priv->plat = plat_dat; in stmmac_dvr_probe()
4877 priv->ioaddr = res->addr; in stmmac_dvr_probe()
4878 priv->dev->base_addr = (unsigned long)res->addr; in stmmac_dvr_probe()
4880 priv->dev->irq = res->irq; in stmmac_dvr_probe()
4881 priv->wol_irq = res->wol_irq; in stmmac_dvr_probe()
4882 priv->lpi_irq = res->lpi_irq; in stmmac_dvr_probe()
4884 if (!IS_ERR_OR_NULL(res->mac)) in stmmac_dvr_probe()
4885 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); in stmmac_dvr_probe()
4887 dev_set_drvdata(device, priv->dev); in stmmac_dvr_probe()
4893 priv->wq = create_singlethread_workqueue("stmmac_wq"); in stmmac_dvr_probe()
4894 if (!priv->wq) { in stmmac_dvr_probe()
4895 dev_err(priv->device, "failed to create workqueue\n"); in stmmac_dvr_probe()
4896 return -ENOMEM; in stmmac_dvr_probe()
4899 INIT_WORK(&priv->service_task, stmmac_service_task); in stmmac_dvr_probe()
4905 priv->plat->phy_addr = phyaddr; in stmmac_dvr_probe()
4907 if (priv->plat->stmmac_rst) { in stmmac_dvr_probe()
4908 ret = reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
4909 reset_control_deassert(priv->plat->stmmac_rst); in stmmac_dvr_probe()
4913 if (ret == -ENOTSUPP) in stmmac_dvr_probe()
4914 reset_control_reset(priv->plat->stmmac_rst); in stmmac_dvr_probe()
4924 ndev->netdev_ops = &stmmac_netdev_ops; in stmmac_dvr_probe()
4926 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in stmmac_dvr_probe()
4931 ndev->hw_features |= NETIF_F_HW_TC; in stmmac_dvr_probe()
4934 if ((priv->plat->tso_en) && (priv->dma_cap.tsoen)) { in stmmac_dvr_probe()
4935 ndev->hw_features |= NETIF_F_TSO | NETIF_F_TSO6; in stmmac_dvr_probe()
4936 if (priv->plat->has_gmac4) in stmmac_dvr_probe()
4937 ndev->hw_features |= NETIF_F_GSO_UDP_L4; in stmmac_dvr_probe()
4938 priv->tso = true; in stmmac_dvr_probe()
4939 dev_info(priv->device, "TSO feature enabled\n"); in stmmac_dvr_probe()
4942 if (priv->dma_cap.sphen) { in stmmac_dvr_probe()
4943 ndev->hw_features |= NETIF_F_GRO; in stmmac_dvr_probe()
4944 priv->sph = true; in stmmac_dvr_probe()
4945 dev_info(priv->device, "SPH feature enabled\n"); in stmmac_dvr_probe()
4953 if (priv->plat->addr64) in stmmac_dvr_probe()
4954 priv->dma_cap.addr64 = priv->plat->addr64; in stmmac_dvr_probe()
4956 if (priv->dma_cap.addr64) { in stmmac_dvr_probe()
4958 DMA_BIT_MASK(priv->dma_cap.addr64)); in stmmac_dvr_probe()
4960 dev_info(priv->device, "Using %d bits DMA width\n", in stmmac_dvr_probe()
4961 priv->dma_cap.addr64); in stmmac_dvr_probe()
4968 priv->plat->dma_cfg->eame = true; in stmmac_dvr_probe()
4972 dev_err(priv->device, "Failed to set DMA Mask\n"); in stmmac_dvr_probe()
4976 priv->dma_cap.addr64 = 32; in stmmac_dvr_probe()
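/*
 * A minimal sketch of the DMA-mask negotiation above, using the regular
 * kernel API: try the full addressing width reported by the capability
 * register, fall back to 32 bits on failure (with addr64 forced to 32,
 * only des0 carries a buffer address). The function name is an
 * assumption for the example.
 */
#include <linux/dma-mapping.h>

static int stmmac_set_dma_width_sketch(struct device *dev, int addr64)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(addr64)))
		return addr64;		/* full width accepted */
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
		return 32;		/* 32-bit fallback, as above */
	return -EINVAL;			/* no usable DMA mask */
}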
4980 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in stmmac_dvr_probe()
4981 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); in stmmac_dvr_probe()
4984 ndev->features |= NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_HW_VLAN_STAG_RX; in stmmac_dvr_probe()
4985 if (priv->dma_cap.vlhash) { in stmmac_dvr_probe()
4986 ndev->features |= NETIF_F_HW_VLAN_CTAG_FILTER; in stmmac_dvr_probe()
4987 ndev->features |= NETIF_F_HW_VLAN_STAG_FILTER; in stmmac_dvr_probe()
4989 if (priv->dma_cap.vlins) { in stmmac_dvr_probe()
4990 ndev->features |= NETIF_F_HW_VLAN_CTAG_TX; in stmmac_dvr_probe()
4991 if (priv->dma_cap.dvlan) in stmmac_dvr_probe()
4992 ndev->features |= NETIF_F_HW_VLAN_STAG_TX; in stmmac_dvr_probe()
4995 priv->msg_enable = netif_msg_init(debug, default_msg_level); in stmmac_dvr_probe()
4998 rxq = priv->plat->rx_queues_to_use; in stmmac_dvr_probe()
4999 netdev_rss_key_fill(priv->rss.key, sizeof(priv->rss.key)); in stmmac_dvr_probe()
5000 for (i = 0; i < ARRAY_SIZE(priv->rss.table); i++) in stmmac_dvr_probe()
5001 priv->rss.table[i] = ethtool_rxfh_indir_default(i, rxq); in stmmac_dvr_probe()
5003 if (priv->dma_cap.rssen && priv->plat->rss_en) in stmmac_dvr_probe()
5004 ndev->features |= NETIF_F_RXHASH; in stmmac_dvr_probe()
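/*
 * Note: ethtool_rxfh_indir_default(i, rxq) used above is simply
 * i % rxq, so the default RSS indirection table spreads flows
 * round-robin across the RX queues, e.g. 0,1,2,3,0,1,2,3,... for
 * rxq = 4.
 */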
5006 /* MTU range: 46 - hw-specific max */ in stmmac_dvr_probe()
5007 ndev->min_mtu = ETH_ZLEN - ETH_HLEN; in stmmac_dvr_probe()
5008 if (priv->plat->has_xgmac) in stmmac_dvr_probe()
5009 ndev->max_mtu = XGMAC_JUMBO_LEN; in stmmac_dvr_probe()
5010 else if ((priv->plat->enh_desc) || (priv->synopsys_id >= DWMAC_CORE_4_00)) in stmmac_dvr_probe()
5011 ndev->max_mtu = JUMBO_LEN; in stmmac_dvr_probe()
5013 ndev->max_mtu = SKB_MAX_HEAD(NET_SKB_PAD + NET_IP_ALIGN); in stmmac_dvr_probe()
5014 /* Will not overwrite ndev->max_mtu if plat->maxmtu > ndev->max_mtu in stmmac_dvr_probe()
5015 * as well as plat->maxmtu < ndev->min_mtu, which is an invalid range. in stmmac_dvr_probe()
5017 if ((priv->plat->maxmtu < ndev->max_mtu) && in stmmac_dvr_probe()
5018 (priv->plat->maxmtu >= ndev->min_mtu)) in stmmac_dvr_probe()
5019 ndev->max_mtu = priv->plat->maxmtu; in stmmac_dvr_probe()
5020 else if (priv->plat->maxmtu < ndev->min_mtu) in stmmac_dvr_probe()
5021 dev_warn(priv->device, in stmmac_dvr_probe()
5023 __func__, priv->plat->maxmtu); in stmmac_dvr_probe()
5026 priv->flow_ctrl = FLOW_AUTO; /* RX/TX pause on */ in stmmac_dvr_probe()
5031 mutex_init(&priv->lock); in stmmac_dvr_probe()
5035 * changed at run-time and it is fixed. Otherwise, the driver will try to in stmmac_dvr_probe()
5039 if (priv->plat->clk_csr >= 0) in stmmac_dvr_probe()
5040 priv->clk_csr = priv->plat->clk_csr; in stmmac_dvr_probe()
5046 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_probe()
5047 priv->hw->pcs != STMMAC_PCS_RTBI) { in stmmac_dvr_probe()
5051 dev_err(priv->device, in stmmac_dvr_probe()
5053 __func__, priv->plat->bus_id); in stmmac_dvr_probe()
5066 dev_err(priv->device, "%s: ERROR %i registering the device\n", in stmmac_dvr_probe()
5071 if (priv->plat->serdes_powerup) { in stmmac_dvr_probe()
5072 ret = priv->plat->serdes_powerup(ndev, in stmmac_dvr_probe()
5073 priv->plat->bsp_priv); in stmmac_dvr_probe()
5088 phylink_destroy(priv->phylink); in stmmac_dvr_probe()
5090 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_probe()
5091 priv->hw->pcs != STMMAC_PCS_RTBI) in stmmac_dvr_probe()
5096 destroy_workqueue(priv->wq); in stmmac_dvr_probe()
5105 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
5113 netdev_info(priv->dev, "%s: removing driver", __func__); in stmmac_dvr_remove()
5117 if (priv->plat->serdes_powerdown) in stmmac_dvr_remove()
5118 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_dvr_remove()
5120 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_dvr_remove()
5126 phylink_destroy(priv->phylink); in stmmac_dvr_remove()
5127 if (priv->plat->stmmac_rst) in stmmac_dvr_remove()
5128 reset_control_assert(priv->plat->stmmac_rst); in stmmac_dvr_remove()
5129 clk_disable_unprepare(priv->plat->pclk); in stmmac_dvr_remove()
5130 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_dvr_remove()
5131 if (priv->hw->pcs != STMMAC_PCS_TBI && in stmmac_dvr_remove()
5132 priv->hw->pcs != STMMAC_PCS_RTBI) in stmmac_dvr_remove()
5134 destroy_workqueue(priv->wq); in stmmac_dvr_remove()
5135 mutex_destroy(&priv->lock); in stmmac_dvr_remove()
5142 * stmmac_suspend - suspend callback
5157 phylink_mac_change(priv->phylink, false); in stmmac_suspend()
5159 mutex_lock(&priv->lock); in stmmac_suspend()
5165 for (chan = 0; chan < priv->plat->tx_queues_to_use; chan++) in stmmac_suspend()
5166 del_timer_sync(&priv->tx_queue[chan].txtimer); in stmmac_suspend()
5168 if (priv->eee_enabled) { in stmmac_suspend()
5169 priv->tx_path_in_lpi_mode = false; in stmmac_suspend()
5170 del_timer_sync(&priv->eee_ctrl_timer); in stmmac_suspend()
5173 /* Stop TX/RX DMA */ in stmmac_suspend()
5176 if (priv->plat->serdes_powerdown) in stmmac_suspend()
5177 priv->plat->serdes_powerdown(ndev, priv->plat->bsp_priv); in stmmac_suspend()
5180 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_suspend()
5181 stmmac_pmt(priv, priv->hw, priv->wolopts); in stmmac_suspend()
5182 priv->irq_wake = 1; in stmmac_suspend()
5184 mutex_unlock(&priv->lock); in stmmac_suspend()
5186 if (device_may_wakeup(priv->device)) in stmmac_suspend()
5187 phylink_speed_down(priv->phylink, false); in stmmac_suspend()
5188 phylink_stop(priv->phylink); in stmmac_suspend()
5190 mutex_lock(&priv->lock); in stmmac_suspend()
5192 stmmac_mac_set(priv, priv->ioaddr, false); in stmmac_suspend()
5193 pinctrl_pm_select_sleep_state(priv->device); in stmmac_suspend()
5195 clk_disable_unprepare(priv->plat->clk_ptp_ref); in stmmac_suspend()
5196 clk_disable_unprepare(priv->plat->pclk); in stmmac_suspend()
5197 clk_disable_unprepare(priv->plat->stmmac_clk); in stmmac_suspend()
5199 mutex_unlock(&priv->lock); in stmmac_suspend()
5201 priv->speed = SPEED_UNKNOWN; in stmmac_suspend()
5207 * stmmac_reset_queues_param - reset queue parameters
5212 u32 rx_cnt = priv->plat->rx_queues_to_use; in stmmac_reset_queues_param()
5213 u32 tx_cnt = priv->plat->tx_queues_to_use; in stmmac_reset_queues_param()
5217 struct stmmac_rx_queue *rx_q = &priv->rx_queue[queue]; in stmmac_reset_queues_param()
5219 rx_q->cur_rx = 0; in stmmac_reset_queues_param()
5220 rx_q->dirty_rx = 0; in stmmac_reset_queues_param()
5224 struct stmmac_tx_queue *tx_q = &priv->tx_queue[queue]; in stmmac_reset_queues_param()
5226 tx_q->cur_tx = 0; in stmmac_reset_queues_param()
5227 tx_q->dirty_tx = 0; in stmmac_reset_queues_param()
5228 tx_q->mss = 0; in stmmac_reset_queues_param()
5233 * stmmac_resume - resume callback
5248 * automatically as soon as a magic packet or a Wake-up frame in stmmac_resume()
5253 if (device_may_wakeup(priv->device) && priv->plat->pmt) { in stmmac_resume()
5254 mutex_lock(&priv->lock); in stmmac_resume()
5255 stmmac_pmt(priv, priv->hw, 0); in stmmac_resume()
5256 mutex_unlock(&priv->lock); in stmmac_resume()
5257 priv->irq_wake = 0; in stmmac_resume()
5259 pinctrl_pm_select_default_state(priv->device); in stmmac_resume()
5261 clk_prepare_enable(priv->plat->stmmac_clk); in stmmac_resume()
5262 clk_prepare_enable(priv->plat->pclk); in stmmac_resume()
5263 if (priv->plat->clk_ptp_ref) in stmmac_resume()
5264 clk_prepare_enable(priv->plat->clk_ptp_ref); in stmmac_resume()
5266 if (priv->mii) in stmmac_resume()
5267 stmmac_mdio_reset(priv->mii); in stmmac_resume()
5270 if (priv->plat->serdes_powerup) { in stmmac_resume()
5271 ret = priv->plat->serdes_powerup(ndev, in stmmac_resume()
5272 priv->plat->bsp_priv); in stmmac_resume()
5278 if (!device_may_wakeup(priv->device) || !priv->plat->pmt) { in stmmac_resume()
5280 phylink_start(priv->phylink); in stmmac_resume()
5282 phylink_speed_up(priv->phylink); in stmmac_resume()
5287 mutex_lock(&priv->lock); in stmmac_resume()
5298 stmmac_restore_hw_vlan_rx_fltr(priv, ndev, priv->hw); in stmmac_resume()
5302 mutex_unlock(&priv->lock); in stmmac_resume()
5305 phylink_mac_change(priv->phylink, true); in stmmac_resume()
5319 return -EINVAL; in stmmac_cmdline_opt()
5353 pr_err("%s: ERROR broken module parameter conversion\n", __func__); in stmmac_cmdline_opt()
5354 return -EINVAL; in stmmac_cmdline_opt()
5383 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");