Lines Matching +full:eee +full:- +full:broken +full:- +full:100 +full:tx

1 // SPDX-License-Identifier: GPL-2.0-only
14 #include <linux/dma-mapping.h>
54 static int debug = -1;
73 * sxgbe_verify_args - verify the driver parameters.
86 if (!priv->tx_path_in_lpi_mode) in sxgbe_enable_eee_mode()
87 priv->hw->mac->set_eee_mode(priv->ioaddr); in sxgbe_enable_eee_mode()
92 /* Exit and disable EEE if we are in the LPI state. */ in sxgbe_disable_eee_mode()
93 priv->hw->mac->reset_eee_mode(priv->ioaddr); in sxgbe_disable_eee_mode()
94 del_timer_sync(&priv->eee_ctrl_timer); in sxgbe_disable_eee_mode()
95 priv->tx_path_in_lpi_mode = false; in sxgbe_disable_eee_mode()
110 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); in sxgbe_eee_ctrl_timer()
117 * If EEE support has been enabled while configuring the driver, and
118 * the GMAC actually supports EEE (from the HW cap reg) and the PHY can
119 * also manage EEE, then enable the LPI state and start the timer to
120 * verify whether the tx path can enter the LPI state.
124 struct net_device *ndev = priv->dev; in sxgbe_eee_init()
127 /* MAC core supports the EEE feature. */ in sxgbe_eee_init()
128 if (priv->hw_cap.eee) { in sxgbe_eee_init()
129 /* Check if the PHY supports EEE */ in sxgbe_eee_init()
130 if (phy_init_eee(ndev->phydev, true)) in sxgbe_eee_init()
133 priv->eee_active = 1; in sxgbe_eee_init()
134 timer_setup(&priv->eee_ctrl_timer, sxgbe_eee_ctrl_timer, 0); in sxgbe_eee_init()
135 priv->eee_ctrl_timer.expires = SXGBE_LPI_TIMER(eee_timer); in sxgbe_eee_init()
136 add_timer(&priv->eee_ctrl_timer); in sxgbe_eee_init()
138 priv->hw->mac->set_eee_timer(priv->ioaddr, in sxgbe_eee_init()
140 priv->tx_lpi_timer); in sxgbe_eee_init()
142 pr_info("Energy-Efficient Ethernet initialized\n"); in sxgbe_eee_init()
152 struct net_device *ndev = priv->dev; in sxgbe_eee_adjust()
154 /* When EEE has already been initialised we have to in sxgbe_eee_adjust()
158 if (priv->eee_enabled) in sxgbe_eee_adjust()
159 priv->hw->mac->set_eee_pls(priv->ioaddr, ndev->phydev->link); in sxgbe_eee_adjust()
163 * sxgbe_clk_csr_set - dynamically set the MDC clock
170 u32 clk_rate = clk_get_rate(priv->sxgbe_clk); in sxgbe_clk_csr_set()
176 priv->clk_csr = SXGBE_CSR_100_150M; in sxgbe_clk_csr_set()
178 priv->clk_csr = SXGBE_CSR_150_250M; in sxgbe_clk_csr_set()
180 priv->clk_csr = SXGBE_CSR_250_300M; in sxgbe_clk_csr_set()
182 priv->clk_csr = SXGBE_CSR_300_350M; in sxgbe_clk_csr_set()
184 priv->clk_csr = SXGBE_CSR_350_400M; in sxgbe_clk_csr_set()
186 priv->clk_csr = SXGBE_CSR_400_500M; in sxgbe_clk_csr_set()
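Only the clk_csr assignments inside sxgbe_clk_csr_set() were matched above; the frequency comparisons that pick each band are not shown. A minimal sketch of that selection, assuming plain Hz bounds inferred from the macro names rather than the driver's real bound constants:

#include <linux/clk.h>

static void example_clk_csr_set(struct sxgbe_priv_data *priv)
{
	u32 clk_rate = clk_get_rate(priv->sxgbe_clk);

	/* pick the MDC CSR band containing the CSR clock rate; the
	 * numeric bounds below are illustrative assumptions only
	 */
	if (clk_rate < 150000000)
		priv->clk_csr = SXGBE_CSR_100_150M;
	else if (clk_rate < 250000000)
		priv->clk_csr = SXGBE_CSR_150_250M;
	else if (clk_rate < 300000000)
		priv->clk_csr = SXGBE_CSR_250_300M;
	else if (clk_rate < 350000000)
		priv->clk_csr = SXGBE_CSR_300_350M;
	else if (clk_rate < 400000000)
		priv->clk_csr = SXGBE_CSR_350_400M;
	else
		priv->clk_csr = SXGBE_CSR_400_500M;
}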
189 /* minimum number of free TX descriptors required to wake up TX process */
190 #define SXGBE_TX_THRESH(x) (x->dma_tx_size/4)
194 return queue->dirty_tx + tx_qsize - queue->cur_tx - 1; in sxgbe_tx_avail()
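In the line above, cur_tx and dirty_tx are free-running producer/consumer counters, so the free-descriptor count is "reclaimed + ring size - queued - 1"; the -1 keeps one slot unused so a full ring can be told apart from an empty one. A small worked sketch, assuming a 512-descriptor ring:

static inline int example_tx_avail(struct sxgbe_tx_queue *queue, int tx_qsize)
{
	/* same arithmetic as sxgbe_tx_avail() above */
	return queue->dirty_tx + tx_qsize - queue->cur_tx - 1;
}

/* e.g. cur_tx = 600 queued, dirty_tx = 400 reclaimed, tx_qsize = 512:
 * 400 + 512 - 600 - 1 = 311 free descriptors; a stopped TX queue is
 * woken again once this count rises above SXGBE_TX_THRESH(), i.e. a
 * quarter of the ring (line 190).
 */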
205 struct phy_device *phydev = dev->phydev; in sxgbe_adjust_link()
212 /* SXGBE does not support auto-negotiation and in sxgbe_adjust_link()
216 if (phydev->link) { in sxgbe_adjust_link()
217 if (phydev->speed != priv->speed) { in sxgbe_adjust_link()
219 switch (phydev->speed) { in sxgbe_adjust_link()
232 phydev->speed); in sxgbe_adjust_link()
235 priv->speed = phydev->speed; in sxgbe_adjust_link()
236 priv->hw->mac->set_speed(priv->ioaddr, speed); in sxgbe_adjust_link()
239 if (!priv->oldlink) { in sxgbe_adjust_link()
241 priv->oldlink = 1; in sxgbe_adjust_link()
243 } else if (priv->oldlink) { in sxgbe_adjust_link()
245 priv->oldlink = 0; in sxgbe_adjust_link()
246 priv->speed = SPEED_UNKNOWN; in sxgbe_adjust_link()
252 /* Alter the MAC settings for EEE */ in sxgbe_adjust_link()
257 * sxgbe_init_phy - PHY initialization
270 int phy_iface = priv->plat->interface; in sxgbe_init_phy()
273 priv->oldlink = 0; in sxgbe_init_phy()
274 priv->speed = SPEED_UNKNOWN; in sxgbe_init_phy()
275 priv->oldduplex = DUPLEX_UNKNOWN; in sxgbe_init_phy()
277 if (priv->plat->phy_bus_name) in sxgbe_init_phy()
278 snprintf(bus_id, MII_BUS_ID_SIZE, "%s-%x", in sxgbe_init_phy()
279 priv->plat->phy_bus_name, priv->plat->bus_id); in sxgbe_init_phy()
281 snprintf(bus_id, MII_BUS_ID_SIZE, "sxgbe-%x", in sxgbe_init_phy()
282 priv->plat->bus_id); in sxgbe_init_phy()
285 priv->plat->phy_addr); in sxgbe_init_phy()
300 if (phydev->phy_id == 0) { in sxgbe_init_phy()
302 return -ENODEV; in sxgbe_init_phy()
306 __func__, phydev->phy_id, phydev->link); in sxgbe_init_phy()
314 * Description: this function is called to clear the tx and rx descriptors
320 unsigned int txsize = priv->dma_tx_size; in sxgbe_clear_descriptors()
321 unsigned int rxsize = priv->dma_rx_size; in sxgbe_clear_descriptors()
323 /* Clear the Rx/Tx descriptors */ in sxgbe_clear_descriptors()
326 priv->hw->desc->init_rx_desc(&priv->rxq[j]->dma_rx[i], in sxgbe_clear_descriptors()
327 priv->use_riwt, priv->mode, in sxgbe_clear_descriptors()
328 (i == rxsize - 1)); in sxgbe_clear_descriptors()
333 priv->hw->desc->init_tx_desc(&priv->txq[j]->dma_tx[i]); in sxgbe_clear_descriptors()
347 return -ENOMEM; in sxgbe_init_rx_buffers()
349 rx_ring->rx_skbuff[i] = skb; in sxgbe_init_rx_buffers()
350 rx_ring->rx_skbuff_dma[i] = dma_map_single(priv->device, skb->data, in sxgbe_init_rx_buffers()
353 if (dma_mapping_error(priv->device, rx_ring->rx_skbuff_dma[i])) { in sxgbe_init_rx_buffers()
356 return -EINVAL; in sxgbe_init_rx_buffers()
359 p->rdes23.rx_rd_des23.buf2_addr = rx_ring->rx_skbuff_dma[i]; in sxgbe_init_rx_buffers()
365 * sxgbe_free_rx_buffers - free what sxgbe_init_rx_buffers() allocated
381 kfree_skb(rx_ring->rx_skbuff[i]); in sxgbe_free_rx_buffers()
382 dma_unmap_single(priv->device, rx_ring->rx_skbuff_dma[i], in sxgbe_free_rx_buffers()
387 * init_tx_ring - init the TX descriptor ring
392 * Description: this function initializes the DMA TX descriptor
397 /* TX ring is not allocated */ in init_tx_ring()
399 dev_err(dev, "No memory for TX queue of SXGBE\n"); in init_tx_ring()
400 return -ENOMEM; in init_tx_ring()
403 /* allocate memory for TX descriptors */ in init_tx_ring()
404 tx_ring->dma_tx = dma_alloc_coherent(dev, in init_tx_ring()
406 &tx_ring->dma_tx_phy, GFP_KERNEL); in init_tx_ring()
407 if (!tx_ring->dma_tx) in init_tx_ring()
408 return -ENOMEM; in init_tx_ring()
410 /* allocate memory for TX skbuff array */ in init_tx_ring()
411 tx_ring->tx_skbuff_dma = devm_kcalloc(dev, tx_rsize, in init_tx_ring()
413 if (!tx_ring->tx_skbuff_dma) in init_tx_ring()
416 tx_ring->tx_skbuff = devm_kcalloc(dev, tx_rsize, in init_tx_ring()
419 if (!tx_ring->tx_skbuff) in init_tx_ring()
423 tx_ring->queue_no = queue_no; in init_tx_ring()
426 tx_ring->dirty_tx = 0; in init_tx_ring()
427 tx_ring->cur_tx = 0; in init_tx_ring()
433 tx_ring->dma_tx, tx_ring->dma_tx_phy); in init_tx_ring()
434 return -ENOMEM; in init_tx_ring()
438 * free_rx_ring - free the RX descriptor ring
448 rx_ring->dma_rx, rx_ring->dma_rx_phy); in free_rx_ring()
449 kfree(rx_ring->rx_skbuff_dma); in free_rx_ring()
450 kfree(rx_ring->rx_skbuff); in free_rx_ring()
454 * init_rx_ring - init the RX descriptor ring
470 bfsize = ALIGN(dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, 8); in init_rx_ring()
477 return -ENOMEM; in init_rx_ring()
481 rx_ring->queue_no = queue_no; in init_rx_ring()
484 rx_ring->dma_rx = dma_alloc_coherent(priv->device, in init_rx_ring()
486 &rx_ring->dma_rx_phy, GFP_KERNEL); in init_rx_ring()
488 if (rx_ring->dma_rx == NULL) in init_rx_ring()
489 return -ENOMEM; in init_rx_ring()
492 rx_ring->rx_skbuff_dma = kmalloc_array(rx_rsize, in init_rx_ring()
494 if (!rx_ring->rx_skbuff_dma) { in init_rx_ring()
495 ret = -ENOMEM; in init_rx_ring()
499 rx_ring->rx_skbuff = kmalloc_array(rx_rsize, in init_rx_ring()
501 if (!rx_ring->rx_skbuff) { in init_rx_ring()
502 ret = -ENOMEM; in init_rx_ring()
509 p = rx_ring->dma_rx + desc_index; in init_rx_ring()
517 rx_ring->cur_rx = 0; in init_rx_ring()
518 rx_ring->dirty_rx = (unsigned int)(desc_index - rx_rsize); in init_rx_ring()
519 priv->dma_buf_sz = bfsize; in init_rx_ring()
524 while (--desc_index >= 0) { in init_rx_ring()
527 p = rx_ring->dma_rx + desc_index; in init_rx_ring()
530 kfree(rx_ring->rx_skbuff); in init_rx_ring()
532 kfree(rx_ring->rx_skbuff_dma); in init_rx_ring()
534 dma_free_coherent(priv->device, in init_rx_ring()
536 rx_ring->dma_rx, rx_ring->dma_rx_phy); in init_rx_ring()
541 * free_tx_ring - free the TX descriptor ring
545 * Description: this function frees the DMA TX descriptor ring in free_tx_ring()
551 tx_ring->dma_tx, tx_ring->dma_tx_phy); in free_tx_ring()
555 * init_dma_desc_rings - init the RX/TX descriptor rings
557 * Description: this function initializes the DMA RX/TX descriptors
565 int tx_rsize = priv->dma_tx_size; in init_dma_desc_rings()
566 int rx_rsize = priv->dma_rx_size; in init_dma_desc_rings()
568 /* Allocate memory for queue structures and TX descs */ in init_dma_desc_rings()
570 ret = init_tx_ring(priv->device, queue_num, in init_dma_desc_rings()
571 priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
573 dev_err(&netd->dev, "TX DMA ring allocation failed!\n"); in init_dma_desc_rings()
578 * pointer is needed during cleaning the TX queue in init_dma_desc_rings()
580 priv->txq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
586 priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
593 * pointer is needed during cleaning the RX queue in init_dma_desc_rings()
595 priv->rxq[queue_num]->priv_ptr = priv; in init_dma_desc_rings()
603 while (queue_num--) in init_dma_desc_rings()
604 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in init_dma_desc_rings()
608 while (queue_num--) in init_dma_desc_rings()
609 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); in init_dma_desc_rings()
616 struct sxgbe_priv_data *priv = txqueue->priv_ptr; in tx_free_ring_skbufs()
617 int tx_rsize = priv->dma_tx_size; in tx_free_ring_skbufs()
620 struct sxgbe_tx_norm_desc *tdesc = txqueue->dma_tx + dma_desc; in tx_free_ring_skbufs()
622 if (txqueue->tx_skbuff_dma[dma_desc]) in tx_free_ring_skbufs()
623 dma_unmap_single(priv->device, in tx_free_ring_skbufs()
624 txqueue->tx_skbuff_dma[dma_desc], in tx_free_ring_skbufs()
625 priv->hw->desc->get_tx_len(tdesc), in tx_free_ring_skbufs()
628 dev_kfree_skb_any(txqueue->tx_skbuff[dma_desc]); in tx_free_ring_skbufs()
629 txqueue->tx_skbuff[dma_desc] = NULL; in tx_free_ring_skbufs()
630 txqueue->tx_skbuff_dma[dma_desc] = 0; in tx_free_ring_skbufs()
640 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in dma_free_tx_skbufs()
648 int tx_rsize = priv->dma_tx_size; in free_dma_desc_resources()
649 int rx_rsize = priv->dma_rx_size; in free_dma_desc_resources()
651 /* Release the DMA TX buffers */ in free_dma_desc_resources()
654 /* Release the TX ring memory also */ in free_dma_desc_resources()
656 free_tx_ring(priv->device, priv->txq[queue_num], tx_rsize); in free_dma_desc_resources()
661 free_rx_ring(priv->device, priv->rxq[queue_num], rx_rsize); in free_dma_desc_resources()
670 priv->txq[queue_num] = devm_kmalloc(priv->device, in txring_mem_alloc()
672 if (!priv->txq[queue_num]) in txring_mem_alloc()
673 return -ENOMEM; in txring_mem_alloc()
684 priv->rxq[queue_num] = devm_kmalloc(priv->device, in rxring_mem_alloc()
686 if (!priv->rxq[queue_num]) in rxring_mem_alloc()
687 return -ENOMEM; in rxring_mem_alloc()
694 * sxgbe_mtl_operation_mode - HW MTL operation mode
696 * Description: it sets the MTL operation mode: tx/rx MTL thresholds
697 * or Store-And-Forward capability.
703 /* TX/RX threshold control */ in sxgbe_mtl_operation_mode()
704 if (likely(priv->plat->force_sf_dma_mode)) { in sxgbe_mtl_operation_mode()
705 /* set TC mode for TX QUEUES */ in sxgbe_mtl_operation_mode()
706 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
707 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
709 priv->tx_tc = SXGBE_MTL_SFMODE; in sxgbe_mtl_operation_mode()
712 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
713 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
715 priv->rx_tc = SXGBE_MTL_SFMODE; in sxgbe_mtl_operation_mode()
716 } else if (unlikely(priv->plat->force_thresh_dma_mode)) { in sxgbe_mtl_operation_mode()
717 /* set TC mode for TX QUEUES */ in sxgbe_mtl_operation_mode()
718 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.tx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
719 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
720 priv->tx_tc); in sxgbe_mtl_operation_mode()
722 SXGBE_FOR_EACH_QUEUE(priv->hw_cap.rx_mtl_queues, queue_num) in sxgbe_mtl_operation_mode()
723 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, queue_num, in sxgbe_mtl_operation_mode()
724 priv->rx_tc); in sxgbe_mtl_operation_mode()
726 pr_err("ERROR: %s: Invalid TX threshold mode\n", __func__); in sxgbe_mtl_operation_mode()
737 struct sxgbe_priv_data *priv = tqueue->priv_ptr; in sxgbe_tx_queue_clean()
738 unsigned int tx_rsize = priv->dma_tx_size; in sxgbe_tx_queue_clean()
740 u8 queue_no = tqueue->queue_no; in sxgbe_tx_queue_clean()
742 dev_txq = netdev_get_tx_queue(priv->dev, queue_no); in sxgbe_tx_queue_clean()
746 priv->xstats.tx_clean++; in sxgbe_tx_queue_clean()
747 while (tqueue->dirty_tx != tqueue->cur_tx) { in sxgbe_tx_queue_clean()
748 unsigned int entry = tqueue->dirty_tx % tx_rsize; in sxgbe_tx_queue_clean()
749 struct sk_buff *skb = tqueue->tx_skbuff[entry]; in sxgbe_tx_queue_clean()
752 p = tqueue->dma_tx + entry; in sxgbe_tx_queue_clean()
755 if (priv->hw->desc->get_tx_owner(p)) in sxgbe_tx_queue_clean()
760 __func__, tqueue->cur_tx, tqueue->dirty_tx); in sxgbe_tx_queue_clean()
762 if (likely(tqueue->tx_skbuff_dma[entry])) { in sxgbe_tx_queue_clean()
763 dma_unmap_single(priv->device, in sxgbe_tx_queue_clean()
764 tqueue->tx_skbuff_dma[entry], in sxgbe_tx_queue_clean()
765 priv->hw->desc->get_tx_len(p), in sxgbe_tx_queue_clean()
767 tqueue->tx_skbuff_dma[entry] = 0; in sxgbe_tx_queue_clean()
772 tqueue->tx_skbuff[entry] = NULL; in sxgbe_tx_queue_clean()
775 priv->hw->desc->release_tx_desc(p); in sxgbe_tx_queue_clean()
777 tqueue->dirty_tx++; in sxgbe_tx_queue_clean()
801 struct sxgbe_tx_queue *tqueue = priv->txq[queue_num]; in sxgbe_tx_all_clean()
806 if ((priv->eee_enabled) && (!priv->tx_path_in_lpi_mode)) { in sxgbe_tx_all_clean()
808 mod_timer(&priv->eee_ctrl_timer, SXGBE_LPI_TIMER(eee_timer)); in sxgbe_tx_all_clean()
813 * sxgbe_restart_tx_queue: irq tx error management function
821 struct sxgbe_tx_queue *tx_ring = priv->txq[queue_num]; in sxgbe_restart_tx_queue()
822 struct netdev_queue *dev_txq = netdev_get_tx_queue(priv->dev, in sxgbe_restart_tx_queue()
828 /* stop the tx dma */ in sxgbe_restart_tx_queue()
829 priv->hw->dma->stop_tx_queue(priv->ioaddr, queue_num); in sxgbe_restart_tx_queue()
835 tx_ring->cur_tx = 0; in sxgbe_restart_tx_queue()
836 tx_ring->dirty_tx = 0; in sxgbe_restart_tx_queue()
838 /* start the tx dma */ in sxgbe_restart_tx_queue()
839 priv->hw->dma->start_tx_queue(priv->ioaddr, queue_num); in sxgbe_restart_tx_queue()
841 priv->dev->stats.tx_errors++; in sxgbe_restart_tx_queue()
848 * sxgbe_reset_all_tx_queues: irq tx error management function
857 /* On TX timeout of net device, resetting of all queues in sxgbe_reset_all_tx_queues()
871 * platform and necessary for old MAC10/100 and GMAC chips.
876 struct sxgbe_hw_features *features = &priv->hw_cap; in sxgbe_get_hw_features()
879 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 0); in sxgbe_get_hw_features()
881 features->pmt_remote_wake_up = in sxgbe_get_hw_features()
883 features->pmt_magic_frame = SXGBE_HW_FEAT_PMT_MAGIC_PKT(rval); in sxgbe_get_hw_features()
884 features->atime_stamp = SXGBE_HW_FEAT_IEEE1500_2008(rval); in sxgbe_get_hw_features()
885 features->tx_csum_offload = in sxgbe_get_hw_features()
887 features->rx_csum_offload = in sxgbe_get_hw_features()
889 features->multi_macaddr = SXGBE_HW_FEAT_MACADDR_COUNT(rval); in sxgbe_get_hw_features()
890 features->tstamp_srcselect = SXGBE_HW_FEAT_TSTMAP_SRC(rval); in sxgbe_get_hw_features()
891 features->sa_vlan_insert = SXGBE_HW_FEAT_SRCADDR_VLAN(rval); in sxgbe_get_hw_features()
892 features->eee = SXGBE_HW_FEAT_EEE(rval); in sxgbe_get_hw_features()
896 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 1); in sxgbe_get_hw_features()
898 features->rxfifo_size = SXGBE_HW_FEAT_RX_FIFO_SIZE(rval); in sxgbe_get_hw_features()
899 features->txfifo_size = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); in sxgbe_get_hw_features()
900 features->atstmap_hword = SXGBE_HW_FEAT_TX_FIFO_SIZE(rval); in sxgbe_get_hw_features()
901 features->dcb_enable = SXGBE_HW_FEAT_DCB(rval); in sxgbe_get_hw_features()
902 features->splithead_enable = SXGBE_HW_FEAT_SPLIT_HDR(rval); in sxgbe_get_hw_features()
903 features->tcpseg_offload = SXGBE_HW_FEAT_TSO(rval); in sxgbe_get_hw_features()
904 features->debug_mem = SXGBE_HW_FEAT_DEBUG_MEM_IFACE(rval); in sxgbe_get_hw_features()
905 features->rss_enable = SXGBE_HW_FEAT_RSS(rval); in sxgbe_get_hw_features()
906 features->hash_tsize = SXGBE_HW_FEAT_HASH_TABLE_SIZE(rval); in sxgbe_get_hw_features()
907 features->l3l4_filer_size = SXGBE_HW_FEAT_L3L4_FILTER_NUM(rval); in sxgbe_get_hw_features()
911 rval = priv->hw->mac->get_hw_feature(priv->ioaddr, 2); in sxgbe_get_hw_features()
913 features->rx_mtl_queues = SXGBE_HW_FEAT_RX_MTL_QUEUES(rval); in sxgbe_get_hw_features()
914 features->tx_mtl_queues = SXGBE_HW_FEAT_TX_MTL_QUEUES(rval); in sxgbe_get_hw_features()
915 features->rx_dma_channels = SXGBE_HW_FEAT_RX_DMA_CHANNELS(rval); in sxgbe_get_hw_features()
916 features->tx_dma_channels = SXGBE_HW_FEAT_TX_DMA_CHANNELS(rval); in sxgbe_get_hw_features()
917 features->pps_output_count = SXGBE_HW_FEAT_PPS_OUTPUTS(rval); in sxgbe_get_hw_features()
918 features->aux_input_count = SXGBE_HW_FEAT_AUX_SNAPSHOTS(rval); in sxgbe_get_hw_features()
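Each SXGBE_HW_FEAT_* helper used above is a mask-and-shift extractor over one of the three capability words read via get_hw_feature(priv->ioaddr, 0/1/2). A minimal illustration of that pattern with a hypothetical bit position (the real field layout comes from the hardware and the driver's register header):

#include <linux/bits.h>

/* hypothetical example: EEE capability reported in bit 13 of word 0 */
#define EXAMPLE_HW_FEAT_EEE(cap)	(((cap) & BIT(13)) >> 13)

/* usage mirrors the driver: features->eee = EXAMPLE_HW_FEAT_EEE(rval); */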
933 if (!is_valid_ether_addr(priv->dev->dev_addr)) { in sxgbe_check_ether_addr()
936 priv->hw->mac->get_umac_addr((void __iomem *) in sxgbe_check_ether_addr()
937 priv->ioaddr, addr, 0); in sxgbe_check_ether_addr()
939 eth_hw_addr_set(priv->dev, addr); in sxgbe_check_ether_addr()
941 eth_hw_addr_random(priv->dev); in sxgbe_check_ether_addr()
943 dev_info(priv->device, "device MAC address %pM\n", in sxgbe_check_ether_addr()
944 priv->dev->dev_addr); in sxgbe_check_ether_addr()
960 if (priv->plat->dma_cfg) { in sxgbe_init_dma_engine()
961 pbl = priv->plat->dma_cfg->pbl; in sxgbe_init_dma_engine()
962 fixed_burst = priv->plat->dma_cfg->fixed_burst; in sxgbe_init_dma_engine()
963 burst_map = priv->plat->dma_cfg->burst_map; in sxgbe_init_dma_engine()
967 priv->hw->dma->cha_init(priv->ioaddr, queue_num, in sxgbe_init_dma_engine()
969 (priv->txq[queue_num])->dma_tx_phy, in sxgbe_init_dma_engine()
970 (priv->rxq[queue_num])->dma_rx_phy, in sxgbe_init_dma_engine()
971 priv->dma_tx_size, priv->dma_rx_size); in sxgbe_init_dma_engine()
973 return priv->hw->dma->init(priv->ioaddr, fixed_burst, burst_map); in sxgbe_init_dma_engine()
987 priv->hw->mtl->mtl_set_txfifosize(priv->ioaddr, queue_num, in sxgbe_init_mtl_engine()
988 priv->hw_cap.tx_mtl_qsize); in sxgbe_init_mtl_engine()
989 priv->hw->mtl->mtl_enable_txqueue(priv->ioaddr, queue_num); in sxgbe_init_mtl_engine()
1004 priv->hw->mtl->mtl_disable_txqueue(priv->ioaddr, queue_num); in sxgbe_disable_mtl_engine()
1009 * sxgbe_tx_timer: mitigation sw timer for tx.
1021 * sxgbe_tx_init_coalesce: init tx mitigation options.
1033 struct sxgbe_tx_queue *p = priv->txq[queue_num]; in sxgbe_tx_init_coalesce()
1034 p->tx_coal_frames = SXGBE_TX_FRAMES; in sxgbe_tx_init_coalesce()
1035 p->tx_coal_timer = SXGBE_COAL_TX_TIMER; in sxgbe_tx_init_coalesce()
1036 timer_setup(&p->txtimer, sxgbe_tx_timer, 0); in sxgbe_tx_init_coalesce()
1037 p->txtimer.expires = SXGBE_COAL_TIMER(p->tx_coal_timer); in sxgbe_tx_init_coalesce()
1038 add_timer(&p->txtimer); in sxgbe_tx_init_coalesce()
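The per-queue timer armed above fires sxgbe_tx_timer() (kernel-doc fragment at line 1009). Its body is not among the matched lines, but given the timer_setup() call it is presumably a thin wrapper that recovers the queue from the timer and runs the TX clean, roughly:

static void sxgbe_tx_timer(struct timer_list *t)
{
	struct sxgbe_tx_queue *p = from_timer(p, t, txtimer);

	/* reclaim completed descriptors for this queue */
	sxgbe_tx_queue_clean(p);
}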
1047 struct sxgbe_tx_queue *p = priv->txq[queue_num]; in sxgbe_tx_del_timer()
1048 del_timer_sync(&p->txtimer); in sxgbe_tx_del_timer()
1053 * sxgbe_open - open entry point of the driver
1058 * 0 on success and an appropriate negative error code as defined in errno.h
1066 clk_prepare_enable(priv->sxgbe_clk); in sxgbe_open()
1078 /* Create and initialize the TX/RX descriptors chains. */ in sxgbe_open()
1079 priv->dma_tx_size = SXGBE_ALIGN(DMA_TX_SIZE); in sxgbe_open()
1080 priv->dma_rx_size = SXGBE_ALIGN(DMA_RX_SIZE); in sxgbe_open()
1081 priv->dma_buf_sz = SXGBE_ALIGN(DMA_BUFFER_SIZE); in sxgbe_open()
1082 priv->tx_tc = TC_DEFAULT; in sxgbe_open()
1083 priv->rx_tc = TC_DEFAULT; in sxgbe_open()
1097 priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0); in sxgbe_open()
1100 priv->hw->mac->core_init(priv->ioaddr); in sxgbe_open()
1102 priv->hw->mac->enable_rxqueue(priv->ioaddr, queue_num); in sxgbe_open()
1106 ret = devm_request_irq(priv->device, priv->irq, sxgbe_common_interrupt, in sxgbe_open()
1107 IRQF_SHARED, dev->name, dev); in sxgbe_open()
1110 __func__, priv->irq, ret); in sxgbe_open()
1117 if (priv->lpi_irq != dev->irq) { in sxgbe_open()
1118 ret = devm_request_irq(priv->device, priv->lpi_irq, in sxgbe_open()
1120 IRQF_SHARED, dev->name, dev); in sxgbe_open()
1123 __func__, priv->lpi_irq, ret); in sxgbe_open()
1128 /* Request TX DMA irq lines */ in sxgbe_open()
1130 ret = devm_request_irq(priv->device, in sxgbe_open()
1131 (priv->txq[queue_num])->irq_no, in sxgbe_open()
1133 dev->name, priv->txq[queue_num]); in sxgbe_open()
1135 netdev_err(dev, "%s: ERROR: allocating TX IRQ %d (error: %d)\n", in sxgbe_open()
1136 __func__, priv->irq, ret); in sxgbe_open()
1143 ret = devm_request_irq(priv->device, in sxgbe_open()
1144 (priv->rxq[queue_num])->irq_no, in sxgbe_open()
1146 dev->name, priv->rxq[queue_num]); in sxgbe_open()
1148 netdev_err(dev, "%s: ERROR: allocating RX IRQ %d (error: %d)\n", in sxgbe_open()
1149 __func__, priv->irq, ret); in sxgbe_open()
1154 /* Enable the MAC Rx/Tx */ in sxgbe_open()
1155 priv->hw->mac->enable_tx(priv->ioaddr, true); in sxgbe_open()
1156 priv->hw->mac->enable_rx(priv->ioaddr, true); in sxgbe_open()
1162 memset(&priv->xstats, 0, sizeof(struct sxgbe_extra_stats)); in sxgbe_open()
1164 priv->xstats.tx_threshold = priv->tx_tc; in sxgbe_open()
1165 priv->xstats.rx_threshold = priv->rx_tc; in sxgbe_open()
1168 netdev_dbg(dev, "DMA RX/TX processes started...\n"); in sxgbe_open()
1169 priv->hw->dma->start_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_open()
1170 priv->hw->dma->start_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_open()
1172 if (dev->phydev) in sxgbe_open()
1173 phy_start(dev->phydev); in sxgbe_open()
1175 /* initialise TX coalesce parameters */ in sxgbe_open()
1178 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { in sxgbe_open()
1179 priv->rx_riwt = SXGBE_MAX_DMA_RIWT; in sxgbe_open()
1180 priv->hw->dma->rx_watchdog(priv->ioaddr, SXGBE_MAX_DMA_RIWT); in sxgbe_open()
1183 priv->tx_lpi_timer = SXGBE_DEFAULT_LPI_TIMER; in sxgbe_open()
1184 priv->eee_enabled = sxgbe_eee_init(priv); in sxgbe_open()
1186 napi_enable(&priv->napi); in sxgbe_open()
1193 if (dev->phydev) in sxgbe_open()
1194 phy_disconnect(dev->phydev); in sxgbe_open()
1196 clk_disable_unprepare(priv->sxgbe_clk); in sxgbe_open()
1202 * sxgbe_release - close entry point of the driver
1211 if (priv->eee_enabled) in sxgbe_release()
1212 del_timer_sync(&priv->eee_ctrl_timer); in sxgbe_release()
1215 if (dev->phydev) { in sxgbe_release()
1216 phy_stop(dev->phydev); in sxgbe_release()
1217 phy_disconnect(dev->phydev); in sxgbe_release()
1222 napi_disable(&priv->napi); in sxgbe_release()
1224 /* delete TX timers */ in sxgbe_release()
1227 /* Stop TX/RX DMA and clear the descriptors */ in sxgbe_release()
1228 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_release()
1229 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_release()
1234 /* Release and free the Rx/Tx resources */ in sxgbe_release()
1237 /* Disable the MAC Rx/Tx */ in sxgbe_release()
1238 priv->hw->mac->enable_tx(priv->ioaddr, false); in sxgbe_release()
1239 priv->hw->mac->enable_rx(priv->ioaddr, false); in sxgbe_release()
1241 clk_disable_unprepare(priv->sxgbe_clk); in sxgbe_release()
1245 /* Prepare first Tx descriptor for doing TSO operation */
1252 /* Write first Tx descriptor with appropriate value */ in sxgbe_tso_prepare()
1256 first_desc->tdes01 = dma_map_single(priv->device, skb->data, in sxgbe_tso_prepare()
1258 if (dma_mapping_error(priv->device, first_desc->tdes01)) in sxgbe_tso_prepare()
1259 pr_err("%s: TX dma mapping failed!!\n", __func__); in sxgbe_tso_prepare()
1261 first_desc->tdes23.tx_rd_des23.first_desc = 1; in sxgbe_tso_prepare()
1262 priv->hw->desc->tx_desc_enable_tse(first_desc, 1, total_hdr_len, in sxgbe_tso_prepare()
1264 skb->len - total_hdr_len); in sxgbe_tso_prepare()
1268 * sxgbe_xmit: Tx entry point of the driver
1271 * Description: this is the tx entry point of the driver.
1282 unsigned int tx_rsize = priv->dma_tx_size; in sxgbe_xmit()
1283 struct sxgbe_tx_queue *tqueue = priv->txq[txq_index]; in sxgbe_xmit()
1286 int nr_frags = skb_shinfo(skb)->nr_frags; in sxgbe_xmit()
1289 u16 cur_mss = skb_shinfo(skb)->gso_size; in sxgbe_xmit()
1292 /* get the TX queue handle */ in sxgbe_xmit()
1295 if (unlikely(skb_is_gso(skb) && tqueue->prev_mss != cur_mss)) in sxgbe_xmit()
1299 ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in sxgbe_xmit()
1300 tqueue->hwts_tx_en))) in sxgbe_xmit()
1303 if (priv->tx_path_in_lpi_mode) in sxgbe_xmit()
1309 netdev_err(dev, "%s: Tx Ring is full when queue %d is awake\n", in sxgbe_xmit()
1315 entry = tqueue->cur_tx % tx_rsize; in sxgbe_xmit()
1316 tx_desc = tqueue->dma_tx + entry; in sxgbe_xmit()
1323 tqueue->tx_skbuff[entry] = skb; in sxgbe_xmit()
1328 if (unlikely(tqueue->prev_mss != cur_mss)) { in sxgbe_xmit()
1329 priv->hw->desc->tx_ctxt_desc_set_mss( in sxgbe_xmit()
1331 priv->hw->desc->tx_ctxt_desc_set_tcmssv( in sxgbe_xmit()
1333 priv->hw->desc->tx_ctxt_desc_reset_ostc( in sxgbe_xmit()
1335 priv->hw->desc->tx_ctxt_desc_set_ctxt( in sxgbe_xmit()
1337 priv->hw->desc->tx_ctxt_desc_set_owner( in sxgbe_xmit()
1340 entry = (++tqueue->cur_tx) % tx_rsize; in sxgbe_xmit()
1341 first_desc = tqueue->dma_tx + entry; in sxgbe_xmit()
1343 tqueue->prev_mss = cur_mss; in sxgbe_xmit()
1347 tx_desc->tdes01 = dma_map_single(priv->device, in sxgbe_xmit()
1348 skb->data, no_pagedlen, DMA_TO_DEVICE); in sxgbe_xmit()
1349 if (dma_mapping_error(priv->device, tx_desc->tdes01)) in sxgbe_xmit()
1350 netdev_err(dev, "%s: TX dma mapping failed!!\n", in sxgbe_xmit()
1353 priv->hw->desc->prepare_tx_desc(tx_desc, 1, no_pagedlen, in sxgbe_xmit()
1359 const skb_frag_t *frag = &skb_shinfo(skb)->frags[frag_num]; in sxgbe_xmit()
1362 entry = (++tqueue->cur_tx) % tx_rsize; in sxgbe_xmit()
1363 tx_desc = tqueue->dma_tx + entry; in sxgbe_xmit()
1364 tx_desc->tdes01 = skb_frag_dma_map(priv->device, frag, 0, len, in sxgbe_xmit()
1367 tqueue->tx_skbuff_dma[entry] = tx_desc->tdes01; in sxgbe_xmit()
1368 tqueue->tx_skbuff[entry] = NULL; in sxgbe_xmit()
1371 priv->hw->desc->prepare_tx_desc(tx_desc, 0, len, in sxgbe_xmit()
1377 priv->hw->desc->set_tx_owner(tx_desc); in sxgbe_xmit()
1381 priv->hw->desc->close_tx_desc(tx_desc); in sxgbe_xmit()
1386 tqueue->tx_count_frames += nr_frags + 1; in sxgbe_xmit()
1387 if (tqueue->tx_count_frames > tqueue->tx_coal_frames) { in sxgbe_xmit()
1388 priv->hw->desc->clear_tx_ic(tx_desc); in sxgbe_xmit()
1389 priv->xstats.tx_reset_ic_bit++; in sxgbe_xmit()
1390 mod_timer(&tqueue->txtimer, in sxgbe_xmit()
1391 SXGBE_COAL_TIMER(tqueue->tx_coal_timer)); in sxgbe_xmit()
1393 tqueue->tx_count_frames = 0; in sxgbe_xmit()
1397 priv->hw->desc->set_tx_owner(first_desc); in sxgbe_xmit()
1402 tqueue->cur_tx++; in sxgbe_xmit()
1406 __func__, tqueue->cur_tx % tx_rsize, in sxgbe_xmit()
1407 tqueue->dirty_tx % tx_rsize, entry, in sxgbe_xmit()
1416 dev->stats.tx_bytes += skb->len; in sxgbe_xmit()
1418 if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && in sxgbe_xmit()
1419 tqueue->hwts_tx_en)) { in sxgbe_xmit()
1421 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in sxgbe_xmit()
1422 priv->hw->desc->tx_enable_tstamp(first_desc); in sxgbe_xmit()
1427 priv->hw->dma->enable_dma_transmission(priv->ioaddr, txq_index); in sxgbe_xmit()
1436 * that is based on zero-copy.
1440 unsigned int rxsize = priv->dma_rx_size; in sxgbe_rx_refill()
1441 int bfsize = priv->dma_buf_sz; in sxgbe_rx_refill()
1442 u8 qnum = priv->cur_rx_qnum; in sxgbe_rx_refill()
1444 for (; priv->rxq[qnum]->cur_rx - priv->rxq[qnum]->dirty_rx > 0; in sxgbe_rx_refill()
1445 priv->rxq[qnum]->dirty_rx++) { in sxgbe_rx_refill()
1446 unsigned int entry = priv->rxq[qnum]->dirty_rx % rxsize; in sxgbe_rx_refill()
1449 p = priv->rxq[qnum]->dma_rx + entry; in sxgbe_rx_refill()
1451 if (likely(priv->rxq[qnum]->rx_skbuff[entry] == NULL)) { in sxgbe_rx_refill()
1454 skb = netdev_alloc_skb_ip_align(priv->dev, bfsize); in sxgbe_rx_refill()
1459 priv->rxq[qnum]->rx_skbuff[entry] = skb; in sxgbe_rx_refill()
1460 priv->rxq[qnum]->rx_skbuff_dma[entry] = in sxgbe_rx_refill()
1461 dma_map_single(priv->device, skb->data, bfsize, in sxgbe_rx_refill()
1464 p->rdes23.rx_rd_des23.buf2_addr = in sxgbe_rx_refill()
1465 priv->rxq[qnum]->rx_skbuff_dma[entry]; in sxgbe_rx_refill()
1470 priv->hw->desc->set_rx_owner(p); in sxgbe_rx_refill()
1471 priv->hw->desc->set_rx_int_on_com(p); in sxgbe_rx_refill()
1486 u8 qnum = priv->cur_rx_qnum; in sxgbe_rx()
1487 unsigned int rxsize = priv->dma_rx_size; in sxgbe_rx()
1488 unsigned int entry = priv->rxq[qnum]->cur_rx; in sxgbe_rx()
1499 p = priv->rxq[qnum]->dma_rx + entry; in sxgbe_rx()
1501 if (priv->hw->desc->get_rx_owner(p)) in sxgbe_rx()
1506 next_entry = (++priv->rxq[qnum]->cur_rx) % rxsize; in sxgbe_rx()
1507 prefetch(priv->rxq[qnum]->dma_rx + next_entry); in sxgbe_rx()
1513 status = priv->hw->desc->rx_wbstatus(p, &priv->xstats, in sxgbe_rx()
1519 if (unlikely(!priv->rxcsum_insertion)) in sxgbe_rx()
1522 skb = priv->rxq[qnum]->rx_skbuff[entry]; in sxgbe_rx()
1525 netdev_err(priv->dev, "rx descriptor is not consistent\n"); in sxgbe_rx()
1527 prefetch(skb->data - NET_IP_ALIGN); in sxgbe_rx()
1528 priv->rxq[qnum]->rx_skbuff[entry] = NULL; in sxgbe_rx()
1530 frame_len = priv->hw->desc->get_rx_frame_len(p); in sxgbe_rx()
1534 skb->ip_summed = checksum; in sxgbe_rx()
1538 napi_gro_receive(&priv->napi, skb); in sxgbe_rx()
1549 * sxgbe_poll - sxgbe poll method (NAPI)
1554 * To process the incoming frames and clean the tx resources.
1561 u8 qnum = priv->cur_rx_qnum; in sxgbe_poll()
1563 priv->xstats.napi_poll++; in sxgbe_poll()
1564 /* first, clean the tx queues */ in sxgbe_poll()
1570 priv->hw->dma->enable_dma_irq(priv->ioaddr, qnum); in sxgbe_poll()
1593 * sxgbe_common_interrupt - main ISR
1606 status = priv->hw->mac->host_irq_status(priv->ioaddr, &priv->xstats); in sxgbe_common_interrupt()
1607 /* For LPI we need to save the tx status */ in sxgbe_common_interrupt()
1609 priv->xstats.tx_lpi_entry_n++; in sxgbe_common_interrupt()
1610 priv->tx_path_in_lpi_mode = true; in sxgbe_common_interrupt()
1613 priv->xstats.tx_lpi_exit_n++; in sxgbe_common_interrupt()
1614 priv->tx_path_in_lpi_mode = false; in sxgbe_common_interrupt()
1617 priv->xstats.rx_lpi_entry_n++; in sxgbe_common_interrupt()
1619 priv->xstats.rx_lpi_exit_n++; in sxgbe_common_interrupt()
1625 * sxgbe_tx_interrupt - TX DMA ISR
1628 * Description: this is the tx dma interrupt service routine.
1634 struct sxgbe_priv_data *priv = txq->priv_ptr; in sxgbe_tx_interrupt()
1637 status = priv->hw->dma->tx_dma_int_status(priv->ioaddr, txq->queue_no, in sxgbe_tx_interrupt()
1638 &priv->xstats); in sxgbe_tx_interrupt()
1641 napi_schedule(&priv->napi); in sxgbe_tx_interrupt()
1645 sxgbe_restart_tx_queue(priv, txq->queue_no); in sxgbe_tx_interrupt()
1649 (priv->tx_tc != SXGBE_MTL_SFMODE) && in sxgbe_tx_interrupt()
1650 (priv->tx_tc < 512))) { in sxgbe_tx_interrupt()
1651 /* TX TC steps by 32 up to 128, then by 64 (e.g. 32, 64, 96, 128, 192, ...) */ in sxgbe_tx_interrupt()
1652 priv->tx_tc += (priv->tx_tc < 128) ? 32 : 64; in sxgbe_tx_interrupt()
1653 priv->hw->mtl->set_tx_mtl_mode(priv->ioaddr, in sxgbe_tx_interrupt()
1654 txq->queue_no, priv->tx_tc); in sxgbe_tx_interrupt()
1655 priv->xstats.tx_threshold = priv->tx_tc; in sxgbe_tx_interrupt()
1662 * sxgbe_rx_interrupt - RX DMA ISR
1671 struct sxgbe_priv_data *priv = rxq->priv_ptr; in sxgbe_rx_interrupt()
1674 status = priv->hw->dma->rx_dma_int_status(priv->ioaddr, rxq->queue_no, in sxgbe_rx_interrupt()
1675 &priv->xstats); in sxgbe_rx_interrupt()
1677 if (likely((status & handle_rx) && (napi_schedule_prep(&priv->napi)))) { in sxgbe_rx_interrupt()
1678 priv->hw->dma->disable_dma_irq(priv->ioaddr, rxq->queue_no); in sxgbe_rx_interrupt()
1679 __napi_schedule(&priv->napi); in sxgbe_rx_interrupt()
1684 (priv->rx_tc != SXGBE_MTL_SFMODE) && in sxgbe_rx_interrupt()
1685 (priv->rx_tc < 128))) { in sxgbe_rx_interrupt()
1687 priv->rx_tc += 32; in sxgbe_rx_interrupt()
1688 priv->hw->mtl->set_rx_mtl_mode(priv->ioaddr, in sxgbe_rx_interrupt()
1689 rxq->queue_no, priv->rx_tc); in sxgbe_rx_interrupt()
1690 priv->xstats.rx_threshold = priv->rx_tc; in sxgbe_rx_interrupt()
1706 /* sxgbe_get_stats64 - entry point to get statistical information about the device
1718 void __iomem *ioaddr = priv->ioaddr; in sxgbe_get_stats64()
1721 spin_lock(&priv->stats_lock); in sxgbe_get_stats64()
1727 stats->rx_bytes = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1731 stats->rx_packets = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1735 stats->multicast = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1739 stats->rx_crc_errors = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1743 stats->rx_length_errors = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1747 stats->rx_missed_errors = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1751 stats->tx_bytes = sxgbe_get_stat64(ioaddr, in sxgbe_get_stats64()
1758 stats->tx_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXFRAMELO_GCNT_REG, in sxgbe_get_stats64()
1760 stats->tx_errors = count - stats->tx_errors; in sxgbe_get_stats64()
1761 stats->tx_packets = count; in sxgbe_get_stats64()
1762 stats->tx_fifo_errors = sxgbe_get_stat64(ioaddr, SXGBE_MMC_TXUFLWLO_GBCNT_REG, in sxgbe_get_stats64()
1765 spin_unlock(&priv->stats_lock); in sxgbe_get_stats64()
1768 /* sxgbe_set_features - entry point to set offload features of the device.
1781 netdev_features_t changed = dev->features ^ features; in sxgbe_set_features()
1785 priv->hw->mac->enable_rx_csum(priv->ioaddr); in sxgbe_set_features()
1786 priv->rxcsum_insertion = true; in sxgbe_set_features()
1788 priv->hw->mac->disable_rx_csum(priv->ioaddr); in sxgbe_set_features()
1789 priv->rxcsum_insertion = false; in sxgbe_set_features()
1796 /* sxgbe_change_mtu - entry point to change MTU size for the device.
1803 * 0 on success and an appropriate negative error code as defined in errno.h
1808 dev->mtu = new_mtu; in sxgbe_change_mtu()
1837 * sxgbe_set_rx_mode - entry point for setting different receive mode of
1850 void __iomem *ioaddr = (void __iomem *)priv->ioaddr; in sxgbe_set_rx_mode()
1859 if (dev->flags & IFF_PROMISC) { in sxgbe_set_rx_mode()
1863 (dev->flags & IFF_ALLMULTI)) { in sxgbe_set_rx_mode()
1877 int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26; in sxgbe_set_rx_mode()
1897 sxgbe_set_umac_addr(ioaddr, ha->addr, reg); in sxgbe_set_rx_mode()
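The bit_nr computed at line 1877 indexes a 64-entry multicast hash table: the top six bits of the bit-reversed CRC-32 of the address select one bit spread over two 32-bit hash registers. A hedged sketch of the usual update pattern (the actual register writes sit in lines the search did not match):

#include <linux/bitrev.h>
#include <linux/crc32.h>
#include <linux/if_ether.h>

static void example_hash_mc_addr(u32 mc_filter[2], const u8 *addr)
{
	/* top 6 bits of the bit-reversed CRC-32 -> hash bit 0..63 */
	int bit_nr = bitrev32(~crc32_le(~0, addr, ETH_ALEN)) >> 26;

	/* bit_nr >> 5 picks the 32-bit register, the low 5 bits the bit */
	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}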
1915 * sxgbe_poll_controller - entry point for polling receive by device
1927 disable_irq(priv->irq); in sxgbe_poll_controller()
1928 sxgbe_rx_interrupt(priv->irq, dev); in sxgbe_poll_controller()
1929 enable_irq(priv->irq); in sxgbe_poll_controller()
1933 /* sxgbe_ioctl - Entry point for the Ioctl
1943 int ret = -EOPNOTSUPP; in sxgbe_ioctl()
1946 return -EINVAL; in sxgbe_ioctl()
1980 ops_ptr->mac = sxgbe_get_core_ops(); in sxgbe_get_ops()
1981 ops_ptr->desc = sxgbe_get_desc_ops(); in sxgbe_get_ops()
1982 ops_ptr->dma = sxgbe_get_dma_ops(); in sxgbe_get_ops()
1983 ops_ptr->mtl = sxgbe_get_mtl_ops(); in sxgbe_get_ops()
1986 ops_ptr->mii.addr = SXGBE_MDIO_SCMD_ADD_REG; in sxgbe_get_ops()
1987 ops_ptr->mii.data = SXGBE_MDIO_SCMD_DATA_REG; in sxgbe_get_ops()
1993 ops_ptr->link.port = 0; in sxgbe_get_ops()
1994 ops_ptr->link.duplex = 0; in sxgbe_get_ops()
1995 ops_ptr->link.speed = SXGBE_SPEED_10G; in sxgbe_get_ops()
1999 * sxgbe_hw_init - Init the GMAC device
2008 priv->hw = kmalloc(sizeof(*priv->hw), GFP_KERNEL); in sxgbe_hw_init()
2009 if (!priv->hw) in sxgbe_hw_init()
2010 return -ENOMEM; in sxgbe_hw_init()
2013 sxgbe_get_ops(priv->hw); in sxgbe_hw_init()
2016 ctrl_ids = priv->hw->mac->get_controller_version(priv->ioaddr); in sxgbe_hw_init()
2017 priv->hw->ctrl_uid = (ctrl_ids & 0x00ff0000) >> 16; in sxgbe_hw_init()
2018 priv->hw->ctrl_id = (ctrl_ids & 0x000000ff); in sxgbe_hw_init()
2020 priv->hw->ctrl_uid, priv->hw->ctrl_id); in sxgbe_hw_init()
2026 if (priv->hw_cap.tx_csum_offload) in sxgbe_hw_init()
2027 pr_info("TX Checksum offload supported\n"); in sxgbe_hw_init()
2029 if (priv->hw_cap.rx_csum_offload) in sxgbe_hw_init()
2040 while (retry_count--) { in sxgbe_sw_reset()
2048 return -EBUSY; in sxgbe_sw_reset()
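Only the retry-loop head (line 2040) and the failure return (line 2048) of sxgbe_sw_reset() were matched. A hedged reconstruction of the surrounding logic, with the register offset and reset bit as named assumptions (the real definitions live in the driver's register header): request the DMA soft reset, then poll the self-clearing bit.

#include <linux/bits.h>
#include <linux/delay.h>
#include <linux/io.h>

#define EXAMPLE_DMA_MODE_REG	0x3000	/* hypothetical register offset */
#define EXAMPLE_DMA_SOFT_RESET	BIT(0)	/* hypothetical self-clearing bit */

static int example_sw_reset(void __iomem *ioaddr)
{
	int retry_count = 10;

	writel(EXAMPLE_DMA_SOFT_RESET, ioaddr + EXAMPLE_DMA_MODE_REG);

	while (retry_count--) {
		if (!(readl(ioaddr + EXAMPLE_DMA_MODE_REG) &
		      EXAMPLE_DMA_SOFT_RESET))
			return 0;	/* reset bit cleared: controller ready */
		mdelay(10);
	}

	return -EBUSY;			/* hardware never came out of reset */
}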
2078 priv->device = device; in sxgbe_drv_probe()
2079 priv->dev = ndev; in sxgbe_drv_probe()
2082 priv->plat = plat_dat; in sxgbe_drv_probe()
2083 priv->ioaddr = addr; in sxgbe_drv_probe()
2085 ret = sxgbe_sw_reset(priv->ioaddr); in sxgbe_drv_probe()
2106 ndev->netdev_ops = &sxgbe_netdev_ops; in sxgbe_drv_probe()
2108 ndev->hw_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | in sxgbe_drv_probe()
2111 ndev->features |= ndev->hw_features | NETIF_F_HIGHDMA; in sxgbe_drv_probe()
2112 ndev->watchdog_timeo = msecs_to_jiffies(TX_TIMEO); in sxgbe_drv_probe()
2115 ndev->priv_flags |= IFF_UNICAST_FLT; in sxgbe_drv_probe()
2117 /* MTU range: 68 - 9000 */ in sxgbe_drv_probe()
2118 ndev->min_mtu = MIN_MTU; in sxgbe_drv_probe()
2119 ndev->max_mtu = MAX_MTU; in sxgbe_drv_probe()
2121 priv->msg_enable = netif_msg_init(debug, default_msg_level); in sxgbe_drv_probe()
2124 if (priv->hw_cap.tcpseg_offload) { in sxgbe_drv_probe()
2126 priv->hw->dma->enable_tso(priv->ioaddr, queue_num); in sxgbe_drv_probe()
2131 if (priv->hw_cap.rx_csum_offload) { in sxgbe_drv_probe()
2132 priv->hw->mac->enable_rx_csum(priv->ioaddr); in sxgbe_drv_probe()
2133 priv->rxcsum_insertion = true; in sxgbe_drv_probe()
2137 priv->rx_pause = 1; in sxgbe_drv_probe()
2138 priv->tx_pause = 1; in sxgbe_drv_probe()
2141 if (!priv->plat->riwt_off) { in sxgbe_drv_probe()
2142 priv->use_riwt = 1; in sxgbe_drv_probe()
2146 netif_napi_add(ndev, &priv->napi, sxgbe_poll); in sxgbe_drv_probe()
2148 spin_lock_init(&priv->stats_lock); in sxgbe_drv_probe()
2150 priv->sxgbe_clk = clk_get(priv->device, SXGBE_RESOURCE_NAME); in sxgbe_drv_probe()
2151 if (IS_ERR(priv->sxgbe_clk)) { in sxgbe_drv_probe()
2159 * changed at run-time and is fixed. Otherwise, the driver will try to in sxgbe_drv_probe()
2163 if (!priv->plat->clk_csr) in sxgbe_drv_probe()
2166 priv->clk_csr = priv->plat->clk_csr; in sxgbe_drv_probe()
2172 __func__, priv->plat->bus_id); in sxgbe_drv_probe()
2189 clk_put(priv->sxgbe_clk); in sxgbe_drv_probe()
2191 netif_napi_del(&priv->napi); in sxgbe_drv_probe()
2193 kfree(priv->hw); in sxgbe_drv_probe()
2203 * Description: this function resets the TX/RX processes, disables the MAC RX/TX
2214 priv->hw->mac->disable_rxqueue(priv->ioaddr, queue_num); in sxgbe_drv_remove()
2217 priv->hw->dma->stop_rx(priv->ioaddr, SXGBE_RX_QUEUES); in sxgbe_drv_remove()
2218 priv->hw->dma->stop_tx(priv->ioaddr, SXGBE_TX_QUEUES); in sxgbe_drv_remove()
2220 priv->hw->mac->enable_tx(priv->ioaddr, false); in sxgbe_drv_remove()
2221 priv->hw->mac->enable_rx(priv->ioaddr, false); in sxgbe_drv_remove()
2227 clk_put(priv->sxgbe_clk); in sxgbe_drv_remove()
2229 netif_napi_del(&priv->napi); in sxgbe_drv_remove()
2231 kfree(priv->hw); in sxgbe_drv_remove()
2249 return -ENOSYS; in sxgbe_freeze()
2254 return -ENOSYS; in sxgbe_restore()
2296 pr_err("%s: ERROR broken module parameter conversion\n", __func__); in sxgbe_cmdline_opt()
2307 MODULE_PARM_DESC(debug, "Message Level (-1: default, 0: no output, 16: all)");
2308 MODULE_PARM_DESC(eee_timer, "EEE-LPI Default LS timer value");