Lines Matching +full:axi +full:- +full:can +full:- +full:1

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Xilinx Axi Ethernet device driver
6 * Copyright (c) 2005-2008 DLA Systems, David H. Lynch Jr. <dhlii@dlasys.net>
7 * Copyright (c) 2008-2009 Secret Lab Technologies Ltd.
8 * Copyright (c) 2010 - 2011 Michal Simek <monstr@monstr.eu>
9 * Copyright (c) 2010 - 2011 PetaLogix
10 * Copyright (c) 2019 - 2022 Calian Advanced Technologies
11 * Copyright (c) 2010 - 2012 Xilinx, Inc. All rights reserved.
13 * This is a driver for the Xilinx Axi Ethernet which is used in the Virtex6
17 * - Add Axi Fifo support.
18 * - Factor out Axi DMA code into separate driver.
19 * - Test and fix basic multicast filtering.
20 * - Add support for extended multicast filtering.
21 * - Test basic VLAN support.
22 * - Add support for extended VLAN support.
42 #include <linux/dma-mapping.h>
52 #define TX_BD_NUM_MIN (MAX_SKB_FRAGS + 1)
61 #define DRIVER_DESCRIPTION "Xilinx Axi Ethernet driver"
70 { .compatible = "xlnx,axi-ethernet-1.00.a", },
71 { .compatible = "xlnx,axi-ethernet-1.01.a", },
72 { .compatible = "xlnx,axi-ethernet-2.01.a", },
78 /* Option table for setting up Axi Ethernet hardware options */
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)]; in axienet_get_rx_desc()
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)]; in axienet_get_tx_desc()
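The masked lookups above only work because the skb ring sizes are powers of two: with a power-of-two size, i & (SIZE - 1) equals i % SIZE, so the free-running head/tail counters can be used directly as indices. A minimal sketch of the idea (RING_SIZE is illustrative, not a driver constant):

    #define RING_SIZE 128                        /* must be a power of two */

    static unsigned int ring_slot(unsigned int i)
    {
            /* Equivalent to i % RING_SIZE, but a cheap AND; wraps correctly
             * even when i is a free-running counter.
             */
            return i & (RING_SIZE - 1);
    }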
144 * axienet_dma_in32 - Memory mapped Axi DMA register read
146 * @reg: Address offset from the base address of the Axi DMA core
148 * Return: The contents of the Axi DMA register
150 * This function returns the contents of the corresponding Axi DMA register.
154 return ioread32(lp->dma_regs + reg); in axienet_dma_in32()
160 desc->phys = lower_32_bits(addr); in desc_set_phys_addr()
161 if (lp->features & XAE_FEATURE_DMA_64BIT) in desc_set_phys_addr()
162 desc->phys_msb = upper_32_bits(addr); in desc_set_phys_addr()
168 dma_addr_t ret = desc->phys; in desc_get_phys_addr()
170 if (lp->features & XAE_FEATURE_DMA_64BIT) in desc_get_phys_addr()
171 ret |= ((dma_addr_t)desc->phys_msb << 16) << 16; in desc_get_phys_addr()
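desc_get_phys_addr() reassembles the address from the two descriptor words with two 16-bit shifts rather than one shift by 32; that keeps the expression well defined even when dma_addr_t is only 32 bits wide (a 32-bit shift on a 32-bit type is undefined behaviour, while the double shift simply discards the MSB word). A minimal sketch of the round trip, using illustrative field names:

    #include <linux/types.h>

    struct bd {
            u32 phys;          /* low 32 bits of the buffer address */
            u32 phys_msb;      /* high 32 bits, used only with 64-bit DMA */
    };

    static dma_addr_t bd_get_addr(const struct bd *bd)
    {
            /* ((x << 16) << 16) instead of (x << 32): defined for both
             * 32-bit and 64-bit dma_addr_t.
             */
            return (((dma_addr_t)bd->phys_msb << 16) << 16) | bd->phys;
    }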
177 * axienet_dma_bd_release - Release buffer descriptor rings
181 * axienet_dma_bd_init. axienet_dma_bd_release is called when Axi Ethernet
190 dma_free_coherent(lp->dev, in axienet_dma_bd_release()
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, in axienet_dma_bd_release()
192 lp->tx_bd_v, in axienet_dma_bd_release()
193 lp->tx_bd_p); in axienet_dma_bd_release()
195 if (!lp->rx_bd_v) in axienet_dma_bd_release()
198 for (i = 0; i < lp->rx_bd_num; i++) { in axienet_dma_bd_release()
204 if (!lp->rx_bd_v[i].skb) in axienet_dma_bd_release()
207 dev_kfree_skb(lp->rx_bd_v[i].skb); in axienet_dma_bd_release()
209 /* For each descriptor, we programmed cntrl with the (non-zero) in axienet_dma_bd_release()
211 * So a non-zero value in there means we need to unmap it. in axienet_dma_bd_release()
213 if (lp->rx_bd_v[i].cntrl) { in axienet_dma_bd_release()
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]); in axienet_dma_bd_release()
215 dma_unmap_single(lp->dev, phys, in axienet_dma_bd_release()
216 lp->max_frm_size, DMA_FROM_DEVICE); in axienet_dma_bd_release()
220 dma_free_coherent(lp->dev, in axienet_dma_bd_release()
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, in axienet_dma_bd_release()
222 lp->rx_bd_v, in axienet_dma_bd_release()
223 lp->rx_bd_p); in axienet_dma_bd_release()
228 if (lp->axi_clk) in axienet_dma_rate()
229 return clk_get_rate(lp->axi_clk); in axienet_dma_rate()
234 * axienet_calc_cr() - Calculate control register value
251 if (count > 1) { in axienet_calc_cr()
255 /* 1 Timeout Interval = 125 * (clock period of SG clock) */ in axienet_calc_cr()
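The timeout comment translates directly into arithmetic: one timeout interval is 125 SG-clock periods, so assuming a 100 MHz clock (10 ns period) an interval is 1.25 us and a 50 us coalesce delay becomes a timer field of 50 / 1.25 = 40. Generically, timer ~= usec * clk_rate / (125 * 10^6), which a sketch like this computes (the helper name is illustrative; the driver additionally clamps the result to the register field width):

    #include <linux/math64.h>

    /* usec -> DMA delay-timer intervals, one interval = 125 SG-clock periods.
     * Example: clk_rate = 100000000, usec = 50  ->  40 intervals.
     */
    static u32 usec_to_dma_timer(u64 clk_rate, u32 usec)
    {
            return (u32)div64_u64((u64)usec * clk_rate, 125ULL * 1000000);
    }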
268 * axienet_coalesce_params() - Extract coalesce parameters from the CR
285 * axienet_dma_start - Set up DMA registers and start DMA operation
290 spin_lock_irq(&lp->rx_cr_lock); in axienet_dma_start()
293 lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
294 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); in axienet_dma_start()
296 /* Populate the tail pointer and bring the Rx Axi DMA engine out of in axienet_dma_start()
299 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p); in axienet_dma_start()
300 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
301 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); in axienet_dma_start()
302 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p + in axienet_dma_start()
303 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1))); in axienet_dma_start()
304 lp->rx_dma_started = true; in axienet_dma_start()
306 spin_unlock_irq(&lp->rx_cr_lock); in axienet_dma_start()
307 spin_lock_irq(&lp->tx_cr_lock); in axienet_dma_start()
310 lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
311 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); in axienet_dma_start()
313 /* Write to the RS (Run-stop) bit in the Tx channel control register. in axienet_dma_start()
317 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p); in axienet_dma_start()
318 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK; in axienet_dma_start()
319 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); in axienet_dma_start()
320 lp->tx_dma_started = true; in axienet_dma_start()
322 spin_unlock_irq(&lp->tx_cr_lock); in axienet_dma_start()
326 * axienet_dma_bd_init - Setup buffer descriptor rings for Axi DMA
329 * Return: 0, on success -ENOMEM, on failure
333 * and is called when Axi Ethernet driver reset is called.
342 lp->tx_bd_ci = 0; in axienet_dma_bd_init()
343 lp->tx_bd_tail = 0; in axienet_dma_bd_init()
344 lp->rx_bd_ci = 0; in axienet_dma_bd_init()
347 lp->tx_bd_v = dma_alloc_coherent(lp->dev, in axienet_dma_bd_init()
348 sizeof(*lp->tx_bd_v) * lp->tx_bd_num, in axienet_dma_bd_init()
349 &lp->tx_bd_p, GFP_KERNEL); in axienet_dma_bd_init()
350 if (!lp->tx_bd_v) in axienet_dma_bd_init()
351 return -ENOMEM; in axienet_dma_bd_init()
353 lp->rx_bd_v = dma_alloc_coherent(lp->dev, in axienet_dma_bd_init()
354 sizeof(*lp->rx_bd_v) * lp->rx_bd_num, in axienet_dma_bd_init()
355 &lp->rx_bd_p, GFP_KERNEL); in axienet_dma_bd_init()
356 if (!lp->rx_bd_v) in axienet_dma_bd_init()
359 for (i = 0; i < lp->tx_bd_num; i++) { in axienet_dma_bd_init()
360 dma_addr_t addr = lp->tx_bd_p + in axienet_dma_bd_init()
361 sizeof(*lp->tx_bd_v) * in axienet_dma_bd_init()
362 ((i + 1) % lp->tx_bd_num); in axienet_dma_bd_init()
364 lp->tx_bd_v[i].next = lower_32_bits(addr); in axienet_dma_bd_init()
365 if (lp->features & XAE_FEATURE_DMA_64BIT) in axienet_dma_bd_init()
366 lp->tx_bd_v[i].next_msb = upper_32_bits(addr); in axienet_dma_bd_init()
369 for (i = 0; i < lp->rx_bd_num; i++) { in axienet_dma_bd_init()
372 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * in axienet_dma_bd_init()
373 ((i + 1) % lp->rx_bd_num); in axienet_dma_bd_init()
374 lp->rx_bd_v[i].next = lower_32_bits(addr); in axienet_dma_bd_init()
375 if (lp->features & XAE_FEATURE_DMA_64BIT) in axienet_dma_bd_init()
376 lp->rx_bd_v[i].next_msb = upper_32_bits(addr); in axienet_dma_bd_init()
378 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size); in axienet_dma_bd_init()
382 lp->rx_bd_v[i].skb = skb; in axienet_dma_bd_init()
383 addr = dma_map_single(lp->dev, skb->data, in axienet_dma_bd_init()
384 lp->max_frm_size, DMA_FROM_DEVICE); in axienet_dma_bd_init()
385 if (dma_mapping_error(lp->dev, addr)) { in axienet_dma_bd_init()
389 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]); in axienet_dma_bd_init()
391 lp->rx_bd_v[i].cntrl = lp->max_frm_size; in axienet_dma_bd_init()
399 return -ENOMEM; in axienet_dma_bd_init()
403 * axienet_set_mac_address - Write the MAC address
407 * This function is called to initialize the MAC address of the Axi Ethernet
417 if (!is_valid_ether_addr(ndev->dev_addr)) in axienet_set_mac_address()
422 (ndev->dev_addr[0]) | in axienet_set_mac_address()
423 (ndev->dev_addr[1] << 8) | in axienet_set_mac_address()
424 (ndev->dev_addr[2] << 16) | in axienet_set_mac_address()
425 (ndev->dev_addr[3] << 24)); in axienet_set_mac_address()
429 (ndev->dev_addr[4] | in axienet_set_mac_address()
430 (ndev->dev_addr[5] << 8)))); in axienet_set_mac_address()
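The two writes above pack the six MAC bytes into the core's pair of 32-bit unicast address words: bytes 0-3 go into the low word, bytes 4-5 into the low half of the high word (the listed lines are the payload; the second register write also preserves the upper bits of that word). A sketch of the same packing, with an illustrative helper name:

    #include <linux/types.h>

    static void mac_to_uaw(const u8 *addr, u32 *lo, u32 *hi)
    {
            *lo = addr[0] | addr[1] << 8 | addr[2] << 16 | (u32)addr[3] << 24;
            *hi = addr[4] | addr[5] << 8;
    }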
434 * netdev_set_mac_address - Write the MAC address (from outside the driver)
440 * This function is called to initialize the MAC address of the Axi Ethernet
448 axienet_set_mac_address(ndev, addr->sa_data); in netdev_set_mac_address()
453 * axienet_set_multicast_list - Prepare the multicast table
457 * initialization. The Axi Ethernet basic multicast support has a four-entry
471 if (ndev->flags & IFF_PROMISC) in axienet_set_multicast_list()
477 if (ndev->flags & IFF_ALLMULTI || in axienet_set_multicast_list()
481 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */ in axienet_set_multicast_list()
483 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */ in axienet_set_multicast_list()
485 axienet_iow(lp, XAE_FFE_OFFSET, 1); in axienet_set_multicast_list()
486 i = 1; in axienet_set_multicast_list()
494 af0reg = (ha->addr[0]); in axienet_set_multicast_list()
495 af0reg |= (ha->addr[1] << 8); in axienet_set_multicast_list()
496 af0reg |= (ha->addr[2] << 16); in axienet_set_multicast_list()
497 af0reg |= (ha->addr[3] << 24); in axienet_set_multicast_list()
499 af1reg = (ha->addr[4]); in axienet_set_multicast_list()
500 af1reg |= (ha->addr[5] << 8); in axienet_set_multicast_list()
510 axienet_iow(lp, XAE_FFE_OFFSET, 1); in axienet_set_multicast_list()
524 * axienet_setoptions - Set an Axi Ethernet option
528 * The Axi Ethernet core has multiple features which can be selectively turned
531 * these options in the Axi Ethernet hardware. This is done through
540 while (tp->opt) { in axienet_setoptions()
541 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or)); in axienet_setoptions()
542 if (options & tp->opt) in axienet_setoptions()
543 reg |= tp->m_or; in axienet_setoptions()
544 axienet_iow(lp, tp->reg, reg); in axienet_setoptions()
548 lp->options |= options; in axienet_setoptions()
555 if (lp->reset_in_progress) in axienet_stat()
556 return lp->hw_stat_base[stat]; in axienet_stat()
559 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]); in axienet_stat()
566 write_seqcount_begin(&lp->hw_stats_seqcount); in axienet_stats_update()
567 lp->reset_in_progress = reset; in axienet_stats_update()
571 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat]; in axienet_stats_update()
572 lp->hw_last_counter[stat] = counter; in axienet_stats_update()
574 write_seqcount_end(&lp->hw_stats_seqcount); in axienet_stats_update()
582 mutex_lock(&lp->stats_lock); in axienet_refresh_stats()
584 mutex_unlock(&lp->stats_lock); in axienet_refresh_stats()
587 schedule_delayed_work(&lp->stats_work, 13 * HZ); in axienet_refresh_stats()
596 mutex_lock(&lp->stats_lock); in __axienet_device_reset()
597 if (lp->features & XAE_FEATURE_STATS) in __axienet_device_reset()
600 /* Reset Axi DMA. This would reset Axi Ethernet core as well. The reset in __axienet_device_reset()
601 * process of Axi DMA takes a while to complete as all pending in __axienet_device_reset()
613 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__); in __axienet_device_reset()
623 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__); in __axienet_device_reset()
628 if (lp->features & XAE_FEATURE_STATS) { in __axienet_device_reset()
631 write_seqcount_begin(&lp->hw_stats_seqcount); in __axienet_device_reset()
632 lp->reset_in_progress = false; in __axienet_device_reset()
637 lp->hw_stat_base[stat] += in __axienet_device_reset()
638 lp->hw_last_counter[stat] - counter; in __axienet_device_reset()
639 lp->hw_last_counter[stat] = counter; in __axienet_device_reset()
641 write_seqcount_end(&lp->hw_stats_seqcount); in __axienet_device_reset()
645 mutex_unlock(&lp->stats_lock); in __axienet_device_reset()
650 * axienet_dma_stop - Stop DMA operation
658 spin_lock_irq(&lp->rx_cr_lock); in axienet_dma_stop()
660 cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); in axienet_dma_stop()
662 lp->rx_dma_started = false; in axienet_dma_stop()
664 spin_unlock_irq(&lp->rx_cr_lock); in axienet_dma_stop()
665 synchronize_irq(lp->rx_irq); in axienet_dma_stop()
667 spin_lock_irq(&lp->tx_cr_lock); in axienet_dma_stop()
669 cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK); in axienet_dma_stop()
671 lp->tx_dma_started = false; in axienet_dma_stop()
673 spin_unlock_irq(&lp->tx_cr_lock); in axienet_dma_stop()
674 synchronize_irq(lp->tx_irq); in axienet_dma_stop()
696 * axienet_device_reset - Reset and initialize the Axi Ethernet hardware.
699 * This function is called to reset and initialize the Axi Ethernet core. This
700 * is typically called during initialization. It does a reset of the Axi DMA
701 * Rx/Tx channels and initializes the Axi DMA BDs. Since Axi DMA reset lines
702 * are connected to Axi Ethernet reset lines, this in turn resets the Axi
703 * Ethernet core. No separate hardware reset is done for the Axi Ethernet
713 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE; in axienet_device_reset()
714 lp->options |= XAE_OPTION_VLAN; in axienet_device_reset()
715 lp->options &= (~XAE_OPTION_JUMBO); in axienet_device_reset()
717 if (ndev->mtu > XAE_MTU && ndev->mtu <= XAE_JUMBO_MTU) { in axienet_device_reset()
718 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN + in axienet_device_reset()
721 if (lp->max_frm_size <= lp->rxmem) in axienet_device_reset()
722 lp->options |= XAE_OPTION_JUMBO; in axienet_device_reset()
725 if (!lp->use_dmaengine) { in axienet_device_reset()
745 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? in axienet_device_reset()
753 axienet_setoptions(ndev, lp->options & in axienet_device_reset()
757 axienet_setoptions(ndev, lp->options); in axienet_device_reset()
765 * axienet_free_tx_chain - Clean up a series of linked TX descriptors.
771 * in all cleaned-up descriptors. Ignored if NULL.
787 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num]; in axienet_free_tx_chain()
788 status = cur_p->status; in axienet_free_tx_chain()
799 dma_unmap_single(lp->dev, phys, in axienet_free_tx_chain()
800 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK), in axienet_free_tx_chain()
803 if (cur_p->skb && (status & XAXIDMA_BD_STS_COMPLETE_MASK)) { in axienet_free_tx_chain()
804 napi_consume_skb(cur_p->skb, budget); in axienet_free_tx_chain()
808 cur_p->app0 = 0; in axienet_free_tx_chain()
809 cur_p->app1 = 0; in axienet_free_tx_chain()
810 cur_p->app2 = 0; in axienet_free_tx_chain()
811 cur_p->app4 = 0; in axienet_free_tx_chain()
812 cur_p->skb = NULL; in axienet_free_tx_chain()
815 cur_p->cntrl = 0; in axienet_free_tx_chain()
816 cur_p->status = 0; in axienet_free_tx_chain()
823 lp->tx_bd_ci += i; in axienet_free_tx_chain()
824 if (lp->tx_bd_ci >= lp->tx_bd_num) in axienet_free_tx_chain()
825 lp->tx_bd_ci %= lp->tx_bd_num; in axienet_free_tx_chain()
832 * axienet_check_tx_bd_space - Checks if a BD/group of BDs are currently busy
840 * This function returns 0 if a BD or group of BDs can be allocated for
851 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) % in axienet_check_tx_bd_space()
852 lp->tx_bd_num]; in axienet_check_tx_bd_space()
853 if (cur_p->cntrl) in axienet_check_tx_bd_space()
859 * axienet_dma_tx_cb - DMA engine callback for TX channel.
872 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++); in axienet_dma_tx_cb()
873 len = skbuf_dma->skb->len; in axienet_dma_tx_cb()
874 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb); in axienet_dma_tx_cb()
875 u64_stats_update_begin(&lp->tx_stat_sync); in axienet_dma_tx_cb()
876 u64_stats_add(&lp->tx_bytes, len); in axienet_dma_tx_cb()
877 u64_stats_add(&lp->tx_packets, 1); in axienet_dma_tx_cb()
878 u64_stats_update_end(&lp->tx_stat_sync); in axienet_dma_tx_cb()
879 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE); in axienet_dma_tx_cb()
880 dev_consume_skb_any(skbuf_dma->skb); in axienet_dma_tx_cb()
881 netif_txq_completed_wake(txq, 1, len, in axienet_dma_tx_cb()
882 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), in axienet_dma_tx_cb()
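CIRC_SPACE() comes from linux/circ_buf.h and works on the same free-running head/tail counters. Worked example for a ring of 128 entries (an illustrative size): with head = 130 and tail = 10, CIRC_CNT = (130 - 10) & 127 = 120 descriptors in flight, and CIRC_SPACE = 128 - 1 - 120 = 7 free slots; that free-slot figure is what gets handed to netif_txq_completed_wake() so the stack knows whether the queue may be restarted.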
887 * axienet_start_xmit_dmaengine - Starts the transmission.
899 * it populates AXI Stream Control fields with appropriate values.
915 dma_dev = lp->tx_chan->device; in axienet_start_xmit_dmaengine()
916 sg_len = skb_shinfo(skb)->nr_frags + 1; in axienet_start_xmit_dmaengine()
917 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= sg_len) { in axienet_start_xmit_dmaengine()
924 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head); in axienet_start_xmit_dmaengine()
928 lp->tx_ring_head++; in axienet_start_xmit_dmaengine()
929 sg_init_table(skbuf_dma->sgl, sg_len); in axienet_start_xmit_dmaengine()
930 ret = skb_to_sgvec(skb, skbuf_dma->sgl, 0, skb->len); in axienet_start_xmit_dmaengine()
934 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); in axienet_start_xmit_dmaengine()
939 if (skb->ip_summed == CHECKSUM_PARTIAL) { in axienet_start_xmit_dmaengine()
940 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { in axienet_start_xmit_dmaengine()
943 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { in axienet_start_xmit_dmaengine()
945 csum_index_off = csum_start_off + skb->csum_offset; in axienet_start_xmit_dmaengine()
947 app_metadata[0] |= 1; in axienet_start_xmit_dmaengine()
948 app_metadata[1] = (csum_start_off << 16) | csum_index_off; in axienet_start_xmit_dmaengine()
950 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in axienet_start_xmit_dmaengine()
954 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl, in axienet_start_xmit_dmaengine()
960 skbuf_dma->skb = skb; in axienet_start_xmit_dmaengine()
961 skbuf_dma->sg_len = sg_len; in axienet_start_xmit_dmaengine()
962 dma_tx_desc->callback_param = lp; in axienet_start_xmit_dmaengine()
963 dma_tx_desc->callback_result = axienet_dma_tx_cb; in axienet_start_xmit_dmaengine()
964 txq = skb_get_tx_queue(lp->ndev, skb); in axienet_start_xmit_dmaengine()
965 netdev_tx_sent_queue(txq, skb->len); in axienet_start_xmit_dmaengine()
966 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX), in axienet_start_xmit_dmaengine()
967 MAX_SKB_FRAGS + 1, 2 * MAX_SKB_FRAGS); in axienet_start_xmit_dmaengine()
970 dma_async_issue_pending(lp->tx_chan); in axienet_start_xmit_dmaengine()
974 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE); in axienet_start_xmit_dmaengine()
981 * axienet_tx_poll - Invoked once a transmit is completed by the
982 * Axi DMA Tx channel.
990 * unmaps the corresponding buffer so that CPU can regain ownership of the
997 struct net_device *ndev = lp->ndev; in axienet_tx_poll()
1001 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false, in axienet_tx_poll()
1006 u64_stats_update_begin(&lp->tx_stat_sync); in axienet_tx_poll()
1007 u64_stats_add(&lp->tx_packets, packets); in axienet_tx_poll()
1008 u64_stats_add(&lp->tx_bytes, size); in axienet_tx_poll()
1009 u64_stats_update_end(&lp->tx_stat_sync); in axienet_tx_poll()
1014 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) in axienet_tx_poll()
1019 /* Re-enable TX completion interrupts. This should in axienet_tx_poll()
1023 spin_lock_irq(&lp->tx_cr_lock); in axienet_tx_poll()
1024 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr); in axienet_tx_poll()
1025 spin_unlock_irq(&lp->tx_cr_lock); in axienet_tx_poll()
1031 * axienet_start_xmit - Starts the transmission.
1041 * it populates AXI Stream Control fields with appropriate values.
1056 orig_tail_ptr = lp->tx_bd_tail; in axienet_start_xmit()
1059 num_frag = skb_shinfo(skb)->nr_frags; in axienet_start_xmit()
1060 cur_p = &lp->tx_bd_v[orig_tail_ptr]; in axienet_start_xmit()
1062 if (axienet_check_tx_bd_space(lp, num_frag + 1)) { in axienet_start_xmit()
1073 if (skb->ip_summed == CHECKSUM_PARTIAL) { in axienet_start_xmit()
1074 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) { in axienet_start_xmit()
1076 cur_p->app0 |= 2; in axienet_start_xmit()
1077 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) { in axienet_start_xmit()
1079 csum_index_off = csum_start_off + skb->csum_offset; in axienet_start_xmit()
1081 cur_p->app0 |= 1; in axienet_start_xmit()
1082 cur_p->app1 = (csum_start_off << 16) | csum_index_off; in axienet_start_xmit()
1084 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { in axienet_start_xmit()
1085 cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */ in axienet_start_xmit()
1088 phys = dma_map_single(lp->dev, skb->data, in axienet_start_xmit()
1090 if (unlikely(dma_mapping_error(lp->dev, phys))) { in axienet_start_xmit()
1093 ndev->stats.tx_dropped++; in axienet_start_xmit()
1098 cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK; in axienet_start_xmit()
1101 if (++new_tail_ptr >= lp->tx_bd_num) in axienet_start_xmit()
1103 cur_p = &lp->tx_bd_v[new_tail_ptr]; in axienet_start_xmit()
1104 frag = &skb_shinfo(skb)->frags[ii]; in axienet_start_xmit()
1105 phys = dma_map_single(lp->dev, in axienet_start_xmit()
1109 if (unlikely(dma_mapping_error(lp->dev, phys))) { in axienet_start_xmit()
1112 ndev->stats.tx_dropped++; in axienet_start_xmit()
1113 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1, in axienet_start_xmit()
1119 cur_p->cntrl = skb_frag_size(frag); in axienet_start_xmit()
1122 cur_p->cntrl |= XAXIDMA_BD_CTRL_TXEOF_MASK; in axienet_start_xmit()
1123 cur_p->skb = skb; in axienet_start_xmit()
1125 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr; in axienet_start_xmit()
1126 if (++new_tail_ptr >= lp->tx_bd_num) in axienet_start_xmit()
1128 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr); in axienet_start_xmit()
1129 netdev_sent_queue(ndev, skb->len); in axienet_start_xmit()
1135 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) { in axienet_start_xmit()
1141 /* Space might have just been freed - check again */ in axienet_start_xmit()
1142 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) in axienet_start_xmit()
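The two checks around the "check again" comment are the standard stop/recheck dance that avoids racing with the completion path: stop the queue first, then re-test for space, so a completion that freed descriptors in the window cannot leave the queue stopped with nobody to wake it. A generic sketch of the producer-side pattern (tx_has_space() is an illustrative stand-in for the driver's descriptor-space check; the barrier is typically paired with one on the completion side):

    if (!tx_has_space(lp, MAX_SKB_FRAGS + 1)) {
            netif_stop_queue(ndev);
            smp_mb();       /* order the stop against the re-check */
            if (tx_has_space(lp, MAX_SKB_FRAGS + 1))
                    netif_wake_queue(ndev);
    }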
1150 * axienet_dma_rx_cb - DMA engine callback for RX channel.
1164 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++); in axienet_dma_rx_cb()
1165 skb = skbuf_dma->skb; in axienet_dma_rx_cb()
1166 app_metadata = dmaengine_desc_get_metadata_ptr(skbuf_dma->desc, &meta_len, in axienet_dma_rx_cb()
1168 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size, in axienet_dma_rx_cb()
1173 skb->protocol = eth_type_trans(skb, lp->ndev); in axienet_dma_rx_cb()
1174 skb->ip_summed = CHECKSUM_NONE; in axienet_dma_rx_cb()
1177 u64_stats_update_begin(&lp->rx_stat_sync); in axienet_dma_rx_cb()
1178 u64_stats_add(&lp->rx_packets, 1); in axienet_dma_rx_cb()
1179 u64_stats_add(&lp->rx_bytes, rx_len); in axienet_dma_rx_cb()
1180 u64_stats_update_end(&lp->rx_stat_sync); in axienet_dma_rx_cb()
1181 axienet_rx_submit_desc(lp->ndev); in axienet_dma_rx_cb()
1182 dma_async_issue_pending(lp->rx_chan); in axienet_dma_rx_cb()
1186 * axienet_rx_poll - Triggered by RX ISR to complete the BD processing.
1203 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; in axienet_rx_poll()
1205 while (packets < budget && (cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) { in axienet_rx_poll()
1211 skb = cur_p->skb; in axienet_rx_poll()
1212 cur_p->skb = NULL; in axienet_rx_poll()
1220 length = cur_p->app4 & 0x0000FFFF; in axienet_rx_poll()
1223 dma_unmap_single(lp->dev, phys, lp->max_frm_size, in axienet_rx_poll()
1227 skb->protocol = eth_type_trans(skb, lp->ndev); in axienet_rx_poll()
1229 skb->ip_summed = CHECKSUM_NONE; in axienet_rx_poll()
1232 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) { in axienet_rx_poll()
1233 csumstatus = (cur_p->app2 & in axienet_rx_poll()
1237 skb->ip_summed = CHECKSUM_UNNECESSARY; in axienet_rx_poll()
1239 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) { in axienet_rx_poll()
1240 skb->csum = be32_to_cpu(cur_p->app3 & 0xFFFF); in axienet_rx_poll()
1241 skb->ip_summed = CHECKSUM_COMPLETE; in axienet_rx_poll()
1250 new_skb = napi_alloc_skb(napi, lp->max_frm_size); in axienet_rx_poll()
1254 phys = dma_map_single(lp->dev, new_skb->data, in axienet_rx_poll()
1255 lp->max_frm_size, in axienet_rx_poll()
1257 if (unlikely(dma_mapping_error(lp->dev, phys))) { in axienet_rx_poll()
1259 netdev_err(lp->ndev, "RX DMA mapping error\n"); in axienet_rx_poll()
1265 cur_p->cntrl = lp->max_frm_size; in axienet_rx_poll()
1266 cur_p->status = 0; in axienet_rx_poll()
1267 cur_p->skb = new_skb; in axienet_rx_poll()
1272 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci; in axienet_rx_poll()
1274 if (++lp->rx_bd_ci >= lp->rx_bd_num) in axienet_rx_poll()
1275 lp->rx_bd_ci = 0; in axienet_rx_poll()
1276 cur_p = &lp->rx_bd_v[lp->rx_bd_ci]; in axienet_rx_poll()
1279 u64_stats_update_begin(&lp->rx_stat_sync); in axienet_rx_poll()
1280 u64_stats_add(&lp->rx_packets, packets); in axienet_rx_poll()
1281 u64_stats_add(&lp->rx_bytes, size); in axienet_rx_poll()
1282 u64_stats_update_end(&lp->rx_stat_sync); in axienet_rx_poll()
1288 if (READ_ONCE(lp->rx_dim_enabled)) { in axienet_rx_poll()
1292 .pkt_ctr = u64_stats_read(&lp->rx_packets), in axienet_rx_poll()
1293 .byte_ctr = u64_stats_read(&lp->rx_bytes), in axienet_rx_poll()
1294 .event_ctr = READ_ONCE(lp->rx_irqs), in axienet_rx_poll()
1297 net_dim(&lp->rx_dim, &sample); in axienet_rx_poll()
1300 /* Re-enable RX completion interrupts. This should in axienet_rx_poll()
1304 spin_lock_irq(&lp->rx_cr_lock); in axienet_rx_poll()
1305 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr); in axienet_rx_poll()
1306 spin_unlock_irq(&lp->rx_cr_lock); in axienet_rx_poll()
1312 * axienet_tx_irq - Tx Done Isr.
1318 * This is the Axi DMA Tx done Isr. It invokes NAPI polling to complete the
1337 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb, in axienet_tx_irq()
1338 (lp->tx_bd_v[lp->tx_bd_ci]).phys); in axienet_tx_irq()
1339 schedule_work(&lp->dma_err_task); in axienet_tx_irq()
1344 if (napi_schedule_prep(&lp->napi_tx)) { in axienet_tx_irq()
1347 spin_lock(&lp->tx_cr_lock); in axienet_tx_irq()
1348 cr = lp->tx_dma_cr; in axienet_tx_irq()
1351 spin_unlock(&lp->tx_cr_lock); in axienet_tx_irq()
1352 __napi_schedule(&lp->napi_tx); in axienet_tx_irq()
1360 * axienet_rx_irq - Rx Isr.
1366 * This is the Axi DMA Rx Isr. It invokes NAPI polling to complete the RX BD
1385 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb, in axienet_rx_irq()
1386 (lp->rx_bd_v[lp->rx_bd_ci]).phys); in axienet_rx_irq()
1387 schedule_work(&lp->dma_err_task); in axienet_rx_irq()
1392 WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1); in axienet_rx_irq()
1393 if (napi_schedule_prep(&lp->napi_rx)) { in axienet_rx_irq()
1396 spin_lock(&lp->rx_cr_lock); in axienet_rx_irq()
1397 cr = lp->rx_dma_cr; in axienet_rx_irq()
1400 spin_unlock(&lp->rx_cr_lock); in axienet_rx_irq()
1402 __napi_schedule(&lp->napi_rx); in axienet_rx_irq()
1410 * axienet_eth_irq - Ethernet core Isr.
1429 ndev->stats.rx_missed_errors++; in axienet_eth_irq()
1432 ndev->stats.rx_dropped++; in axienet_eth_irq()
1441 * axienet_rx_submit_desc - Submit the rx descriptors to dmaengine.
1456 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head); in axienet_rx_submit_desc()
1460 lp->rx_ring_head++; in axienet_rx_submit_desc()
1461 skb = netdev_alloc_skb(ndev, lp->max_frm_size); in axienet_rx_submit_desc()
1465 sg_init_table(skbuf_dma->sgl, 1); in axienet_rx_submit_desc()
1466 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE); in axienet_rx_submit_desc()
1467 if (unlikely(dma_mapping_error(lp->dev, addr))) { in axienet_rx_submit_desc()
1472 sg_dma_address(skbuf_dma->sgl) = addr; in axienet_rx_submit_desc()
1473 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size; in axienet_rx_submit_desc()
1474 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl, in axienet_rx_submit_desc()
1475 1, DMA_DEV_TO_MEM, in axienet_rx_submit_desc()
1480 skbuf_dma->skb = skb; in axienet_rx_submit_desc()
1481 skbuf_dma->dma_address = sg_dma_address(skbuf_dma->sgl); in axienet_rx_submit_desc()
1482 skbuf_dma->desc = dma_rx_desc; in axienet_rx_submit_desc()
1483 dma_rx_desc->callback_param = lp; in axienet_rx_submit_desc()
1484 dma_rx_desc->callback_result = axienet_dma_rx_cb; in axienet_rx_submit_desc()
1490 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE); in axienet_rx_submit_desc()
1496 * axienet_init_dmaengine - init the dmaengine code.
1500 * non-zero error value on failure
1510 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0"); in axienet_init_dmaengine()
1511 if (IS_ERR(lp->tx_chan)) { in axienet_init_dmaengine()
1512 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n"); in axienet_init_dmaengine()
1513 return PTR_ERR(lp->tx_chan); in axienet_init_dmaengine()
1516 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0"); in axienet_init_dmaengine()
1517 if (IS_ERR(lp->rx_chan)) { in axienet_init_dmaengine()
1518 ret = PTR_ERR(lp->rx_chan); in axienet_init_dmaengine()
1519 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n"); in axienet_init_dmaengine()
1523 lp->tx_ring_tail = 0; in axienet_init_dmaengine()
1524 lp->tx_ring_head = 0; in axienet_init_dmaengine()
1525 lp->rx_ring_tail = 0; in axienet_init_dmaengine()
1526 lp->rx_ring_head = 0; in axienet_init_dmaengine()
1527 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring), in axienet_init_dmaengine()
1529 if (!lp->tx_skb_ring) { in axienet_init_dmaengine()
1530 ret = -ENOMEM; in axienet_init_dmaengine()
1536 ret = -ENOMEM; in axienet_init_dmaengine()
1539 lp->tx_skb_ring[i] = skbuf_dma; in axienet_init_dmaengine()
1542 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring), in axienet_init_dmaengine()
1544 if (!lp->rx_skb_ring) { in axienet_init_dmaengine()
1545 ret = -ENOMEM; in axienet_init_dmaengine()
1551 ret = -ENOMEM; in axienet_init_dmaengine()
1554 lp->rx_skb_ring[i] = skbuf_dma; in axienet_init_dmaengine()
1559 dma_async_issue_pending(lp->rx_chan); in axienet_init_dmaengine()
1565 kfree(lp->rx_skb_ring[i]); in axienet_init_dmaengine()
1566 kfree(lp->rx_skb_ring); in axienet_init_dmaengine()
1569 kfree(lp->tx_skb_ring[i]); in axienet_init_dmaengine()
1570 kfree(lp->tx_skb_ring); in axienet_init_dmaengine()
1572 dma_release_channel(lp->rx_chan); in axienet_init_dmaengine()
1574 dma_release_channel(lp->tx_chan); in axienet_init_dmaengine()
1579 * axienet_init_legacy_dma - init the dma legacy code.
1583 * non-zero error value on failure
1594 /* Enable worker thread for Axi DMA error handling */ in axienet_init_legacy_dma()
1595 lp->stopping = false; in axienet_init_legacy_dma()
1596 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler); in axienet_init_legacy_dma()
1598 napi_enable(&lp->napi_rx); in axienet_init_legacy_dma()
1599 napi_enable(&lp->napi_tx); in axienet_init_legacy_dma()
1601 /* Enable interrupts for Axi DMA Tx */ in axienet_init_legacy_dma()
1602 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED, in axienet_init_legacy_dma()
1603 ndev->name, ndev); in axienet_init_legacy_dma()
1606 /* Enable interrupts for Axi DMA Rx */ in axienet_init_legacy_dma()
1607 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED, in axienet_init_legacy_dma()
1608 ndev->name, ndev); in axienet_init_legacy_dma()
1611 /* Enable interrupts for Axi Ethernet core (if defined) */ in axienet_init_legacy_dma()
1612 if (lp->eth_irq > 0) { in axienet_init_legacy_dma()
1613 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, in axienet_init_legacy_dma()
1614 ndev->name, ndev); in axienet_init_legacy_dma()
1622 free_irq(lp->rx_irq, ndev); in axienet_init_legacy_dma()
1624 free_irq(lp->tx_irq, ndev); in axienet_init_legacy_dma()
1626 napi_disable(&lp->napi_tx); in axienet_init_legacy_dma()
1627 napi_disable(&lp->napi_rx); in axienet_init_legacy_dma()
1628 cancel_work_sync(&lp->dma_err_task); in axienet_init_legacy_dma()
1629 dev_err(lp->dev, "request_irq() failed\n"); in axienet_init_legacy_dma()
1634 * axienet_open - Driver open routine.
1638 * non-zero error value on failure
1643 * and ISR handling. Axi Ethernet core is reset through Axi DMA core. Buffer
1651 /* When we do an Axi Ethernet reset, it resets the complete core in axienet_open()
1659 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0); in axienet_open()
1661 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret); in axienet_open()
1665 phylink_start(lp->phylink); in axienet_open()
1668 schedule_delayed_work(&lp->stats_work, 0); in axienet_open()
1670 if (lp->use_dmaengine) { in axienet_open()
1671 /* Enable interrupts for Axi Ethernet core (if defined) */ in axienet_open()
1672 if (lp->eth_irq > 0) { in axienet_open()
1673 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED, in axienet_open()
1674 ndev->name, ndev); in axienet_open()
1691 if (lp->eth_irq > 0) in axienet_open()
1692 free_irq(lp->eth_irq, ndev); in axienet_open()
1694 cancel_work_sync(&lp->rx_dim.work); in axienet_open()
1695 cancel_delayed_work_sync(&lp->stats_work); in axienet_open()
1696 phylink_stop(lp->phylink); in axienet_open()
1697 phylink_disconnect_phy(lp->phylink); in axienet_open()
1702 * axienet_stop - Driver stop routine.
1709 * The Axi DMA Tx/Rx BDs are released.
1716 if (!lp->use_dmaengine) { in axienet_stop()
1717 WRITE_ONCE(lp->stopping, true); in axienet_stop()
1718 flush_work(&lp->dma_err_task); in axienet_stop()
1720 napi_disable(&lp->napi_tx); in axienet_stop()
1721 napi_disable(&lp->napi_rx); in axienet_stop()
1724 cancel_work_sync(&lp->rx_dim.work); in axienet_stop()
1725 cancel_delayed_work_sync(&lp->stats_work); in axienet_stop()
1727 phylink_stop(lp->phylink); in axienet_stop()
1728 phylink_disconnect_phy(lp->phylink); in axienet_stop()
1730 axienet_setoptions(ndev, lp->options & in axienet_stop()
1733 if (!lp->use_dmaengine) { in axienet_stop()
1735 cancel_work_sync(&lp->dma_err_task); in axienet_stop()
1736 free_irq(lp->tx_irq, ndev); in axienet_stop()
1737 free_irq(lp->rx_irq, ndev); in axienet_stop()
1740 dmaengine_terminate_sync(lp->tx_chan); in axienet_stop()
1741 dmaengine_synchronize(lp->tx_chan); in axienet_stop()
1742 dmaengine_terminate_sync(lp->rx_chan); in axienet_stop()
1743 dmaengine_synchronize(lp->rx_chan); in axienet_stop()
1746 kfree(lp->tx_skb_ring[i]); in axienet_stop()
1747 kfree(lp->tx_skb_ring); in axienet_stop()
1749 kfree(lp->rx_skb_ring[i]); in axienet_stop()
1750 kfree(lp->rx_skb_ring); in axienet_stop()
1752 dma_release_channel(lp->rx_chan); in axienet_stop()
1753 dma_release_channel(lp->tx_chan); in axienet_stop()
1759 if (lp->eth_irq > 0) in axienet_stop()
1760 free_irq(lp->eth_irq, ndev); in axienet_stop()
1765 * axienet_change_mtu - Driver change mtu routine.
1771 * This is the change mtu driver routine. It checks if the Axi Ethernet
1772 * hardware supports jumbo frames before changing the mtu. This can be
1780 return -EBUSY; in axienet_change_mtu()
1783 XAE_TRL_SIZE) > lp->rxmem) in axienet_change_mtu()
1784 return -EINVAL; in axienet_change_mtu()
1786 WRITE_ONCE(ndev->mtu, new_mtu); in axienet_change_mtu()
1793 * axienet_poll_controller - Axi Ethernet poll mechanism.
1803 disable_irq(lp->tx_irq); in axienet_poll_controller()
1804 disable_irq(lp->rx_irq); in axienet_poll_controller()
1805 axienet_rx_irq(lp->tx_irq, ndev); in axienet_poll_controller()
1806 axienet_tx_irq(lp->rx_irq, ndev); in axienet_poll_controller()
1807 enable_irq(lp->tx_irq); in axienet_poll_controller()
1808 enable_irq(lp->rx_irq); in axienet_poll_controller()
1817 return -EINVAL; in axienet_ioctl()
1819 return phylink_mii_ioctl(lp->phylink, rq, cmd); in axienet_ioctl()
1828 netdev_stats_to_stats64(stats, &dev->stats); in axienet_get_stats64()
1831 start = u64_stats_fetch_begin(&lp->rx_stat_sync); in axienet_get_stats64()
1832 stats->rx_packets = u64_stats_read(&lp->rx_packets); in axienet_get_stats64()
1833 stats->rx_bytes = u64_stats_read(&lp->rx_bytes); in axienet_get_stats64()
1834 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start)); in axienet_get_stats64()
1837 start = u64_stats_fetch_begin(&lp->tx_stat_sync); in axienet_get_stats64()
1838 stats->tx_packets = u64_stats_read(&lp->tx_packets); in axienet_get_stats64()
1839 stats->tx_bytes = u64_stats_read(&lp->tx_bytes); in axienet_get_stats64()
1840 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start)); in axienet_get_stats64()
1842 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_get_stats64()
1846 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_get_stats64()
1847 stats->rx_length_errors = in axienet_get_stats64()
1849 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS); in axienet_get_stats64()
1850 stats->rx_frame_errors = in axienet_get_stats64()
1852 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) + in axienet_get_stats64()
1854 stats->rx_length_errors + in axienet_get_stats64()
1855 stats->rx_crc_errors + in axienet_get_stats64()
1856 stats->rx_frame_errors; in axienet_get_stats64()
1857 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES); in axienet_get_stats64()
1859 stats->tx_aborted_errors = in axienet_get_stats64()
1861 stats->tx_fifo_errors = in axienet_get_stats64()
1863 stats->tx_window_errors = in axienet_get_stats64()
1865 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) + in axienet_get_stats64()
1866 stats->tx_aborted_errors + in axienet_get_stats64()
1867 stats->tx_fifo_errors + in axienet_get_stats64()
1868 stats->tx_window_errors; in axienet_get_stats64()
1869 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_get_stats64()
1900 * axienet_ethtools_get_drvinfo - Get various Axi Ethernet driver information.
1905 * Issue "ethtool -i ethX" under linux prompt to execute this function.
1910 strscpy(ed->driver, DRIVER_NAME, sizeof(ed->driver)); in axienet_ethtools_get_drvinfo()
1911 strscpy(ed->version, DRIVER_VERSION, sizeof(ed->version)); in axienet_ethtools_get_drvinfo()
1915 * axienet_ethtools_get_regs_len - Get the total regs length present in the
1930 * axienet_ethtools_get_regs - Dump the contents of all registers present
1936 * This implements ethtool command for getting the Axi Ethernet register dump.
1937 * Issue "ethtool -d ethX" to execute this function.
1946 regs->version = 0; in axienet_ethtools_get_regs()
1947 regs->len = len; in axienet_ethtools_get_regs()
1951 data[1] = axienet_ior(lp, XAE_TPF_OFFSET); in axienet_ethtools_get_regs()
1978 if (!lp->use_dmaengine) { in axienet_ethtools_get_regs()
1998 ering->rx_max_pending = RX_BD_NUM_MAX; in axienet_ethtools_get_ringparam()
1999 ering->rx_mini_max_pending = 0; in axienet_ethtools_get_ringparam()
2000 ering->rx_jumbo_max_pending = 0; in axienet_ethtools_get_ringparam()
2001 ering->tx_max_pending = TX_BD_NUM_MAX; in axienet_ethtools_get_ringparam()
2002 ering->rx_pending = lp->rx_bd_num; in axienet_ethtools_get_ringparam()
2003 ering->rx_mini_pending = 0; in axienet_ethtools_get_ringparam()
2004 ering->rx_jumbo_pending = 0; in axienet_ethtools_get_ringparam()
2005 ering->tx_pending = lp->tx_bd_num; in axienet_ethtools_get_ringparam()
2016 if (ering->rx_pending > RX_BD_NUM_MAX || in axienet_ethtools_set_ringparam()
2017 ering->rx_mini_pending || in axienet_ethtools_set_ringparam()
2018 ering->rx_jumbo_pending || in axienet_ethtools_set_ringparam()
2019 ering->tx_pending < TX_BD_NUM_MIN || in axienet_ethtools_set_ringparam()
2020 ering->tx_pending > TX_BD_NUM_MAX) in axienet_ethtools_set_ringparam()
2021 return -EINVAL; in axienet_ethtools_set_ringparam()
2024 return -EBUSY; in axienet_ethtools_set_ringparam()
2026 lp->rx_bd_num = ering->rx_pending; in axienet_ethtools_set_ringparam()
2027 lp->tx_bd_num = ering->tx_pending; in axienet_ethtools_set_ringparam()
2032 * axienet_ethtools_get_pauseparam - Get the pause parameter setting for
2037 * This implements ethtool command for getting axi ethernet pause frame
2038 * setting. Issue "ethtool -a ethX" to execute this function.
2046 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm); in axienet_ethtools_get_pauseparam()
2050 * axienet_ethtools_set_pauseparam - Set device pause parameter(flow control)
2056 * paths. Issue "ethtool -A ethX tx on|off" under linux prompt to execute this
2059 * Return: 0 on success, -EFAULT if device is running
2067 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm); in axienet_ethtools_set_pauseparam()
2071 * axienet_update_coalesce_rx() - Set RX CR
2079 spin_lock_irq(&lp->rx_cr_lock); in axienet_update_coalesce_rx()
2080 lp->rx_dma_cr &= ~mask; in axienet_update_coalesce_rx()
2081 lp->rx_dma_cr |= cr; in axienet_update_coalesce_rx()
2085 if (lp->rx_dma_started) { in axienet_update_coalesce_rx()
2090 cr = lp->rx_dma_cr; in axienet_update_coalesce_rx()
2092 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; in axienet_update_coalesce_rx()
2095 spin_unlock_irq(&lp->rx_cr_lock); in axienet_update_coalesce_rx()
2099 * axienet_dim_coalesce_count_rx() - RX coalesce count for DIM
2104 return min(1 << (lp->rx_dim.profile_ix << 1), 255); in axienet_dim_coalesce_count_rx()
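That expression makes the coalesce count grow four-fold per DIM profile step, capped at the 8-bit field maximum: profile_ix 0, 1, 2, 3, 4 map to 1 << 0 = 1, 1 << 2 = 4, 1 << 4 = 16, 1 << 6 = 64 and 1 << 8 = 256, which the min() clamps to 255.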
2108 * axienet_rx_dim_work() - Adjust RX DIM settings
2120 lp->rx_dim.state = DIM_START_MEASURE; in axienet_rx_dim_work()
2124 * axienet_update_coalesce_tx() - Set TX CR
2132 spin_lock_irq(&lp->tx_cr_lock); in axienet_update_coalesce_tx()
2133 lp->tx_dma_cr &= ~mask; in axienet_update_coalesce_tx()
2134 lp->tx_dma_cr |= cr; in axienet_update_coalesce_tx()
2138 if (lp->tx_dma_started) { in axienet_update_coalesce_tx()
2143 cr = lp->tx_dma_cr; in axienet_update_coalesce_tx()
2145 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK; in axienet_update_coalesce_tx()
2148 spin_unlock_irq(&lp->tx_cr_lock); in axienet_update_coalesce_tx()
2152 * axienet_ethtools_get_coalesce - Get DMA interrupt coalescing count.
2159 * count on Tx and Rx paths. Issue "ethtool -c ethX" under linux prompt to
2173 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled; in axienet_ethtools_get_coalesce()
2175 spin_lock_irq(&lp->rx_cr_lock); in axienet_ethtools_get_coalesce()
2176 cr = lp->rx_dma_cr; in axienet_ethtools_get_coalesce()
2177 spin_unlock_irq(&lp->rx_cr_lock); in axienet_ethtools_get_coalesce()
2179 &ecoalesce->rx_max_coalesced_frames, in axienet_ethtools_get_coalesce()
2180 &ecoalesce->rx_coalesce_usecs); in axienet_ethtools_get_coalesce()
2182 spin_lock_irq(&lp->tx_cr_lock); in axienet_ethtools_get_coalesce()
2183 cr = lp->tx_dma_cr; in axienet_ethtools_get_coalesce()
2184 spin_unlock_irq(&lp->tx_cr_lock); in axienet_ethtools_get_coalesce()
2186 &ecoalesce->tx_max_coalesced_frames, in axienet_ethtools_get_coalesce()
2187 &ecoalesce->tx_coalesce_usecs); in axienet_ethtools_get_coalesce()
2192 * axienet_ethtools_set_coalesce - Set DMA interrupt coalescing count.
2199 * count on Tx and Rx paths. Issue "ethtool -C ethX rx-frames 5" under linux
2202 * Return: 0, on success, Non-zero error value on failure.
2211 bool new_dim = ecoalesce->use_adaptive_rx_coalesce; in axienet_ethtools_set_coalesce()
2212 bool old_dim = lp->rx_dim_enabled; in axienet_ethtools_set_coalesce()
2215 if (ecoalesce->rx_max_coalesced_frames > 255 || in axienet_ethtools_set_coalesce()
2216 ecoalesce->tx_max_coalesced_frames > 255) { in axienet_ethtools_set_coalesce()
2218 return -EINVAL; in axienet_ethtools_set_coalesce()
2221 if (!ecoalesce->rx_max_coalesced_frames || in axienet_ethtools_set_coalesce()
2222 !ecoalesce->tx_max_coalesced_frames) { in axienet_ethtools_set_coalesce()
2223 NL_SET_ERR_MSG(extack, "frames must be non-zero"); in axienet_ethtools_set_coalesce()
2224 return -EINVAL; in axienet_ethtools_set_coalesce()
2227 if (((ecoalesce->rx_max_coalesced_frames > 1 || new_dim) && in axienet_ethtools_set_coalesce()
2228 !ecoalesce->rx_coalesce_usecs) || in axienet_ethtools_set_coalesce()
2229 (ecoalesce->tx_max_coalesced_frames > 1 && in axienet_ethtools_set_coalesce()
2230 !ecoalesce->tx_coalesce_usecs)) { in axienet_ethtools_set_coalesce()
2232 "usecs must be non-zero when frames is greater than one"); in axienet_ethtools_set_coalesce()
2233 return -EINVAL; in axienet_ethtools_set_coalesce()
2238 ecoalesce->rx_coalesce_usecs); in axienet_ethtools_set_coalesce()
2241 WRITE_ONCE(lp->rx_dim_enabled, false); in axienet_ethtools_set_coalesce()
2242 napi_synchronize(&lp->napi_rx); in axienet_ethtools_set_coalesce()
2243 flush_work(&lp->rx_dim.work); in axienet_ethtools_set_coalesce()
2246 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames, in axienet_ethtools_set_coalesce()
2247 ecoalesce->rx_coalesce_usecs); in axienet_ethtools_set_coalesce()
2250 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs); in axienet_ethtools_set_coalesce()
2256 WRITE_ONCE(lp->rx_dim_enabled, true); in axienet_ethtools_set_coalesce()
2258 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames, in axienet_ethtools_set_coalesce()
2259 ecoalesce->tx_coalesce_usecs); in axienet_ethtools_set_coalesce()
2270 return phylink_ethtool_ksettings_get(lp->phylink, cmd); in axienet_ethtools_get_link_ksettings()
2279 return phylink_ethtool_ksettings_set(lp->phylink, cmd); in axienet_ethtools_set_link_ksettings()
2286 return phylink_ethtool_nway_reset(lp->phylink); in axienet_ethtools_nway_reset()
2297 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtools_get_ethtool_stats()
2299 data[1] = axienet_stat(lp, STAT_TX_BYTES); in axienet_ethtools_get_ethtool_stats()
2307 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtools_get_ethtool_stats()
2318 "User Defined Counter 1",
2338 if (lp->features & XAE_FEATURE_STATS) in axienet_ethtools_get_sset_count()
2342 return -EOPNOTSUPP; in axienet_ethtools_get_sset_count()
2353 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtools_get_pause_stats()
2357 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtools_get_pause_stats()
2358 pause_stats->tx_pause_frames = in axienet_ethtools_get_pause_stats()
2360 pause_stats->rx_pause_frames = in axienet_ethtools_get_pause_stats()
2362 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtools_get_pause_stats()
2372 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtool_get_eth_mac_stats()
2376 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtool_get_eth_mac_stats()
2377 mac_stats->FramesTransmittedOK = in axienet_ethtool_get_eth_mac_stats()
2379 mac_stats->SingleCollisionFrames = in axienet_ethtool_get_eth_mac_stats()
2381 mac_stats->MultipleCollisionFrames = in axienet_ethtool_get_eth_mac_stats()
2383 mac_stats->FramesReceivedOK = in axienet_ethtool_get_eth_mac_stats()
2385 mac_stats->FrameCheckSequenceErrors = in axienet_ethtool_get_eth_mac_stats()
2387 mac_stats->AlignmentErrors = in axienet_ethtool_get_eth_mac_stats()
2389 mac_stats->FramesWithDeferredXmissions = in axienet_ethtool_get_eth_mac_stats()
2391 mac_stats->LateCollisions = in axienet_ethtool_get_eth_mac_stats()
2393 mac_stats->FramesAbortedDueToXSColls = in axienet_ethtool_get_eth_mac_stats()
2395 mac_stats->MulticastFramesXmittedOK = in axienet_ethtool_get_eth_mac_stats()
2397 mac_stats->BroadcastFramesXmittedOK = in axienet_ethtool_get_eth_mac_stats()
2399 mac_stats->FramesWithExcessiveDeferral = in axienet_ethtool_get_eth_mac_stats()
2401 mac_stats->MulticastFramesReceivedOK = in axienet_ethtool_get_eth_mac_stats()
2403 mac_stats->BroadcastFramesReceivedOK = in axienet_ethtool_get_eth_mac_stats()
2405 mac_stats->InRangeLengthErrors = in axienet_ethtool_get_eth_mac_stats()
2407 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtool_get_eth_mac_stats()
2417 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtool_get_eth_ctrl_stats()
2421 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtool_get_eth_ctrl_stats()
2422 ctrl_stats->MACControlFramesTransmitted = in axienet_ethtool_get_eth_ctrl_stats()
2424 ctrl_stats->MACControlFramesReceived = in axienet_ethtool_get_eth_ctrl_stats()
2426 ctrl_stats->UnsupportedOpcodesReceived = in axienet_ethtool_get_eth_ctrl_stats()
2428 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtool_get_eth_ctrl_stats()
2450 if (!(lp->features & XAE_FEATURE_STATS)) in axienet_ethtool_get_rmon_stats()
2454 start = read_seqcount_begin(&lp->hw_stats_seqcount); in axienet_ethtool_get_rmon_stats()
2455 rmon_stats->undersize_pkts = in axienet_ethtool_get_rmon_stats()
2457 rmon_stats->oversize_pkts = in axienet_ethtool_get_rmon_stats()
2459 rmon_stats->fragments = in axienet_ethtool_get_rmon_stats()
2462 rmon_stats->hist[0] = in axienet_ethtool_get_rmon_stats()
2464 rmon_stats->hist[1] = in axienet_ethtool_get_rmon_stats()
2466 rmon_stats->hist[2] = in axienet_ethtool_get_rmon_stats()
2468 rmon_stats->hist[3] = in axienet_ethtool_get_rmon_stats()
2470 rmon_stats->hist[4] = in axienet_ethtool_get_rmon_stats()
2472 rmon_stats->hist[5] = in axienet_ethtool_get_rmon_stats()
2474 rmon_stats->hist[6] = in axienet_ethtool_get_rmon_stats()
2475 rmon_stats->oversize_pkts; in axienet_ethtool_get_rmon_stats()
2477 rmon_stats->hist_tx[0] = in axienet_ethtool_get_rmon_stats()
2479 rmon_stats->hist_tx[1] = in axienet_ethtool_get_rmon_stats()
2481 rmon_stats->hist_tx[2] = in axienet_ethtool_get_rmon_stats()
2483 rmon_stats->hist_tx[3] = in axienet_ethtool_get_rmon_stats()
2485 rmon_stats->hist_tx[4] = in axienet_ethtool_get_rmon_stats()
2487 rmon_stats->hist_tx[5] = in axienet_ethtool_get_rmon_stats()
2489 rmon_stats->hist_tx[6] = in axienet_ethtool_get_rmon_stats()
2491 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start)); in axienet_ethtool_get_rmon_stats()
2531 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; in axienet_pcs_get_state()
2538 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; in axienet_pcs_an_restart()
2548 struct mdio_device *pcs_phy = pcs_to_axienet_local(pcs)->pcs_phy; in axienet_pcs_config()
2549 struct net_device *ndev = pcs_to_axienet_local(pcs)->ndev; in axienet_pcs_config()
2553 if (lp->switch_x_sgmii) { in axienet_pcs_config()
2582 struct net_device *ndev = to_net_dev(config->dev); in axienet_mac_select_pcs()
2587 return &lp->pcs; in axienet_mac_select_pcs()
2611 struct net_device *ndev = to_net_dev(config->dev); in axienet_mac_link_up()
2629 dev_err(&ndev->dev, in axienet_mac_link_up()
2630 "Speed other than 10, 100 or 1Gbps is not supported\n"); in axienet_mac_link_up()
2656 * axienet_dma_err_handler - Work queue task for Axi DMA Error
2659 * Resets the Axi DMA and Axi Ethernet devices, and reconfigures the
2669 struct net_device *ndev = lp->ndev; in axienet_dma_err_handler()
2672 if (READ_ONCE(lp->stopping)) in axienet_dma_err_handler()
2675 napi_disable(&lp->napi_tx); in axienet_dma_err_handler()
2676 napi_disable(&lp->napi_rx); in axienet_dma_err_handler()
2678 axienet_setoptions(ndev, lp->options & in axienet_dma_err_handler()
2684 for (i = 0; i < lp->tx_bd_num; i++) { in axienet_dma_err_handler()
2685 cur_p = &lp->tx_bd_v[i]; in axienet_dma_err_handler()
2686 if (cur_p->cntrl) { in axienet_dma_err_handler()
2689 dma_unmap_single(lp->dev, addr, in axienet_dma_err_handler()
2690 (cur_p->cntrl & in axienet_dma_err_handler()
2694 if (cur_p->skb) in axienet_dma_err_handler()
2695 dev_kfree_skb_irq(cur_p->skb); in axienet_dma_err_handler()
2696 cur_p->phys = 0; in axienet_dma_err_handler()
2697 cur_p->phys_msb = 0; in axienet_dma_err_handler()
2698 cur_p->cntrl = 0; in axienet_dma_err_handler()
2699 cur_p->status = 0; in axienet_dma_err_handler()
2700 cur_p->app0 = 0; in axienet_dma_err_handler()
2701 cur_p->app1 = 0; in axienet_dma_err_handler()
2702 cur_p->app2 = 0; in axienet_dma_err_handler()
2703 cur_p->app3 = 0; in axienet_dma_err_handler()
2704 cur_p->app4 = 0; in axienet_dma_err_handler()
2705 cur_p->skb = NULL; in axienet_dma_err_handler()
2708 for (i = 0; i < lp->rx_bd_num; i++) { in axienet_dma_err_handler()
2709 cur_p = &lp->rx_bd_v[i]; in axienet_dma_err_handler()
2710 cur_p->status = 0; in axienet_dma_err_handler()
2711 cur_p->app0 = 0; in axienet_dma_err_handler()
2712 cur_p->app1 = 0; in axienet_dma_err_handler()
2713 cur_p->app2 = 0; in axienet_dma_err_handler()
2714 cur_p->app3 = 0; in axienet_dma_err_handler()
2715 cur_p->app4 = 0; in axienet_dma_err_handler()
2718 lp->tx_bd_ci = 0; in axienet_dma_err_handler()
2719 lp->tx_bd_tail = 0; in axienet_dma_err_handler()
2720 lp->rx_bd_ci = 0; in axienet_dma_err_handler()
2731 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ? in axienet_dma_err_handler()
2738 axienet_setoptions(ndev, lp->options & in axienet_dma_err_handler()
2742 napi_enable(&lp->napi_rx); in axienet_dma_err_handler()
2743 napi_enable(&lp->napi_tx); in axienet_dma_err_handler()
2744 axienet_setoptions(ndev, lp->options); in axienet_dma_err_handler()
2748 * axienet_probe - Axi Ethernet probe function.
2752 * Non-zero error value on failure.
2754 * This is the probe routine for Axi Ethernet driver. This is called before
2772 return -ENOMEM; in axienet_probe()
2776 SET_NETDEV_DEV(ndev, &pdev->dev); in axienet_probe()
2777 ndev->features = NETIF_F_SG; in axienet_probe()
2778 ndev->ethtool_ops = &axienet_ethtool_ops; in axienet_probe()
2780 /* MTU range: 64 - 9000 */ in axienet_probe()
2781 ndev->min_mtu = 64; in axienet_probe()
2782 ndev->max_mtu = XAE_JUMBO_MTU; in axienet_probe()
2785 lp->ndev = ndev; in axienet_probe()
2786 lp->dev = &pdev->dev; in axienet_probe()
2787 lp->options = XAE_OPTION_DEFAULTS; in axienet_probe()
2788 lp->rx_bd_num = RX_BD_NUM_DEFAULT; in axienet_probe()
2789 lp->tx_bd_num = TX_BD_NUM_DEFAULT; in axienet_probe()
2791 u64_stats_init(&lp->rx_stat_sync); in axienet_probe()
2792 u64_stats_init(&lp->tx_stat_sync); in axienet_probe()
2794 mutex_init(&lp->stats_lock); in axienet_probe()
2795 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock); in axienet_probe()
2796 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats); in axienet_probe()
2798 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk"); in axienet_probe()
2799 if (!lp->axi_clk) { in axienet_probe()
2800 /* For backward compatibility, if named AXI clock is not present, in axienet_probe()
2801 * treat the first clock specified as the AXI clock. in axienet_probe()
2803 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL); in axienet_probe()
2805 if (IS_ERR(lp->axi_clk)) { in axienet_probe()
2806 ret = PTR_ERR(lp->axi_clk); in axienet_probe()
2809 ret = clk_prepare_enable(lp->axi_clk); in axienet_probe()
2811 dev_err(&pdev->dev, "Unable to enable AXI clock: %d\n", ret); in axienet_probe()
2815 lp->misc_clks[0].id = "axis_clk"; in axienet_probe()
2816 lp->misc_clks[1].id = "ref_clk"; in axienet_probe()
2817 lp->misc_clks[2].id = "mgt_clk"; in axienet_probe()
2819 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_probe()
2823 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_probe()
2828 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres); in axienet_probe()
2829 if (IS_ERR(lp->regs)) { in axienet_probe()
2830 ret = PTR_ERR(lp->regs); in axienet_probe()
2833 lp->regs_start = ethres->start; in axienet_probe()
2836 lp->features = 0; in axienet_probe()
2839 lp->features |= XAE_FEATURE_STATS; in axienet_probe()
2841 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,txcsum", &value); in axienet_probe()
2844 case 1: in axienet_probe()
2845 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM; in axienet_probe()
2846 /* Can checksum any contiguous range */ in axienet_probe()
2847 ndev->features |= NETIF_F_HW_CSUM; in axienet_probe()
2850 lp->features |= XAE_FEATURE_FULL_TX_CSUM; in axienet_probe()
2851 /* Can checksum TCP/UDP over IPv4. */ in axienet_probe()
2852 ndev->features |= NETIF_F_IP_CSUM; in axienet_probe()
2856 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,rxcsum", &value); in axienet_probe()
2859 case 1: in axienet_probe()
2860 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM; in axienet_probe()
2861 ndev->features |= NETIF_F_RXCSUM; in axienet_probe()
2864 lp->features |= XAE_FEATURE_FULL_RX_CSUM; in axienet_probe()
2865 ndev->features |= NETIF_F_RXCSUM; in axienet_probe()
2869 /* For supporting jumbo frames, the Axi Ethernet hardware must have in axienet_probe()
2871 * we can enable jumbo option and start supporting jumbo frames. in axienet_probe()
2873 * the device-tree and accordingly set flags. in axienet_probe()
2875 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem); in axienet_probe()
2877 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node, in axienet_probe()
2878 "xlnx,switch-x-sgmii"); in axienet_probe()
2881 ret = of_property_read_u32(pdev->dev.of_node, "xlnx,phy-type", &value); in axienet_probe()
2883 netdev_warn(ndev, "Please upgrade your device tree binary blob to use phy-mode"); in axienet_probe()
2886 lp->phy_mode = PHY_INTERFACE_MODE_MII; in axienet_probe()
2889 lp->phy_mode = PHY_INTERFACE_MODE_GMII; in axienet_probe()
2892 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID; in axienet_probe()
2895 lp->phy_mode = PHY_INTERFACE_MODE_SGMII; in axienet_probe()
2898 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX; in axienet_probe()
2901 ret = -EINVAL; in axienet_probe()
2905 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode); in axienet_probe()
2909 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII && in axienet_probe()
2910 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) { in axienet_probe()
2911 dev_err(&pdev->dev, "xlnx,switch-x-sgmii only supported with SGMII or 1000BaseX\n"); in axienet_probe()
2912 ret = -EINVAL; in axienet_probe()
2916 if (!of_property_present(pdev->dev.of_node, "dmas")) { in axienet_probe()
2918 np = of_parse_phandle(pdev->dev.of_node, "axistream-connected", 0); in axienet_probe()
2925 dev_err(&pdev->dev, in axienet_probe()
2930 lp->dma_regs = devm_ioremap_resource(&pdev->dev, in axienet_probe()
2932 lp->rx_irq = irq_of_parse_and_map(np, 1); in axienet_probe()
2933 lp->tx_irq = irq_of_parse_and_map(np, 0); in axienet_probe()
2935 lp->eth_irq = platform_get_irq_optional(pdev, 0); in axienet_probe()
2938 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL); in axienet_probe()
2939 lp->rx_irq = platform_get_irq(pdev, 1); in axienet_probe()
2940 lp->tx_irq = platform_get_irq(pdev, 0); in axienet_probe()
2941 lp->eth_irq = platform_get_irq_optional(pdev, 2); in axienet_probe()
2943 if (IS_ERR(lp->dma_regs)) { in axienet_probe()
2944 dev_err(&pdev->dev, "could not map DMA regs\n"); in axienet_probe()
2945 ret = PTR_ERR(lp->dma_regs); in axienet_probe()
2948 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) { in axienet_probe()
2949 dev_err(&pdev->dev, "could not determine irqs\n"); in axienet_probe()
2950 ret = -ENOMEM; in axienet_probe()
2959 /* Autodetect the need for 64-bit DMA pointers. in axienet_probe()
2962 * We can detect this case by writing all 1's to one such register in axienet_probe()
2965 * Those MSB registers were introduced in IP v7.1, which we check first. in axienet_probe()
2968 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4; in axienet_probe()
2974 lp->features |= XAE_FEATURE_DMA_64BIT; in axienet_probe()
2976 dev_info(&pdev->dev, in axienet_probe()
2977 "autodetected 64-bit DMA range\n"); in axienet_probe()
2982 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) { in axienet_probe()
 2983  		dev_err(&pdev->dev, "64-bit addressable DMA is not compatible with 32-bit architecture\n"); in axienet_probe()
2984 ret = -EINVAL; in axienet_probe()
2988 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_width)); in axienet_probe()
2990 dev_err(&pdev->dev, "No suitable DMA available\n"); in axienet_probe()
2993 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll); in axienet_probe()
2994 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll); in axienet_probe()
2999 lp->eth_irq = platform_get_irq_optional(pdev, 0); in axienet_probe()
3000 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) { in axienet_probe()
3001 ret = lp->eth_irq; in axienet_probe()
3004 tx_chan = dma_request_chan(lp->dev, "tx_chan0"); in axienet_probe()
3007 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n"); in axienet_probe()
3011 cfg.reset = 1; in axienet_probe()
3015 dev_err(&pdev->dev, "Reset channel failed\n"); in axienet_probe()
3021 lp->use_dmaengine = 1; in axienet_probe()
3024 if (lp->use_dmaengine) in axienet_probe()
3025 ndev->netdev_ops = &axienet_netdev_dmaengine_ops; in axienet_probe()
3027 ndev->netdev_ops = &axienet_netdev_ops; in axienet_probe()
3029 if (lp->eth_irq <= 0) in axienet_probe()
3030 dev_info(&pdev->dev, "Ethernet core IRQ not defined\n"); in axienet_probe()
3033 ret = of_get_mac_address(pdev->dev.of_node, mac_addr); in axienet_probe()
3037 dev_warn(&pdev->dev, "could not find MAC address property: %d\n", in axienet_probe()
3042 spin_lock_init(&lp->rx_cr_lock); in axienet_probe()
3043 spin_lock_init(&lp->tx_cr_lock); in axienet_probe()
3044 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work); in axienet_probe()
3045 lp->rx_dim_enabled = true; in axienet_probe()
3046 lp->rx_dim.profile_ix = 1; in axienet_probe()
3047 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), in axienet_probe()
3049 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD, in axienet_probe()
3054 dev_warn(&pdev->dev, in axienet_probe()
3057 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII || in axienet_probe()
3058 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) { in axienet_probe()
3059 np = of_parse_phandle(pdev->dev.of_node, "pcs-handle", 0); in axienet_probe()
3061 /* Deprecated: Always use "pcs-handle" for pcs_phy. in axienet_probe()
3062 * Falling back to "phy-handle" here is only for in axienet_probe()
3065 np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); in axienet_probe()
3068 dev_err(&pdev->dev, "pcs-handle (preferred) or phy-handle required for 1000BaseX/SGMII\n"); in axienet_probe()
3069 ret = -EINVAL; in axienet_probe()
3072 lp->pcs_phy = of_mdio_find_device(np); in axienet_probe()
3073 if (!lp->pcs_phy) { in axienet_probe()
3074 ret = -EPROBE_DEFER; in axienet_probe()
3079 lp->pcs.ops = &axienet_pcs_ops; in axienet_probe()
3080 lp->pcs.poll = true; in axienet_probe()
3083 lp->phylink_config.dev = &ndev->dev; in axienet_probe()
3084 lp->phylink_config.type = PHYLINK_NETDEV; in axienet_probe()
3085 lp->phylink_config.mac_managed_pm = true; in axienet_probe()
3086 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE | in axienet_probe()
3089 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces); in axienet_probe()
3090 if (lp->switch_x_sgmii) { in axienet_probe()
3092 lp->phylink_config.supported_interfaces); in axienet_probe()
3094 lp->phylink_config.supported_interfaces); in axienet_probe()
3097 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode, in axienet_probe()
3098 lp->phy_mode, in axienet_probe()
3100 if (IS_ERR(lp->phylink)) { in axienet_probe()
3101 ret = PTR_ERR(lp->phylink); in axienet_probe()
3102 dev_err(&pdev->dev, "phylink_create error (%i)\n", ret); in axienet_probe()
3106 ret = register_netdev(lp->ndev); in axienet_probe()
3108 dev_err(lp->dev, "register_netdev() error (%i)\n", ret); in axienet_probe()
3115 phylink_destroy(lp->phylink); in axienet_probe()
3118 if (lp->pcs_phy) in axienet_probe()
3119 put_device(&lp->pcs_phy->dev); in axienet_probe()
3120 if (lp->mii_bus) in axienet_probe()
3123 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_probe()
3124 clk_disable_unprepare(lp->axi_clk); in axienet_probe()
3139 if (lp->phylink) in axienet_remove()
3140 phylink_destroy(lp->phylink); in axienet_remove()
3142 if (lp->pcs_phy) in axienet_remove()
3143 put_device(&lp->pcs_phy->dev); in axienet_remove()
3147 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks); in axienet_remove()
3148 clk_disable_unprepare(lp->axi_clk); in axienet_remove()
3214 MODULE_DESCRIPTION("Xilinx Axi Ethernet driver");