Lines matching lp — the struct axienet_local * driver-private pointer used throughout the Xilinx AXI Ethernet (axienet) driver. Each entry below is the source line number followed by the matching line.

133 static struct skbuf_dma_descriptor *axienet_get_rx_desc(struct axienet_local *lp, int i)
135 return lp->rx_skb_ring[i & (RX_BUF_NUM_DEFAULT - 1)];
138 static struct skbuf_dma_descriptor *axienet_get_tx_desc(struct axienet_local *lp, int i)
140 return lp->tx_skb_ring[i & (TX_BD_NUM_MAX - 1)];
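The index masking in axienet_get_rx_desc()/axienet_get_tx_desc() above relies on RX_BUF_NUM_DEFAULT and TX_BD_NUM_MAX being powers of two, so the AND replaces a modulo. A minimal userspace sketch of the idiom (names and sizes here are illustrative, not the driver's):

#include <stdio.h>

#define RING_SIZE 128u  /* must be a power of two for the mask trick to work */

/* Map a free-running index onto a ring slot: equivalent to i % RING_SIZE
 * when RING_SIZE is a power of two, but without a division.
 */
static unsigned int ring_slot(unsigned int i)
{
	return i & (RING_SIZE - 1);
}

int main(void)
{
	/* The index may keep incrementing past the ring size (and even wrap
	 * around UINT_MAX); the mask always yields a slot in [0, RING_SIZE).
	 */
	for (unsigned int i = 125; i < 131; i++)
		printf("index %u -> slot %u\n", i, ring_slot(i));
	return 0;
}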
145 * @lp: Pointer to axienet local structure
152 static inline u32 axienet_dma_in32(struct axienet_local *lp, off_t reg)
154 return ioread32(lp->dma_regs + reg);
157 static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
161 if (lp->features & XAE_FEATURE_DMA_64BIT)
165 static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
170 if (lp->features & XAE_FEATURE_DMA_64BIT)
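desc_set_phys_addr()/desc_get_phys_addr() store a dma_addr_t across two 32-bit descriptor words when XAE_FEATURE_DMA_64BIT is set. A minimal sketch of that split and reassembly, using an illustrative descriptor type rather than the driver's:

#include <stdio.h>
#include <stdint.h>

/* Illustrative descriptor with the address stored as two 32-bit words. */
struct demo_desc {
	uint32_t phys;      /* low 32 bits of the DMA address */
	uint32_t phys_msb;  /* high 32 bits, used only with 64-bit DMA */
};

static void demo_set_phys(struct demo_desc *d, uint64_t addr, int has_64bit)
{
	d->phys = (uint32_t)addr;                     /* like lower_32_bits() */
	if (has_64bit)
		d->phys_msb = (uint32_t)(addr >> 32); /* like upper_32_bits() */
}

static uint64_t demo_get_phys(const struct demo_desc *d, int has_64bit)
{
	uint64_t addr = d->phys;

	if (has_64bit)
		addr |= (uint64_t)d->phys_msb << 32;
	return addr;
}

int main(void)
{
	struct demo_desc d = { 0, 0 };
	uint64_t addr = 0x123456789abcull;

	demo_set_phys(&d, addr, 1);
	printf("stored 0x%llx, read back 0x%llx\n",
	       (unsigned long long)addr,
	       (unsigned long long)demo_get_phys(&d, 1));
	return 0;
}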
187 struct axienet_local *lp = netdev_priv(ndev);
190 dma_free_coherent(lp->dev,
191 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
192 lp->tx_bd_v,
193 lp->tx_bd_p);
195 if (!lp->rx_bd_v)
198 for (i = 0; i < lp->rx_bd_num; i++) {
204 if (!lp->rx_bd_v[i].skb)
207 dev_kfree_skb(lp->rx_bd_v[i].skb);
213 if (lp->rx_bd_v[i].cntrl) {
214 phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
215 dma_unmap_single(lp->dev, phys,
216 lp->max_frm_size, DMA_FROM_DEVICE);
220 dma_free_coherent(lp->dev,
221 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
222 lp->rx_bd_v,
223 lp->rx_bd_p);
226 static u64 axienet_dma_rate(struct axienet_local *lp)
228 if (lp->axi_clk)
229 return clk_get_rate(lp->axi_clk);
235 * @lp: Device private data
242 static u32 axienet_calc_cr(struct axienet_local *lp, u32 count, u32 usec)
252 u64 clk_rate = axienet_dma_rate(lp);
269 * @lp: Device private data
274 static void axienet_coalesce_params(struct axienet_local *lp, u32 cr,
277 u64 clk_rate = axienet_dma_rate(lp);
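axienet_calc_cr() turns a microsecond delay into delay-timer ticks using the DMA clock rate from axienet_dma_rate(), and axienet_coalesce_params() reverses the conversion. A rough userspace sketch of the forward direction, assuming (purely for illustration, not as a statement of the hardware spec) that one delay-timer tick spans 125 SG clock periods and that the timer field is 8 bits wide:

#include <stdio.h>
#include <stdint.h>

/* ASSUMPTION for this sketch: one delay-timer tick equals 125 periods of the
 * DMA/SG clock, so ticks = usec * clk_rate / (125 * 10^6), rounded to the
 * nearest tick and capped at the (assumed 8-bit) hardware field.
 */
#define TICK_CLOCK_PERIODS 125ull
#define TIMER_FIELD_MAX    255u

static unsigned int usec_to_timer_ticks(unsigned int usec, uint64_t clk_rate_hz)
{
	uint64_t scale = TICK_CLOCK_PERIODS * 1000000ull;
	uint64_t ticks = ((uint64_t)usec * clk_rate_hz + scale / 2) / scale;

	return ticks > TIMER_FIELD_MAX ? TIMER_FIELD_MAX : (unsigned int)ticks;
}

int main(void)
{
	/* e.g. a 100 MHz SG clock: one tick is 125 / 100e6 s = 1.25 us */
	printf("50 us at 100 MHz -> %u ticks\n",
	       usec_to_timer_ticks(50, 100000000ull));
	return 0;
}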
286 * @lp: Pointer to the axienet_local structure
288 static void axienet_dma_start(struct axienet_local *lp)
290 spin_lock_irq(&lp->rx_cr_lock);
293 lp->rx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
294 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
299 axienet_dma_out_addr(lp, XAXIDMA_RX_CDESC_OFFSET, lp->rx_bd_p);
300 lp->rx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
301 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
302 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, lp->rx_bd_p +
303 (sizeof(*lp->rx_bd_v) * (lp->rx_bd_num - 1)));
304 lp->rx_dma_started = true;
306 spin_unlock_irq(&lp->rx_cr_lock);
307 spin_lock_irq(&lp->tx_cr_lock);
310 lp->tx_dma_cr &= ~XAXIDMA_CR_RUNSTOP_MASK;
311 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
317 axienet_dma_out_addr(lp, XAXIDMA_TX_CDESC_OFFSET, lp->tx_bd_p);
318 lp->tx_dma_cr |= XAXIDMA_CR_RUNSTOP_MASK;
319 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
320 lp->tx_dma_started = true;
322 spin_unlock_irq(&lp->tx_cr_lock);
339 struct axienet_local *lp = netdev_priv(ndev);
342 lp->tx_bd_ci = 0;
343 lp->tx_bd_tail = 0;
344 lp->rx_bd_ci = 0;
347 lp->tx_bd_v = dma_alloc_coherent(lp->dev,
348 sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
349 &lp->tx_bd_p, GFP_KERNEL);
350 if (!lp->tx_bd_v)
353 lp->rx_bd_v = dma_alloc_coherent(lp->dev,
354 sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
355 &lp->rx_bd_p, GFP_KERNEL);
356 if (!lp->rx_bd_v)
359 for (i = 0; i < lp->tx_bd_num; i++) {
360 dma_addr_t addr = lp->tx_bd_p +
361 sizeof(*lp->tx_bd_v) *
362 ((i + 1) % lp->tx_bd_num);
364 lp->tx_bd_v[i].next = lower_32_bits(addr);
365 if (lp->features & XAE_FEATURE_DMA_64BIT)
366 lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
369 for (i = 0; i < lp->rx_bd_num; i++) {
372 addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
373 ((i + 1) % lp->rx_bd_num);
374 lp->rx_bd_v[i].next = lower_32_bits(addr);
375 if (lp->features & XAE_FEATURE_DMA_64BIT)
376 lp->rx_bd_v[i].next_msb = upper_32_bits(addr);
378 skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
382 lp->rx_bd_v[i].skb = skb;
383 addr = dma_map_single(lp->dev, skb->data,
384 lp->max_frm_size, DMA_FROM_DEVICE);
385 if (dma_mapping_error(lp->dev, addr)) {
389 desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
391 lp->rx_bd_v[i].cntrl = lp->max_frm_size;
394 axienet_dma_start(lp);
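The BD ring setup above chains descriptors by pointing each one's next field at the bus address of descriptor (i + 1) % ring_size, so the last entry wraps back to the first. A minimal sketch of that circular chaining arithmetic (illustrative types and addresses):

#include <stdio.h>
#include <stdint.h>

struct demo_bd {
	uint32_t next; /* low 32 bits of the next descriptor's bus address */
	/* ... other descriptor fields elided ... */
};

int main(void)
{
	const unsigned int num_bd = 4;
	const uint64_t ring_phys = 0x40000000ull; /* illustrative base bus address */

	/* Descriptor i points at descriptor (i + 1) % num_bd, so following the
	 * chain from the last entry wraps back to the head of the ring.
	 */
	for (unsigned int i = 0; i < num_bd; i++) {
		uint64_t next = ring_phys + sizeof(struct demo_bd) * ((i + 1) % num_bd);

		printf("bd[%u] at 0x%llx -> next 0x%llx\n", i,
		       (unsigned long long)(ring_phys + sizeof(struct demo_bd) * i),
		       (unsigned long long)next);
	}
	return 0;
}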
413 struct axienet_local *lp = netdev_priv(ndev);
421 axienet_iow(lp, XAE_UAW0_OFFSET,
426 axienet_iow(lp, XAE_UAW1_OFFSET,
427 (((axienet_ior(lp, XAE_UAW1_OFFSET)) &
467 struct axienet_local *lp = netdev_priv(ndev);
469 reg = axienet_ior(lp, XAE_FMI_OFFSET);
475 axienet_iow(lp, XAE_FMI_OFFSET, reg);
480 axienet_iow(lp, XAE_FMI_OFFSET, reg);
481 axienet_iow(lp, XAE_AF0_OFFSET, 1); /* Multicast bit */
482 axienet_iow(lp, XAE_AF1_OFFSET, 0);
483 axienet_iow(lp, XAE_AM0_OFFSET, 1); /* ditto */
484 axienet_iow(lp, XAE_AM1_OFFSET, 0);
485 axienet_iow(lp, XAE_FFE_OFFSET, 1);
505 axienet_iow(lp, XAE_FMI_OFFSET, reg);
506 axienet_iow(lp, XAE_AF0_OFFSET, af0reg);
507 axienet_iow(lp, XAE_AF1_OFFSET, af1reg);
508 axienet_iow(lp, XAE_AM0_OFFSET, 0xffffffff);
509 axienet_iow(lp, XAE_AM1_OFFSET, 0x0000ffff);
510 axienet_iow(lp, XAE_FFE_OFFSET, 1);
518 axienet_iow(lp, XAE_FMI_OFFSET, reg);
519 axienet_iow(lp, XAE_FFE_OFFSET, 0);
537 struct axienet_local *lp = netdev_priv(ndev);
541 reg = ((axienet_ior(lp, tp->reg)) & ~(tp->m_or));
544 axienet_iow(lp, tp->reg, reg);
548 lp->options |= options;
551 static u64 axienet_stat(struct axienet_local *lp, enum temac_stat stat)
555 if (lp->reset_in_progress)
556 return lp->hw_stat_base[stat];
558 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
559 return lp->hw_stat_base[stat] + (counter - lp->hw_last_counter[stat]);
562 static void axienet_stats_update(struct axienet_local *lp, bool reset)
566 write_seqcount_begin(&lp->hw_stats_seqcount);
567 lp->reset_in_progress = reset;
569 u32 counter = axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
571 lp->hw_stat_base[stat] += counter - lp->hw_last_counter[stat];
572 lp->hw_last_counter[stat] = counter;
574 write_seqcount_end(&lp->hw_stats_seqcount);
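axienet_stat() and axienet_stats_update() fold 32-bit hardware counters into 64-bit software totals by adding the unsigned delta since the last snapshot, which stays correct across a single counter wrap. A minimal sketch of the idiom:

#include <stdio.h>
#include <stdint.h>

static uint64_t hw_stat_base;    /* 64-bit software accumulator */
static uint32_t hw_last_counter; /* last raw 32-bit hardware value seen */

/* Fold the current raw counter into the 64-bit total. Unsigned 32-bit
 * subtraction gives the correct delta even if the counter wrapped once
 * since the last update.
 */
static void stats_update(uint32_t counter)
{
	hw_stat_base += (uint32_t)(counter - hw_last_counter);
	hw_last_counter = counter;
}

int main(void)
{
	stats_update(0xfffffff0u); /* near the 32-bit limit */
	stats_update(0x00000010u); /* counter wrapped around */
	printf("total = %llu (expected 4294967312)\n",
	       (unsigned long long)hw_stat_base);
	return 0;
}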
579 struct axienet_local *lp = container_of(work, struct axienet_local,
582 mutex_lock(&lp->stats_lock);
583 axienet_stats_update(lp, false);
584 mutex_unlock(&lp->stats_lock);
587 schedule_delayed_work(&lp->stats_work, 13 * HZ);
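The 13-second refresh interval is paced against the fastest possible wrap of a 32-bit byte counter: assuming a 2.5 Gbit/s maximum line rate, the counters grow by at most 2.5e9 / 8 = 312.5 MB/s, so 2^32 bytes take roughly 4294967296 / 312500000 ≈ 13.7 s. Refreshing every 13 s therefore samples each counter before it can wrap more than once between snapshots.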
590 static int __axienet_device_reset(struct axienet_local *lp)
596 mutex_lock(&lp->stats_lock);
597 if (lp->features & XAE_FEATURE_STATS)
598 axienet_stats_update(lp, true);
607 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, XAXIDMA_CR_RESET_MASK);
610 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
613 dev_err(lp->dev, "%s: DMA reset timeout!\n", __func__);
620 DELAY_OF_ONE_MILLISEC, 50000, false, lp,
623 dev_err(lp->dev, "%s: timeout waiting for PhyRstCmplt\n", __func__);
628 if (lp->features & XAE_FEATURE_STATS) {
631 write_seqcount_begin(&lp->hw_stats_seqcount);
632 lp->reset_in_progress = false;
635 axienet_ior(lp, XAE_STATS_OFFSET + stat * 8);
637 lp->hw_stat_base[stat] +=
638 lp->hw_last_counter[stat] - counter;
639 lp->hw_last_counter[stat] = counter;
641 write_seqcount_end(&lp->hw_stats_seqcount);
645 mutex_unlock(&lp->stats_lock);
651 * @lp: Pointer to the axienet_local structure
653 static void axienet_dma_stop(struct axienet_local *lp)
658 spin_lock_irq(&lp->rx_cr_lock);
660 cr = lp->rx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
661 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
662 lp->rx_dma_started = false;
664 spin_unlock_irq(&lp->rx_cr_lock);
665 synchronize_irq(lp->rx_irq);
667 spin_lock_irq(&lp->tx_cr_lock);
669 cr = lp->tx_dma_cr & ~(XAXIDMA_CR_RUNSTOP_MASK | XAXIDMA_IRQ_ALL_MASK);
670 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
671 lp->tx_dma_started = false;
673 spin_unlock_irq(&lp->tx_cr_lock);
674 synchronize_irq(lp->tx_irq);
677 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
680 sr = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
683 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
686 sr = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
690 axienet_lock_mii(lp);
691 __axienet_device_reset(lp);
692 axienet_unlock_mii(lp);
710 struct axienet_local *lp = netdev_priv(ndev);
713 lp->max_frm_size = XAE_MAX_VLAN_FRAME_SIZE;
714 lp->options |= XAE_OPTION_VLAN;
715 lp->options &= (~XAE_OPTION_JUMBO);
718 lp->max_frm_size = ndev->mtu + VLAN_ETH_HLEN +
721 if (lp->max_frm_size <= lp->rxmem)
722 lp->options |= XAE_OPTION_JUMBO;
725 if (!lp->use_dmaengine) {
726 ret = __axienet_device_reset(lp);
738 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
740 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
742 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
744 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
745 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
748 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
753 axienet_setoptions(ndev, lp->options &
757 axienet_setoptions(ndev, lp->options);
766 * @lp: Pointer to the axienet_local structure
778 static int axienet_free_tx_chain(struct axienet_local *lp, u32 first_bd,
787 cur_p = &lp->tx_bd_v[(first_bd + i) % lp->tx_bd_num];
798 phys = desc_get_phys_addr(lp, cur_p);
799 dma_unmap_single(lp->dev, phys,
823 lp->tx_bd_ci += i;
824 if (lp->tx_bd_ci >= lp->tx_bd_num)
825 lp->tx_bd_ci %= lp->tx_bd_num;
833 * @lp: Pointer to the axienet_local structure
844 static inline int axienet_check_tx_bd_space(struct axienet_local *lp,
851 cur_p = &lp->tx_bd_v[(READ_ONCE(lp->tx_bd_tail) + num_frag) %
852 lp->tx_bd_num];
868 struct axienet_local *lp = data;
872 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_tail++);
874 txq = skb_get_tx_queue(lp->ndev, skbuf_dma->skb);
875 u64_stats_update_begin(&lp->tx_stat_sync);
876 u64_stats_add(&lp->tx_bytes, len);
877 u64_stats_add(&lp->tx_packets, 1);
878 u64_stats_update_end(&lp->tx_stat_sync);
879 dma_unmap_sg(lp->dev, skbuf_dma->sgl, skbuf_dma->sg_len, DMA_TO_DEVICE);
882 CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
905 struct axienet_local *lp = netdev_priv(ndev);
915 dma_dev = lp->tx_chan->device;
917 if (CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX) <= 1) {
924 skbuf_dma = axienet_get_tx_desc(lp, lp->tx_ring_head);
928 lp->tx_ring_head++;
934 ret = dma_map_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
940 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
943 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
954 dma_tx_desc = dma_dev->device_prep_slave_sg(lp->tx_chan, skbuf_dma->sgl,
962 dma_tx_desc->callback_param = lp;
964 txq = skb_get_tx_queue(lp->ndev, skb);
966 netif_txq_maybe_stop(txq, CIRC_SPACE(lp->tx_ring_head, lp->tx_ring_tail, TX_BD_NUM_MAX),
970 dma_async_issue_pending(lp->tx_chan);
974 dma_unmap_sg(lp->dev, skbuf_dma->sgl, sg_len, DMA_TO_DEVICE);
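The dmaengine transmit path tracks its skb ring with a free-running head/tail pair and CIRC_SPACE(), stopping the queue when at most one free slot remains (the <= 1 check above). A minimal userspace sketch of that accounting, reimplementing the <linux/circ_buf.h> arithmetic so the example is self-contained:

#include <stdio.h>

#define RING_SIZE 8u /* power of two, like the driver's TX ring size */

/* Same arithmetic as CIRC_CNT()/CIRC_SPACE() in <linux/circ_buf.h>:
 * entries currently in the ring, and free slots left for the producer.
 */
static unsigned int circ_cnt(unsigned int head, unsigned int tail)
{
	return (head - tail) & (RING_SIZE - 1);
}

static unsigned int circ_space(unsigned int head, unsigned int tail)
{
	return circ_cnt(tail, head + 1);
}

int main(void)
{
	unsigned int head = 0, tail = 0;

	/* Producer queues 6 buffers, consumer completes 2. */
	head += 6;
	tail += 2;
	printf("in flight: %u, free: %u\n",
	       circ_cnt(head, tail), circ_space(head, tail));

	/* One slot is always left unused so a full ring is distinguishable
	 * from an empty one; the transmit path stops the queue at space <= 1.
	 */
	if (circ_space(head, tail) <= 1)
		printf("queue would be stopped\n");
	return 0;
}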
996 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_tx);
997 struct net_device *ndev = lp->ndev;
1001 packets = axienet_free_tx_chain(lp, lp->tx_bd_ci, lp->tx_bd_num, false,
1006 u64_stats_update_begin(&lp->tx_stat_sync);
1007 u64_stats_add(&lp->tx_packets, packets);
1008 u64_stats_add(&lp->tx_bytes, size);
1009 u64_stats_update_end(&lp->tx_stat_sync);
1014 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1023 spin_lock_irq(&lp->tx_cr_lock);
1024 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, lp->tx_dma_cr);
1025 spin_unlock_irq(&lp->tx_cr_lock);
1053 struct axienet_local *lp = netdev_priv(ndev);
1056 orig_tail_ptr = lp->tx_bd_tail;
1060 cur_p = &lp->tx_bd_v[orig_tail_ptr];
1062 if (axienet_check_tx_bd_space(lp, num_frag + 1)) {
1074 if (lp->features & XAE_FEATURE_FULL_TX_CSUM) {
1077 } else if (lp->features & XAE_FEATURE_PARTIAL_TX_CSUM) {
1088 phys = dma_map_single(lp->dev, skb->data,
1090 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1097 desc_set_phys_addr(lp, phys, cur_p);
1101 if (++new_tail_ptr >= lp->tx_bd_num)
1103 cur_p = &lp->tx_bd_v[new_tail_ptr];
1105 phys = dma_map_single(lp->dev,
1109 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1113 axienet_free_tx_chain(lp, orig_tail_ptr, ii + 1,
1118 desc_set_phys_addr(lp, phys, cur_p);
1125 tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * new_tail_ptr;
1126 if (++new_tail_ptr >= lp->tx_bd_num)
1128 WRITE_ONCE(lp->tx_bd_tail, new_tail_ptr);
1132 axienet_dma_out_addr(lp, XAXIDMA_TX_TDESC_OFFSET, tail_p);
1135 if (axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1)) {
1142 if (!axienet_check_tx_bd_space(lp, MAX_SKB_FRAGS + 1))
1160 struct axienet_local *lp = data;
1164 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_tail++);
1168 dma_unmap_single(lp->dev, skbuf_dma->dma_address, lp->max_frm_size,
1173 skb->protocol = eth_type_trans(skb, lp->ndev);
1177 u64_stats_update_begin(&lp->rx_stat_sync);
1178 u64_stats_add(&lp->rx_packets, 1);
1179 u64_stats_add(&lp->rx_bytes, rx_len);
1180 u64_stats_update_end(&lp->rx_stat_sync);
1181 axienet_rx_submit_desc(lp->ndev);
1182 dma_async_issue_pending(lp->rx_chan);
1201 struct axienet_local *lp = container_of(napi, struct axienet_local, napi_rx);
1203 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1222 phys = desc_get_phys_addr(lp, cur_p);
1223 dma_unmap_single(lp->dev, phys, lp->max_frm_size,
1227 skb->protocol = eth_type_trans(skb, lp->ndev);
1232 if (lp->features & XAE_FEATURE_FULL_RX_CSUM) {
1239 } else if (lp->features & XAE_FEATURE_PARTIAL_RX_CSUM) {
1250 new_skb = napi_alloc_skb(napi, lp->max_frm_size);
1254 phys = dma_map_single(lp->dev, new_skb->data,
1255 lp->max_frm_size,
1257 if (unlikely(dma_mapping_error(lp->dev, phys))) {
1259 netdev_err(lp->ndev, "RX DMA mapping error\n");
1263 desc_set_phys_addr(lp, phys, cur_p);
1265 cur_p->cntrl = lp->max_frm_size;
1272 tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;
1274 if (++lp->rx_bd_ci >= lp->rx_bd_num)
1275 lp->rx_bd_ci = 0;
1276 cur_p = &lp->rx_bd_v[lp->rx_bd_ci];
1279 u64_stats_update_begin(&lp->rx_stat_sync);
1280 u64_stats_add(&lp->rx_packets, packets);
1281 u64_stats_add(&lp->rx_bytes, size);
1282 u64_stats_update_end(&lp->rx_stat_sync);
1285 axienet_dma_out_addr(lp, XAXIDMA_RX_TDESC_OFFSET, tail_p);
1288 if (READ_ONCE(lp->rx_dim_enabled)) {
1292 .pkt_ctr = u64_stats_read(&lp->rx_packets),
1293 .byte_ctr = u64_stats_read(&lp->rx_bytes),
1294 .event_ctr = READ_ONCE(lp->rx_irqs),
1297 net_dim(&lp->rx_dim, &sample);
1304 spin_lock_irq(&lp->rx_cr_lock);
1305 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, lp->rx_dma_cr);
1306 spin_unlock_irq(&lp->rx_cr_lock);
1325 struct axienet_local *lp = netdev_priv(ndev);
1327 status = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1332 axienet_dma_out32(lp, XAXIDMA_TX_SR_OFFSET, status);
1337 (lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
1338 (lp->tx_bd_v[lp->tx_bd_ci]).phys);
1339 schedule_work(&lp->dma_err_task);
1344 if (napi_schedule_prep(&lp->napi_tx)) {
1347 spin_lock(&lp->tx_cr_lock);
1348 cr = lp->tx_dma_cr;
1350 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
1351 spin_unlock(&lp->tx_cr_lock);
1352 __napi_schedule(&lp->napi_tx);
1373 struct axienet_local *lp = netdev_priv(ndev);
1375 status = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1380 axienet_dma_out32(lp, XAXIDMA_RX_SR_OFFSET, status);
1385 (lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
1386 (lp->rx_bd_v[lp->rx_bd_ci]).phys);
1387 schedule_work(&lp->dma_err_task);
1392 WRITE_ONCE(lp->rx_irqs, READ_ONCE(lp->rx_irqs) + 1);
1393 if (napi_schedule_prep(&lp->napi_rx)) {
1396 spin_lock(&lp->rx_cr_lock);
1397 cr = lp->rx_dma_cr;
1399 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
1400 spin_unlock(&lp->rx_cr_lock);
1402 __napi_schedule(&lp->napi_rx);
1421 struct axienet_local *lp = netdev_priv(ndev);
1424 pending = axienet_ior(lp, XAE_IP_OFFSET);
1434 axienet_iow(lp, XAE_IS_OFFSET, pending);
1451 struct axienet_local *lp = netdev_priv(ndev);
1456 skbuf_dma = axienet_get_rx_desc(lp, lp->rx_ring_head);
1460 lp->rx_ring_head++;
1461 skb = netdev_alloc_skb(ndev, lp->max_frm_size);
1466 addr = dma_map_single(lp->dev, skb->data, lp->max_frm_size, DMA_FROM_DEVICE);
1467 if (unlikely(dma_mapping_error(lp->dev, addr))) {
1473 sg_dma_len(skbuf_dma->sgl) = lp->max_frm_size;
1474 dma_rx_desc = dmaengine_prep_slave_sg(lp->rx_chan, skbuf_dma->sgl,
1483 dma_rx_desc->callback_param = lp;
1490 dma_unmap_single(lp->dev, addr, lp->max_frm_size, DMA_FROM_DEVICE);
1506 struct axienet_local *lp = netdev_priv(ndev);
1510 lp->tx_chan = dma_request_chan(lp->dev, "tx_chan0");
1511 if (IS_ERR(lp->tx_chan)) {
1512 dev_err(lp->dev, "No Ethernet DMA (TX) channel found\n");
1513 return PTR_ERR(lp->tx_chan);
1516 lp->rx_chan = dma_request_chan(lp->dev, "rx_chan0");
1517 if (IS_ERR(lp->rx_chan)) {
1518 ret = PTR_ERR(lp->rx_chan);
1519 dev_err(lp->dev, "No Ethernet DMA (RX) channel found\n");
1523 lp->tx_ring_tail = 0;
1524 lp->tx_ring_head = 0;
1525 lp->rx_ring_tail = 0;
1526 lp->rx_ring_head = 0;
1527 lp->tx_skb_ring = kcalloc(TX_BD_NUM_MAX, sizeof(*lp->tx_skb_ring),
1529 if (!lp->tx_skb_ring) {
1539 lp->tx_skb_ring[i] = skbuf_dma;
1542 lp->rx_skb_ring = kcalloc(RX_BUF_NUM_DEFAULT, sizeof(*lp->rx_skb_ring),
1544 if (!lp->rx_skb_ring) {
1554 lp->rx_skb_ring[i] = skbuf_dma;
1559 dma_async_issue_pending(lp->rx_chan);
1565 kfree(lp->rx_skb_ring[i]);
1566 kfree(lp->rx_skb_ring);
1569 kfree(lp->tx_skb_ring[i]);
1570 kfree(lp->tx_skb_ring);
1572 dma_release_channel(lp->rx_chan);
1574 dma_release_channel(lp->tx_chan);
1592 struct axienet_local *lp = netdev_priv(ndev);
1595 lp->stopping = false;
1596 INIT_WORK(&lp->dma_err_task, axienet_dma_err_handler);
1598 napi_enable(&lp->napi_rx);
1599 napi_enable(&lp->napi_tx);
1602 ret = request_irq(lp->tx_irq, axienet_tx_irq, IRQF_SHARED,
1607 ret = request_irq(lp->rx_irq, axienet_rx_irq, IRQF_SHARED,
1612 if (lp->eth_irq > 0) {
1613 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1622 free_irq(lp->rx_irq, ndev);
1624 free_irq(lp->tx_irq, ndev);
1626 napi_disable(&lp->napi_tx);
1627 napi_disable(&lp->napi_rx);
1628 cancel_work_sync(&lp->dma_err_task);
1629 dev_err(lp->dev, "request_irq() failed\n");
1649 struct axienet_local *lp = netdev_priv(ndev);
1655 axienet_lock_mii(lp);
1657 axienet_unlock_mii(lp);
1659 ret = phylink_of_phy_connect(lp->phylink, lp->dev->of_node, 0);
1661 dev_err(lp->dev, "phylink_of_phy_connect() failed: %d\n", ret);
1665 phylink_start(lp->phylink);
1668 schedule_delayed_work(&lp->stats_work, 0);
1670 if (lp->use_dmaengine) {
1672 if (lp->eth_irq > 0) {
1673 ret = request_irq(lp->eth_irq, axienet_eth_irq, IRQF_SHARED,
1691 if (lp->eth_irq > 0)
1692 free_irq(lp->eth_irq, ndev);
1694 cancel_work_sync(&lp->rx_dim.work);
1695 cancel_delayed_work_sync(&lp->stats_work);
1696 phylink_stop(lp->phylink);
1697 phylink_disconnect_phy(lp->phylink);
1713 struct axienet_local *lp = netdev_priv(ndev);
1716 if (!lp->use_dmaengine) {
1717 WRITE_ONCE(lp->stopping, true);
1718 flush_work(&lp->dma_err_task);
1720 napi_disable(&lp->napi_tx);
1721 napi_disable(&lp->napi_rx);
1724 cancel_work_sync(&lp->rx_dim.work);
1725 cancel_delayed_work_sync(&lp->stats_work);
1727 phylink_stop(lp->phylink);
1728 phylink_disconnect_phy(lp->phylink);
1730 axienet_setoptions(ndev, lp->options &
1733 if (!lp->use_dmaengine) {
1734 axienet_dma_stop(lp);
1735 cancel_work_sync(&lp->dma_err_task);
1736 free_irq(lp->tx_irq, ndev);
1737 free_irq(lp->rx_irq, ndev);
1740 dmaengine_terminate_sync(lp->tx_chan);
1741 dmaengine_synchronize(lp->tx_chan);
1742 dmaengine_terminate_sync(lp->rx_chan);
1743 dmaengine_synchronize(lp->rx_chan);
1746 kfree(lp->tx_skb_ring[i]);
1747 kfree(lp->tx_skb_ring);
1749 kfree(lp->rx_skb_ring[i]);
1750 kfree(lp->rx_skb_ring);
1752 dma_release_channel(lp->rx_chan);
1753 dma_release_channel(lp->tx_chan);
1757 axienet_iow(lp, XAE_IE_OFFSET, 0);
1759 if (lp->eth_irq > 0)
1760 free_irq(lp->eth_irq, ndev);
1777 struct axienet_local *lp = netdev_priv(ndev);
1783 XAE_TRL_SIZE) > lp->rxmem)
1801 struct axienet_local *lp = netdev_priv(ndev);
1803 disable_irq(lp->tx_irq);
1804 disable_irq(lp->rx_irq);
1805 axienet_rx_irq(lp->tx_irq, ndev);
1806 axienet_tx_irq(lp->rx_irq, ndev);
1807 enable_irq(lp->tx_irq);
1808 enable_irq(lp->rx_irq);
1814 struct axienet_local *lp = netdev_priv(dev);
1819 return phylink_mii_ioctl(lp->phylink, rq, cmd);
1825 struct axienet_local *lp = netdev_priv(dev);
1831 start = u64_stats_fetch_begin(&lp->rx_stat_sync);
1832 stats->rx_packets = u64_stats_read(&lp->rx_packets);
1833 stats->rx_bytes = u64_stats_read(&lp->rx_bytes);
1834 } while (u64_stats_fetch_retry(&lp->rx_stat_sync, start));
1837 start = u64_stats_fetch_begin(&lp->tx_stat_sync);
1838 stats->tx_packets = u64_stats_read(&lp->tx_packets);
1839 stats->tx_bytes = u64_stats_read(&lp->tx_bytes);
1840 } while (u64_stats_fetch_retry(&lp->tx_stat_sync, start));
1842 if (!(lp->features & XAE_FEATURE_STATS))
1846 start = read_seqcount_begin(&lp->hw_stats_seqcount);
1848 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
1849 stats->rx_crc_errors = axienet_stat(lp, STAT_RX_FCS_ERRORS);
1851 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
1852 stats->rx_errors = axienet_stat(lp, STAT_UNDERSIZE_FRAMES) +
1853 axienet_stat(lp, STAT_FRAGMENT_FRAMES) +
1857 stats->multicast = axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
1860 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
1862 axienet_stat(lp, STAT_TX_UNDERRUN_ERRORS);
1864 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
1865 stats->tx_errors = axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL) +
1869 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
1944 struct axienet_local *lp = netdev_priv(ndev);
1950 data[0] = axienet_ior(lp, XAE_RAF_OFFSET);
1951 data[1] = axienet_ior(lp, XAE_TPF_OFFSET);
1952 data[2] = axienet_ior(lp, XAE_IFGP_OFFSET);
1953 data[3] = axienet_ior(lp, XAE_IS_OFFSET);
1954 data[4] = axienet_ior(lp, XAE_IP_OFFSET);
1955 data[5] = axienet_ior(lp, XAE_IE_OFFSET);
1956 data[6] = axienet_ior(lp, XAE_TTAG_OFFSET);
1957 data[7] = axienet_ior(lp, XAE_RTAG_OFFSET);
1958 data[8] = axienet_ior(lp, XAE_UAWL_OFFSET);
1959 data[9] = axienet_ior(lp, XAE_UAWU_OFFSET);
1960 data[10] = axienet_ior(lp, XAE_TPID0_OFFSET);
1961 data[11] = axienet_ior(lp, XAE_TPID1_OFFSET);
1962 data[12] = axienet_ior(lp, XAE_PPST_OFFSET);
1963 data[13] = axienet_ior(lp, XAE_RCW0_OFFSET);
1964 data[14] = axienet_ior(lp, XAE_RCW1_OFFSET);
1965 data[15] = axienet_ior(lp, XAE_TC_OFFSET);
1966 data[16] = axienet_ior(lp, XAE_FCC_OFFSET);
1967 data[17] = axienet_ior(lp, XAE_EMMC_OFFSET);
1968 data[18] = axienet_ior(lp, XAE_PHYC_OFFSET);
1969 data[19] = axienet_ior(lp, XAE_MDIO_MC_OFFSET);
1970 data[20] = axienet_ior(lp, XAE_MDIO_MCR_OFFSET);
1971 data[21] = axienet_ior(lp, XAE_MDIO_MWD_OFFSET);
1972 data[22] = axienet_ior(lp, XAE_MDIO_MRD_OFFSET);
1973 data[27] = axienet_ior(lp, XAE_UAW0_OFFSET);
1974 data[28] = axienet_ior(lp, XAE_UAW1_OFFSET);
1975 data[29] = axienet_ior(lp, XAE_FMI_OFFSET);
1976 data[30] = axienet_ior(lp, XAE_AF0_OFFSET);
1977 data[31] = axienet_ior(lp, XAE_AF1_OFFSET);
1978 if (!lp->use_dmaengine) {
1979 data[32] = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
1980 data[33] = axienet_dma_in32(lp, XAXIDMA_TX_SR_OFFSET);
1981 data[34] = axienet_dma_in32(lp, XAXIDMA_TX_CDESC_OFFSET);
1982 data[35] = axienet_dma_in32(lp, XAXIDMA_TX_TDESC_OFFSET);
1983 data[36] = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
1984 data[37] = axienet_dma_in32(lp, XAXIDMA_RX_SR_OFFSET);
1985 data[38] = axienet_dma_in32(lp, XAXIDMA_RX_CDESC_OFFSET);
1986 data[39] = axienet_dma_in32(lp, XAXIDMA_RX_TDESC_OFFSET);
1996 struct axienet_local *lp = netdev_priv(ndev);
2002 ering->rx_pending = lp->rx_bd_num;
2005 ering->tx_pending = lp->tx_bd_num;
2014 struct axienet_local *lp = netdev_priv(ndev);
2026 lp->rx_bd_num = ering->rx_pending;
2027 lp->tx_bd_num = ering->tx_pending;
2044 struct axienet_local *lp = netdev_priv(ndev);
2046 phylink_ethtool_get_pauseparam(lp->phylink, epauseparm);
2065 struct axienet_local *lp = netdev_priv(ndev);
2067 return phylink_ethtool_set_pauseparam(lp->phylink, epauseparm);
2072 * @lp: Device private data
2076 static void axienet_update_coalesce_rx(struct axienet_local *lp, u32 cr,
2079 spin_lock_irq(&lp->rx_cr_lock);
2080 lp->rx_dma_cr &= ~mask;
2081 lp->rx_dma_cr |= cr;
2085 if (lp->rx_dma_started) {
2086 u32 reg = axienet_dma_in32(lp, XAXIDMA_RX_CR_OFFSET);
2090 cr = lp->rx_dma_cr;
2092 cr = lp->rx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2093 axienet_dma_out32(lp, XAXIDMA_RX_CR_OFFSET, cr);
2095 spin_unlock_irq(&lp->rx_cr_lock);
2100 * @lp: Device private data
2102 static u32 axienet_dim_coalesce_count_rx(struct axienet_local *lp)
2104 return min(1 << (lp->rx_dim.profile_ix << 1), 255);
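axienet_dim_coalesce_count_rx() maps the DIM profile index to an RX coalesce count of 4^profile_ix capped at 255, i.e. 1, 4, 16, 64, 255 for indices 0 through 4. A minimal sketch of the mapping:

#include <stdio.h>

/* 4^profile_ix, capped at 255 — mirrors the one-liner above. */
static unsigned int dim_coalesce_count(unsigned int profile_ix)
{
	unsigned int count = 1u << (profile_ix << 1);

	return count > 255 ? 255 : count;
}

int main(void)
{
	for (unsigned int ix = 0; ix <= 5; ix++)
		printf("profile %u -> coalesce count %u\n", ix, dim_coalesce_count(ix));
	return 0;
}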
2113 struct axienet_local *lp =
2115 u32 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp), 0);
2119 axienet_update_coalesce_rx(lp, cr, mask);
2120 lp->rx_dim.state = DIM_START_MEASURE;
2125 * @lp: Device private data
2129 static void axienet_update_coalesce_tx(struct axienet_local *lp, u32 cr,
2132 spin_lock_irq(&lp->tx_cr_lock);
2133 lp->tx_dma_cr &= ~mask;
2134 lp->tx_dma_cr |= cr;
2138 if (lp->tx_dma_started) {
2139 u32 reg = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
2143 cr = lp->tx_dma_cr;
2145 cr = lp->tx_dma_cr & ~XAXIDMA_IRQ_ALL_MASK;
2146 axienet_dma_out32(lp, XAXIDMA_TX_CR_OFFSET, cr);
2148 spin_unlock_irq(&lp->tx_cr_lock);
2170 struct axienet_local *lp = netdev_priv(ndev);
2173 ecoalesce->use_adaptive_rx_coalesce = lp->rx_dim_enabled;
2175 spin_lock_irq(&lp->rx_cr_lock);
2176 cr = lp->rx_dma_cr;
2177 spin_unlock_irq(&lp->rx_cr_lock);
2178 axienet_coalesce_params(lp, cr,
2182 spin_lock_irq(&lp->tx_cr_lock);
2183 cr = lp->tx_dma_cr;
2184 spin_unlock_irq(&lp->tx_cr_lock);
2185 axienet_coalesce_params(lp, cr,
2210 struct axienet_local *lp = netdev_priv(ndev);
2212 bool old_dim = lp->rx_dim_enabled;
2237 cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
2241 WRITE_ONCE(lp->rx_dim_enabled, false);
2242 napi_synchronize(&lp->napi_rx);
2243 flush_work(&lp->rx_dim.work);
2246 cr = axienet_calc_cr(lp, ecoalesce->rx_max_coalesced_frames,
2250 cr = axienet_calc_cr(lp, 2, ecoalesce->rx_coalesce_usecs);
2254 axienet_update_coalesce_rx(lp, cr, mask);
2256 WRITE_ONCE(lp->rx_dim_enabled, true);
2258 cr = axienet_calc_cr(lp, ecoalesce->tx_max_coalesced_frames,
2260 axienet_update_coalesce_tx(lp, cr, ~XAXIDMA_CR_RUNSTOP_MASK);
2268 struct axienet_local *lp = netdev_priv(ndev);
2270 return phylink_ethtool_ksettings_get(lp->phylink, cmd);
2277 struct axienet_local *lp = netdev_priv(ndev);
2279 return phylink_ethtool_ksettings_set(lp->phylink, cmd);
2284 struct axienet_local *lp = netdev_priv(dev);
2286 return phylink_ethtool_nway_reset(lp->phylink);
2293 struct axienet_local *lp = netdev_priv(dev);
2297 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2298 data[0] = axienet_stat(lp, STAT_RX_BYTES);
2299 data[1] = axienet_stat(lp, STAT_TX_BYTES);
2300 data[2] = axienet_stat(lp, STAT_RX_VLAN_FRAMES);
2301 data[3] = axienet_stat(lp, STAT_TX_VLAN_FRAMES);
2302 data[6] = axienet_stat(lp, STAT_TX_PFC_FRAMES);
2303 data[7] = axienet_stat(lp, STAT_RX_PFC_FRAMES);
2304 data[8] = axienet_stat(lp, STAT_USER_DEFINED0);
2305 data[9] = axienet_stat(lp, STAT_USER_DEFINED1);
2306 data[10] = axienet_stat(lp, STAT_USER_DEFINED2);
2307 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2334 struct axienet_local *lp = netdev_priv(dev);
2338 if (lp->features & XAE_FEATURE_STATS)
2350 struct axienet_local *lp = netdev_priv(dev);
2353 if (!(lp->features & XAE_FEATURE_STATS))
2357 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2359 axienet_stat(lp, STAT_TX_PAUSE_FRAMES);
2361 axienet_stat(lp, STAT_RX_PAUSE_FRAMES);
2362 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2369 struct axienet_local *lp = netdev_priv(dev);
2372 if (!(lp->features & XAE_FEATURE_STATS))
2376 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2378 axienet_stat(lp, STAT_TX_GOOD_FRAMES);
2380 axienet_stat(lp, STAT_TX_SINGLE_COLLISION_FRAMES);
2382 axienet_stat(lp, STAT_TX_MULTIPLE_COLLISION_FRAMES);
2384 axienet_stat(lp, STAT_RX_GOOD_FRAMES);
2386 axienet_stat(lp, STAT_RX_FCS_ERRORS);
2388 axienet_stat(lp, STAT_RX_ALIGNMENT_ERRORS);
2390 axienet_stat(lp, STAT_TX_DEFERRED_FRAMES);
2392 axienet_stat(lp, STAT_TX_LATE_COLLISIONS);
2394 axienet_stat(lp, STAT_TX_EXCESS_COLLISIONS);
2396 axienet_stat(lp, STAT_TX_MULTICAST_FRAMES);
2398 axienet_stat(lp, STAT_TX_BROADCAST_FRAMES);
2400 axienet_stat(lp, STAT_TX_EXCESS_DEFERRAL);
2402 axienet_stat(lp, STAT_RX_MULTICAST_FRAMES);
2404 axienet_stat(lp, STAT_RX_BROADCAST_FRAMES);
2406 axienet_stat(lp, STAT_RX_LENGTH_ERRORS);
2407 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2414 struct axienet_local *lp = netdev_priv(dev);
2417 if (!(lp->features & XAE_FEATURE_STATS))
2421 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2423 axienet_stat(lp, STAT_TX_CONTROL_FRAMES);
2425 axienet_stat(lp, STAT_RX_CONTROL_FRAMES);
2427 axienet_stat(lp, STAT_RX_CONTROL_OPCODE_ERRORS);
2428 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2447 struct axienet_local *lp = netdev_priv(dev);
2450 if (!(lp->features & XAE_FEATURE_STATS))
2454 start = read_seqcount_begin(&lp->hw_stats_seqcount);
2456 axienet_stat(lp, STAT_UNDERSIZE_FRAMES);
2458 axienet_stat(lp, STAT_RX_OVERSIZE_FRAMES);
2460 axienet_stat(lp, STAT_FRAGMENT_FRAMES);
2463 axienet_stat(lp, STAT_RX_64_BYTE_FRAMES);
2465 axienet_stat(lp, STAT_RX_65_127_BYTE_FRAMES);
2467 axienet_stat(lp, STAT_RX_128_255_BYTE_FRAMES);
2469 axienet_stat(lp, STAT_RX_256_511_BYTE_FRAMES);
2471 axienet_stat(lp, STAT_RX_512_1023_BYTE_FRAMES);
2473 axienet_stat(lp, STAT_RX_1024_MAX_BYTE_FRAMES);
2478 axienet_stat(lp, STAT_TX_64_BYTE_FRAMES);
2480 axienet_stat(lp, STAT_TX_65_127_BYTE_FRAMES);
2482 axienet_stat(lp, STAT_TX_128_255_BYTE_FRAMES);
2484 axienet_stat(lp, STAT_TX_256_511_BYTE_FRAMES);
2486 axienet_stat(lp, STAT_TX_512_1023_BYTE_FRAMES);
2488 axienet_stat(lp, STAT_TX_1024_MAX_BYTE_FRAMES);
2490 axienet_stat(lp, STAT_TX_OVERSIZE_FRAMES);
2491 } while (read_seqcount_retry(&lp->hw_stats_seqcount, start));
2550 struct axienet_local *lp = netdev_priv(ndev);
2553 if (lp->switch_x_sgmii) {
2583 struct axienet_local *lp = netdev_priv(ndev);
2587 return &lp->pcs;
2612 struct axienet_local *lp = netdev_priv(ndev);
2615 emmc_reg = axienet_ior(lp, XAE_EMMC_OFFSET);
2634 axienet_iow(lp, XAE_EMMC_OFFSET, emmc_reg);
2636 fcc_reg = axienet_ior(lp, XAE_FCC_OFFSET);
2645 axienet_iow(lp, XAE_FCC_OFFSET, fcc_reg);
2667 struct axienet_local *lp = container_of(work, struct axienet_local,
2669 struct net_device *ndev = lp->ndev;
2672 if (READ_ONCE(lp->stopping))
2675 napi_disable(&lp->napi_tx);
2676 napi_disable(&lp->napi_rx);
2678 axienet_setoptions(ndev, lp->options &
2681 axienet_dma_stop(lp);
2684 for (i = 0; i < lp->tx_bd_num; i++) {
2685 cur_p = &lp->tx_bd_v[i];
2687 dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
2689 dma_unmap_single(lp->dev, addr,
2708 for (i = 0; i < lp->rx_bd_num; i++) {
2709 cur_p = &lp->rx_bd_v[i];
2718 lp->tx_bd_ci = 0;
2719 lp->tx_bd_tail = 0;
2720 lp->rx_bd_ci = 0;
2722 axienet_dma_start(lp);
2724 axienet_status = axienet_ior(lp, XAE_RCW1_OFFSET);
2726 axienet_iow(lp, XAE_RCW1_OFFSET, axienet_status);
2728 axienet_status = axienet_ior(lp, XAE_IP_OFFSET);
2730 axienet_iow(lp, XAE_IS_OFFSET, XAE_INT_RXRJECT_MASK);
2731 axienet_iow(lp, XAE_IE_OFFSET, lp->eth_irq > 0 ?
2733 axienet_iow(lp, XAE_FCC_OFFSET, XAE_FCC_FCRX_MASK);
2738 axienet_setoptions(ndev, lp->options &
2742 napi_enable(&lp->napi_rx);
2743 napi_enable(&lp->napi_tx);
2744 axienet_setoptions(ndev, lp->options);
2763 struct axienet_local *lp;
2770 ndev = alloc_etherdev(sizeof(*lp));
2784 lp = netdev_priv(ndev);
2785 lp->ndev = ndev;
2786 lp->dev = &pdev->dev;
2787 lp->options = XAE_OPTION_DEFAULTS;
2788 lp->rx_bd_num = RX_BD_NUM_DEFAULT;
2789 lp->tx_bd_num = TX_BD_NUM_DEFAULT;
2791 u64_stats_init(&lp->rx_stat_sync);
2792 u64_stats_init(&lp->tx_stat_sync);
2794 mutex_init(&lp->stats_lock);
2795 seqcount_mutex_init(&lp->hw_stats_seqcount, &lp->stats_lock);
2796 INIT_DEFERRABLE_WORK(&lp->stats_work, axienet_refresh_stats);
2798 lp->axi_clk = devm_clk_get_optional(&pdev->dev, "s_axi_lite_clk");
2799 if (!lp->axi_clk) {
2803 lp->axi_clk = devm_clk_get_optional(&pdev->dev, NULL);
2805 if (IS_ERR(lp->axi_clk)) {
2806 ret = PTR_ERR(lp->axi_clk);
2809 ret = clk_prepare_enable(lp->axi_clk);
2815 lp->misc_clks[0].id = "axis_clk";
2816 lp->misc_clks[1].id = "ref_clk";
2817 lp->misc_clks[2].id = "mgt_clk";
2819 ret = devm_clk_bulk_get_optional(&pdev->dev, XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2823 ret = clk_bulk_prepare_enable(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
2828 lp->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &ethres);
2829 if (IS_ERR(lp->regs)) {
2830 ret = PTR_ERR(lp->regs);
2833 lp->regs_start = ethres->start;
2836 lp->features = 0;
2838 if (axienet_ior(lp, XAE_ABILITY_OFFSET) & XAE_ABILITY_STATS)
2839 lp->features |= XAE_FEATURE_STATS;
2845 lp->features |= XAE_FEATURE_PARTIAL_TX_CSUM;
2850 lp->features |= XAE_FEATURE_FULL_TX_CSUM;
2860 lp->features |= XAE_FEATURE_PARTIAL_RX_CSUM;
2864 lp->features |= XAE_FEATURE_FULL_RX_CSUM;
2875 of_property_read_u32(pdev->dev.of_node, "xlnx,rxmem", &lp->rxmem);
2877 lp->switch_x_sgmii = of_property_read_bool(pdev->dev.of_node,
2886 lp->phy_mode = PHY_INTERFACE_MODE_MII;
2889 lp->phy_mode = PHY_INTERFACE_MODE_GMII;
2892 lp->phy_mode = PHY_INTERFACE_MODE_RGMII_ID;
2895 lp->phy_mode = PHY_INTERFACE_MODE_SGMII;
2898 lp->phy_mode = PHY_INTERFACE_MODE_1000BASEX;
2905 ret = of_get_phy_mode(pdev->dev.of_node, &lp->phy_mode);
2909 if (lp->switch_x_sgmii && lp->phy_mode != PHY_INTERFACE_MODE_SGMII &&
2910 lp->phy_mode != PHY_INTERFACE_MODE_1000BASEX) {
2930 lp->dma_regs = devm_ioremap_resource(&pdev->dev,
2932 lp->rx_irq = irq_of_parse_and_map(np, 1);
2933 lp->tx_irq = irq_of_parse_and_map(np, 0);
2935 lp->eth_irq = platform_get_irq_optional(pdev, 0);
2938 lp->dma_regs = devm_platform_get_and_ioremap_resource(pdev, 1, NULL);
2939 lp->rx_irq = platform_get_irq(pdev, 1);
2940 lp->tx_irq = platform_get_irq(pdev, 0);
2941 lp->eth_irq = platform_get_irq_optional(pdev, 2);
2943 if (IS_ERR(lp->dma_regs)) {
2945 ret = PTR_ERR(lp->dma_regs);
2948 if (lp->rx_irq <= 0 || lp->tx_irq <= 0) {
2955 ret = __axienet_device_reset(lp);
2967 if ((axienet_ior(lp, XAE_ID_OFFSET) >> 24) >= 0x9) {
2968 void __iomem *desc = lp->dma_regs + XAXIDMA_TX_CDESC_OFFSET + 4;
2974 lp->features |= XAE_FEATURE_DMA_64BIT;
2982 if (!IS_ENABLED(CONFIG_64BIT) && lp->features & XAE_FEATURE_DMA_64BIT) {
2993 netif_napi_add(ndev, &lp->napi_rx, axienet_rx_poll);
2994 netif_napi_add(ndev, &lp->napi_tx, axienet_tx_poll);
2999 lp->eth_irq = platform_get_irq_optional(pdev, 0);
3000 if (lp->eth_irq < 0 && lp->eth_irq != -ENXIO) {
3001 ret = lp->eth_irq;
3004 tx_chan = dma_request_chan(lp->dev, "tx_chan0");
3007 dev_err_probe(lp->dev, ret, "No Ethernet DMA (TX) channel found\n");
3021 lp->use_dmaengine = 1;
3024 if (lp->use_dmaengine)
3029 if (lp->eth_irq <= 0)
3042 spin_lock_init(&lp->rx_cr_lock);
3043 spin_lock_init(&lp->tx_cr_lock);
3044 INIT_WORK(&lp->rx_dim.work, axienet_rx_dim_work);
3045 lp->rx_dim_enabled = true;
3046 lp->rx_dim.profile_ix = 1;
3047 lp->rx_dma_cr = axienet_calc_cr(lp, axienet_dim_coalesce_count_rx(lp),
3049 lp->tx_dma_cr = axienet_calc_cr(lp, XAXIDMA_DFT_TX_THRESHOLD,
3052 ret = axienet_mdio_setup(lp);
3057 if (lp->phy_mode == PHY_INTERFACE_MODE_SGMII ||
3058 lp->phy_mode == PHY_INTERFACE_MODE_1000BASEX) {
3072 lp->pcs_phy = of_mdio_find_device(np);
3073 if (!lp->pcs_phy) {
3079 lp->pcs.ops = &axienet_pcs_ops;
3080 lp->pcs.poll = true;
3083 lp->phylink_config.dev = &ndev->dev;
3084 lp->phylink_config.type = PHYLINK_NETDEV;
3085 lp->phylink_config.mac_managed_pm = true;
3086 lp->phylink_config.mac_capabilities = MAC_SYM_PAUSE | MAC_ASYM_PAUSE |
3089 __set_bit(lp->phy_mode, lp->phylink_config.supported_interfaces);
3090 if (lp->switch_x_sgmii) {
3092 lp->phylink_config.supported_interfaces);
3094 lp->phylink_config.supported_interfaces);
3097 lp->phylink = phylink_create(&lp->phylink_config, pdev->dev.fwnode,
3098 lp->phy_mode,
3100 if (IS_ERR(lp->phylink)) {
3101 ret = PTR_ERR(lp->phylink);
3106 ret = register_netdev(lp->ndev);
3108 dev_err(lp->dev, "register_netdev() error (%i)\n", ret);
3115 phylink_destroy(lp->phylink);
3118 if (lp->pcs_phy)
3119 put_device(&lp->pcs_phy->dev);
3120 if (lp->mii_bus)
3121 axienet_mdio_teardown(lp);
3123 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3124 clk_disable_unprepare(lp->axi_clk);
3135 struct axienet_local *lp = netdev_priv(ndev);
3139 if (lp->phylink)
3140 phylink_destroy(lp->phylink);
3142 if (lp->pcs_phy)
3143 put_device(&lp->pcs_phy->dev);
3145 axienet_mdio_teardown(lp);
3147 clk_bulk_disable_unprepare(XAE_NUM_MISC_CLOCKS, lp->misc_clks);
3148 clk_disable_unprepare(lp->axi_clk);