Lines matching full-text search "asym-pause"

1 // SPDX-License-Identifier: GPL-2.0
4 * Copyright (C) 2014-2019 Renesas Electronics Corporation
6 * Copyright (C) 2015-2016 Cogent Embedded, Inc. <source@cogentembedded.com>
14 #include <linux/dma-mapping.h>
66 return -ETIMEDOUT; in ravb_wait()
98 switch (priv->speed) { in ravb_set_rate_gbeth()
115 switch (priv->speed) { in ravb_set_rate_rcar()
127 u32 reserve = (unsigned long)skb->data & (RAVB_ALIGN - 1); in ravb_set_buffer_align()
130 skb_reserve(skb, RAVB_ALIGN - reserve); in ravb_set_buffer_align()
164 ravb_modify(priv->ndev, PIR, mask, set ? mask : 0); in ravb_mdio_ctrl()
191 return (ravb_read(priv->ndev, PIR) & PIR_MDI) != 0; in ravb_get_mdio_data()
203 /* Free TX skb function for AVB-IP */
207 struct net_device_stats *stats = &priv->stats[q]; in ravb_tx_free()
208 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_tx_free()
214 for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) { in ravb_tx_free()
217 entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] * in ravb_tx_free()
219 desc = &priv->tx_ring[q][entry]; in ravb_tx_free()
220 txed = desc->die_dt == DT_FEMPTY; in ravb_tx_free()
225 size = le16_to_cpu(desc->ds_tagl) & TX_DS; in ravb_tx_free()
227 if (priv->tx_skb[q][entry / num_tx_desc]) { in ravb_tx_free()
228 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_tx_free()
231 if (entry % num_tx_desc == num_tx_desc - 1) { in ravb_tx_free()
233 dev_kfree_skb_any(priv->tx_skb[q][entry]); in ravb_tx_free()
234 priv->tx_skb[q][entry] = NULL; in ravb_tx_free()
236 stats->tx_packets++; in ravb_tx_free()
241 stats->tx_bytes += size; in ravb_tx_free()
242 desc->die_dt = DT_EEMPTY; in ravb_tx_free()
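
The cur_tx/dirty_tx pair above are free-running counters: they only ever increase, and the ring slot is recovered with a modulo, so wraparound needs no special casing. A minimal, compilable userspace sketch of that scheme (the ring length and names are mine, not the driver's):

#include <stdio.h>

#define RING_LEN 4U

int main(void)
{
	unsigned int cur_tx = 0, dirty_tx = 0;

	for (int i = 0; i < 6; i++) {
		if (cur_tx - dirty_tx == RING_LEN) {
			/* Ring full: reclaim the oldest completed slot,
			 * as ravb_tx_free() does from dirty_tx upward.
			 */
			printf("reclaim slot %u\n", dirty_tx % RING_LEN);
			dirty_tx++;
		}
		printf("queue   slot %u\n", cur_tx % RING_LEN);
		cur_tx++;
	}
	return 0;
}
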
253 if (!priv->gbeth_rx_ring) in ravb_rx_ring_free_gbeth()
256 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_rx_ring_free_gbeth()
257 struct ravb_rx_desc *desc = &priv->gbeth_rx_ring[i]; in ravb_rx_ring_free_gbeth()
259 if (!dma_mapping_error(ndev->dev.parent, in ravb_rx_ring_free_gbeth()
260 le32_to_cpu(desc->dptr))) in ravb_rx_ring_free_gbeth()
261 dma_unmap_single(ndev->dev.parent, in ravb_rx_ring_free_gbeth()
262 le32_to_cpu(desc->dptr), in ravb_rx_ring_free_gbeth()
266 ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); in ravb_rx_ring_free_gbeth()
267 dma_free_coherent(ndev->dev.parent, ring_size, priv->gbeth_rx_ring, in ravb_rx_ring_free_gbeth()
268 priv->rx_desc_dma[q]); in ravb_rx_ring_free_gbeth()
269 priv->gbeth_rx_ring = NULL; in ravb_rx_ring_free_gbeth()
278 if (!priv->rx_ring[q]) in ravb_rx_ring_free_rcar()
281 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_rx_ring_free_rcar()
282 struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i]; in ravb_rx_ring_free_rcar()
284 if (!dma_mapping_error(ndev->dev.parent, in ravb_rx_ring_free_rcar()
285 le32_to_cpu(desc->dptr))) in ravb_rx_ring_free_rcar()
286 dma_unmap_single(ndev->dev.parent, in ravb_rx_ring_free_rcar()
287 le32_to_cpu(desc->dptr), in ravb_rx_ring_free_rcar()
292 (priv->num_rx_ring[q] + 1); in ravb_rx_ring_free_rcar()
293 dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q], in ravb_rx_ring_free_rcar()
294 priv->rx_desc_dma[q]); in ravb_rx_ring_free_rcar()
295 priv->rx_ring[q] = NULL; in ravb_rx_ring_free_rcar()
302 const struct ravb_hw_info *info = priv->info; in ravb_ring_free()
303 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_free()
307 info->rx_ring_free(ndev, q); in ravb_ring_free()
309 if (priv->tx_ring[q]) { in ravb_ring_free()
313 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_free()
314 dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q], in ravb_ring_free()
315 priv->tx_desc_dma[q]); in ravb_ring_free()
316 priv->tx_ring[q] = NULL; in ravb_ring_free()
320 if (priv->rx_skb[q]) { in ravb_ring_free()
321 for (i = 0; i < priv->num_rx_ring[q]; i++) in ravb_ring_free()
322 dev_kfree_skb(priv->rx_skb[q][i]); in ravb_ring_free()
324 kfree(priv->rx_skb[q]); in ravb_ring_free()
325 priv->rx_skb[q] = NULL; in ravb_ring_free()
328 kfree(priv->tx_align[q]); in ravb_ring_free()
329 priv->tx_align[q] = NULL; in ravb_ring_free()
334 kfree(priv->tx_skb[q]); in ravb_ring_free()
335 priv->tx_skb[q] = NULL; in ravb_ring_free()
346 rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; in ravb_rx_ring_format_gbeth()
347 memset(priv->gbeth_rx_ring, 0, rx_ring_size); in ravb_rx_ring_format_gbeth()
349 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_rx_ring_format_gbeth()
351 rx_desc = &priv->gbeth_rx_ring[i]; in ravb_rx_ring_format_gbeth()
352 rx_desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); in ravb_rx_ring_format_gbeth()
353 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, in ravb_rx_ring_format_gbeth()
359 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_ring_format_gbeth()
360 rx_desc->ds_cc = cpu_to_le16(0); in ravb_rx_ring_format_gbeth()
361 rx_desc->dptr = cpu_to_le32(dma_addr); in ravb_rx_ring_format_gbeth()
362 rx_desc->die_dt = DT_FEMPTY; in ravb_rx_ring_format_gbeth()
364 rx_desc = &priv->gbeth_rx_ring[i]; in ravb_rx_ring_format_gbeth()
365 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_rx_ring_format_gbeth()
366 rx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_rx_ring_format_gbeth()
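
The formatting loop above ends by writing a DT_LINKFIX descriptor whose pointer targets the ring base, so the DMAC wraps back to the first entry in hardware rather than in driver code. A compilable sketch of that layout; the field widths, type codes, and addresses below are made up for illustration:

#include <stdint.h>
#include <stdio.h>

enum { DT_FEMPTY = 1, DT_LINKFIX = 2 };	/* stand-in type codes */

struct rx_desc {
	uint16_t ds_cc;		/* buffer size */
	uint8_t die_dt;		/* descriptor type */
	uint32_t dptr;		/* buffer (or link target) address */
};

int main(void)
{
	struct rx_desc ring[4 + 1] = {0};	/* data slots + 1 link slot */
	uint32_t ring_base = 0x1000;		/* pretend DMA address */

	for (int i = 0; i < 4; i++) {
		ring[i].ds_cc = 2048;
		ring[i].dptr = 0x8000 + i * 2048;	/* pretend buffers */
		ring[i].die_dt = DT_FEMPTY;	/* hardware may fill this */
	}
	ring[4].dptr = ring_base;	/* last entry links back to the start */
	ring[4].die_dt = DT_LINKFIX;

	printf("link descriptor -> 0x%x\n", ring[4].dptr);
	return 0;
}
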
373 unsigned int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; in ravb_rx_ring_format_rcar()
377 memset(priv->rx_ring[q], 0, rx_ring_size); in ravb_rx_ring_format_rcar()
379 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_rx_ring_format_rcar()
381 rx_desc = &priv->rx_ring[q][i]; in ravb_rx_ring_format_rcar()
382 rx_desc->ds_cc = cpu_to_le16(RX_BUF_SZ); in ravb_rx_ring_format_rcar()
383 dma_addr = dma_map_single(ndev->dev.parent, priv->rx_skb[q][i]->data, in ravb_rx_ring_format_rcar()
389 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_ring_format_rcar()
390 rx_desc->ds_cc = cpu_to_le16(0); in ravb_rx_ring_format_rcar()
391 rx_desc->dptr = cpu_to_le32(dma_addr); in ravb_rx_ring_format_rcar()
392 rx_desc->die_dt = DT_FEMPTY; in ravb_rx_ring_format_rcar()
394 rx_desc = &priv->rx_ring[q][i]; in ravb_rx_ring_format_rcar()
395 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_rx_ring_format_rcar()
396 rx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_rx_ring_format_rcar()
403 const struct ravb_hw_info *info = priv->info; in ravb_ring_format()
404 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_format()
407 unsigned int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q] * in ravb_ring_format()
411 priv->cur_rx[q] = 0; in ravb_ring_format()
412 priv->cur_tx[q] = 0; in ravb_ring_format()
413 priv->dirty_rx[q] = 0; in ravb_ring_format()
414 priv->dirty_tx[q] = 0; in ravb_ring_format()
416 info->rx_ring_format(ndev, q); in ravb_ring_format()
418 memset(priv->tx_ring[q], 0, tx_ring_size); in ravb_ring_format()
420 for (i = 0, tx_desc = priv->tx_ring[q]; i < priv->num_tx_ring[q]; in ravb_ring_format()
422 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
425 tx_desc->die_dt = DT_EEMPTY; in ravb_ring_format()
428 tx_desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
429 tx_desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
432 desc = &priv->desc_bat[RX_QUEUE_OFFSET + q]; in ravb_ring_format()
433 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
434 desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); in ravb_ring_format()
437 desc = &priv->desc_bat[q]; in ravb_ring_format()
438 desc->die_dt = DT_LINKFIX; /* type */ in ravb_ring_format()
439 desc->dptr = cpu_to_le32((u32)priv->tx_desc_dma[q]); in ravb_ring_format()
447 ring_size = sizeof(struct ravb_rx_desc) * (priv->num_rx_ring[q] + 1); in ravb_alloc_rx_desc_gbeth()
449 priv->gbeth_rx_ring = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc_gbeth()
450 &priv->rx_desc_dma[q], in ravb_alloc_rx_desc_gbeth()
452 return priv->gbeth_rx_ring; in ravb_alloc_rx_desc_gbeth()
460 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); in ravb_alloc_rx_desc_rcar()
462 priv->rx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_alloc_rx_desc_rcar()
463 &priv->rx_desc_dma[q], in ravb_alloc_rx_desc_rcar()
465 return priv->rx_ring[q]; in ravb_alloc_rx_desc_rcar()
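
Note the num_rx_ring[q] + 1 sizing here and in the free paths: the extra slot is reserved for the DT_LINKFIX entry that closes the ring. A hedged kernel-style sketch of the same coherent-allocation pattern; the struct and helper are illustrative, not the driver's:

#include <linux/dma-mapping.h>

/* Illustrative descriptor layout, not the driver's struct ravb_ex_rx_desc */
struct sketch_rx_desc { __le16 ds_cc; u8 die_dt; __le32 dptr; };

static struct sketch_rx_desc *sketch_alloc_ring(struct device *dev,
						unsigned int n,
						dma_addr_t *dma)
{
	/* n data descriptors plus one trailing link descriptor */
	size_t size = sizeof(struct sketch_rx_desc) * (n + 1);

	return dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
}
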
472 const struct ravb_hw_info *info = priv->info; in ravb_ring_init()
473 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_ring_init()
479 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], in ravb_ring_init()
480 sizeof(*priv->rx_skb[q]), GFP_KERNEL); in ravb_ring_init()
481 priv->tx_skb[q] = kcalloc(priv->num_tx_ring[q], in ravb_ring_init()
482 sizeof(*priv->tx_skb[q]), GFP_KERNEL); in ravb_ring_init()
483 if (!priv->rx_skb[q] || !priv->tx_skb[q]) in ravb_ring_init()
486 for (i = 0; i < priv->num_rx_ring[q]; i++) { in ravb_ring_init()
487 skb = __netdev_alloc_skb(ndev, info->max_rx_len, GFP_KERNEL); in ravb_ring_init()
491 priv->rx_skb[q][i] = skb; in ravb_ring_init()
496 priv->tx_align[q] = kmalloc(DPTR_ALIGN * priv->num_tx_ring[q] + in ravb_ring_init()
497 DPTR_ALIGN - 1, GFP_KERNEL); in ravb_ring_init()
498 if (!priv->tx_align[q]) in ravb_ring_init()
503 if (!info->alloc_rx_desc(ndev, q)) in ravb_ring_init()
506 priv->dirty_rx[q] = 0; in ravb_ring_init()
510 (priv->num_tx_ring[q] * num_tx_desc + 1); in ravb_ring_init()
511 priv->tx_ring[q] = dma_alloc_coherent(ndev->dev.parent, ring_size, in ravb_ring_init()
512 &priv->tx_desc_dma[q], in ravb_ring_init()
514 if (!priv->tx_ring[q]) in ravb_ring_init()
522 return -ENOMEM; in ravb_ring_init()
529 if (priv->phy_interface == PHY_INTERFACE_MODE_MII) { in ravb_emac_init_gbeth()
541 /* EMAC Mode: PAUSE prohibition; Duplex; TX; RX; CRC Pass Through */ in ravb_emac_init_gbeth()
542 ravb_write(ndev, ECMR_ZPF | ((priv->duplex > 0) ? ECMR_DM : 0) | in ravb_emac_init_gbeth()
550 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_gbeth()
551 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_gbeth()
552 ravb_write(ndev, (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_gbeth()
554 /* E-MAC status register clear */ in ravb_emac_init_gbeth()
558 /* E-MAC interrupt enable register */ in ravb_emac_init_gbeth()
565 ravb_write(ndev, ndev->mtu + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN, RFLR); in ravb_emac_init_rcar()
567 /* EMAC Mode: PAUSE prohibition; Duplex; RX Checksum; TX; RX */ in ravb_emac_init_rcar()
569 (ndev->features & NETIF_F_RXCSUM ? ECMR_RCSC : 0) | in ravb_emac_init_rcar()
576 (ndev->dev_addr[0] << 24) | (ndev->dev_addr[1] << 16) | in ravb_emac_init_rcar()
577 (ndev->dev_addr[2] << 8) | (ndev->dev_addr[3]), MAHR); in ravb_emac_init_rcar()
579 (ndev->dev_addr[4] << 8) | (ndev->dev_addr[5]), MALR); in ravb_emac_init_rcar()
581 /* E-MAC status register clear */ in ravb_emac_init_rcar()
584 /* E-MAC interrupt enable register */ in ravb_emac_init_rcar()
588 /* E-MAC init function */
592 const struct ravb_hw_info *info = priv->info; in ravb_emac_init()
594 info->emac_init(ndev); in ravb_emac_init()
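
The MAHR/MALR writes above pack the six MAC address bytes into two registers: the first four bytes into the high register, the last two into the low one. A small compilable demonstration with an arbitrary address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint8_t mac[6] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint32_t mahr = (mac[0] << 24) | (mac[1] << 16) | (mac[2] << 8) | mac[3];
	uint32_t malr = (mac[4] << 8) | mac[5];

	printf("MAHR = 0x%08x, MALR = 0x%08x\n", mahr, malr);
	return 0;
}
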
634 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init_rcar()
661 if (info->multi_irqs) { in ravb_dmac_init_rcar()
683 const struct ravb_hw_info *info = priv->info; in ravb_dmac_init()
691 error = info->dmac_init(ndev); in ravb_dmac_init()
695 /* Setting the control will start the AVB-DMAC process. */ in ravb_dmac_init()
711 while (count--) { in ravb_get_tx_tstamp()
719 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, in ravb_get_tx_tstamp()
721 skb = ts_skb->skb; in ravb_get_tx_tstamp()
722 tag = ts_skb->tag; in ravb_get_tx_tstamp()
723 list_del(&ts_skb->list); in ravb_get_tx_tstamp()
744 if (unlikely(skb->len < sizeof(__sum16))) in ravb_rx_csum()
746 hw_csum = skb_tail_pointer(skb) - sizeof(__sum16); in ravb_rx_csum()
747 skb->csum = csum_unfold((__force __sum16)get_unaligned_le16(hw_csum)); in ravb_rx_csum()
748 skb->ip_summed = CHECKSUM_COMPLETE; in ravb_rx_csum()
749 skb_trim(skb, skb->len - sizeof(__sum16)); in ravb_rx_csum()
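
ravb_rx_csum() depends on the MAC appending a 16-bit checksum after the payload: the driver reads it from the tail and trims the frame by two bytes, exactly the skb_trim() above. A userspace sketch of the same tail-read-and-trim with made-up frame contents; the memcpy matches get_unaligned_le16() on a little-endian host:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint8_t frame[6] = { 0xde, 0xad, 0xbe, 0xef, 0x34, 0x12 };
	size_t len = sizeof(frame);
	uint16_t hw_csum;

	if (len < sizeof(hw_csum))	/* mirrors the skb->len guard above */
		return 1;
	memcpy(&hw_csum, frame + len - sizeof(hw_csum), sizeof(hw_csum));
	len -= sizeof(hw_csum);		/* trim, as skb_trim() does */

	printf("payload %zu bytes, hw checksum 0x%04x\n", len, hw_csum);
	return 0;
}
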
758 skb = priv->rx_skb[RAVB_BE][entry]; in ravb_get_skb_gbeth()
759 priv->rx_skb[RAVB_BE][entry] = NULL; in ravb_get_skb_gbeth()
760 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_get_skb_gbeth()
770 const struct ravb_hw_info *info = priv->info; in ravb_rx_gbeth()
783 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_gbeth()
784 limit = priv->dirty_rx[q] + priv->num_rx_ring[q] - priv->cur_rx[q]; in ravb_rx_gbeth()
785 stats = &priv->stats[q]; in ravb_rx_gbeth()
787 desc = &priv->gbeth_rx_ring[entry]; in ravb_rx_gbeth()
788 for (i = 0; i < limit && rx_packets < *quota && desc->die_dt != DT_FEMPTY; i++) { in ravb_rx_gbeth()
791 desc_status = desc->msc; in ravb_rx_gbeth()
792 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_gbeth()
794 /* We use 0-byte descriptors to mark the DMA mapping errors */ in ravb_rx_gbeth()
799 stats->multicast++; in ravb_rx_gbeth()
802 stats->rx_errors++; in ravb_rx_gbeth()
804 stats->rx_crc_errors++; in ravb_rx_gbeth()
806 stats->rx_frame_errors++; in ravb_rx_gbeth()
808 stats->rx_length_errors++; in ravb_rx_gbeth()
810 stats->rx_missed_errors++; in ravb_rx_gbeth()
812 die_dt = desc->die_dt & 0xF0; in ravb_rx_gbeth()
817 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_gbeth()
818 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_gbeth()
820 stats->rx_bytes += pkt_len; in ravb_rx_gbeth()
823 priv->rx_1st_skb = ravb_get_skb_gbeth(ndev, entry, desc); in ravb_rx_gbeth()
824 skb_put(priv->rx_1st_skb, pkt_len); in ravb_rx_gbeth()
828 skb_copy_to_linear_data_offset(priv->rx_1st_skb, in ravb_rx_gbeth()
829 priv->rx_1st_skb->len, in ravb_rx_gbeth()
830 skb->data, in ravb_rx_gbeth()
832 skb_put(priv->rx_1st_skb, pkt_len); in ravb_rx_gbeth()
837 skb_copy_to_linear_data_offset(priv->rx_1st_skb, in ravb_rx_gbeth()
838 priv->rx_1st_skb->len, in ravb_rx_gbeth()
839 skb->data, in ravb_rx_gbeth()
841 skb_put(priv->rx_1st_skb, pkt_len); in ravb_rx_gbeth()
843 priv->rx_1st_skb->protocol = in ravb_rx_gbeth()
844 eth_type_trans(priv->rx_1st_skb, ndev); in ravb_rx_gbeth()
845 napi_gro_receive(&priv->napi[q], in ravb_rx_gbeth()
846 priv->rx_1st_skb); in ravb_rx_gbeth()
848 stats->rx_bytes += pkt_len; in ravb_rx_gbeth()
853 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; in ravb_rx_gbeth()
854 desc = &priv->gbeth_rx_ring[entry]; in ravb_rx_gbeth()
858 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { in ravb_rx_gbeth()
859 entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; in ravb_rx_gbeth()
860 desc = &priv->gbeth_rx_ring[entry]; in ravb_rx_gbeth()
861 desc->ds_cc = cpu_to_le16(GBETH_RX_DESC_DATA_SIZE); in ravb_rx_gbeth()
863 if (!priv->rx_skb[q][entry]) { in ravb_rx_gbeth()
864 skb = netdev_alloc_skb(ndev, info->max_rx_len); in ravb_rx_gbeth()
868 dma_addr = dma_map_single(ndev->dev.parent, in ravb_rx_gbeth()
869 skb->data, in ravb_rx_gbeth()
876 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_gbeth()
877 desc->ds_cc = cpu_to_le16(0); in ravb_rx_gbeth()
878 desc->dptr = cpu_to_le32(dma_addr); in ravb_rx_gbeth()
879 priv->rx_skb[q][entry] = skb; in ravb_rx_gbeth()
883 desc->die_dt = DT_FEMPTY; in ravb_rx_gbeth()
886 stats->rx_packets += rx_packets; in ravb_rx_gbeth()
887 *quota -= rx_packets; in ravb_rx_gbeth()
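
The refill loop above keeps a descriptor even when dma_map_single() fails, but gives it a zero-length buffer so the DMAC cannot write through the bad address (the "0-byte descriptors" comment). A hedged kernel-style sketch of one refill step; the descriptor struct and type code are illustrative stand-ins:

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

struct sketch_rx_desc { __le16 ds_cc; u8 die_dt; __le32 dptr; };
#define SKETCH_DT_FEMPTY 0x40	/* stand-in for the driver's DT_FEMPTY */

static void sketch_refill_one(struct net_device *ndev,
			      struct sketch_rx_desc *desc,
			      struct sk_buff *skb, u16 buf_len)
{
	dma_addr_t dma = dma_map_single(ndev->dev.parent, skb->data,
					buf_len, DMA_FROM_DEVICE);

	desc->ds_cc = cpu_to_le16(buf_len);
	if (dma_mapping_error(ndev->dev.parent, dma))
		desc->ds_cc = cpu_to_le16(0);	/* 0 bytes flags the bad mapping */
	desc->dptr = cpu_to_le32(dma);
	dma_wmb();				/* descriptor body before type */
	desc->die_dt = SKETCH_DT_FEMPTY;	/* hand the slot to the DMAC */
}
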
895 const struct ravb_hw_info *info = priv->info; in ravb_rx_rcar()
896 int entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_rx_rcar()
897 int boguscnt = (priv->dirty_rx[q] + priv->num_rx_ring[q]) - in ravb_rx_rcar()
898 priv->cur_rx[q]; in ravb_rx_rcar()
899 struct net_device_stats *stats = &priv->stats[q]; in ravb_rx_rcar()
910 desc = &priv->rx_ring[q][entry]; in ravb_rx_rcar()
911 while (desc->die_dt != DT_FEMPTY) { in ravb_rx_rcar()
914 desc_status = desc->msc; in ravb_rx_rcar()
915 pkt_len = le16_to_cpu(desc->ds_cc) & RX_DS; in ravb_rx_rcar()
917 if (--boguscnt < 0) in ravb_rx_rcar()
920 /* We use 0-byte descriptors to mark the DMA mapping errors */ in ravb_rx_rcar()
925 stats->multicast++; in ravb_rx_rcar()
929 stats->rx_errors++; in ravb_rx_rcar()
931 stats->rx_crc_errors++; in ravb_rx_rcar()
933 stats->rx_frame_errors++; in ravb_rx_rcar()
935 stats->rx_length_errors++; in ravb_rx_rcar()
937 stats->rx_missed_errors++; in ravb_rx_rcar()
939 u32 get_ts = priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE; in ravb_rx_rcar()
941 skb = priv->rx_skb[q][entry]; in ravb_rx_rcar()
942 priv->rx_skb[q][entry] = NULL; in ravb_rx_rcar()
943 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_rx_rcar()
954 ts.tv_sec = ((u64) le16_to_cpu(desc->ts_sh) << in ravb_rx_rcar()
955 32) | le32_to_cpu(desc->ts_sl); in ravb_rx_rcar()
956 ts.tv_nsec = le32_to_cpu(desc->ts_n); in ravb_rx_rcar()
957 shhwtstamps->hwtstamp = timespec64_to_ktime(ts); in ravb_rx_rcar()
961 skb->protocol = eth_type_trans(skb, ndev); in ravb_rx_rcar()
962 if (ndev->features & NETIF_F_RXCSUM) in ravb_rx_rcar()
964 napi_gro_receive(&priv->napi[q], skb); in ravb_rx_rcar()
965 stats->rx_packets++; in ravb_rx_rcar()
966 stats->rx_bytes += pkt_len; in ravb_rx_rcar()
969 entry = (++priv->cur_rx[q]) % priv->num_rx_ring[q]; in ravb_rx_rcar()
970 desc = &priv->rx_ring[q][entry]; in ravb_rx_rcar()
974 for (; priv->cur_rx[q] - priv->dirty_rx[q] > 0; priv->dirty_rx[q]++) { in ravb_rx_rcar()
975 entry = priv->dirty_rx[q] % priv->num_rx_ring[q]; in ravb_rx_rcar()
976 desc = &priv->rx_ring[q][entry]; in ravb_rx_rcar()
977 desc->ds_cc = cpu_to_le16(RX_BUF_SZ); in ravb_rx_rcar()
979 if (!priv->rx_skb[q][entry]) { in ravb_rx_rcar()
980 skb = netdev_alloc_skb(ndev, info->max_rx_len); in ravb_rx_rcar()
984 dma_addr = dma_map_single(ndev->dev.parent, skb->data, in ravb_rx_rcar()
985 le16_to_cpu(desc->ds_cc), in ravb_rx_rcar()
991 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_rx_rcar()
992 desc->ds_cc = cpu_to_le16(0); in ravb_rx_rcar()
993 desc->dptr = cpu_to_le32(dma_addr); in ravb_rx_rcar()
994 priv->rx_skb[q][entry] = skb; in ravb_rx_rcar()
998 desc->die_dt = DT_FEMPTY; in ravb_rx_rcar()
1001 *quota -= limit - (++boguscnt); in ravb_rx_rcar()
1010 const struct ravb_hw_info *info = priv->info; in ravb_rx()
1012 return info->receive(ndev, quota, q); in ravb_rx()
1031 const struct ravb_hw_info *info = priv->info; in ravb_stop_dma()
1035 error = ravb_wait(ndev, TCCR, info->tccr_mask, 0); in ravb_stop_dma()
1045 /* Stop the E-MAC's RX/TX processes. */ in ravb_stop_dma()
1053 /* Stop AVB-DMAC process */ in ravb_stop_dma()
1057 /* E-MAC interrupt handler */
1067 pm_wakeup_event(&priv->pdev->dev, 0); in ravb_emac_interrupt_unlocked()
1069 ndev->stats.tx_carrier_errors++; in ravb_emac_interrupt_unlocked()
1072 if (priv->no_avb_link) in ravb_emac_interrupt_unlocked()
1075 if (priv->avb_link_active_low) in ravb_emac_interrupt_unlocked()
1092 spin_lock(&priv->lock); in ravb_emac_interrupt()
1094 spin_unlock(&priv->lock); in ravb_emac_interrupt()
1113 priv->stats[RAVB_BE].rx_over_errors++; in ravb_error_interrupt()
1117 priv->stats[RAVB_NC].rx_over_errors++; in ravb_error_interrupt()
1121 priv->rx_fifo_errors++; in ravb_error_interrupt()
1128 const struct ravb_hw_info *info = priv->info; in ravb_queue_interrupt()
1135 if (napi_schedule_prep(&priv->napi[q])) { in ravb_queue_interrupt()
1137 if (!info->irq_en_dis) { in ravb_queue_interrupt()
1144 __napi_schedule(&priv->napi[q]); in ravb_queue_interrupt()
1174 const struct ravb_hw_info *info = priv->info; in ravb_interrupt()
1178 spin_lock(&priv->lock); in ravb_interrupt()
1191 if (info->nc_queues) { in ravb_interrupt()
1192 for (q = RAVB_NC; q >= RAVB_BE; q--) { in ravb_interrupt()
1202 /* E-MAC status summary */ in ravb_interrupt()
1220 spin_unlock(&priv->lock); in ravb_interrupt()
1232 spin_lock(&priv->lock); in ravb_multi_interrupt()
1252 spin_unlock(&priv->lock); in ravb_multi_interrupt()
1262 spin_lock(&priv->lock); in ravb_dma_interrupt()
1268 spin_unlock(&priv->lock); in ravb_dma_interrupt()
1284 struct net_device *ndev = napi->dev; in ravb_poll()
1286 const struct ravb_hw_info *info = priv->info; in ravb_poll()
1287 bool gptp = info->gptp || info->ccc_gac; in ravb_poll()
1290 int q = napi - priv->napi; in ravb_poll()
1296 entry = priv->cur_rx[q] % priv->num_rx_ring[q]; in ravb_poll()
1297 desc = &priv->gbeth_rx_ring[entry]; in ravb_poll()
1302 if (gptp || desc->die_dt != DT_FEMPTY) { in ravb_poll()
1308 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1313 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1317 /* Re-enable RX/TX interrupts */ in ravb_poll()
1318 spin_lock_irqsave(&priv->lock, flags); in ravb_poll()
1319 if (!info->irq_en_dis) { in ravb_poll()
1326 spin_unlock_irqrestore(&priv->lock, flags); in ravb_poll()
1329 priv->rx_over_errors = priv->stats[RAVB_BE].rx_over_errors; in ravb_poll()
1330 if (info->nc_queues) in ravb_poll()
1331 priv->rx_over_errors += priv->stats[RAVB_NC].rx_over_errors; in ravb_poll()
1332 if (priv->rx_over_errors != ndev->stats.rx_over_errors) in ravb_poll()
1333 ndev->stats.rx_over_errors = priv->rx_over_errors; in ravb_poll()
1334 if (priv->rx_fifo_errors != ndev->stats.rx_fifo_errors) in ravb_poll()
1335 ndev->stats.rx_fifo_errors = priv->rx_fifo_errors; in ravb_poll()
1337 return budget - quota; in ravb_poll()
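
ravb_poll() follows the standard NAPI contract visible above: consume at most the budget, and only re-enable the device interrupt once napi_complete_done() confirms polling may stop. A reduced sketch of that shape; my_rx() and my_irq_enable() are hypothetical placeholders, not driver functions:

#include <linux/netdevice.h>

int my_rx(struct napi_struct *napi, int budget);	/* hypothetical */
void my_irq_enable(struct napi_struct *napi);		/* hypothetical */

static int sketch_poll(struct napi_struct *napi, int budget)
{
	int work_done = my_rx(napi, budget);

	/* Stop polling (and unmask the IRQ) only when under budget. */
	if (work_done < budget && napi_complete_done(napi, work_done))
		my_irq_enable(napi);

	return work_done;
}
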
1344 ravb_modify(ndev, ECMR, ECMR_DM, priv->duplex > 0 ? ECMR_DM : 0); in ravb_set_duplex_gbeth()
1351 const struct ravb_hw_info *info = priv->info; in ravb_adjust_link()
1352 struct phy_device *phydev = ndev->phydev; in ravb_adjust_link()
1356 spin_lock_irqsave(&priv->lock, flags); in ravb_adjust_link()
1358 /* Disable TX and RX right away if the E-MAC link change is ignored */ in ravb_adjust_link()
1359 if (priv->no_avb_link) in ravb_adjust_link()
1362 if (phydev->link) { in ravb_adjust_link()
1363 if (info->half_duplex && phydev->duplex != priv->duplex) { in ravb_adjust_link()
1365 priv->duplex = phydev->duplex; in ravb_adjust_link()
1369 if (phydev->speed != priv->speed) { in ravb_adjust_link()
1371 priv->speed = phydev->speed; in ravb_adjust_link()
1372 info->set_rate(ndev); in ravb_adjust_link()
1374 if (!priv->link) { in ravb_adjust_link()
1377 priv->link = phydev->link; in ravb_adjust_link()
1379 } else if (priv->link) { in ravb_adjust_link()
1381 priv->link = 0; in ravb_adjust_link()
1382 priv->speed = 0; in ravb_adjust_link()
1383 if (info->half_duplex) in ravb_adjust_link()
1384 priv->duplex = -1; in ravb_adjust_link()
1387 /* Enable TX and RX right away if the E-MAC link change is ignored */ in ravb_adjust_link()
1388 if (priv->no_avb_link && phydev->link) in ravb_adjust_link()
1391 spin_unlock_irqrestore(&priv->lock, flags); in ravb_adjust_link()
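
The callback above is the usual phylib adjust_link pattern: compare the state phylib reports against the driver's cached copy, reprogram the MAC only on a change, then cache the new state. A hedged sketch with illustrative private fields:

#include <linux/netdevice.h>
#include <linux/phy.h>

struct sketch_priv { int link; int speed; };	/* illustrative cache */

static void sketch_adjust_link(struct net_device *ndev)
{
	struct sketch_priv *priv = netdev_priv(ndev);
	struct phy_device *phydev = ndev->phydev;

	if (phydev->link && phydev->speed != priv->speed) {
		priv->speed = phydev->speed;
		/* ...reprogram the MAC's speed-dependent clocking here... */
	}
	if (phydev->link != priv->link) {
		priv->link = phydev->link;
		/* ...enable or disable the MAC's RX/TX here... */
	}
	phy_print_status(phydev);
}
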
1400 struct device_node *np = ndev->dev.parent->of_node; in ravb_phy_init()
1402 const struct ravb_hw_info *info = priv->info; in ravb_phy_init()
1408 priv->link = 0; in ravb_phy_init()
1409 priv->speed = 0; in ravb_phy_init()
1410 priv->duplex = -1; in ravb_phy_init()
1413 pn = of_parse_phandle(np, "phy-handle", 0); in ravb_phy_init()
1426 iface = priv->rgmii_override ? PHY_INTERFACE_MODE_RGMII in ravb_phy_init()
1427 : priv->phy_interface; in ravb_phy_init()
1432 err = -ENOENT; in ravb_phy_init()
1436 if (!info->half_duplex) { in ravb_phy_init()
1437 /* 10BASE, Pause and Asym Pause are not supported */ in ravb_phy_init()
1468 phy_start(ndev->phydev); in ravb_phy_start()
1477 return priv->msg_enable; in ravb_get_msglevel()
1484 priv->msg_enable = value; in ravb_set_msglevel()
1542 const struct ravb_hw_info *info = priv->info; in ravb_get_sset_count()
1546 return info->stats_len; in ravb_get_sset_count()
1548 return -EOPNOTSUPP; in ravb_get_sset_count()
1556 const struct ravb_hw_info *info = priv->info; in ravb_get_ethtool_stats()
1561 num_rx_q = info->nc_queues ? NUM_RX_QUEUE : 1; in ravb_get_ethtool_stats()
1562 /* Device-specific stats */ in ravb_get_ethtool_stats()
1564 struct net_device_stats *stats = &priv->stats[q]; in ravb_get_ethtool_stats()
1566 data[i++] = priv->cur_rx[q]; in ravb_get_ethtool_stats()
1567 data[i++] = priv->cur_tx[q]; in ravb_get_ethtool_stats()
1568 data[i++] = priv->dirty_rx[q]; in ravb_get_ethtool_stats()
1569 data[i++] = priv->dirty_tx[q]; in ravb_get_ethtool_stats()
1570 data[i++] = stats->rx_packets; in ravb_get_ethtool_stats()
1571 data[i++] = stats->tx_packets; in ravb_get_ethtool_stats()
1572 data[i++] = stats->rx_bytes; in ravb_get_ethtool_stats()
1573 data[i++] = stats->tx_bytes; in ravb_get_ethtool_stats()
1574 data[i++] = stats->multicast; in ravb_get_ethtool_stats()
1575 data[i++] = stats->rx_errors; in ravb_get_ethtool_stats()
1576 data[i++] = stats->rx_crc_errors; in ravb_get_ethtool_stats()
1577 data[i++] = stats->rx_frame_errors; in ravb_get_ethtool_stats()
1578 data[i++] = stats->rx_length_errors; in ravb_get_ethtool_stats()
1579 data[i++] = stats->rx_missed_errors; in ravb_get_ethtool_stats()
1580 data[i++] = stats->rx_over_errors; in ravb_get_ethtool_stats()
1587 const struct ravb_hw_info *info = priv->info; in ravb_get_strings()
1591 memcpy(data, info->gstrings_stats, info->gstrings_size); in ravb_get_strings()
1603 ring->rx_max_pending = BE_RX_RING_MAX; in ravb_get_ringparam()
1604 ring->tx_max_pending = BE_TX_RING_MAX; in ravb_get_ringparam()
1605 ring->rx_pending = priv->num_rx_ring[RAVB_BE]; in ravb_get_ringparam()
1606 ring->tx_pending = priv->num_tx_ring[RAVB_BE]; in ravb_get_ringparam()
1615 const struct ravb_hw_info *info = priv->info; in ravb_set_ringparam()
1618 if (ring->tx_pending > BE_TX_RING_MAX || in ravb_set_ringparam()
1619 ring->rx_pending > BE_RX_RING_MAX || in ravb_set_ringparam()
1620 ring->tx_pending < BE_TX_RING_MIN || in ravb_set_ringparam()
1621 ring->rx_pending < BE_RX_RING_MIN) in ravb_set_ringparam()
1622 return -EINVAL; in ravb_set_ringparam()
1623 if (ring->rx_mini_pending || ring->rx_jumbo_pending) in ravb_set_ringparam()
1624 return -EINVAL; in ravb_set_ringparam()
1629 if (info->gptp) in ravb_set_ringparam()
1638 synchronize_irq(ndev->irq); in ravb_set_ringparam()
1642 if (info->nc_queues) in ravb_set_ringparam()
1647 priv->num_rx_ring[RAVB_BE] = ring->rx_pending; in ravb_set_ringparam()
1648 priv->num_tx_ring[RAVB_BE] = ring->tx_pending; in ravb_set_ringparam()
1662 if (info->gptp) in ravb_set_ringparam()
1663 ravb_ptp_init(ndev, priv->pdev); in ravb_set_ringparam()
1675 const struct ravb_hw_info *hw_info = priv->info; in ravb_get_ts_info()
1677 info->so_timestamping = in ravb_get_ts_info()
1684 info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON); in ravb_get_ts_info()
1685 info->rx_filters = in ravb_get_ts_info()
1689 if (hw_info->gptp || hw_info->ccc_gac) in ravb_get_ts_info()
1690 info->phc_index = ptp_clock_index(priv->ptp.clock); in ravb_get_ts_info()
1699 wol->supported = WAKE_MAGIC; in ravb_get_wol()
1700 wol->wolopts = priv->wol_enabled ? WAKE_MAGIC : 0; in ravb_get_wol()
1706 const struct ravb_hw_info *info = priv->info; in ravb_set_wol()
1708 if (!info->magic_pkt || (wol->wolopts & ~WAKE_MAGIC)) in ravb_set_wol()
1709 return -EOPNOTSUPP; in ravb_set_wol()
1711 priv->wol_enabled = !!(wol->wolopts & WAKE_MAGIC); in ravb_set_wol()
1713 device_set_wakeup_enable(&priv->pdev->dev, priv->wol_enabled); in ravb_set_wol()
1742 name = devm_kasprintf(dev, GFP_KERNEL, "%s:%s", ndev->name, ch); in ravb_hook_irq()
1744 return -ENOMEM; in ravb_hook_irq()
1756 const struct ravb_hw_info *info = priv->info; in ravb_open()
1757 struct platform_device *pdev = priv->pdev; in ravb_open()
1758 struct device *dev = &pdev->dev; in ravb_open()
1761 napi_enable(&priv->napi[RAVB_BE]); in ravb_open()
1762 if (info->nc_queues) in ravb_open()
1763 napi_enable(&priv->napi[RAVB_NC]); in ravb_open()
1765 if (!info->multi_irqs) { in ravb_open()
1766 error = request_irq(ndev->irq, ravb_interrupt, IRQF_SHARED, in ravb_open()
1767 ndev->name, ndev); in ravb_open()
1773 error = ravb_hook_irq(ndev->irq, ravb_multi_interrupt, ndev, in ravb_open()
1777 error = ravb_hook_irq(priv->emac_irq, ravb_emac_interrupt, ndev, in ravb_open()
1781 error = ravb_hook_irq(priv->rx_irqs[RAVB_BE], ravb_be_interrupt, in ravb_open()
1785 error = ravb_hook_irq(priv->tx_irqs[RAVB_BE], ravb_be_interrupt, in ravb_open()
1789 error = ravb_hook_irq(priv->rx_irqs[RAVB_NC], ravb_nc_interrupt, in ravb_open()
1793 error = ravb_hook_irq(priv->tx_irqs[RAVB_NC], ravb_nc_interrupt, in ravb_open()
1798 if (info->err_mgmt_irqs) { in ravb_open()
1799 error = ravb_hook_irq(priv->erra_irq, ravb_multi_interrupt, in ravb_open()
1803 error = ravb_hook_irq(priv->mgmta_irq, ravb_multi_interrupt, in ravb_open()
1817 if (info->gptp) in ravb_open()
1818 ravb_ptp_init(ndev, priv->pdev); in ravb_open()
1831 if (info->gptp) in ravb_open()
1835 if (!info->multi_irqs) in ravb_open()
1837 if (info->err_mgmt_irqs) in ravb_open()
1838 free_irq(priv->mgmta_irq, ndev); in ravb_open()
1840 if (info->err_mgmt_irqs) in ravb_open()
1841 free_irq(priv->erra_irq, ndev); in ravb_open()
1843 free_irq(priv->tx_irqs[RAVB_NC], ndev); in ravb_open()
1845 free_irq(priv->rx_irqs[RAVB_NC], ndev); in ravb_open()
1847 free_irq(priv->tx_irqs[RAVB_BE], ndev); in ravb_open()
1849 free_irq(priv->rx_irqs[RAVB_BE], ndev); in ravb_open()
1851 free_irq(priv->emac_irq, ndev); in ravb_open()
1853 free_irq(ndev->irq, ndev); in ravb_open()
1855 if (info->nc_queues) in ravb_open()
1856 napi_disable(&priv->napi[RAVB_NC]); in ravb_open()
1857 napi_disable(&priv->napi[RAVB_BE]); in ravb_open()
1871 ndev->stats.tx_errors++; in ravb_tx_timeout()
1873 schedule_work(&priv->work); in ravb_tx_timeout()
1880 const struct ravb_hw_info *info = priv->info; in ravb_tx_timeout_work()
1881 struct net_device *ndev = priv->ndev; in ravb_tx_timeout_work()
1886 schedule_work(&priv->work); in ravb_tx_timeout_work()
1893 if (info->gptp) in ravb_tx_timeout_work()
1903 * re-enables the TX and RX and skips the following in ravb_tx_timeout_work()
1904 * re-initialization procedure. in ravb_tx_timeout_work()
1911 if (info->nc_queues) in ravb_tx_timeout_work()
1918 * should return here to avoid re-enabling the TX and RX in in ravb_tx_timeout_work()
1929 if (info->gptp) in ravb_tx_timeout_work()
1930 ravb_ptp_init(ndev, priv->pdev); in ravb_tx_timeout_work()
1942 const struct ravb_hw_info *info = priv->info; in ravb_start_xmit()
1943 unsigned int num_tx_desc = priv->num_tx_desc; in ravb_start_xmit()
1953 spin_lock_irqsave(&priv->lock, flags); in ravb_start_xmit()
1954 if (priv->cur_tx[q] - priv->dirty_tx[q] > (priv->num_tx_ring[q] - 1) * in ravb_start_xmit()
1959 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
1966 entry = priv->cur_tx[q] % (priv->num_tx_ring[q] * num_tx_desc); in ravb_start_xmit()
1967 priv->tx_skb[q][entry / num_tx_desc] = skb; in ravb_start_xmit()
1970 buffer = PTR_ALIGN(priv->tx_align[q], DPTR_ALIGN) + in ravb_start_xmit()
1972 len = PTR_ALIGN(skb->data, DPTR_ALIGN) - skb->data; in ravb_start_xmit()
1983 * length of the second DMA descriptor (skb->len - len) in ravb_start_xmit()
1989 memcpy(buffer, skb->data, len); in ravb_start_xmit()
1990 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
1992 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
1995 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
1996 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
1997 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
1999 buffer = skb->data + len; in ravb_start_xmit()
2000 len = skb->len - len; in ravb_start_xmit()
2001 dma_addr = dma_map_single(ndev->dev.parent, buffer, len, in ravb_start_xmit()
2003 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2008 desc = &priv->tx_ring[q][entry]; in ravb_start_xmit()
2009 len = skb->len; in ravb_start_xmit()
2010 dma_addr = dma_map_single(ndev->dev.parent, skb->data, skb->len, in ravb_start_xmit()
2012 if (dma_mapping_error(ndev->dev.parent, dma_addr)) in ravb_start_xmit()
2015 desc->ds_tagl = cpu_to_le16(len); in ravb_start_xmit()
2016 desc->dptr = cpu_to_le32(dma_addr); in ravb_start_xmit()
2019 if (info->gptp || info->ccc_gac) { in ravb_start_xmit()
2024 desc--; in ravb_start_xmit()
2025 dma_unmap_single(ndev->dev.parent, dma_addr, in ravb_start_xmit()
2030 ts_skb->skb = skb_get(skb); in ravb_start_xmit()
2031 ts_skb->tag = priv->ts_skb_tag++; in ravb_start_xmit()
2032 priv->ts_skb_tag &= 0x3ff; in ravb_start_xmit()
2033 list_add_tail(&ts_skb->list, &priv->ts_skb_list); in ravb_start_xmit()
2036 skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; in ravb_start_xmit()
2037 desc->tagh_tsr = (ts_skb->tag >> 4) | TX_TSR; in ravb_start_xmit()
2038 desc->ds_tagl |= cpu_to_le16(ts_skb->tag << 12); in ravb_start_xmit()
2046 desc->die_dt = DT_FEND; in ravb_start_xmit()
2047 desc--; in ravb_start_xmit()
2048 desc->die_dt = DT_FSTART; in ravb_start_xmit()
2050 desc->die_dt = DT_FSINGLE; in ravb_start_xmit()
2054 priv->cur_tx[q] += num_tx_desc; in ravb_start_xmit()
2055 if (priv->cur_tx[q] - priv->dirty_tx[q] > in ravb_start_xmit()
2056 (priv->num_tx_ring[q] - 1) * num_tx_desc && in ravb_start_xmit()
2061 spin_unlock_irqrestore(&priv->lock, flags); in ravb_start_xmit()
2065 dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr), in ravb_start_xmit()
2066 le16_to_cpu(desc->ds_tagl), DMA_TO_DEVICE); in ravb_start_xmit()
2069 priv->tx_skb[q][entry / num_tx_desc] = NULL; in ravb_start_xmit()
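
When two descriptors per packet are in use, the head of skb->data up to the next DPTR_ALIGN boundary is copied into an aligned bounce buffer (first descriptor) and the now-aligned remainder is mapped in place (second descriptor). A compilable demonstration of the split arithmetic, using a pretend buffer address:

#include <stdint.h>
#include <stdio.h>

#define DPTR_ALIGN 4	/* TX buffer alignment required on R-Car Gen2 */

int main(void)
{
	uintptr_t data = 0x1003;	/* pretend skb->data, deliberately misaligned */
	size_t pkt_len = 60;
	/* Bytes up to the next boundary: what PTR_ALIGN(data, 4) - data yields. */
	size_t head = (DPTR_ALIGN - (data & (DPTR_ALIGN - 1))) & (DPTR_ALIGN - 1);

	printf("desc 1: %zu bytes via the aligned bounce buffer\n", head);
	printf("desc 2: %zu bytes straight from the packet\n", pkt_len - head);
	return 0;
}
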
2077 return (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) ? RAVB_NC : in ravb_select_queue()
2085 const struct ravb_hw_info *info = priv->info; in ravb_get_stats()
2088 nstats = &ndev->stats; in ravb_get_stats()
2089 stats0 = &priv->stats[RAVB_BE]; in ravb_get_stats()
2091 if (info->tx_counters) { in ravb_get_stats()
2092 nstats->tx_dropped += ravb_read(ndev, TROCR); in ravb_get_stats()
2096 if (info->carrier_counters) { in ravb_get_stats()
2097 nstats->collisions += ravb_read(ndev, CXR41); in ravb_get_stats()
2099 nstats->tx_carrier_errors += ravb_read(ndev, CXR42); in ravb_get_stats()
2103 nstats->rx_packets = stats0->rx_packets; in ravb_get_stats()
2104 nstats->tx_packets = stats0->tx_packets; in ravb_get_stats()
2105 nstats->rx_bytes = stats0->rx_bytes; in ravb_get_stats()
2106 nstats->tx_bytes = stats0->tx_bytes; in ravb_get_stats()
2107 nstats->multicast = stats0->multicast; in ravb_get_stats()
2108 nstats->rx_errors = stats0->rx_errors; in ravb_get_stats()
2109 nstats->rx_crc_errors = stats0->rx_crc_errors; in ravb_get_stats()
2110 nstats->rx_frame_errors = stats0->rx_frame_errors; in ravb_get_stats()
2111 nstats->rx_length_errors = stats0->rx_length_errors; in ravb_get_stats()
2112 nstats->rx_missed_errors = stats0->rx_missed_errors; in ravb_get_stats()
2113 nstats->rx_over_errors = stats0->rx_over_errors; in ravb_get_stats()
2114 if (info->nc_queues) { in ravb_get_stats()
2115 stats1 = &priv->stats[RAVB_NC]; in ravb_get_stats()
2117 nstats->rx_packets += stats1->rx_packets; in ravb_get_stats()
2118 nstats->tx_packets += stats1->tx_packets; in ravb_get_stats()
2119 nstats->rx_bytes += stats1->rx_bytes; in ravb_get_stats()
2120 nstats->tx_bytes += stats1->tx_bytes; in ravb_get_stats()
2121 nstats->multicast += stats1->multicast; in ravb_get_stats()
2122 nstats->rx_errors += stats1->rx_errors; in ravb_get_stats()
2123 nstats->rx_crc_errors += stats1->rx_crc_errors; in ravb_get_stats()
2124 nstats->rx_frame_errors += stats1->rx_frame_errors; in ravb_get_stats()
2125 nstats->rx_length_errors += stats1->rx_length_errors; in ravb_get_stats()
2126 nstats->rx_missed_errors += stats1->rx_missed_errors; in ravb_get_stats()
2127 nstats->rx_over_errors += stats1->rx_over_errors; in ravb_get_stats()
2139 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_mode()
2141 ndev->flags & IFF_PROMISC ? ECMR_PRM : 0); in ravb_set_rx_mode()
2142 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_mode()
2148 struct device_node *np = ndev->dev.parent->of_node; in ravb_close()
2150 const struct ravb_hw_info *info = priv->info; in ravb_close()
2161 if (info->gptp) in ravb_close()
2164 /* Set the config mode to stop the AVB-DMAC's processes */ in ravb_close()
2170 if (info->gptp || info->ccc_gac) { in ravb_close()
2171 list_for_each_entry_safe(ts_skb, ts_skb2, &priv->ts_skb_list, list) { in ravb_close()
2172 list_del(&ts_skb->list); in ravb_close()
2173 kfree_skb(ts_skb->skb); in ravb_close()
2179 if (ndev->phydev) { in ravb_close()
2180 phy_stop(ndev->phydev); in ravb_close()
2181 phy_disconnect(ndev->phydev); in ravb_close()
2186 cancel_work_sync(&priv->work); in ravb_close()
2188 if (info->multi_irqs) { in ravb_close()
2189 free_irq(priv->tx_irqs[RAVB_NC], ndev); in ravb_close()
2190 free_irq(priv->rx_irqs[RAVB_NC], ndev); in ravb_close()
2191 free_irq(priv->tx_irqs[RAVB_BE], ndev); in ravb_close()
2192 free_irq(priv->rx_irqs[RAVB_BE], ndev); in ravb_close()
2193 free_irq(priv->emac_irq, ndev); in ravb_close()
2194 if (info->err_mgmt_irqs) { in ravb_close()
2195 free_irq(priv->erra_irq, ndev); in ravb_close()
2196 free_irq(priv->mgmta_irq, ndev); in ravb_close()
2199 free_irq(ndev->irq, ndev); in ravb_close()
2201 if (info->nc_queues) in ravb_close()
2202 napi_disable(&priv->napi[RAVB_NC]); in ravb_close()
2203 napi_disable(&priv->napi[RAVB_BE]); in ravb_close()
2207 if (info->nc_queues) in ravb_close()
2219 config.tx_type = priv->tstamp_tx_ctrl ? HWTSTAMP_TX_ON : in ravb_hwtstamp_get()
2221 switch (priv->tstamp_rx_ctrl & RAVB_RXTSTAMP_TYPE) { in ravb_hwtstamp_get()
2232 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_get()
2233 -EFAULT : 0; in ravb_hwtstamp_get()
2244 if (copy_from_user(&config, req->ifr_data, sizeof(config))) in ravb_hwtstamp_set()
2245 return -EFAULT; in ravb_hwtstamp_set()
2255 return -ERANGE; in ravb_hwtstamp_set()
2270 priv->tstamp_tx_ctrl = tstamp_tx_ctrl; in ravb_hwtstamp_set()
2271 priv->tstamp_rx_ctrl = tstamp_rx_ctrl; in ravb_hwtstamp_set()
2273 return copy_to_user(req->ifr_data, &config, sizeof(config)) ? in ravb_hwtstamp_set()
2274 -EFAULT : 0; in ravb_hwtstamp_set()
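
Both hwtstamp ioctls above follow the same copy-in, validate, apply, copy-back shape. A hedged kernel-style sketch of the set path, reduced to the bare pattern; the validation shown is only an example:

#include <linux/net_tstamp.h>
#include <linux/netdevice.h>
#include <linux/uaccess.h>

static int sketch_hwtstamp_set(struct net_device *ndev, struct ifreq *req)
{
	struct hwtstamp_config config;

	if (copy_from_user(&config, req->ifr_data, sizeof(config)))
		return -EFAULT;
	if (config.tx_type != HWTSTAMP_TX_OFF && config.tx_type != HWTSTAMP_TX_ON)
		return -ERANGE;
	/* ...translate config into hardware timestamping controls here... */
	return copy_to_user(req->ifr_data, &config, sizeof(config)) ?
		-EFAULT : 0;
}
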
2280 struct phy_device *phydev = ndev->phydev; in ravb_do_ioctl()
2283 return -EINVAL; in ravb_do_ioctl()
2286 return -ENODEV; in ravb_do_ioctl()
2302 ndev->mtu = new_mtu; in ravb_change_mtu()
2305 synchronize_irq(priv->emac_irq); in ravb_change_mtu()
2319 spin_lock_irqsave(&priv->lock, flags); in ravb_set_rx_csum()
2330 spin_unlock_irqrestore(&priv->lock, flags); in ravb_set_rx_csum()
2343 netdev_features_t changed = ndev->features ^ features; in ravb_set_features_rcar()
2348 ndev->features = features; in ravb_set_features_rcar()
2357 const struct ravb_hw_info *info = priv->info; in ravb_set_features()
2359 return info->set_feature(ndev, features); in ravb_set_features()
2380 struct platform_device *pdev = priv->pdev; in ravb_mdio_init()
2381 struct device *dev = &pdev->dev; in ravb_mdio_init()
2387 priv->mdiobb.ops = &bb_ops; in ravb_mdio_init()
2390 priv->mii_bus = alloc_mdio_bitbang(&priv->mdiobb); in ravb_mdio_init()
2391 if (!priv->mii_bus) in ravb_mdio_init()
2392 return -ENOMEM; in ravb_mdio_init()
2395 priv->mii_bus->name = "ravb_mii"; in ravb_mdio_init()
2396 priv->mii_bus->parent = dev; in ravb_mdio_init()
2397 snprintf(priv->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", in ravb_mdio_init()
2398 pdev->name, pdev->id); in ravb_mdio_init()
2401 error = of_mdiobus_register(priv->mii_bus, dev->of_node); in ravb_mdio_init()
2405 pn = of_parse_phandle(dev->of_node, "phy-handle", 0); in ravb_mdio_init()
2408 phydev->mac_managed_pm = true; in ravb_mdio_init()
2409 put_device(&phydev->mdio.dev); in ravb_mdio_init()
2416 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_init()
2424 mdiobus_unregister(priv->mii_bus); in ravb_mdio_release()
2427 free_mdio_bitbang(priv->mii_bus); in ravb_mdio_release()
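
The MDIO side above is built on the kernel's mdio-bitbang helper: the driver supplies only pin-level callbacks and the core clocks whole MDIO frames through them. A hedged sketch of that glue; the stub bodies are mine, not the driver's:

#include <linux/mdio-bitbang.h>
#include <linux/module.h>

static void sketch_set_mdc(struct mdiobb_ctrl *ctrl, int level)
{
	/* drive the MDC pin to 'level' */
}

static void sketch_set_mdio_dir(struct mdiobb_ctrl *ctrl, int output)
{
	/* switch the MDIO pin between output and input */
}

static void sketch_set_mdio_data(struct mdiobb_ctrl *ctrl, int value)
{
	/* drive the MDIO pin to 'value' */
}

static int sketch_get_mdio_data(struct mdiobb_ctrl *ctrl)
{
	return 0;	/* sample the MDIO pin */
}

static const struct mdiobb_ops sketch_bb_ops = {
	.owner = THIS_MODULE,
	.set_mdc = sketch_set_mdc,
	.set_mdio_dir = sketch_set_mdio_dir,
	.set_mdio_data = sketch_set_mdio_data,
	.get_mdio_data = sketch_get_mdio_data,
};
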
2446 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2472 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2495 .max_rx_len = RX_BUF_SZ + RAVB_ALIGN - 1,
2528 { .compatible = "renesas,etheravb-r8a7790", .data = &ravb_gen2_hw_info },
2529 { .compatible = "renesas,etheravb-r8a7794", .data = &ravb_gen2_hw_info },
2530 { .compatible = "renesas,etheravb-rcar-gen2", .data = &ravb_gen2_hw_info },
2531 { .compatible = "renesas,etheravb-r8a7795", .data = &ravb_gen3_hw_info },
2532 { .compatible = "renesas,etheravb-rcar-gen3", .data = &ravb_gen3_hw_info },
2533 { .compatible = "renesas,etheravb-rcar-gen4", .data = &ravb_gen3_hw_info },
2534 { .compatible = "renesas,etheravb-rzv2m", .data = &ravb_rzv2m_hw_info },
2535 { .compatible = "renesas,rzg2l-gbeth", .data = &gbeth_hw_info },
2543 const struct ravb_hw_info *info = priv->info; in ravb_set_gti()
2544 struct device *dev = ndev->dev.parent; in ravb_set_gti()
2548 if (info->gptp_ref_clk) in ravb_set_gti()
2549 rate = clk_get_rate(priv->gptp_clk); in ravb_set_gti()
2551 rate = clk_get_rate(priv->clk); in ravb_set_gti()
2553 return -EINVAL; in ravb_set_gti()
2558 dev_err(dev, "gti.tiv increment 0x%llx is outside the range 0x%x - 0x%x\n", in ravb_set_gti()
2560 return -EINVAL; in ravb_set_gti()
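
The computation feeding the range check is elided above; assuming the usual fixed-point form where gti.tiv is the gPTP tick length in 2^-20 ns units, i.e. (10^9 << 20) / rate, here is a compilable check with an example clock rate:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t rate = 133000000;	/* example gPTP reference clock, Hz */
	uint64_t inc = (1000000000ULL << 20) / rate;

	printf("gti.tiv = 0x%" PRIx64 " (%.3f ns per tick)\n",
	       inc, (double)inc / (1 << 20));
	return 0;
}
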
2571 const struct ravb_hw_info *info = priv->info; in ravb_set_config_mode()
2574 if (info->gptp) { in ravb_set_config_mode()
2580 } else if (info->ccc_gac) { in ravb_set_config_mode()
2596 if (!of_property_read_u32(np, "rx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
2598 priv->rxcidm = !!delay; in ravb_parse_delay_mode()
2601 if (!of_property_read_u32(np, "tx-internal-delay-ps", &delay)) { in ravb_parse_delay_mode()
2603 priv->txcidm = !!delay; in ravb_parse_delay_mode()
2610 /* Fall back to legacy rgmii-*id behavior */ in ravb_parse_delay_mode()
2611 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
2612 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID) { in ravb_parse_delay_mode()
2613 priv->rxcidm = 1; in ravb_parse_delay_mode()
2614 priv->rgmii_override = 1; in ravb_parse_delay_mode()
2617 if (priv->phy_interface == PHY_INTERFACE_MODE_RGMII_ID || in ravb_parse_delay_mode()
2618 priv->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID) { in ravb_parse_delay_mode()
2619 priv->txcidm = 1; in ravb_parse_delay_mode()
2620 priv->rgmii_override = 1; in ravb_parse_delay_mode()
2629 if (priv->rxcidm) in ravb_set_delay_mode()
2631 if (priv->txcidm) in ravb_set_delay_mode()
2638 struct device_node *np = pdev->dev.of_node; in ravb_probe()
2648 dev_err(&pdev->dev, in ravb_probe()
2650 return -EINVAL; in ravb_probe()
2653 rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); in ravb_probe()
2655 return dev_err_probe(&pdev->dev, PTR_ERR(rstc), in ravb_probe()
2661 return -ENOMEM; in ravb_probe()
2663 info = of_device_get_match_data(&pdev->dev); in ravb_probe()
2665 ndev->features = info->net_features; in ravb_probe()
2666 ndev->hw_features = info->net_hw_features; in ravb_probe()
2672 pm_runtime_enable(&pdev->dev); in ravb_probe()
2673 error = pm_runtime_resume_and_get(&pdev->dev); in ravb_probe()
2677 if (info->multi_irqs) { in ravb_probe()
2678 if (info->err_mgmt_irqs) in ravb_probe()
2689 ndev->irq = irq; in ravb_probe()
2691 SET_NETDEV_DEV(ndev, &pdev->dev); in ravb_probe()
2694 priv->info = info; in ravb_probe()
2695 priv->rstc = rstc; in ravb_probe()
2696 priv->ndev = ndev; in ravb_probe()
2697 priv->pdev = pdev; in ravb_probe()
2698 priv->num_tx_ring[RAVB_BE] = BE_TX_RING_SIZE; in ravb_probe()
2699 priv->num_rx_ring[RAVB_BE] = BE_RX_RING_SIZE; in ravb_probe()
2700 if (info->nc_queues) { in ravb_probe()
2701 priv->num_tx_ring[RAVB_NC] = NC_TX_RING_SIZE; in ravb_probe()
2702 priv->num_rx_ring[RAVB_NC] = NC_RX_RING_SIZE; in ravb_probe()
2705 priv->addr = devm_platform_get_and_ioremap_resource(pdev, 0, &res); in ravb_probe()
2706 if (IS_ERR(priv->addr)) { in ravb_probe()
2707 error = PTR_ERR(priv->addr); in ravb_probe()
2711 /* The Ether-specific entries in the device structure. */ in ravb_probe()
2712 ndev->base_addr = res->start; in ravb_probe()
2714 spin_lock_init(&priv->lock); in ravb_probe()
2715 INIT_WORK(&priv->work, ravb_tx_timeout_work); in ravb_probe()
2717 error = of_get_phy_mode(np, &priv->phy_interface); in ravb_probe()
2718 if (error && error != -ENODEV) in ravb_probe()
2721 priv->no_avb_link = of_property_read_bool(np, "renesas,no-ether-link"); in ravb_probe()
2722 priv->avb_link_active_low = in ravb_probe()
2723 of_property_read_bool(np, "renesas,ether-link-active-low"); in ravb_probe()
2725 if (info->multi_irqs) { in ravb_probe()
2726 if (info->err_mgmt_irqs) in ravb_probe()
2734 priv->emac_irq = irq; in ravb_probe()
2741 priv->rx_irqs[i] = irq; in ravb_probe()
2749 priv->tx_irqs[i] = irq; in ravb_probe()
2752 if (info->err_mgmt_irqs) { in ravb_probe()
2758 priv->erra_irq = irq; in ravb_probe()
2765 priv->mgmta_irq = irq; in ravb_probe()
2769 priv->clk = devm_clk_get(&pdev->dev, NULL); in ravb_probe()
2770 if (IS_ERR(priv->clk)) { in ravb_probe()
2771 error = PTR_ERR(priv->clk); in ravb_probe()
2775 priv->refclk = devm_clk_get_optional(&pdev->dev, "refclk"); in ravb_probe()
2776 if (IS_ERR(priv->refclk)) { in ravb_probe()
2777 error = PTR_ERR(priv->refclk); in ravb_probe()
2780 clk_prepare_enable(priv->refclk); in ravb_probe()
2782 if (info->gptp_ref_clk) { in ravb_probe()
2783 priv->gptp_clk = devm_clk_get(&pdev->dev, "gptp"); in ravb_probe()
2784 if (IS_ERR(priv->gptp_clk)) { in ravb_probe()
2785 error = PTR_ERR(priv->gptp_clk); in ravb_probe()
2788 clk_prepare_enable(priv->gptp_clk); in ravb_probe()
2791 ndev->max_mtu = info->rx_max_buf_size - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN); in ravb_probe()
2792 ndev->min_mtu = ETH_MIN_MTU; in ravb_probe()
2794 /* FIXME: R-Car Gen2 has a 4-byte alignment restriction for TX buffers in ravb_probe()
2799 priv->num_tx_desc = info->aligned_tx ? 2 : 1; in ravb_probe()
2802 ndev->netdev_ops = &ravb_netdev_ops; in ravb_probe()
2803 ndev->ethtool_ops = &ravb_ethtool_ops; in ravb_probe()
2810 if (info->gptp || info->ccc_gac) { in ravb_probe()
2820 if (info->internal_delay) { in ravb_probe()
2826 priv->desc_bat_size = sizeof(struct ravb_desc) * DBAT_ENTRY_NUM; in ravb_probe()
2827 priv->desc_bat = dma_alloc_coherent(ndev->dev.parent, priv->desc_bat_size, in ravb_probe()
2828 &priv->desc_bat_dma, GFP_KERNEL); in ravb_probe()
2829 if (!priv->desc_bat) { in ravb_probe()
2830 dev_err(&pdev->dev, in ravb_probe()
2832 priv->desc_bat_size); in ravb_probe()
2833 error = -ENOMEM; in ravb_probe()
2837 priv->desc_bat[q].die_dt = DT_EOS; in ravb_probe()
2838 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_probe()
2841 INIT_LIST_HEAD(&priv->ts_skb_list); in ravb_probe()
2844 if (info->ccc_gac) in ravb_probe()
2848 priv->msg_enable = RAVB_DEF_MSG_ENABLE; in ravb_probe()
2852 if (!is_valid_ether_addr(ndev->dev_addr)) { in ravb_probe()
2853 dev_warn(&pdev->dev, in ravb_probe()
2861 dev_err(&pdev->dev, "failed to initialize MDIO\n"); in ravb_probe()
2865 netif_napi_add(ndev, &priv->napi[RAVB_BE], ravb_poll); in ravb_probe()
2866 if (info->nc_queues) in ravb_probe()
2867 netif_napi_add(ndev, &priv->napi[RAVB_NC], ravb_poll); in ravb_probe()
2874 device_set_wakeup_capable(&pdev->dev, 1); in ravb_probe()
2878 (u32)ndev->base_addr, ndev->dev_addr, ndev->irq); in ravb_probe()
2885 if (info->nc_queues) in ravb_probe()
2886 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_probe()
2888 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_probe()
2891 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_probe()
2892 priv->desc_bat_dma); in ravb_probe()
2895 if (info->ccc_gac) in ravb_probe()
2898 clk_disable_unprepare(priv->gptp_clk); in ravb_probe()
2900 clk_disable_unprepare(priv->refclk); in ravb_probe()
2902 pm_runtime_put(&pdev->dev); in ravb_probe()
2904 pm_runtime_disable(&pdev->dev); in ravb_probe()
2915 const struct ravb_hw_info *info = priv->info; in ravb_remove()
2918 if (info->nc_queues) in ravb_remove()
2919 netif_napi_del(&priv->napi[RAVB_NC]); in ravb_remove()
2920 netif_napi_del(&priv->napi[RAVB_BE]); in ravb_remove()
2925 if (info->ccc_gac) in ravb_remove()
2928 dma_free_coherent(ndev->dev.parent, priv->desc_bat_size, priv->desc_bat, in ravb_remove()
2929 priv->desc_bat_dma); in ravb_remove()
2933 clk_disable_unprepare(priv->gptp_clk); in ravb_remove()
2934 clk_disable_unprepare(priv->refclk); in ravb_remove()
2936 pm_runtime_put_sync(&pdev->dev); in ravb_remove()
2937 pm_runtime_disable(&pdev->dev); in ravb_remove()
2938 reset_control_assert(priv->rstc); in ravb_remove()
2946 const struct ravb_hw_info *info = priv->info; in ravb_wol_setup()
2954 synchronize_irq(priv->emac_irq); in ravb_wol_setup()
2955 if (info->nc_queues) in ravb_wol_setup()
2956 napi_disable(&priv->napi[RAVB_NC]); in ravb_wol_setup()
2957 napi_disable(&priv->napi[RAVB_BE]); in ravb_wol_setup()
2963 return enable_irq_wake(priv->emac_irq); in ravb_wol_setup()
2969 const struct ravb_hw_info *info = priv->info; in ravb_wol_restore()
2971 if (info->nc_queues) in ravb_wol_restore()
2972 napi_enable(&priv->napi[RAVB_NC]); in ravb_wol_restore()
2973 napi_enable(&priv->napi[RAVB_BE]); in ravb_wol_restore()
2980 return disable_irq_wake(priv->emac_irq); in ravb_wol_restore()
2994 if (priv->wol_enabled) in ravb_suspend()
2999 if (priv->info->ccc_gac) in ravb_suspend()
3009 const struct ravb_hw_info *info = priv->info; in ravb_resume()
3013 if (priv->wol_enabled) { in ravb_resume()
3029 if (info->gptp || info->ccc_gac) { in ravb_resume()
3039 if (info->internal_delay) in ravb_resume()
3043 ravb_write(ndev, priv->desc_bat_dma, DBAT); in ravb_resume()
3045 if (priv->info->ccc_gac) in ravb_resume()
3046 ravb_ptp_init(ndev, priv->pdev); in ravb_resume()
3049 if (priv->wol_enabled) { in ravb_resume()
3066 /* Runtime PM callback shared between ->runtime_suspend() in ravb_runtime_nop()
3067 * and ->runtime_resume(). Simply returns success. in ravb_runtime_nop()
3069 * This driver re-initializes all registers after in ravb_runtime_nop()
3070 * pm_runtime_get_sync() anyway so there is no need in ravb_runtime_nop()
3071 * to save and restore registers here. in ravb_runtime_nop()