/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
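/* With the standard <linux/if_ether.h> and <linux/if_vlan.h> values
 * (ETH_HLEN = 14, ETH_FCS_LEN = 4, VLAN_HLEN = 4) this works out to
 * 22 bytes of headroom.
 */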
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

union mgmt_port_ring_entry {
	u64 d64;
	struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

#define MIX_ORING1	0x0
#define MIX_ORING2	0x8
#define MIX_IRING1	0x10
#define MIX_IRING2	0x18
#define MIX_CTL		0x20
#define MIX_IRHWM	0x28
#define MIX_IRCNT	0x30
#define MIX_ORHWM	0x38
#define MIX_ORCNT	0x40
#define MIX_ISR		0x48
#define MIX_INTENA	0x50
#define MIX_REMCNT	0x58
#define MIX_BIST	0x78

#define AGL_GMX_PRT_CFG			0x10
#define AGL_GMX_RX_FRM_CTL		0x18
#define AGL_GMX_RX_FRM_MAX		0x30
#define AGL_GMX_RX_JABBER		0x38
#define AGL_GMX_RX_STATS_CTL		0x50

#define AGL_GMX_RX_STATS_PKTS_DRP	0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP	0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD	0xc0

#define AGL_GMX_RX_ADR_CTL		0x100
#define AGL_GMX_RX_ADR_CAM_EN		0x108
#define AGL_GMX_RX_ADR_CAM0		0x180
#define AGL_GMX_RX_ADR_CAM1		0x188
#define AGL_GMX_RX_ADR_CAM2		0x190
#define AGL_GMX_RX_ADR_CAM3		0x198
#define AGL_GMX_RX_ADR_CAM4		0x1a0
#define AGL_GMX_RX_ADR_CAM5		0x1a8

#define AGL_GMX_TX_CLK			0x208
#define AGL_GMX_TX_STATS_CTL		0x268
#define AGL_GMX_TX_CTL			0x270
#define AGL_GMX_TX_STAT0		0x280
#define AGL_GMX_TX_STAT1		0x288
#define AGL_GMX_TX_STAT2		0x290
#define AGL_GMX_TX_STAT3		0x298
#define AGL_GMX_TX_STAT4		0x2a0
#define AGL_GMX_TX_STAT5		0x2a8
#define AGL_GMX_TX_STAT6		0x2b0
#define AGL_GMX_TX_STAT7		0x2b8
#define AGL_GMX_TX_STAT8		0x2c0
#define AGL_GMX_TX_STAT9		0x2c8
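/* The MIX_* and AGL_* values above are register offsets; they are added
 * to the per-port base addresses kept in struct octeon_mgmt (p->mix and
 * p->agl below) for each cvmx_read_csr()/cvmx_write_csr() access.
 */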
struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;
	u64 agl;
	u64 agl_prt_ctl;
	int port;
	int irq;
	bool has_rx_tstamp;
	u64 *tx_ring;
	dma_addr_t tx_ring_handle;
	unsigned int tx_next;
	unsigned int tx_next_clean;
	unsigned int tx_current_fill;
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding.  */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring.  */
		p->rx_ring[p->rx_next_fill] = re.d64;
		/* Make sure there is no reorder of filling the ring and ringing
		 * the bell
		 */
		wmb();

		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell.  */
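		/* Each write of 1 to MIX_IRING2 appears to hand one newly
		 * filled descriptor to the hardware, matching the single
		 * ring entry added per loop iteration.
		 */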
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer.  */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
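	/* Re-enable the TX interrupt that octeon_mgmt_interrupt() masked
	 * before scheduling this tasklet.
	 */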
	octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers.  */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers.  */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry.  The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}


static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;


	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs.  This can happen if we
		 * increase the MTU.  Buffers that are already in the
		 * rx ring can then end up being too small.  As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet.  */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete_done(napi, work_done);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};

static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
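	/* The port is kept disabled while the address-match CAM is
	 * rewritten; the previous enable state is restored afterwards.
	 */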
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	netdev->mtu = new_mtu;

	/* HW lifts the limit if the frame is VLAN tagged
	 * (+4 bytes per each tag, up to two tags)
	 */
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
	/* Set the hardware to truncate packets larger than the MTU. The jabber
	 * register must be set to a multiple of 8 bytes, so round up. JABBER is
	 * an unconditional limit, so we need to account for two possible VLAN
	 * tags.
	 */
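	/* Worked example: for new_mtu = 1500, max_packet is 1518 and
	 * JABBER is programmed to (1518 + 7 + 8) & 0xfff8 = 1528.
	 */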
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);

	return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
	cvmx_read_csr(p->mix + MIX_ISR);

	if (mixx_isr.s.irthresh) {
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
				      struct ifreq *rq, int cmd)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct hwtstamp_config config;
	union cvmx_mio_ptp_clock_cfg ptp;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	bool have_hw_timestamps = false;

	if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
		return -EFAULT;

	if (config.flags) /* reserved for future extensions */
		return -EINVAL;

	/* Check the status of hardware for timestamps */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		/* Get the current state of the PTP clock */
		ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
		if (!ptp.s.ext_clk_en) {
			/* The clock has not been configured to use an
			 * external source.  Program it to use the main clock
			 * reference.
			 */
			u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
			if (!ptp.s.ptp_en)
				cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
			netdev_info(netdev,
				    "PTP Clock using sclk reference @ %lldHz\n",
				    (NSEC_PER_SEC << 32) / clock_comp);
		} else {
			/* The clock is already programmed to use a GPIO */
			u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
			netdev_info(netdev,
				    "PTP Clock using GPIO%d @ %lld Hz\n",
				    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
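			/* CLOCK_COMP evidently holds nanoseconds-per-cycle in
			 * 32.32 fixed point: (NSEC_PER_SEC << 32) / clock_comp
			 * recovers the reference frequency in Hz, as printed
			 * above.
			 */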
		}

		/* Enable the clock if it wasn't done already */
		if (!ptp.s.ptp_en) {
			ptp.s.ptp_en = 1;
			cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
		}
		have_hw_timestamps = true;
	}

	if (!have_hw_timestamps)
		return -EINVAL;

	switch (config.tx_type) {
	case HWTSTAMP_TX_OFF:
	case HWTSTAMP_TX_ON:
		break;
	default:
		return -ERANGE;
	}

	switch (config.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		p->has_rx_tstamp = false;
		rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
		rxx_frm_ctl.s.ptp_mode = 0;
		cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		p->has_rx_tstamp = have_hw_timestamps;
		config.rx_filter = HWTSTAMP_FILTER_ALL;
		if (p->has_rx_tstamp) {
			rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
			rxx_frm_ctl.s.ptp_mode = 1;
			cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
		}
		break;
	default:
		return -ERANGE;
	}

	if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
		return -EFAULT;

	return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
			     struct ifreq *rq, int cmd)
{
	switch (cmd) {
	case SIOCSHWTSTAMP:
		return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
	default:
		return phy_do_ioctl(netdev, rq, cmd);
	}
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
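	/* On CN6XXX the loop below polls for the TX or RX path to report
	 * idle (up to roughly 10 ms), presumably to let in-flight frames
	 * drain before the port is reconfigured.
	 */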
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Disable GMX before we make any changes. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.en = 0;
	prtx_cfg.s.tx_en = 0;
	prtx_cfg.s.rx_en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		int i;
		for (i = 0; i < 10; i++) {
			prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
			if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
				break;
			mdelay(1);
		}
	}
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	/* Restore the GMX enable state only if link is set */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (!phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = phydev->duplex;

	switch (phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
		}
		break;
	case 0:  /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	unsigned long flags;
	int link_changed = 0;

	if (!phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);

	if (!phydev->link && p->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (p->last_duplex != phydev->duplex ||
	     p->last_link != phydev->link ||
	     p->last_speed != phydev->speed)) {
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = phydev->link;
	p->last_speed = phydev->speed;
	p->last_duplex = phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0)
			netdev_info(netdev, "Link is up - %d/%s\n",
				    phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
		else
			netdev_info(netdev, "Link is down\n");
	}
}

static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator. */
		netif_carrier_on(netdev);
		return 0;
	}

	phydev = of_phy_connect(netdev, p->phy_np,
				octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (!phydev)
		return -ENODEV;

	return 0;
}

static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW.  Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1;        /* Enable the port */
	mix_ctl.s.nbtarb = 0;    /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode =
			(linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
					   netdev->phydev->supported) |
			 linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
					   netdev->phydev->supported)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clock counts are based on the 125 MHz
		 * reference, which has an 8 ns period.  So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock.  External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* Wait for the compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  Recommended
		 * workaround is:
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
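		/* The single write of 0xae below presumably packs these two
		 * values, one per nibble: 0xa = 10 and 0xe = 14.
		 */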
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
1141d6aa60a1SDavid Daney }
1142d6aa60a1SDavid Daney
1143d6aa60a1SDavid Daney octeon_mgmt_rx_fill_ring(netdev);
1144d6aa60a1SDavid Daney
1145d6aa60a1SDavid Daney /* Clear statistics. */
1146d6aa60a1SDavid Daney /* Writing 1 to the STATS_CTL registers makes the counters clear-on-read. */
1147368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1148368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1149368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
1150d6aa60a1SDavid Daney
1151368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1152368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1153368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
1154d6aa60a1SDavid Daney
1155d6aa60a1SDavid Daney /* Clear any pending interrupts */
1156368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
1157d6aa60a1SDavid Daney
1158d6aa60a1SDavid Daney if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1159d6aa60a1SDavid Daney netdev)) {
1160d6aa60a1SDavid Daney dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1161d6aa60a1SDavid Daney goto err_noirq;
1162d6aa60a1SDavid Daney }
1163d6aa60a1SDavid Daney
1164d6aa60a1SDavid Daney /* Interrupt every single RX packet */
1165d6aa60a1SDavid Daney mix_irhwm.u64 = 0;
1166d6aa60a1SDavid Daney mix_irhwm.s.irhwm = 0;
1167368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
1168d6aa60a1SDavid Daney
1169b635e069SDavid Daney /* Interrupt when we have 1 or more packets to clean. */
1170d6aa60a1SDavid Daney mix_orhwm.u64 = 0;
1171eeae05aaSDavid Daney mix_orhwm.s.orhwm = 0;
1172368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
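/* Both watermarks are left at zero: irhwm = 0 raises the RX interrupt
 * for every received packet, and orhwm = 0 raises the TX interrupt as
 * soon as at least one transmitted packet is ready to be cleaned.
 * Presumably raising the thresholds would batch interrupts at some
 * cost in latency.
 */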
1173d6aa60a1SDavid Daney
1174d6aa60a1SDavid Daney /* Enable receive and transmit interrupts */
1175d6aa60a1SDavid Daney mix_intena.u64 = 0;
1176d6aa60a1SDavid Daney mix_intena.s.ithena = 1;
1177d6aa60a1SDavid Daney mix_intena.s.othena = 1;
1178368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
1179d6aa60a1SDavid Daney
1180d6aa60a1SDavid Daney /* Enable packet I/O. */
1181d6aa60a1SDavid Daney
1182d6aa60a1SDavid Daney rxx_frm_ctl.u64 = 0;
11833d305850SChad Reese rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
1184d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_align = 1;
1185a0ce9b1eSDavid Daney /* When set, disables the length check for non-min sized pkts
1186d6aa60a1SDavid Daney * with padding in the client data.
1187d6aa60a1SDavid Daney */
1188d6aa60a1SDavid Daney rxx_frm_ctl.s.pad_len = 1;
1189d6aa60a1SDavid Daney /* When set, disables the length check for VLAN pkts */
1190d6aa60a1SDavid Daney rxx_frm_ctl.s.vlan_len = 1;
1191d6aa60a1SDavid Daney /* When set, PREAMBLE checking is less strict */
1192d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_free = 1;
1193d6aa60a1SDavid Daney /* Control Pause Frames can match station SMAC */
1194d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_smac = 0;
1195d6aa60a1SDavid Daney /* Control Pause Frames can match globally assigned Multicast address */
1196d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_mcst = 1;
1197d6aa60a1SDavid Daney /* Forward pause information to TX block */
1198d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_bck = 1;
1199d6aa60a1SDavid Daney /* Drop Control Pause Frames */
1200d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_drp = 1;
1201d6aa60a1SDavid Daney /* Strip off the preamble */
1202d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_strp = 1;
1203a0ce9b1eSDavid Daney /* This port is configured to send PREAMBLE+SFD to begin every
1204d6aa60a1SDavid Daney * frame. GMX checks that the PREAMBLE is sent correctly.
1205d6aa60a1SDavid Daney */
1206d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_chk = 1;
1207368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
1208d6aa60a1SDavid Daney
1209eeae05aaSDavid Daney /* Configure the port duplex, speed and enables */
1210eeae05aaSDavid Daney octeon_mgmt_disable_link(p);
12119e8e6e88SPhilippe Reynes if (netdev->phydev)
1212eeae05aaSDavid Daney octeon_mgmt_update_link(p);
1213eeae05aaSDavid Daney octeon_mgmt_enable_link(p);
1214d6aa60a1SDavid Daney
1215d6aa60a1SDavid Daney p->last_link = 0;
1216eeae05aaSDavid Daney p->last_speed = 0;
1217eeae05aaSDavid Daney /* The PHY is not present in the simulator. The carrier is enabled
1218eeae05aaSDavid Daney * while initializing the PHY for the simulator; leave it enabled.
1219eeae05aaSDavid Daney */
12209e8e6e88SPhilippe Reynes if (netdev->phydev) {
1221d6aa60a1SDavid Daney netif_carrier_off(netdev);
12229e8e6e88SPhilippe Reynes phy_start_aneg(netdev->phydev);
1223d6aa60a1SDavid Daney }
1224d6aa60a1SDavid Daney
1225d6aa60a1SDavid Daney netif_wake_queue(netdev);
1226d6aa60a1SDavid Daney napi_enable(&p->napi);
1227d6aa60a1SDavid Daney
1228d6aa60a1SDavid Daney return 0;
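/* Error unwinding: note that err_noirq falls through into err_nomem,
 * so a failure after both rings are set up tears down the RX ring and
 * then the TX ring, in reverse order of allocation.
 */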
1229d6aa60a1SDavid Daney err_noirq:
1230d6aa60a1SDavid Daney octeon_mgmt_reset_hw(p);
1231d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->rx_ring_handle,
1232d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1233d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1234d6aa60a1SDavid Daney kfree(p->rx_ring);
1235d6aa60a1SDavid Daney err_nomem:
1236d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->tx_ring_handle,
1237d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1238d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1239d6aa60a1SDavid Daney kfree(p->tx_ring);
1240d6aa60a1SDavid Daney return -ENOMEM;
1241d6aa60a1SDavid Daney }
1242d6aa60a1SDavid Daney
1243d6aa60a1SDavid Daney static int octeon_mgmt_stop(struct net_device *netdev)
1244d6aa60a1SDavid Daney {
1245d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
1246d6aa60a1SDavid Daney
1247d6aa60a1SDavid Daney napi_disable(&p->napi);
1248d6aa60a1SDavid Daney netif_stop_queue(netdev);
1249d6aa60a1SDavid Daney
12509e8e6e88SPhilippe Reynes if (netdev->phydev)
12519e8e6e88SPhilippe Reynes phy_disconnect(netdev->phydev);
1252d6aa60a1SDavid Daney
1253d6aa60a1SDavid Daney netif_carrier_off(netdev);
1254d6aa60a1SDavid Daney
1255d6aa60a1SDavid Daney octeon_mgmt_reset_hw(p);
1256d6aa60a1SDavid Daney
1257d6aa60a1SDavid Daney free_irq(p->irq, netdev);
1258d6aa60a1SDavid Daney
1259d6aa60a1SDavid Daney /* dma_unmap is a nop on Octeon, so just free everything. */
1260d6aa60a1SDavid Daney skb_queue_purge(&p->tx_list);
1261d6aa60a1SDavid Daney skb_queue_purge(&p->rx_list);
1262d6aa60a1SDavid Daney
1263d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->rx_ring_handle,
1264d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1265d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1266d6aa60a1SDavid Daney kfree(p->rx_ring);
1267d6aa60a1SDavid Daney
1268d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->tx_ring_handle,
1269d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1270d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1271d6aa60a1SDavid Daney kfree(p->tx_ring);
1272d6aa60a1SDavid Daney
1273d6aa60a1SDavid Daney return 0;
1274d6aa60a1SDavid Daney }
1275d6aa60a1SDavid Daney
1276ac1172deSYueHaibing static netdev_tx_t
1277ac1172deSYueHaibing octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1278d6aa60a1SDavid Daney {
1279d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
1280d6aa60a1SDavid Daney union mgmt_port_ring_entry re;
1281d6aa60a1SDavid Daney unsigned long flags;
1282ac1172deSYueHaibing netdev_tx_t rv = NETDEV_TX_BUSY;
1283d6aa60a1SDavid Daney
1284d6aa60a1SDavid Daney re.d64 = 0;
12853d305850SChad Reese re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
1286d6aa60a1SDavid Daney re.s.len = skb->len;
1287d6aa60a1SDavid Daney re.s.addr = dma_map_single(p->dev, skb->data,
1288d6aa60a1SDavid Daney skb->len,
1289d6aa60a1SDavid Daney DMA_TO_DEVICE);
1290d6aa60a1SDavid Daney
1291d6aa60a1SDavid Daney spin_lock_irqsave(&p->tx_list.lock, flags);
1292d6aa60a1SDavid Daney
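/* If the ring is one entry short of full, stop the queue and re-check
 * under the lock: completions may have freed entries in the window
 * where the lock was dropped. If the ring is still completely full,
 * undo the DMA mapping and return NETDEV_TX_BUSY so the stack retries
 * the skb later.
 */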
12934e4a4f14SDavid Daney if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
12944e4a4f14SDavid Daney spin_unlock_irqrestore(&p->tx_list.lock, flags);
12954e4a4f14SDavid Daney netif_stop_queue(netdev);
12964e4a4f14SDavid Daney spin_lock_irqsave(&p->tx_list.lock, flags);
12974e4a4f14SDavid Daney }
12984e4a4f14SDavid Daney
1299d6aa60a1SDavid Daney if (unlikely(p->tx_current_fill >=
1300d6aa60a1SDavid Daney ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1301d6aa60a1SDavid Daney spin_unlock_irqrestore(&p->tx_list.lock, flags);
1302d6aa60a1SDavid Daney dma_unmap_single(p->dev, re.s.addr, re.s.len,
1303d6aa60a1SDavid Daney DMA_TO_DEVICE);
13044e4a4f14SDavid Daney goto out;
1305d6aa60a1SDavid Daney }
1306d6aa60a1SDavid Daney
1307d6aa60a1SDavid Daney __skb_queue_tail(&p->tx_list, skb);
1308d6aa60a1SDavid Daney
1309d6aa60a1SDavid Daney /* Put it in the ring. */
1310d6aa60a1SDavid Daney p->tx_ring[p->tx_next] = re.d64;
1311d6aa60a1SDavid Daney p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1312d6aa60a1SDavid Daney p->tx_current_fill++;
1313d6aa60a1SDavid Daney
1314d6aa60a1SDavid Daney spin_unlock_irqrestore(&p->tx_list.lock, flags);
1315d6aa60a1SDavid Daney
1316d6aa60a1SDavid Daney dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1317d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1318d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1319d6aa60a1SDavid Daney
1320d6aa60a1SDavid Daney netdev->stats.tx_packets++;
1321d6aa60a1SDavid Daney netdev->stats.tx_bytes += skb->len;
1322d6aa60a1SDavid Daney
1323d6aa60a1SDavid Daney /* Ring the bell. */
1324368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ORING2, 1);
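/* Writing 1 to MIX_ORING2 presumably credits the outbound ring with
 * one freshly filled descriptor, telling the hardware it may fetch
 * and transmit it.
 */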
1325d6aa60a1SDavid Daney
1326860e9538SFlorian Westphal netif_trans_update(netdev);
13274e4a4f14SDavid Daney rv = NETDEV_TX_OK;
13284e4a4f14SDavid Daney out:
1329d6aa60a1SDavid Daney octeon_mgmt_update_tx_stats(netdev);
13304e4a4f14SDavid Daney return rv;
1331d6aa60a1SDavid Daney }
1332d6aa60a1SDavid Daney
1333d6aa60a1SDavid Daney #ifdef CONFIG_NET_POLL_CONTROLLER
1334d6aa60a1SDavid Daney static void octeon_mgmt_poll_controller(struct net_device *netdev)
1335d6aa60a1SDavid Daney {
1336d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
1337d6aa60a1SDavid Daney
1338d6aa60a1SDavid Daney octeon_mgmt_receive_packets(p, 16);
1339d6aa60a1SDavid Daney octeon_mgmt_update_rx_stats(netdev);
1340d6aa60a1SDavid Daney }
1341d6aa60a1SDavid Daney #endif
1342d6aa60a1SDavid Daney
1343d6aa60a1SDavid Daney static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1344d6aa60a1SDavid Daney struct ethtool_drvinfo *info)
1345d6aa60a1SDavid Daney {
13467826d43fSJiri Pirko strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
1347d6aa60a1SDavid Daney }
1348d6aa60a1SDavid Daney
1349f21105dfSDavid Daney static int octeon_mgmt_nway_reset(struct net_device *dev)
1350f21105dfSDavid Daney {
1351f21105dfSDavid Daney if (!capable(CAP_NET_ADMIN))
1352f21105dfSDavid Daney return -EPERM;
1353f21105dfSDavid Daney
13549e8e6e88SPhilippe Reynes if (dev->phydev)
13559e8e6e88SPhilippe Reynes return phy_start_aneg(dev->phydev);
1356f21105dfSDavid Daney
1357f21105dfSDavid Daney return -EOPNOTSUPP;
1358d6aa60a1SDavid Daney }
1359d6aa60a1SDavid Daney
1360d6aa60a1SDavid Daney static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1361d6aa60a1SDavid Daney .get_drvinfo = octeon_mgmt_get_drvinfo,
1362f21105dfSDavid Daney .nway_reset = octeon_mgmt_nway_reset,
1363f21105dfSDavid Daney .get_link = ethtool_op_get_link,
1364f4400dedSPhilippe Reynes .get_link_ksettings = phy_ethtool_get_link_ksettings,
1365f4400dedSPhilippe Reynes .set_link_ksettings = phy_ethtool_set_link_ksettings,
1366d6aa60a1SDavid Daney };
1367d6aa60a1SDavid Daney
1368d6aa60a1SDavid Daney static const struct net_device_ops octeon_mgmt_ops = {
1369d6aa60a1SDavid Daney .ndo_open = octeon_mgmt_open,
1370d6aa60a1SDavid Daney .ndo_stop = octeon_mgmt_stop,
1371d6aa60a1SDavid Daney .ndo_start_xmit = octeon_mgmt_xmit,
1372d6aa60a1SDavid Daney .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
1373d6aa60a1SDavid Daney .ndo_set_mac_address = octeon_mgmt_set_mac_address,
1374d6aa60a1SDavid Daney .ndo_do_ioctl = octeon_mgmt_ioctl,
1375d6aa60a1SDavid Daney .ndo_change_mtu = octeon_mgmt_change_mtu,
1376d6aa60a1SDavid Daney #ifdef CONFIG_NET_POLL_CONTROLLER
1377d6aa60a1SDavid Daney .ndo_poll_controller = octeon_mgmt_poll_controller,
1378d6aa60a1SDavid Daney #endif
1379d6aa60a1SDavid Daney };
1380d6aa60a1SDavid Daney
13815bc7ec70SBill Pemberton static int octeon_mgmt_probe(struct platform_device *pdev)
1382d6aa60a1SDavid Daney {
1383d6aa60a1SDavid Daney struct net_device *netdev;
1384d6aa60a1SDavid Daney struct octeon_mgmt *p;
1385368bec0dSDavid Daney const __be32 *data;
1386368bec0dSDavid Daney const u8 *mac;
1387368bec0dSDavid Daney struct resource *res_mix;
1388368bec0dSDavid Daney struct resource *res_agl;
1389eeae05aaSDavid Daney struct resource *res_agl_prt_ctl;
1390368bec0dSDavid Daney int len;
1391368bec0dSDavid Daney int result;
1392d6aa60a1SDavid Daney
1393d6aa60a1SDavid Daney netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1394d6aa60a1SDavid Daney if (netdev == NULL)
1395d6aa60a1SDavid Daney return -ENOMEM;
1396d6aa60a1SDavid Daney
1397052958e3SDavid Daney SET_NETDEV_DEV(netdev, &pdev->dev);
1398052958e3SDavid Daney
13998513fbd8SJingoo Han platform_set_drvdata(pdev, netdev);
1400d6aa60a1SDavid Daney p = netdev_priv(netdev);
1401d6aa60a1SDavid Daney netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
1402d6aa60a1SDavid Daney OCTEON_MGMT_NAPI_WEIGHT);
1403d6aa60a1SDavid Daney
1404d6aa60a1SDavid Daney p->netdev = netdev;
1405d6aa60a1SDavid Daney p->dev = &pdev->dev;
14063d305850SChad Reese p->has_rx_tstamp = false;
1407d6aa60a1SDavid Daney
1408368bec0dSDavid Daney data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1409368bec0dSDavid Daney if (data && len == sizeof(*data)) {
1410368bec0dSDavid Daney p->port = be32_to_cpup(data);
1411368bec0dSDavid Daney } else {
1412368bec0dSDavid Daney dev_err(&pdev->dev, "no 'cell-index' property\n");
1413368bec0dSDavid Daney result = -ENXIO;
1414368bec0dSDavid Daney goto err;
1415368bec0dSDavid Daney }
1416368bec0dSDavid Daney
1417d6aa60a1SDavid Daney snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1418d6aa60a1SDavid Daney
1419368bec0dSDavid Daney result = platform_get_irq(pdev, 0);
1420368bec0dSDavid Daney if (result < 0)
1421d6aa60a1SDavid Daney goto err;
1422d6aa60a1SDavid Daney
1423368bec0dSDavid Daney p->irq = result;
1424368bec0dSDavid Daney
1425368bec0dSDavid Daney res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1426368bec0dSDavid Daney if (res_mix == NULL) {
1427368bec0dSDavid Daney dev_err(&pdev->dev, "no 'reg' resource\n");
1428368bec0dSDavid Daney result = -ENXIO;
1429368bec0dSDavid Daney goto err;
1430368bec0dSDavid Daney }
1431368bec0dSDavid Daney
1432368bec0dSDavid Daney res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1433368bec0dSDavid Daney if (res_agl == NULL) {
1434368bec0dSDavid Daney dev_err(&pdev->dev, "no 'reg' resource\n");
1435368bec0dSDavid Daney result = -ENXIO;
1436368bec0dSDavid Daney goto err;
1437368bec0dSDavid Daney }
1438368bec0dSDavid Daney
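/* Note that the memory resource index jumps from 1 to 3 here; index 2
 * is apparently used for something else in this platform's resource
 * layout.
 */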
1439eeae05aaSDavid Daney res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1440eeae05aaSDavid Daney if (res_agl_prt_ctl == NULL) {
1441eeae05aaSDavid Daney dev_err(&pdev->dev, "no 'reg' resource\n");
1442eeae05aaSDavid Daney result = -ENXIO;
1443eeae05aaSDavid Daney goto err;
1444eeae05aaSDavid Daney }
1445eeae05aaSDavid Daney
1446368bec0dSDavid Daney p->mix_phys = res_mix->start;
1447368bec0dSDavid Daney p->mix_size = resource_size(res_mix);
1448368bec0dSDavid Daney p->agl_phys = res_agl->start;
1449368bec0dSDavid Daney p->agl_size = resource_size(res_agl);
1450eeae05aaSDavid Daney p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1451eeae05aaSDavid Daney p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1452368bec0dSDavid Daney
1453368bec0dSDavid Daney
1454368bec0dSDavid Daney if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1455368bec0dSDavid Daney res_mix->name)) {
1456368bec0dSDavid Daney dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1457368bec0dSDavid Daney res_mix->name);
1458368bec0dSDavid Daney result = -ENXIO;
1459368bec0dSDavid Daney goto err;
1460368bec0dSDavid Daney }
1461368bec0dSDavid Daney
1462368bec0dSDavid Daney if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1463368bec0dSDavid Daney res_agl->name)) {
1464368bec0dSDavid Daney result = -ENXIO;
1465368bec0dSDavid Daney dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1466368bec0dSDavid Daney res_agl->name);
1467368bec0dSDavid Daney goto err;
1468368bec0dSDavid Daney }
1469368bec0dSDavid Daney
1470eeae05aaSDavid Daney if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1471eeae05aaSDavid Daney p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1472eeae05aaSDavid Daney result = -ENXIO;
1473eeae05aaSDavid Daney dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1474eeae05aaSDavid Daney res_agl_prt_ctl->name);
1475eeae05aaSDavid Daney goto err;
1476eeae05aaSDavid Daney }
1477368bec0dSDavid Daney
1478368bec0dSDavid Daney p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1479368bec0dSDavid Daney p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1480eeae05aaSDavid Daney p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1481eeae05aaSDavid Daney p->agl_prt_ctl_size);
1482162809dfSArvind Yadav if (!p->mix || !p->agl || !p->agl_prt_ctl) {
1483162809dfSArvind Yadav dev_err(&pdev->dev, "failed to map I/O memory\n");
1484162809dfSArvind Yadav result = -ENOMEM;
1485162809dfSArvind Yadav goto err;
1486162809dfSArvind Yadav }
1487162809dfSArvind Yadav
1488d6aa60a1SDavid Daney spin_lock_init(&p->lock);
1489d6aa60a1SDavid Daney
1490d6aa60a1SDavid Daney skb_queue_head_init(&p->tx_list);
1491d6aa60a1SDavid Daney skb_queue_head_init(&p->rx_list);
1492d6aa60a1SDavid Daney tasklet_init(&p->tx_clean_tasklet,
1493d6aa60a1SDavid Daney octeon_mgmt_clean_tx_tasklet, (unsigned long)p);
1494d6aa60a1SDavid Daney
149501789349SJiri Pirko netdev->priv_flags |= IFF_UNICAST_FLT;
149601789349SJiri Pirko
1497d6aa60a1SDavid Daney netdev->netdev_ops = &octeon_mgmt_ops;
1498d6aa60a1SDavid Daney netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1499d6aa60a1SDavid Daney
1500109cc165SJarod Wilson netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1501e4dd5608SAlexander Sverdlin netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
1502109cc165SJarod Wilson
1503368bec0dSDavid Daney mac = of_get_mac_address(pdev->dev.of_node);
1504d6aa60a1SDavid Daney
1505a51645f7SPetr Štetiar if (!IS_ERR(mac))
15062d2924afSPetr Štetiar ether_addr_copy(netdev->dev_addr, mac);
150715c6ff3bSJiri Pirko else
1508f321238bSDavid Daney eth_hw_addr_random(netdev);
1509d6aa60a1SDavid Daney
1510368bec0dSDavid Daney p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1511368bec0dSDavid Daney
151226741a69SRussell King result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
151326741a69SRussell King if (result)
151426741a69SRussell King goto err;
1515368bec0dSDavid Daney
1516eeae05aaSDavid Daney netif_carrier_off(netdev);
1517368bec0dSDavid Daney result = register_netdev(netdev);
1518368bec0dSDavid Daney if (result)
1519d6aa60a1SDavid Daney goto err;
1520d6aa60a1SDavid Daney
1521d6aa60a1SDavid Daney return 0;
1522368bec0dSDavid Daney
1523d6aa60a1SDavid Daney err:
152446997066SPeter Chen of_node_put(p->phy_np);
1525d6aa60a1SDavid Daney free_netdev(netdev);
1526368bec0dSDavid Daney return result;
1527d6aa60a1SDavid Daney }
1528d6aa60a1SDavid Daney
15295bc7ec70SBill Pemberton static int octeon_mgmt_remove(struct platform_device *pdev)
1530d6aa60a1SDavid Daney {
15318513fbd8SJingoo Han struct net_device *netdev = platform_get_drvdata(pdev);
153246997066SPeter Chen struct octeon_mgmt *p = netdev_priv(netdev);
1533d6aa60a1SDavid Daney
1534d6aa60a1SDavid Daney unregister_netdev(netdev);
153546997066SPeter Chen of_node_put(p->phy_np);
1536d6aa60a1SDavid Daney free_netdev(netdev);
1537d6aa60a1SDavid Daney return 0;
1538d6aa60a1SDavid Daney }
1539d6aa60a1SDavid Daney
1540437dab40SFabian Frederick static const struct of_device_id octeon_mgmt_match[] = {
1541368bec0dSDavid Daney {
1542368bec0dSDavid Daney .compatible = "cavium,octeon-5750-mix",
1543368bec0dSDavid Daney },
1544368bec0dSDavid Daney {},
1545368bec0dSDavid Daney };
1546368bec0dSDavid Daney MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1547368bec0dSDavid Daney
1548d6aa60a1SDavid Daney static struct platform_driver octeon_mgmt_driver = {
1549d6aa60a1SDavid Daney .driver = {
1550d6aa60a1SDavid Daney .name = "octeon_mgmt",
1551368bec0dSDavid Daney .of_match_table = octeon_mgmt_match,
1552d6aa60a1SDavid Daney },
1553d6aa60a1SDavid Daney .probe = octeon_mgmt_probe,
15545bc7ec70SBill Pemberton .remove = octeon_mgmt_remove,
1555d6aa60a1SDavid Daney };
1556d6aa60a1SDavid Daney
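/* The extern below is declared here rather than in a shared header;
 * as the comment in octeon_mgmt_mod_init() notes, calling it serves
 * only to force the Octeon mdiobus driver module to be loaded before
 * this one.
 */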
1557d6aa60a1SDavid Daney extern void octeon_mdiobus_force_mod_depencency(void);
1558d6aa60a1SDavid Daney
1559d6aa60a1SDavid Daney static int __init octeon_mgmt_mod_init(void)
1560d6aa60a1SDavid Daney {
1561d6aa60a1SDavid Daney /* Force our mdiobus driver module to be loaded first. */
1562d6aa60a1SDavid Daney octeon_mdiobus_force_mod_depencency();
1563d6aa60a1SDavid Daney return platform_driver_register(&octeon_mgmt_driver);
1564d6aa60a1SDavid Daney }
1565d6aa60a1SDavid Daney
1566d6aa60a1SDavid Daney static void __exit octeon_mgmt_mod_exit(void)
1567d6aa60a1SDavid Daney {
1568d6aa60a1SDavid Daney platform_driver_unregister(&octeon_mgmt_driver);
1569d6aa60a1SDavid Daney }
1570d6aa60a1SDavid Daney
1571d6aa60a1SDavid Daney module_init(octeon_mgmt_mod_init);
1572d6aa60a1SDavid Daney module_exit(octeon_mgmt_mod_exit);
1573d6aa60a1SDavid Daney
1574d6aa60a1SDavid Daney MODULE_DESCRIPTION(DRV_DESCRIPTION);
1575d6aa60a1SDavid Daney MODULE_AUTHOR("David Daney");
1576d6aa60a1SDavid Daney MODULE_LICENSE("GPL");
1577