/* This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_VERSION "2.0"
#define DRV_DESCRIPTION \
	"Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128

/* Allow 8 bytes for vlan and FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)

/* Hardware descriptor format shared by the RX and TX rings.  Each ring
 * entry is a single 64-bit word; the bitfield layout below depends on
 * the endianness the kernel was built for.
 */
union mgmt_port_ring_entry {
	u64 d64;
	struct {
/* RX completion codes reported in the 'code' field. */
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
		u64 reserved_62_63:2;
		/* Length of the buffer/packet in bytes */
		u64 len:14;
		/* For TX, signals that the packet should be timestamped */
		u64 tstamp:1;
		/* The RX error code */
		u64 code:7;
		/* Physical address of the buffer */
		u64 addr:40;
#else
		/* Same fields, little-endian bitfield order. */
		u64 addr:40;
		u64 code:7;
		u64 tstamp:1;
		u64 len:14;
		u64 reserved_62_63:2;
#endif
	} s;
};

/* MIX (packet DMA engine) register offsets, relative to p->mix. */
#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

/* AGL (management-port Ethernet MAC) register offsets, relative to p->agl. */
#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8

/* Per-port driver state, stored as the netdev private area. */
struct octeon_mgmt {
	struct net_device *netdev;
	u64 mix;		/* base address for MIX (DMA engine) CSR accesses */
	u64 agl;		/* base address for AGL (MAC) CSR accesses */
	u64 agl_prt_ctl;	/* base address for the AGL port-control CSR */
	int port;
	int irq;
	bool has_rx_tstamp;	/* true when RX hardware timestamping is enabled */
	u64 *tx_ring;		/* TX descriptor ring (CPU view) */
	dma_addr_t tx_ring_handle;	/* TX ring DMA address */
	unsigned int tx_next;		/* next TX slot to fill */
	unsigned int tx_next_clean;	/* next TX slot to reclaim */
	unsigned int tx_current_fill;	/* descriptors currently owned by HW */
	/* The tx_list lock also protects the ring related variables */
	struct sk_buff_head tx_list;

	/* RX variables only touched in napi_poll.  No locking necessary. */
	u64 *rx_ring;
	dma_addr_t rx_ring_handle;
	unsigned int rx_next;
	unsigned int rx_next_fill;
	unsigned int rx_current_fill;
	struct sk_buff_head rx_list;

	spinlock_t lock;	/* protects stats updates and MIX_INTENA rmw */
	unsigned int last_duplex;
	unsigned int last_link;
	unsigned int last_speed;
	struct device *dev;
	struct napi_struct napi;
	struct tasklet_struct tx_clean_tasklet;
	struct device_node *phy_np;
	resource_size_t mix_phys;
	resource_size_t mix_size;
	resource_size_t agl_phys;
	resource_size_t agl_size;
	resource_size_t agl_prt_ctl_phys;
	resource_size_t agl_prt_ctl_size;
};

/* Enable or disable the RX threshold interrupt.  Read-modify-write of
 * MIX_INTENA is done under p->lock because the TX path updates the same
 * register.
 */
static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.ithena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

/* Enable or disable the TX threshold interrupt; see
 * octeon_mgmt_set_rx_irq() for the locking rationale.
 */
static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
	union cvmx_mixx_intena mix_intena;
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
	mix_intena.s.othena = enable ? 1 : 0;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
	spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
	octeon_mgmt_set_tx_irq(p, 0);
}

/* Maximum number of entries we ever put in a ring; 8 entries of slack
 * are kept below the full ring size (presumably so the hardware never
 * sees a completely full ring -- TODO confirm against the MIX spec).
 */
static unsigned int ring_max_fill(unsigned int ring_size)
{
	return ring_size - 8;
}

/* Size in bytes of a ring with the given number of entries. */
static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
	return ring_size * sizeof(union mgmt_port_ring_entry);
}

/* Refill the RX ring with freshly allocated skbs up to the high-water
 * mark, handing each buffer to the hardware via the MIX_IRING2 doorbell.
 */
static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
		unsigned int size;
		union mgmt_port_ring_entry re;
		struct sk_buff *skb;

		/* CN56XX pass 1 needs 8 bytes of padding. */
		size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

		skb = netdev_alloc_skb(netdev, size);
		if (!skb)
			break;
		skb_reserve(skb, NET_IP_ALIGN);
		/* Buffers complete in order, so the skb list mirrors the
		 * hardware ring.
		 */
		__skb_queue_tail(&p->rx_list, skb);

		re.d64 = 0;
		re.s.len = size;
		re.s.addr = dma_map_single(p->dev, skb->data,
					   size,
					   DMA_FROM_DEVICE);

		/* Put it in the ring. */
		p->rx_ring[p->rx_next_fill] = re.d64;
		/* Make the descriptor visible to the device before the
		 * doorbell write below.
		 */
		dma_sync_single_for_device(p->dev, p->rx_ring_handle,
					   ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
					   DMA_BIDIRECTIONAL);
		p->rx_next_fill =
			(p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
		p->rx_current_fill++;
		/* Ring the bell. */
		cvmx_write_csr(p->mix + MIX_IRING2, 1);
	}
}

/* Reclaim completed TX descriptors.  MIX_ORCNT holds the number of
 * finished output packets; each one is acknowledged back to the
 * hardware, its skb unmapped and freed, and any pending TX hardware
 * timestamp delivered.  Runs from the tx-clean tasklet and may race
 * with the transmit path, hence the tx_list lock.
 */
static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
	union cvmx_mixx_orcnt mix_orcnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	int cleaned = 0;
	unsigned long flags;

	mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	while (mix_orcnt.s.orcnt) {
		spin_lock_irqsave(&p->tx_list.lock, flags);

		/* Re-read under the lock in case another cleaner drained
		 * the count first.
		 */
		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

		if (mix_orcnt.s.orcnt == 0) {
			spin_unlock_irqrestore(&p->tx_list.lock, flags);
			break;
		}

		dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
					ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
					DMA_BIDIRECTIONAL);

		re.d64 = p->tx_ring[p->tx_next_clean];
		p->tx_next_clean =
			(p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
		skb = __skb_dequeue(&p->tx_list);

		/* Writing 1 subtracts one from the pending count. */
		mix_orcnt.u64 = 0;
		mix_orcnt.s.orcnt = 1;

		/* Acknowledge to hardware that we have the buffer. */
		cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
		p->tx_current_fill--;

		spin_unlock_irqrestore(&p->tx_list.lock, flags);

		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);

		/* Read the hardware TX timestamp if one was recorded */
		if (unlikely(re.s.tstamp)) {
			struct skb_shared_hwtstamps ts;
			u64 ns;

			memset(&ts, 0, sizeof(ts));
			/* Read the timestamp */
			ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
			/* Remove the timestamp from the FIFO */
			cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
			/* Tell the kernel about the timestamp */
			ts.hwtstamp = ns_to_ktime(ns);
			skb_tstamp_tx(skb, &ts);
		}

		dev_kfree_skb_any(skb);
		cleaned++;

		mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
	}

	/* Ring slots were freed, so the queue can make progress again. */
	if (cleaned && netif_queue_stopped(p->netdev))
		netif_wake_queue(p->netdev);
}

/* Tasklet body: reclaim TX buffers, then re-arm the TX interrupt that
 * the IRQ handler disabled before scheduling us.
 */
static void octeon_mgmt_clean_tx_tasklet(unsigned long arg)
{
	struct octeon_mgmt *p = (struct octeon_mgmt *)arg;
	octeon_mgmt_clean_tx_buffers(p);
	octeon_mgmt_enable_tx_irq(p);
}

/* Fold the hardware RX drop/error counters into netdev->stats.  The
 * counter registers clear on read.
 */
static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;
	u64 drop, bad;

	/* These reads also clear the count registers. */
	drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
	bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

	if (drop || bad) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.rx_errors += bad;
		netdev->stats.rx_dropped += drop;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/* Fold the hardware TX error/collision counters into netdev->stats.
 * As with the RX counters, the registers clear on read.
 */
static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	unsigned long flags;

	union cvmx_agl_gmx_txx_stat0 s0;
	union cvmx_agl_gmx_txx_stat1 s1;

	/* These reads also clear the count registers. */
	s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
	s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

	if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
		/* Do an atomic update. */
		spin_lock_irqsave(&p->lock, flags);
		netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
		netdev->stats.collisions += s1.s.scol + s1.s.mcol;
		spin_unlock_irqrestore(&p->lock, flags);
	}
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
					 struct sk_buff **pskb)
{
	union mgmt_port_ring_entry re;

	/* Pull the descriptor the hardware just wrote back to memory. */
	dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
				ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
				DMA_BIDIRECTIONAL);

	re.d64 = p->rx_ring[p->rx_next];
	p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
	p->rx_current_fill--;
	*pskb = __skb_dequeue(&p->rx_list);

	/* NOTE(review): the unmap size here is a fixed
	 * ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, while the buffer was
	 * mapped in octeon_mgmt_rx_fill_ring() with
	 * mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN -- these
	 * differ when the MTU is non-default; confirm intent.
	 */
	dma_unmap_single(p->dev, re.s.addr,
			 ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
			 DMA_FROM_DEVICE);

	return re.d64;
}


/* Process one received packet, handling the good, split-across-buffers
 * and error completion codes.  Returns 0 if a packet was delivered up
 * the stack, nonzero otherwise.
 */
static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
	struct net_device *netdev = p->netdev;
	union cvmx_mixx_ircnt mix_ircnt;
	union mgmt_port_ring_entry re;
	struct sk_buff *skb;
	struct sk_buff *skb2;
	struct sk_buff *skb_new;
	union mgmt_port_ring_entry re2;
	int rc = 1;


	re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
	if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
		/* A good packet, send it up. */
		skb_put(skb, re.s.len);
good:
		/* Process the RX timestamp if it was recorded */
		if (p->has_rx_tstamp) {
			/* The first 8 bytes are the timestamp */
			u64 ns = *(u64 *)skb->data;
			struct skb_shared_hwtstamps *ts;
			ts = skb_hwtstamps(skb);
			ts->hwtstamp = ns_to_ktime(ns);
			__skb_pull(skb, 8);
		}
		skb->protocol = eth_type_trans(skb, netdev);
		netdev->stats.rx_packets++;
		netdev->stats.rx_bytes += skb->len;
		netif_receive_skb(skb);
		rc = 0;
	} else if (re.s.code == RING_ENTRY_CODE_MORE) {
		/* Packet split across skbs. This can happen if we
		 * increase the MTU. Buffers that are already in the
		 * rx ring can then end up being too small. As the rx
		 * ring is refilled, buffers sized for the new MTU
		 * will be used and we should go back to the normal
		 * non-split case.
		 */
		skb_put(skb, re.s.len);
		do {
			/* Coalesce each continuation buffer into a
			 * single, larger skb.
			 */
			re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
			if (re2.s.code != RING_ENTRY_CODE_MORE
				&& re2.s.code != RING_ENTRY_CODE_DONE)
				goto split_error;
			skb_put(skb2, re2.s.len);
			skb_new = skb_copy_expand(skb, 0, skb2->len,
						  GFP_ATOMIC);
			if (!skb_new)
				goto split_error;
			if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
					  skb2->len))
				goto split_error;
			skb_put(skb_new, skb2->len);
			dev_kfree_skb_any(skb);
			dev_kfree_skb_any(skb2);
			skb = skb_new;
		} while (re2.s.code == RING_ENTRY_CODE_MORE);
		goto good;
	} else {
		/* Some other error, discard it. */
		dev_kfree_skb_any(skb);
		/* Error statistics are accumulated in
		 * octeon_mgmt_update_rx_stats.
		 */
	}
	goto done;
split_error:
	/* Discard the whole mess. */
	dev_kfree_skb_any(skb);
	dev_kfree_skb_any(skb2);
	/* Drain any remaining continuation buffers of this packet. */
	while (re2.s.code == RING_ENTRY_CODE_MORE) {
		re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
		dev_kfree_skb_any(skb2);
	}
	netdev->stats.rx_errors++;

done:
	/* Tell the hardware we processed a packet. */
	mix_ircnt.u64 = 0;
	mix_ircnt.s.ircnt = 1;
	cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
	return rc;
}

/* Receive up to 'budget' packets; MIX_IRCNT tells us how many are
 * pending.  Returns the number of packets actually delivered.
 */
static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
	unsigned int work_done = 0;
	union cvmx_mixx_ircnt mix_ircnt;
	int rc;

	mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	while (work_done < budget && mix_ircnt.s.ircnt) {

		rc = octeon_mgmt_receive_one(p);
		if (!rc)
			work_done++;

		/* Check for more packets. */
		mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
	}

	/* Replace the buffers we just consumed. */
	octeon_mgmt_rx_fill_ring(p->netdev);

	return work_done;
}

/* NAPI poll callback: receive packets, and if the budget was not
 * exhausted, complete NAPI and re-enable the RX interrupt.
 */
static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	struct net_device *netdev = p->netdev;
	unsigned int work_done = 0;

	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* We stopped because no more packets were available. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	octeon_mgmt_update_rx_stats(netdev);

	return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_mixx_bist mix_bist;
	union cvmx_agl_gmx_bist agl_gmx_bist;

	/* Disable the MIX and wait for any in-flight operation to drain. */
	mix_ctl.u64 = 0;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	do {
		mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
	} while (mix_ctl.s.busy);
	/* Assert reset, read back to flush the write, then let the
	 * reset propagate.
	 */
	mix_ctl.s.reset = 1;
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
	cvmx_read_csr(p->mix + MIX_CTL);
	octeon_io_clk_delay(64);

	/* Report (but do not fail on) built-in self-test errors. */
	mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
	if (mix_bist.u64)
		dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
			 (unsigned long long)mix_bist.u64);

	agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (agl_gmx_bist.u64)
		dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
			 (unsigned long long)agl_gmx_bist.u64);
}

/* Accumulator used while programming the RX address-filter CAM.
 * cam[i] collects octet i of every address; cam_mask has one bit set
 * per valid CAM entry.
 */
struct octeon_mgmt_cam_state {
	u64 cam[6];
	u64 cam_mask;
	int cam_index;
};
/* Add one MAC address to the CAM accumulator: byte i of the address is
 * placed into cam[i] at the byte lane selected by the entry index.
 */
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
				      unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
	cs->cam_mask |= (1ULL << cs->cam_index);
	cs->cam_index++;
}

/* Program the MAC's RX address filter (CAM + mode bits) from the
 * netdev's flags, unicast and multicast lists.  Packet I/O is briefly
 * disabled while the filter registers are rewritten.
 */
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	/* The CAM has 8 entries; fall back to promiscuous-style
	 * operation when it cannot hold all unicast addresses.
	 */
	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/* One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast. */
		else
			multicast_mode = 0; /* 0 - Use CAM. */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1; /* Allow broadcast */

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
	cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}

/* ndo_set_mac_address: store the new address, then reprogram the RX
 * filter so the hardware accepts it.
 */
static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
	int r = eth_mac_addr(netdev, addr);

	if (r)
		return r;

	octeon_mgmt_set_rx_filtering(netdev);

	return 0;
}

/* ndo_change_mtu: update the MAC's maximum-frame and jabber limits for
 * the new MTU.  Buffers already in the RX ring keep their old size; the
 * RING_ENTRY_CODE_MORE path in octeon_mgmt_receive_one() handles that.
 */
static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM;

	netdev->mtu = new_mtu;

	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs);
	/* Jabber limit is rounded up to a multiple of 8. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
		       (size_without_fcs + 7) & 0xfff8);

	return 0;
}

/* Interrupt handler: acknowledge the MIX interrupt sources, then hand
 * RX work to NAPI and TX cleanup to the tasklet with the corresponding
 * interrupt masked until the deferred work re-enables it.
 */
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
667368bec0dSDavid Daney cvmx_read_csr(p->mix + MIX_ISR); 668d6aa60a1SDavid Daney 669d6aa60a1SDavid Daney if (mixx_isr.s.irthresh) { 670d6aa60a1SDavid Daney octeon_mgmt_disable_rx_irq(p); 671d6aa60a1SDavid Daney napi_schedule(&p->napi); 672d6aa60a1SDavid Daney } 673d6aa60a1SDavid Daney if (mixx_isr.s.orthresh) { 674d6aa60a1SDavid Daney octeon_mgmt_disable_tx_irq(p); 675d6aa60a1SDavid Daney tasklet_schedule(&p->tx_clean_tasklet); 676d6aa60a1SDavid Daney } 677d6aa60a1SDavid Daney 678d6aa60a1SDavid Daney return IRQ_HANDLED; 679d6aa60a1SDavid Daney } 680d6aa60a1SDavid Daney 6813d305850SChad Reese static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, 6823d305850SChad Reese struct ifreq *rq, int cmd) 6833d305850SChad Reese { 6843d305850SChad Reese struct octeon_mgmt *p = netdev_priv(netdev); 6853d305850SChad Reese struct hwtstamp_config config; 6863d305850SChad Reese union cvmx_mio_ptp_clock_cfg ptp; 6873d305850SChad Reese union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; 6883d305850SChad Reese bool have_hw_timestamps = false; 6893d305850SChad Reese 6903d305850SChad Reese if (copy_from_user(&config, rq->ifr_data, sizeof(config))) 6913d305850SChad Reese return -EFAULT; 6923d305850SChad Reese 6933d305850SChad Reese if (config.flags) /* reserved for future extensions */ 6943d305850SChad Reese return -EINVAL; 6953d305850SChad Reese 6963d305850SChad Reese /* Check the status of hardware for tiemstamps */ 6973d305850SChad Reese if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 6983d305850SChad Reese /* Get the current state of the PTP clock */ 6993d305850SChad Reese ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); 7003d305850SChad Reese if (!ptp.s.ext_clk_en) { 7013d305850SChad Reese /* The clock has not been configured to use an 7023d305850SChad Reese * external source. Program it to use the main clock 7033d305850SChad Reese * reference. 
7043d305850SChad Reese */ 7053d305850SChad Reese u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); 7063d305850SChad Reese if (!ptp.s.ptp_en) 7073d305850SChad Reese cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); 7083d305850SChad Reese pr_info("PTP Clock: Using sclk reference at %lld Hz\n", 7093d305850SChad Reese (NSEC_PER_SEC << 32) / clock_comp); 7103d305850SChad Reese } else { 7113d305850SChad Reese /* The clock is already programmed to use a GPIO */ 7123d305850SChad Reese u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); 7133d305850SChad Reese pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", 7143d305850SChad Reese ptp.s.ext_clk_in, 7153d305850SChad Reese (NSEC_PER_SEC << 32) / clock_comp); 7163d305850SChad Reese } 7173d305850SChad Reese 7183d305850SChad Reese /* Enable the clock if it wasn't done already */ 7193d305850SChad Reese if (!ptp.s.ptp_en) { 7203d305850SChad Reese ptp.s.ptp_en = 1; 7213d305850SChad Reese cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); 7223d305850SChad Reese } 7233d305850SChad Reese have_hw_timestamps = true; 7243d305850SChad Reese } 7253d305850SChad Reese 7263d305850SChad Reese if (!have_hw_timestamps) 7273d305850SChad Reese return -EINVAL; 7283d305850SChad Reese 7293d305850SChad Reese switch (config.tx_type) { 7303d305850SChad Reese case HWTSTAMP_TX_OFF: 7313d305850SChad Reese case HWTSTAMP_TX_ON: 7323d305850SChad Reese break; 7333d305850SChad Reese default: 7343d305850SChad Reese return -ERANGE; 7353d305850SChad Reese } 7363d305850SChad Reese 7373d305850SChad Reese switch (config.rx_filter) { 7383d305850SChad Reese case HWTSTAMP_FILTER_NONE: 7393d305850SChad Reese p->has_rx_tstamp = false; 7403d305850SChad Reese rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); 7413d305850SChad Reese rxx_frm_ctl.s.ptp_mode = 0; 7423d305850SChad Reese cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); 7433d305850SChad Reese break; 7443d305850SChad Reese case HWTSTAMP_FILTER_ALL: 7453d305850SChad 
Reese case HWTSTAMP_FILTER_SOME: 7463d305850SChad Reese case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 7473d305850SChad Reese case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 7483d305850SChad Reese case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 7493d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 7503d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 7513d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 7523d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: 7533d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 7543d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 7553d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_EVENT: 7563d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_SYNC: 7573d305850SChad Reese case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 7583d305850SChad Reese p->has_rx_tstamp = have_hw_timestamps; 7593d305850SChad Reese config.rx_filter = HWTSTAMP_FILTER_ALL; 7603d305850SChad Reese if (p->has_rx_tstamp) { 7613d305850SChad Reese rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); 7623d305850SChad Reese rxx_frm_ctl.s.ptp_mode = 1; 7633d305850SChad Reese cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); 7643d305850SChad Reese } 7653d305850SChad Reese break; 7663d305850SChad Reese default: 7673d305850SChad Reese return -ERANGE; 7683d305850SChad Reese } 7693d305850SChad Reese 7703d305850SChad Reese if (copy_to_user(rq->ifr_data, &config, sizeof(config))) 7713d305850SChad Reese return -EFAULT; 7723d305850SChad Reese 7733d305850SChad Reese return 0; 7743d305850SChad Reese } 7753d305850SChad Reese 776d6aa60a1SDavid Daney static int octeon_mgmt_ioctl(struct net_device *netdev, 777d6aa60a1SDavid Daney struct ifreq *rq, int cmd) 778d6aa60a1SDavid Daney { 7793d305850SChad Reese switch (cmd) { 7803d305850SChad Reese case SIOCSHWTSTAMP: 7813d305850SChad Reese return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); 7823d305850SChad Reese default: 7839e8e6e88SPhilippe Reynes if (netdev->phydev) 7849e8e6e88SPhilippe Reynes return 
phy_mii_ioctl(netdev->phydev, rq, cmd); 7853d305850SChad Reese return -EINVAL; 7863d305850SChad Reese } 787d6aa60a1SDavid Daney } 788d6aa60a1SDavid Daney 789eeae05aaSDavid Daney static void octeon_mgmt_disable_link(struct octeon_mgmt *p) 790eeae05aaSDavid Daney { 791eeae05aaSDavid Daney union cvmx_agl_gmx_prtx_cfg prtx_cfg; 792eeae05aaSDavid Daney 793eeae05aaSDavid Daney /* Disable GMX before we make any changes. */ 794eeae05aaSDavid Daney prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); 795eeae05aaSDavid Daney prtx_cfg.s.en = 0; 796eeae05aaSDavid Daney prtx_cfg.s.tx_en = 0; 797eeae05aaSDavid Daney prtx_cfg.s.rx_en = 0; 798eeae05aaSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); 799eeae05aaSDavid Daney 800eeae05aaSDavid Daney if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { 801eeae05aaSDavid Daney int i; 802eeae05aaSDavid Daney for (i = 0; i < 10; i++) { 803eeae05aaSDavid Daney prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); 804eeae05aaSDavid Daney if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) 805eeae05aaSDavid Daney break; 806eeae05aaSDavid Daney mdelay(1); 807eeae05aaSDavid Daney i++; 808eeae05aaSDavid Daney } 809eeae05aaSDavid Daney } 810eeae05aaSDavid Daney } 811eeae05aaSDavid Daney 812eeae05aaSDavid Daney static void octeon_mgmt_enable_link(struct octeon_mgmt *p) 813eeae05aaSDavid Daney { 814eeae05aaSDavid Daney union cvmx_agl_gmx_prtx_cfg prtx_cfg; 815eeae05aaSDavid Daney 816eeae05aaSDavid Daney /* Restore the GMX enable state only if link is set */ 817eeae05aaSDavid Daney prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); 818eeae05aaSDavid Daney prtx_cfg.s.tx_en = 1; 819eeae05aaSDavid Daney prtx_cfg.s.rx_en = 1; 820eeae05aaSDavid Daney prtx_cfg.s.en = 1; 821eeae05aaSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); 822eeae05aaSDavid Daney } 823eeae05aaSDavid Daney 824eeae05aaSDavid Daney static void octeon_mgmt_update_link(struct octeon_mgmt *p) 825eeae05aaSDavid Daney { 8269e8e6e88SPhilippe Reynes 
	struct net_device *ndev = p->netdev;
	struct phy_device *phydev = ndev->phydev;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;

	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	/* With no link, force full duplex; otherwise mirror the PHY. */
	if (!phydev->link)
		prtx_cfg.s.duplex = 1;
	else
		prtx_cfg.s.duplex = phydev->duplex;

	/* Encode the PHY speed into the GMX speed/slottime/burst fields. */
	switch (phydev->speed) {
	case 10:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 1;
		}
		break;
	case 100:
		prtx_cfg.s.speed = 0;
		prtx_cfg.s.slottime = 0;

		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.burst = 1;
			prtx_cfg.s.speed_msb = 0;
		}
		break;
	case 1000:
		/* 1000 MBits is only supported on 6XXX chips */
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
			prtx_cfg.s.speed = 1;
			prtx_cfg.s.speed_msb = 0;
			/* Only matters for half-duplex */
			prtx_cfg.s.slottime = 1;
			prtx_cfg.s.burst = phydev->duplex;
		}
		break;
	case 0: /* No link */
	default:
		break;
	}

	/* Write the new GMX setting with the port still disabled. */
	cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

	/* Read GMX CFG again to make sure the config is completed. */
	prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
		union cvmx_agl_gmx_txx_clk agl_clk;
		union cvmx_agl_prtx_ctl prtx_ctl;

		prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
		/* MII (both speeds) and RGMII 1000 speed. */
		agl_clk.s.clk_cnt = 1;
		if (prtx_ctl.s.mode == 0) { /* RGMII mode */
			if (phydev->speed == 10)
				agl_clk.s.clk_cnt = 50;
			else if (phydev->speed == 100)
				agl_clk.s.clk_cnt = 5;
		}
		cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
	}
}

/* PHY adjust_link callback: apply duplex/speed changes to the GMX port
 * under p->lock and log link transitions after dropping the lock.
 */
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = netdev->phydev;
	unsigned long flags;
	int link_changed = 0;	/* -1 = went down, 1 = up/changed, 0 = none */

	if (!phydev)
		return;

	spin_lock_irqsave(&p->lock, flags);


	if (!phydev->link && p->last_link)
		link_changed = -1;

	if (phydev->link &&
	    (p->last_duplex != phydev->duplex ||
	     p->last_link != phydev->link ||
	     p->last_speed != phydev->speed)) {
		/* Reprogram with the port disabled, then re-enable it. */
		octeon_mgmt_disable_link(p);
		link_changed = 1;
		octeon_mgmt_update_link(p);
		octeon_mgmt_enable_link(p);
	}

	p->last_link = phydev->link;
	p->last_speed = phydev->speed;
	p->last_duplex = phydev->duplex;

	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				phydev->speed,
				phydev->duplex == DUPLEX_FULL ?
				"Full" : "Half");
		} else {
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}

/* Connect to the PHY described in the device tree, if any. */
static int octeon_mgmt_init_phy(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	struct phy_device *phydev = NULL;

	if (octeon_is_simulation() || p->phy_np == NULL) {
		/* No PHYs in the simulator.
		 */
		netif_carrier_on(netdev);
		return 0;
	}

	phydev = of_phy_connect(netdev, p->phy_np,
				octeon_mgmt_adjust_link, 0,
				PHY_INTERFACE_MODE_MII);

	if (!phydev)
		return -ENODEV;

	return 0;
}

/* ndo_open: allocate and DMA-map the descriptor rings, bring the MIX/AGL
 * hardware out of reset, connect the PHY, set the interface mode
 * (RGMII/MII) and enable interrupts.  The ordering of the register
 * writes and delays below is deliberate — do not rearrange.
 */
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.
	 */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	/* NOTE(review): dma_map_single() result is not checked with
	 * dma_mapping_error() — presumably mapping cannot fail on this
	 * platform; confirm.
	 */
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed.
	 */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		/* NOTE(review): unbounded poll; assumes the hardware
		 * always deasserts reset.
		 */
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	/* Program the TX (output) and RX (input) ring base/size.  The
	 * hardware takes the physical address shifted right by 3.
	 */
	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	/* Re-apply the current MAC address and MTU to the hardware. */
	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
	mix_ctl.s.en = 1; /* Enable the port */
	mix_ctl.s.nbtarb = 0; /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII.
	 */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		/* A PHY advertising any 1000baseT mode implies RGMII. */
		int rgmii_mode = (netdev->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* MII clocks counts are based on the 125Mhz
		 * reference, which has an 8nS period. So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* For compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small. Recommended
		 * workaround is.
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O.
	 */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame. GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (netdev->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (netdev->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(netdev->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	/* NOTE(review): -ENOMEM is also returned for PHY/IRQ failures
	 * reaching err_noirq — consider propagating the real error.
	 */
	return -ENOMEM;
}

/* ndo_stop: tear down in the reverse order of open. */
static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p =
		netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (netdev->phydev)
		phy_disconnect(netdev->phydev);

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);

	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}

/* ndo_start_xmit: map the skb, append a descriptor to the TX ring under
 * the tx_list lock, then ring the doorbell.  Returns NETDEV_TX_BUSY
 * (with the mapping undone) when the ring is full.
 */
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	/* Stop the queue one entry early; the lock must be dropped
	 * around netif_stop_queue(), so the fill level is re-checked
	 * below after retaking it.
	 */
	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	/* Make the descriptor visible to the hardware before the bell. */
	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.
	 */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netif_trans_update(netdev);
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
/* Polled RX path for netconsole and friends. */
static void octeon_mgmt_poll_controller(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	octeon_mgmt_receive_packets(p, 16);
	octeon_mgmt_update_rx_stats(netdev);
}
#endif

static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
				    struct ethtool_drvinfo *info)
{
	strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
	strlcpy(info->version, DRV_VERSION, sizeof(info->version));
	strlcpy(info->fw_version, "N/A", sizeof(info->fw_version));
	strlcpy(info->bus_info, "N/A", sizeof(info->bus_info));
}

/* ethtool nway_reset: restart autonegotiation via the PHY. */
static int octeon_mgmt_nway_reset(struct net_device *dev)
{
	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (dev->phydev)
		return phy_start_aneg(dev->phydev);

	return -EOPNOTSUPP;
}

static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
	.get_drvinfo =
octeon_mgmt_get_drvinfo,
	.nway_reset = octeon_mgmt_nway_reset,
	.get_link = ethtool_op_get_link,
	.get_link_ksettings = phy_ethtool_get_link_ksettings,
	.set_link_ksettings = phy_ethtool_set_link_ksettings,
};

static const struct net_device_ops octeon_mgmt_ops = {
	.ndo_open = octeon_mgmt_open,
	.ndo_stop = octeon_mgmt_stop,
	.ndo_start_xmit = octeon_mgmt_xmit,
	.ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
	.ndo_set_mac_address = octeon_mgmt_set_mac_address,
	.ndo_do_ioctl = octeon_mgmt_ioctl,
	.ndo_change_mtu = octeon_mgmt_change_mtu,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = octeon_mgmt_poll_controller,
#endif
};

/* octeon_mgmt_probe - bind the driver to one MIX/AGL device node.
 * @pdev: the platform device created from the devicetree node.
 *
 * Allocates the net_device, reads the port index, IRQ and the three
 * register ranges from the platform resources, maps them, initializes
 * the software state (locks, skb queues, TX-clean tasklet) and
 * registers the interface.  Returns 0 on success or a negative errno;
 * all failures funnel through the err: label which releases the
 * phy-handle reference and the net_device.
 */
static int octeon_mgmt_probe(struct platform_device *pdev)
{
	struct net_device *netdev;
	struct octeon_mgmt *p;
	const __be32 *data;
	const u8 *mac;
	struct resource *res_mix;
	struct resource *res_agl;
	struct resource *res_agl_prt_ctl;
	int len;
	int result;

	netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
	if (netdev == NULL)
		return -ENOMEM;

	SET_NETDEV_DEV(netdev, &pdev->dev);

	platform_set_drvdata(pdev, netdev);
	p = netdev_priv(netdev);
	netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll,
		       OCTEON_MGMT_NAPI_WEIGHT);

	p->netdev = netdev;
	p->dev = &pdev->dev;
	p->has_rx_tstamp = false;

	/* "cell-index" selects which management port this node drives. */
	data = of_get_property(pdev->dev.of_node, "cell-index", &len);
	if (data && len == sizeof(*data)) {
		p->port = be32_to_cpup(data);
	} else {
		dev_err(&pdev->dev, "no 'cell-index' property\n");
		result = -ENXIO;
		goto err;
	}

	snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);

	result = platform_get_irq(pdev, 0);
	if (result < 0)
		goto err;

	p->irq = result;

	res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mix == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (res_agl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	/* NOTE(review): mem resource index 2 is skipped here and index 3
	 * used for AGL_PRT_CTL -- presumably dictated by the DT binding;
	 * confirm against the "cavium,octeon-5750-mix" binding.
	 */
	res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
	if (res_agl_prt_ctl == NULL) {
		dev_err(&pdev->dev, "no 'reg' resource\n");
		result = -ENXIO;
		goto err;
	}

	p->mix_phys = res_mix->start;
	p->mix_size = resource_size(res_mix);
	p->agl_phys = res_agl->start;
	p->agl_size = resource_size(res_agl);
	p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
	p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);


	if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
				     res_mix->name)) {
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_mix->name);
		result = -ENXIO;
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
				     res_agl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl->name);
		goto err;
	}

	if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
				     p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
		result = -ENXIO;
		dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
			res_agl_prt_ctl->name);
		goto err;
	}

	/* NOTE(review): the devm_ioremap() results are not checked for
	 * NULL before use -- worth confirming a failure here cannot
	 * reach the register accessors.
	 */
	p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
	p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
	p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
					   p->agl_prt_ctl_size);
	spin_lock_init(&p->lock);

	skb_queue_head_init(&p->tx_list);
	skb_queue_head_init(&p->rx_list);
	tasklet_init(&p->tx_clean_tasklet,
		     octeon_mgmt_clean_tx_tasklet, (unsigned long)p);

	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->netdev_ops = &octeon_mgmt_ops;
	netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;

	/* MTU limits are expressed before the VLAN+FCS headroom. */
	netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
	netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM;

	/* Use the devicetree MAC address, or a random one if absent. */
	mac = of_get_mac_address(pdev->dev.of_node);

	if (mac)
		memcpy(netdev->dev_addr, mac, ETH_ALEN);
	else
		eth_hw_addr_random(netdev);

	p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);

	result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (result)
		goto err;

	netif_carrier_off(netdev);
	result = register_netdev(netdev);
	if (result)
		goto err;

	dev_info(&pdev->dev, "Version " DRV_VERSION "\n");
	return 0;

err:
	/* phy_np is NULL-safe here; of_node_put(NULL) is a no-op. */
	of_node_put(p->phy_np);
	free_netdev(netdev);
1511368bec0dSDavid Daney return result; 1512d6aa60a1SDavid Daney } 1513d6aa60a1SDavid Daney 15145bc7ec70SBill Pemberton static int octeon_mgmt_remove(struct platform_device *pdev) 1515d6aa60a1SDavid Daney { 15168513fbd8SJingoo Han struct net_device *netdev = platform_get_drvdata(pdev); 151746997066SPeter Chen struct octeon_mgmt *p = netdev_priv(netdev); 1518d6aa60a1SDavid Daney 1519d6aa60a1SDavid Daney unregister_netdev(netdev); 152046997066SPeter Chen of_node_put(p->phy_np); 1521d6aa60a1SDavid Daney free_netdev(netdev); 1522d6aa60a1SDavid Daney return 0; 1523d6aa60a1SDavid Daney } 1524d6aa60a1SDavid Daney 1525437dab40SFabian Frederick static const struct of_device_id octeon_mgmt_match[] = { 1526368bec0dSDavid Daney { 1527368bec0dSDavid Daney .compatible = "cavium,octeon-5750-mix", 1528368bec0dSDavid Daney }, 1529368bec0dSDavid Daney {}, 1530368bec0dSDavid Daney }; 1531368bec0dSDavid Daney MODULE_DEVICE_TABLE(of, octeon_mgmt_match); 1532368bec0dSDavid Daney 1533d6aa60a1SDavid Daney static struct platform_driver octeon_mgmt_driver = { 1534d6aa60a1SDavid Daney .driver = { 1535d6aa60a1SDavid Daney .name = "octeon_mgmt", 1536368bec0dSDavid Daney .of_match_table = octeon_mgmt_match, 1537d6aa60a1SDavid Daney }, 1538d6aa60a1SDavid Daney .probe = octeon_mgmt_probe, 15395bc7ec70SBill Pemberton .remove = octeon_mgmt_remove, 1540d6aa60a1SDavid Daney }; 1541d6aa60a1SDavid Daney 1542d6aa60a1SDavid Daney extern void octeon_mdiobus_force_mod_depencency(void); 1543d6aa60a1SDavid Daney 1544d6aa60a1SDavid Daney static int __init octeon_mgmt_mod_init(void) 1545d6aa60a1SDavid Daney { 1546d6aa60a1SDavid Daney /* Force our mdiobus driver module to be loaded first. 
*/ 1547d6aa60a1SDavid Daney octeon_mdiobus_force_mod_depencency(); 1548d6aa60a1SDavid Daney return platform_driver_register(&octeon_mgmt_driver); 1549d6aa60a1SDavid Daney } 1550d6aa60a1SDavid Daney 1551d6aa60a1SDavid Daney static void __exit octeon_mgmt_mod_exit(void) 1552d6aa60a1SDavid Daney { 1553d6aa60a1SDavid Daney platform_driver_unregister(&octeon_mgmt_driver); 1554d6aa60a1SDavid Daney } 1555d6aa60a1SDavid Daney 1556d6aa60a1SDavid Daney module_init(octeon_mgmt_mod_init); 1557d6aa60a1SDavid Daney module_exit(octeon_mgmt_mod_exit); 1558d6aa60a1SDavid Daney 1559d6aa60a1SDavid Daney MODULE_DESCRIPTION(DRV_DESCRIPTION); 1560d6aa60a1SDavid Daney MODULE_AUTHOR("David Daney"); 1561d6aa60a1SDavid Daney MODULE_LICENSE("GPL"); 1562d6aa60a1SDavid Daney MODULE_VERSION(DRV_VERSION); 1563