/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2009-2012 Cavium, Inc
 */

#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/capability.h>
#include <linux/net_tstamp.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/spinlock.h>
#include <linux/if_vlan.h>
#include <linux/of_mdio.h>
#include <linux/module.h>
#include <linux/of_net.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/phy.h>
#include <linux/io.h>

#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-mixx-defs.h>
#include <asm/octeon/cvmx-agl-defs.h>

#define DRV_NAME "octeon_mgmt"
#define DRV_DESCRIPTION \
        "Cavium Networks Octeon MII (management) port Network Driver"

#define OCTEON_MGMT_NAPI_WEIGHT 16

/* Ring sizes that are powers of two allow for more efficient modulo
 * operations.
 */
#define OCTEON_MGMT_RX_RING_SIZE 512
#define OCTEON_MGMT_TX_RING_SIZE 128
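/* For example, with a ring size of 512 the compiler can reduce
 * (i + 1) % OCTEON_MGMT_RX_RING_SIZE to (i + 1) & 511, avoiding an
 * integer division on the hot path.
 */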

/* Allow 8 extra bytes beyond the Ethernet header for a VLAN tag and the FCS. */
#define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN)
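/* ETH_HLEN (14) + ETH_FCS_LEN (4) + VLAN_HLEN (4) = 22 bytes of headroom. */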

union mgmt_port_ring_entry {
        u64 d64;
        struct {
#define RING_ENTRY_CODE_DONE 0xf
#define RING_ENTRY_CODE_MORE 0x10
#ifdef __BIG_ENDIAN_BITFIELD
                u64 reserved_62_63:2;
                /* Length of the buffer/packet in bytes */
                u64 len:14;
                /* For TX, signals that the packet should be timestamped */
                u64 tstamp:1;
                /* The RX error code */
                u64 code:7;
                /* Physical address of the buffer */
                u64 addr:40;
#else
                u64 addr:40;
                u64 code:7;
                u64 tstamp:1;
                u64 len:14;
                u64 reserved_62_63:2;
#endif
        } s;
};
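/* Descriptor layout (bit positions within d64): addr in [39:0], code
 * in [46:40], tstamp in bit 47, len in [61:48], bits [63:62] reserved.
 */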

#define MIX_ORING1 0x0
#define MIX_ORING2 0x8
#define MIX_IRING1 0x10
#define MIX_IRING2 0x18
#define MIX_CTL 0x20
#define MIX_IRHWM 0x28
#define MIX_IRCNT 0x30
#define MIX_ORHWM 0x38
#define MIX_ORCNT 0x40
#define MIX_ISR 0x48
#define MIX_INTENA 0x50
#define MIX_REMCNT 0x58
#define MIX_BIST 0x78

#define AGL_GMX_PRT_CFG 0x10
#define AGL_GMX_RX_FRM_CTL 0x18
#define AGL_GMX_RX_FRM_MAX 0x30
#define AGL_GMX_RX_JABBER 0x38
#define AGL_GMX_RX_STATS_CTL 0x50

#define AGL_GMX_RX_STATS_PKTS_DRP 0xb0
#define AGL_GMX_RX_STATS_OCTS_DRP 0xb8
#define AGL_GMX_RX_STATS_PKTS_BAD 0xc0

#define AGL_GMX_RX_ADR_CTL 0x100
#define AGL_GMX_RX_ADR_CAM_EN 0x108
#define AGL_GMX_RX_ADR_CAM0 0x180
#define AGL_GMX_RX_ADR_CAM1 0x188
#define AGL_GMX_RX_ADR_CAM2 0x190
#define AGL_GMX_RX_ADR_CAM3 0x198
#define AGL_GMX_RX_ADR_CAM4 0x1a0
#define AGL_GMX_RX_ADR_CAM5 0x1a8

#define AGL_GMX_TX_CLK 0x208
#define AGL_GMX_TX_STATS_CTL 0x268
#define AGL_GMX_TX_CTL 0x270
#define AGL_GMX_TX_STAT0 0x280
#define AGL_GMX_TX_STAT1 0x288
#define AGL_GMX_TX_STAT2 0x290
#define AGL_GMX_TX_STAT3 0x298
#define AGL_GMX_TX_STAT4 0x2a0
#define AGL_GMX_TX_STAT5 0x2a8
#define AGL_GMX_TX_STAT6 0x2b0
#define AGL_GMX_TX_STAT7 0x2b8
#define AGL_GMX_TX_STAT8 0x2c0
#define AGL_GMX_TX_STAT9 0x2c8
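/* The MIX_* and AGL_GMX_* values above are byte offsets applied to the
 * per-port CSR base addresses held in p->mix and p->agl respectively,
 * as seen in the cvmx_read_csr()/cvmx_write_csr() calls below.
 */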

struct octeon_mgmt {
        struct net_device *netdev;
        u64 mix;
        u64 agl;
        u64 agl_prt_ctl;
        int port;
        int irq;
        bool has_rx_tstamp;
        u64 *tx_ring;
        dma_addr_t tx_ring_handle;
        unsigned int tx_next;
        unsigned int tx_next_clean;
        unsigned int tx_current_fill;
        /* The tx_list lock also protects the ring related variables */
        struct sk_buff_head tx_list;

        /* RX variables only touched in napi_poll. No locking necessary. */
        u64 *rx_ring;
        dma_addr_t rx_ring_handle;
        unsigned int rx_next;
        unsigned int rx_next_fill;
        unsigned int rx_current_fill;
        struct sk_buff_head rx_list;

        spinlock_t lock;
        unsigned int last_duplex;
        unsigned int last_link;
        unsigned int last_speed;
        struct device *dev;
        struct napi_struct napi;
        struct tasklet_struct tx_clean_tasklet;
        struct device_node *phy_np;
        resource_size_t mix_phys;
        resource_size_t mix_size;
        resource_size_t agl_phys;
        resource_size_t agl_size;
        resource_size_t agl_prt_ctl_phys;
        resource_size_t agl_prt_ctl_size;
};

static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.ithena = enable ? 1 : 0;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable)
{
        union cvmx_mixx_intena mix_intena;
        unsigned long flags;

        spin_lock_irqsave(&p->lock, flags);
        mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA);
        mix_intena.s.othena = enable ? 1 : 0;
        cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
        spin_unlock_irqrestore(&p->lock, flags);
}

static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 1);
}

static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_rx_irq(p, 0);
}

static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 1);
}

static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p)
{
        octeon_mgmt_set_tx_irq(p, 0);
}

static unsigned int ring_max_fill(unsigned int ring_size)
{
        return ring_size - 8;
}
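/* Keeping 8 slots unused presumably guarantees the hardware never sees
 * a completely full ring, so the producer and consumer indices cannot
 * alias (an assumption; the datasheet rationale is not spelled out here).
 */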

static unsigned int ring_size_to_bytes(unsigned int ring_size)
{
        return ring_size * sizeof(union mgmt_port_ring_entry);
}

static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);

        while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) {
                unsigned int size;
                union mgmt_port_ring_entry re;
                struct sk_buff *skb;

                /* CN56XX pass 1 needs 8 bytes of padding. */
                size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN;

                skb = netdev_alloc_skb(netdev, size);
                if (!skb)
                        break;
                skb_reserve(skb, NET_IP_ALIGN);
                __skb_queue_tail(&p->rx_list, skb);

                re.d64 = 0;
                re.s.len = size;
                re.s.addr = dma_map_single(p->dev, skb->data,
                                           size,
                                           DMA_FROM_DEVICE);

                /* Put it in the ring. */
                p->rx_ring[p->rx_next_fill] = re.d64;
                /* Make sure there is no reorder of filling the ring and
                 * ringing the bell.
                 */
                wmb();

                dma_sync_single_for_device(p->dev, p->rx_ring_handle,
                                           ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                           DMA_BIDIRECTIONAL);
                p->rx_next_fill =
                        (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE;
                p->rx_current_fill++;
                /* Ring the bell. */
                cvmx_write_csr(p->mix + MIX_IRING2, 1);
        }
}

static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
{
        union cvmx_mixx_orcnt mix_orcnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        int cleaned = 0;
        unsigned long flags;

        mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        while (mix_orcnt.s.orcnt) {
                spin_lock_irqsave(&p->tx_list.lock, flags);

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);

                if (mix_orcnt.s.orcnt == 0) {
                        spin_unlock_irqrestore(&p->tx_list.lock, flags);
                        break;
                }

                dma_sync_single_for_cpu(p->dev, p->tx_ring_handle,
                                        ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
                                        DMA_BIDIRECTIONAL);

                re.d64 = p->tx_ring[p->tx_next_clean];
                p->tx_next_clean =
                        (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE;
                skb = __skb_dequeue(&p->tx_list);

                mix_orcnt.u64 = 0;
                mix_orcnt.s.orcnt = 1;

                /* Acknowledge to hardware that we have the buffer. */
                cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64);
                p->tx_current_fill--;

                spin_unlock_irqrestore(&p->tx_list.lock, flags);

                dma_unmap_single(p->dev, re.s.addr, re.s.len,
                                 DMA_TO_DEVICE);

                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
                        u64 ns;

                        memset(&ts, 0, sizeof(ts));
                        /* Read the timestamp */
                        ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
                        ts.hwtstamp = ns_to_ktime(ns);
                        skb_tstamp_tx(skb, &ts);
                }

                dev_kfree_skb_any(skb);
                cleaned++;

                mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT);
        }

        if (cleaned && netif_queue_stopped(p->netdev))
                netif_wake_queue(p->netdev);
}

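/* TX completion is deferred: the interrupt handler masks the TX
 * interrupt and schedules this tasklet, which drains the finished
 * buffers and then re-enables the interrupt.
 */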
static void octeon_mgmt_clean_tx_tasklet(struct tasklet_struct *t)
{
        struct octeon_mgmt *p = from_tasklet(p, t, tx_clean_tasklet);
        octeon_mgmt_clean_tx_buffers(p);
        octeon_mgmt_enable_tx_irq(p);
}

static void octeon_mgmt_update_rx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;
        u64 drop, bad;

        /* These reads also clear the count registers. */
        drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP);
        bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD);

        if (drop || bad) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.rx_errors += bad;
                netdev->stats.rx_dropped += drop;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

static void octeon_mgmt_update_tx_stats(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        unsigned long flags;

        union cvmx_agl_gmx_txx_stat0 s0;
        union cvmx_agl_gmx_txx_stat1 s1;

        /* These reads also clear the count registers. */
        s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0);
        s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1);

        if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) {
                /* Do an atomic update. */
                spin_lock_irqsave(&p->lock, flags);
                netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol;
                netdev->stats.collisions += s1.s.scol + s1.s.mcol;
                spin_unlock_irqrestore(&p->lock, flags);
        }
}

/*
 * Dequeue a receive skb and its corresponding ring entry. The ring
 * entry is returned, *pskb is updated to point to the skb.
 */
static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p,
                                         struct sk_buff **pskb)
{
        union mgmt_port_ring_entry re;

        dma_sync_single_for_cpu(p->dev, p->rx_ring_handle,
                                ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
                                DMA_BIDIRECTIONAL);

        re.d64 = p->rx_ring[p->rx_next];
        p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE;
        p->rx_current_fill--;
        *pskb = __skb_dequeue(&p->rx_list);

        dma_unmap_single(p->dev, re.s.addr,
                         ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM,
                         DMA_FROM_DEVICE);

        return re.d64;
}

static int octeon_mgmt_receive_one(struct octeon_mgmt *p)
{
        struct net_device *netdev = p->netdev;
        union cvmx_mixx_ircnt mix_ircnt;
        union mgmt_port_ring_entry re;
        struct sk_buff *skb;
        struct sk_buff *skb2;
        struct sk_buff *skb_new;
        union mgmt_port_ring_entry re2;
        int rc = 1;

        re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb);
        if (likely(re.s.code == RING_ENTRY_CODE_DONE)) {
                /* A good packet, send it up. */
                skb_put(skb, re.s.len);
good:
                /* Process the RX timestamp if it was recorded */
                if (p->has_rx_tstamp) {
                        /* The first 8 bytes are the timestamp */
                        u64 ns = *(u64 *)skb->data;
                        struct skb_shared_hwtstamps *ts;
                        ts = skb_hwtstamps(skb);
                        ts->hwtstamp = ns_to_ktime(ns);
                        __skb_pull(skb, 8);
                }
                skb->protocol = eth_type_trans(skb, netdev);
                netdev->stats.rx_packets++;
                netdev->stats.rx_bytes += skb->len;
                netif_receive_skb(skb);
                rc = 0;
        } else if (re.s.code == RING_ENTRY_CODE_MORE) {
                /* Packet split across skbs. This can happen if we
                 * increase the MTU. Buffers that are already in the
                 * rx ring can then end up being too small. As the rx
                 * ring is refilled, buffers sized for the new MTU
                 * will be used and we should go back to the normal
                 * non-split case.
                 */
                skb_put(skb, re.s.len);
                do {
                        re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                        if (re2.s.code != RING_ENTRY_CODE_MORE &&
                            re2.s.code != RING_ENTRY_CODE_DONE)
                                goto split_error;
                        skb_put(skb2, re2.s.len);
                        skb_new = skb_copy_expand(skb, 0, skb2->len,
                                                  GFP_ATOMIC);
                        if (!skb_new)
                                goto split_error;
                        if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new),
                                          skb2->len))
                                goto split_error;
                        skb_put(skb_new, skb2->len);
                        dev_kfree_skb_any(skb);
                        dev_kfree_skb_any(skb2);
                        skb = skb_new;
                } while (re2.s.code == RING_ENTRY_CODE_MORE);
                goto good;
        } else {
                /* Some other error, discard it. */
                dev_kfree_skb_any(skb);
                /* Error statistics are accumulated in
                 * octeon_mgmt_update_rx_stats.
                 */
        }
        goto done;
split_error:
        /* Discard the whole mess. */
        dev_kfree_skb_any(skb);
        dev_kfree_skb_any(skb2);
        while (re2.s.code == RING_ENTRY_CODE_MORE) {
                re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2);
                dev_kfree_skb_any(skb2);
        }
        netdev->stats.rx_errors++;

done:
        /* Tell the hardware we processed a packet. */
        mix_ircnt.u64 = 0;
        mix_ircnt.s.ircnt = 1;
        cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64);
        return rc;
}

static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget)
{
        unsigned int work_done = 0;
        union cvmx_mixx_ircnt mix_ircnt;
        int rc;

        mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
        while (work_done < budget && mix_ircnt.s.ircnt) {

                rc = octeon_mgmt_receive_one(p);
                if (!rc)
                        work_done++;

                /* Check for more packets. */
                mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT);
        }

        octeon_mgmt_rx_fill_ring(p->netdev);

        return work_done;
}

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
        struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
        struct net_device *netdev = p->netdev;
        unsigned int work_done = 0;

        work_done = octeon_mgmt_receive_packets(p, budget);

        if (work_done < budget) {
                /* We stopped because no more packets were available. */
                napi_complete_done(napi, work_done);
                octeon_mgmt_enable_rx_irq(p);
        }
        octeon_mgmt_update_rx_stats(netdev);

        return work_done;
}

/* Reset the hardware to clean state. */
static void octeon_mgmt_reset_hw(struct octeon_mgmt *p)
{
        union cvmx_mixx_ctl mix_ctl;
        union cvmx_mixx_bist mix_bist;
        union cvmx_agl_gmx_bist agl_gmx_bist;

        mix_ctl.u64 = 0;
        cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
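        /* MIX_CTL was just cleared (disabling the engine); spin until
         * the BUSY flag drops so it is idle before reset is asserted.
         */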
        do {
                mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
        } while (mix_ctl.s.busy);
        mix_ctl.s.reset = 1;
        cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
        cvmx_read_csr(p->mix + MIX_CTL);
        octeon_io_clk_delay(64);

        mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST);
        if (mix_bist.u64)
                dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n",
                         (unsigned long long)mix_bist.u64);

        agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
        if (agl_gmx_bist.u64)
                dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n",
                         (unsigned long long)agl_gmx_bist.u64);
}

struct octeon_mgmt_cam_state {
        u64 cam[6];
        u64 cam_mask;
        int cam_index;
};

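/* The CAM holds up to eight 6-byte MAC addresses in transposed form:
 * cam[i] collects byte i of every entry (one byte lane per entry
 * index), and cam_mask carries one enable bit per entry.
 */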
static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs,
                                      const unsigned char *addr)
{
        int i;

        for (i = 0; i < 6; i++)
                cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index));
        cs->cam_mask |= (1ULL << cs->cam_index);
        cs->cam_index++;
}

static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
        union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
        unsigned long flags;
        unsigned int prev_packet_enable;
        unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
        unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */
        struct octeon_mgmt_cam_state cam_state;
        struct netdev_hw_addr *ha;
        int available_cam_entries;

        memset(&cam_state, 0, sizeof(cam_state));

        if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
                cam_mode = 0;
                available_cam_entries = 8;
        } else {
                /* One CAM entry for the primary address, leaves seven
                 * for the secondary addresses.
                 */
                available_cam_entries = 7 - netdev->uc.count;
        }

        if (netdev->flags & IFF_MULTICAST) {
                if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
                    netdev_mc_count(netdev) > available_cam_entries)
                        multicast_mode = 2; /* 2 - Accept all multicast. */
                else
                        multicast_mode = 0; /* 0 - Use CAM. */
        }

        if (cam_mode == 1) {
                /* Add primary address. */
                octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
                netdev_for_each_uc_addr(ha, netdev)
                        octeon_mgmt_cam_state_add(&cam_state, ha->addr);
        }
        if (multicast_mode == 0) {
                netdev_for_each_mc_addr(ha, netdev)
                        octeon_mgmt_cam_state_add(&cam_state, ha->addr);
        }

        spin_lock_irqsave(&p->lock, flags);

        /* Disable packet I/O. */
        agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prev_packet_enable = agl_gmx_prtx.s.en;
        agl_gmx_prtx.s.en = 0;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

        adr_ctl.u64 = 0;
        adr_ctl.s.cam_mode = cam_mode;
        adr_ctl.s.mcst = multicast_mode;
        adr_ctl.s.bcst = 1;     /* Allow broadcast */

        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64);

        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]);
        cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask);

        /* Restore packet I/O. */
        agl_gmx_prtx.s.en = prev_packet_enable;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64);

        spin_unlock_irqrestore(&p->lock, flags);
}

static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr)
{
        int r = eth_mac_addr(netdev, addr);

        if (r)
                return r;

        octeon_mgmt_set_rx_filtering(netdev);

        return 0;
}

static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        int max_packet = new_mtu + ETH_HLEN + ETH_FCS_LEN;

        WRITE_ONCE(netdev->mtu, new_mtu);

        /* The hardware lifts this limit if the frame is VLAN tagged
         * (+4 bytes per tag, up to two tags).
         */
        cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, max_packet);
        /* Set the hardware to truncate packets larger than the MTU. The jabber
         * register must be set to a multiple of 8 bytes, so round up. JABBER is
         * an unconditional limit, so we need to account for two possible VLAN
         * tags.
         */
        cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER,
                       (max_packet + 7 + VLAN_HLEN * 2) & 0xfff8);
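        /* Worked example (illustrative): for new_mtu = 1500, max_packet
         * is 1518, and the jabber limit is (1518 + 7 + 8) & 0xfff8 = 1528.
         */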

        return 0;
}

static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
        struct net_device *netdev = dev_id;
        struct octeon_mgmt *p = netdev_priv(netdev);
        union cvmx_mixx_isr mixx_isr;

        mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR);

        /* Clear any pending interrupts */
        cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64);
        cvmx_read_csr(p->mix + MIX_ISR);

        if (mixx_isr.s.irthresh) {
                octeon_mgmt_disable_rx_irq(p);
                napi_schedule(&p->napi);
        }
        if (mixx_isr.s.orthresh) {
                octeon_mgmt_disable_tx_irq(p);
                tasklet_schedule(&p->tx_clean_tasklet);
        }

        return IRQ_HANDLED;
}

static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev,
                                      struct ifreq *rq, int cmd)
{
        struct octeon_mgmt *p = netdev_priv(netdev);
        struct hwtstamp_config config;
        union cvmx_mio_ptp_clock_cfg ptp;
        union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
        bool have_hw_timestamps = false;

        if (copy_from_user(&config, rq->ifr_data, sizeof(config)))
                return -EFAULT;

        /* Check the status of hardware for timestamps */
        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                /* Get the current state of the PTP clock */
                ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG);
                if (!ptp.s.ext_clk_en) {
                        /* The clock has not been configured to use an
                         * external source. Program it to use the main clock
                         * reference.
                         */
                        u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate();
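                        /* CLOCK_COMP holds a 32.32 fixed-point rate:
                         * nanoseconds advanced per I/O-clock tick, hence
                         * (10^9 << 32) / clock rate.
                         */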
                        if (!ptp.s.ptp_en)
                                cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp);
                        netdev_info(netdev,
                                    "PTP Clock using sclk reference @ %lldHz\n",
                                    (NSEC_PER_SEC << 32) / clock_comp);
                } else {
                        /* The clock is already programmed to use a GPIO */
                        u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP);
                        netdev_info(netdev,
                                    "PTP Clock using GPIO%d @ %lld Hz\n",
                                    ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp);
                }

                /* Enable the clock if it wasn't done already */
                if (!ptp.s.ptp_en) {
                        ptp.s.ptp_en = 1;
                        cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64);
                }
                have_hw_timestamps = true;
        }

        if (!have_hw_timestamps)
                return -EINVAL;

        switch (config.tx_type) {
        case HWTSTAMP_TX_OFF:
        case HWTSTAMP_TX_ON:
                break;
        default:
                return -ERANGE;
        }

        switch (config.rx_filter) {
        case HWTSTAMP_FILTER_NONE:
                p->has_rx_tstamp = false;
                rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
                rxx_frm_ctl.s.ptp_mode = 0;
                cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
                break;
        case HWTSTAMP_FILTER_ALL:
        case HWTSTAMP_FILTER_SOME:
        case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
        case HWTSTAMP_FILTER_PTP_V2_EVENT:
        case HWTSTAMP_FILTER_PTP_V2_SYNC:
        case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
        case HWTSTAMP_FILTER_NTP_ALL:
                p->has_rx_tstamp = have_hw_timestamps;
                config.rx_filter = HWTSTAMP_FILTER_ALL;
                if (p->has_rx_tstamp) {
                        rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL);
                        rxx_frm_ctl.s.ptp_mode = 1;
                        cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
                }
                break;
        default:
                return -ERANGE;
        }

        if (copy_to_user(rq->ifr_data, &config, sizeof(config)))
                return -EFAULT;

        return 0;
}

static int octeon_mgmt_ioctl(struct net_device *netdev,
                             struct ifreq *rq, int cmd)
{
        switch (cmd) {
        case SIOCSHWTSTAMP:
                return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd);
        default:
                return phy_do_ioctl(netdev, rq, cmd);
        }
}

static void octeon_mgmt_disable_link(struct octeon_mgmt *p)
{
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        /* Disable GMX before we make any changes. */
        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prtx_cfg.s.en = 0;
        prtx_cfg.s.tx_en = 0;
        prtx_cfg.s.rx_en = 0;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                int i;
                for (i = 0; i < 10; i++) {
                        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
                        if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1)
                                break;
                        mdelay(1);
                }
        }
}

static void octeon_mgmt_enable_link(struct octeon_mgmt *p)
{
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        /* Restore the GMX enable state only if link is set */
        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);
        prtx_cfg.s.tx_en = 1;
        prtx_cfg.s.rx_en = 1;
        prtx_cfg.s.en = 1;
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);
}

static void octeon_mgmt_update_link(struct octeon_mgmt *p)
{
        struct net_device *ndev = p->netdev;
        struct phy_device *phydev = ndev->phydev;
        union cvmx_agl_gmx_prtx_cfg prtx_cfg;

        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

        if (!phydev->link)
                prtx_cfg.s.duplex = 1;
        else
                prtx_cfg.s.duplex = phydev->duplex;

        switch (phydev->speed) {
        case 10:
                prtx_cfg.s.speed = 0;
                prtx_cfg.s.slottime = 0;

                if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        prtx_cfg.s.burst = 1;
                        prtx_cfg.s.speed_msb = 1;
                }
                break;
        case 100:
                prtx_cfg.s.speed = 0;
                prtx_cfg.s.slottime = 0;

                if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        prtx_cfg.s.burst = 1;
                        prtx_cfg.s.speed_msb = 0;
                }
                break;
        case 1000:
                /* 1000 Mbit is only supported on 6XXX chips */
                if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                        prtx_cfg.s.speed = 1;
                        prtx_cfg.s.speed_msb = 0;
                        /* Only matters for half-duplex */
                        prtx_cfg.s.slottime = 1;
                        prtx_cfg.s.burst = phydev->duplex;
                }
                break;
        case 0: /* No link */
        default:
                break;
        }

        /* Write the new GMX setting with the port still disabled. */
        cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64);

        /* Read GMX CFG again to make sure the config is completed. */
        prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG);

        if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) {
                union cvmx_agl_gmx_txx_clk agl_clk;
                union cvmx_agl_prtx_ctl prtx_ctl;

                prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
                agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK);
                /* MII (both speeds) and RGMII 1000 speed. */
                agl_clk.s.clk_cnt = 1;
                if (prtx_ctl.s.mode == 0) { /* RGMII mode */
                        if (phydev->speed == 10)
                                agl_clk.s.clk_cnt = 50;
                        else if (phydev->speed == 100)
                                agl_clk.s.clk_cnt = 5;
                }
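                /* clk_cnt apparently divides the 125 MHz reference down
                 * to the RGMII TX clock: /50 = 2.5 MHz for 10 Mbit,
                 * /5 = 25 MHz for 100 Mbit, /1 = 125 MHz for gigabit.
                 */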
900eeae05aaSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64);
901eeae05aaSDavid Daney }
902d6aa60a1SDavid Daney }
903d6aa60a1SDavid Daney
octeon_mgmt_adjust_link(struct net_device * netdev)904d6aa60a1SDavid Daney static void octeon_mgmt_adjust_link(struct net_device *netdev)
905d6aa60a1SDavid Daney {
906d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
9079e8e6e88SPhilippe Reynes struct phy_device *phydev = netdev->phydev;
908d6aa60a1SDavid Daney unsigned long flags;
909d6aa60a1SDavid Daney int link_changed = 0;
910d6aa60a1SDavid Daney
9119e8e6e88SPhilippe Reynes if (!phydev)
912eeae05aaSDavid Daney return;
913eeae05aaSDavid Daney
914d6aa60a1SDavid Daney spin_lock_irqsave(&p->lock, flags);
915eeae05aaSDavid Daney
916eeae05aaSDavid Daney
9179e8e6e88SPhilippe Reynes if (!phydev->link && p->last_link)
918d6aa60a1SDavid Daney link_changed = -1;
919eeae05aaSDavid Daney
9209e8e6e88SPhilippe Reynes if (phydev->link &&
9219e8e6e88SPhilippe Reynes (p->last_duplex != phydev->duplex ||
9229e8e6e88SPhilippe Reynes p->last_link != phydev->link ||
9239e8e6e88SPhilippe Reynes p->last_speed != phydev->speed)) {
924eeae05aaSDavid Daney octeon_mgmt_disable_link(p);
925eeae05aaSDavid Daney link_changed = 1;
926eeae05aaSDavid Daney octeon_mgmt_update_link(p);
927eeae05aaSDavid Daney octeon_mgmt_enable_link(p);
928d6aa60a1SDavid Daney }
929eeae05aaSDavid Daney
9309e8e6e88SPhilippe Reynes p->last_link = phydev->link;
9319e8e6e88SPhilippe Reynes p->last_speed = phydev->speed;
9329e8e6e88SPhilippe Reynes p->last_duplex = phydev->duplex;
933eeae05aaSDavid Daney
934d6aa60a1SDavid Daney spin_unlock_irqrestore(&p->lock, flags);
935d6aa60a1SDavid Daney
936d6aa60a1SDavid Daney if (link_changed != 0) {
9371769af43SSteven J. Hill if (link_changed > 0)
9381769af43SSteven J. Hill netdev_info(netdev, "Link is up - %d/%s\n",
9391769af43SSteven J. Hill phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half");
9401769af43SSteven J. Hill else
9411769af43SSteven J. Hill netdev_info(netdev, "Link is down\n");
942d6aa60a1SDavid Daney }
943d6aa60a1SDavid Daney }
944d6aa60a1SDavid Daney
octeon_mgmt_init_phy(struct net_device * netdev)945d6aa60a1SDavid Daney static int octeon_mgmt_init_phy(struct net_device *netdev)
946d6aa60a1SDavid Daney {
947d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
9489e8e6e88SPhilippe Reynes struct phy_device *phydev = NULL;
949d6aa60a1SDavid Daney
950368bec0dSDavid Daney if (octeon_is_simulation() || p->phy_np == NULL) {
951d6aa60a1SDavid Daney /* No PHYs in the simulator. */
952d6aa60a1SDavid Daney netif_carrier_on(netdev);
953d6aa60a1SDavid Daney return 0;
954d6aa60a1SDavid Daney }
955d6aa60a1SDavid Daney
9569e8e6e88SPhilippe Reynes phydev = of_phy_connect(netdev, p->phy_np,
957368bec0dSDavid Daney octeon_mgmt_adjust_link, 0,
958d6aa60a1SDavid Daney PHY_INTERFACE_MODE_MII);
959d6aa60a1SDavid Daney
9609e8e6e88SPhilippe Reynes if (!phydev)
961791e5f61SAndrew Lunn return -EPROBE_DEFER;
962d6aa60a1SDavid Daney
963d6aa60a1SDavid Daney return 0;
964d6aa60a1SDavid Daney }
965d6aa60a1SDavid Daney
octeon_mgmt_open(struct net_device * netdev)966d6aa60a1SDavid Daney static int octeon_mgmt_open(struct net_device *netdev)
967d6aa60a1SDavid Daney {
968d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
969d6aa60a1SDavid Daney union cvmx_mixx_ctl mix_ctl;
970d6aa60a1SDavid Daney union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
971d6aa60a1SDavid Daney union cvmx_mixx_oring1 oring1;
972d6aa60a1SDavid Daney union cvmx_mixx_iring1 iring1;
973d6aa60a1SDavid Daney union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
974d6aa60a1SDavid Daney union cvmx_mixx_irhwm mix_irhwm;
975d6aa60a1SDavid Daney union cvmx_mixx_orhwm mix_orhwm;
976d6aa60a1SDavid Daney union cvmx_mixx_intena mix_intena;
977d6aa60a1SDavid Daney struct sockaddr sa;
978d6aa60a1SDavid Daney
979d6aa60a1SDavid Daney /* Allocate ring buffers. */
980d6aa60a1SDavid Daney p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
981d6aa60a1SDavid Daney GFP_KERNEL);
982d6aa60a1SDavid Daney if (!p->tx_ring)
983d6aa60a1SDavid Daney return -ENOMEM;
984d6aa60a1SDavid Daney p->tx_ring_handle =
985d6aa60a1SDavid Daney dma_map_single(p->dev, p->tx_ring,
986d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
987d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
988d6aa60a1SDavid Daney p->tx_next = 0;
989d6aa60a1SDavid Daney p->tx_next_clean = 0;
990d6aa60a1SDavid Daney p->tx_current_fill = 0;
991d6aa60a1SDavid Daney
992d6aa60a1SDavid Daney
993d6aa60a1SDavid Daney p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
994d6aa60a1SDavid Daney GFP_KERNEL);
995d6aa60a1SDavid Daney if (!p->rx_ring)
996d6aa60a1SDavid Daney goto err_nomem;
997d6aa60a1SDavid Daney p->rx_ring_handle =
998d6aa60a1SDavid Daney dma_map_single(p->dev, p->rx_ring,
999d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1000d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1001d6aa60a1SDavid Daney
1002d6aa60a1SDavid Daney p->rx_next = 0;
1003d6aa60a1SDavid Daney p->rx_next_fill = 0;
1004d6aa60a1SDavid Daney p->rx_current_fill = 0;
1005d6aa60a1SDavid Daney
1006d6aa60a1SDavid Daney octeon_mgmt_reset_hw(p);
1007d6aa60a1SDavid Daney
1008368bec0dSDavid Daney mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1009d6aa60a1SDavid Daney
1010d6aa60a1SDavid Daney /* Bring it out of reset if needed. */
1011d6aa60a1SDavid Daney if (mix_ctl.s.reset) {
1012d6aa60a1SDavid Daney mix_ctl.s.reset = 0;
1013368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1014d6aa60a1SDavid Daney do {
1015368bec0dSDavid Daney mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
1016d6aa60a1SDavid Daney } while (mix_ctl.s.reset);
1017d6aa60a1SDavid Daney }
1018d6aa60a1SDavid Daney
1019eeae05aaSDavid Daney if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
1020d6aa60a1SDavid Daney agl_gmx_inf_mode.u64 = 0;
1021d6aa60a1SDavid Daney agl_gmx_inf_mode.s.en = 1;
1022d6aa60a1SDavid Daney cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
1023eeae05aaSDavid Daney }
1024eeae05aaSDavid Daney if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
1025eeae05aaSDavid Daney || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
1026a0ce9b1eSDavid Daney /* Force compensation values, as they are not
1027eeae05aaSDavid Daney * determined properly by HW
1028eeae05aaSDavid Daney */
1029eeae05aaSDavid Daney union cvmx_agl_gmx_drv_ctl drv_ctl;
1030eeae05aaSDavid Daney
1031eeae05aaSDavid Daney drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
1032eeae05aaSDavid Daney if (p->port) {
1033eeae05aaSDavid Daney drv_ctl.s.byp_en1 = 1;
1034eeae05aaSDavid Daney drv_ctl.s.nctl1 = 6;
1035eeae05aaSDavid Daney drv_ctl.s.pctl1 = 6;
1036eeae05aaSDavid Daney } else {
1037eeae05aaSDavid Daney drv_ctl.s.byp_en = 1;
1038eeae05aaSDavid Daney drv_ctl.s.nctl = 6;
1039eeae05aaSDavid Daney drv_ctl.s.pctl = 6;
1040eeae05aaSDavid Daney }
1041eeae05aaSDavid Daney cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
1042eeae05aaSDavid Daney }
1043d6aa60a1SDavid Daney
1044d6aa60a1SDavid Daney oring1.u64 = 0;
1045d6aa60a1SDavid Daney oring1.s.obase = p->tx_ring_handle >> 3;
1046d6aa60a1SDavid Daney oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
1047368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);
1048d6aa60a1SDavid Daney
1049d6aa60a1SDavid Daney iring1.u64 = 0;
1050d6aa60a1SDavid Daney iring1.s.ibase = p->rx_ring_handle >> 3;
1051d6aa60a1SDavid Daney iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
1052368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);
1053d6aa60a1SDavid Daney
1054d6aa60a1SDavid Daney memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
1055d6aa60a1SDavid Daney octeon_mgmt_set_mac_address(netdev, &sa);
1056d6aa60a1SDavid Daney
1057d6aa60a1SDavid Daney octeon_mgmt_change_mtu(netdev, netdev->mtu);
1058d6aa60a1SDavid Daney
1059a0ce9b1eSDavid Daney /* Enable the port HW. Packets are not allowed until
1060d6aa60a1SDavid Daney * cvmx_mgmt_port_enable() is called.
1061d6aa60a1SDavid Daney */
1062d6aa60a1SDavid Daney mix_ctl.u64 = 0;
1063d6aa60a1SDavid Daney mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */
1064d6aa60a1SDavid Daney mix_ctl.s.en = 1; /* Enable the port */
1065d6aa60a1SDavid Daney mix_ctl.s.nbtarb = 0; /* Arbitration mode */
1066d6aa60a1SDavid Daney /* MII CB-request FIFO programmable high watermark */
1067d6aa60a1SDavid Daney mix_ctl.s.mrq_hwm = 1;
1068eeae05aaSDavid Daney #ifdef __LITTLE_ENDIAN
1069eeae05aaSDavid Daney mix_ctl.s.lendian = 1;
1070eeae05aaSDavid Daney #endif
1071368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
1072d6aa60a1SDavid Daney
1073eeae05aaSDavid Daney /* Read the PHY to find the mode of the interface. */
1074eeae05aaSDavid Daney if (octeon_mgmt_init_phy(netdev)) {
1075eeae05aaSDavid Daney dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
1076eeae05aaSDavid Daney goto err_noirq;
1077d6aa60a1SDavid Daney }
1078eeae05aaSDavid Daney
1079eeae05aaSDavid Daney /* Set the mode of the interface, RGMII/MII. */
10809e8e6e88SPhilippe Reynes if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && netdev->phydev) {
1081eeae05aaSDavid Daney union cvmx_agl_prtx_ctl agl_prtx_ctl;
10823c1bcc86SAndrew Lunn int rgmii_mode =
10833c1bcc86SAndrew Lunn (linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT,
10843c1bcc86SAndrew Lunn netdev->phydev->supported) |
10853c1bcc86SAndrew Lunn linkmode_test_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT,
10863c1bcc86SAndrew Lunn netdev->phydev->supported)) != 0;
1087eeae05aaSDavid Daney
1088eeae05aaSDavid Daney agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1089eeae05aaSDavid Daney agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
1090eeae05aaSDavid Daney cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1091eeae05aaSDavid Daney
1092eeae05aaSDavid Daney /* MII clocks counts are based on the 125Mhz
1093eeae05aaSDavid Daney * reference, which has an 8nS period. So our delays
1094eeae05aaSDavid Daney * need to be multiplied by this factor.
1095eeae05aaSDavid Daney */
1096eeae05aaSDavid Daney #define NS_PER_PHY_CLK 8
1097eeae05aaSDavid Daney
1098eeae05aaSDavid Daney /* Take the DLL and clock tree out of reset */
1099eeae05aaSDavid Daney agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1100eeae05aaSDavid Daney agl_prtx_ctl.s.clkrst = 0;
1101eeae05aaSDavid Daney if (rgmii_mode) {
1102eeae05aaSDavid Daney agl_prtx_ctl.s.dllrst = 0;
1103eeae05aaSDavid Daney agl_prtx_ctl.s.clktx_byp = 0;
1104eeae05aaSDavid Daney }
1105eeae05aaSDavid Daney cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1106eeae05aaSDavid Daney cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */
1107eeae05aaSDavid Daney
1108eeae05aaSDavid Daney /* Wait for the DLL to lock. External 125 MHz
1109eeae05aaSDavid Daney * reference clock must be stable at this point.
1110eeae05aaSDavid Daney */
1111eeae05aaSDavid Daney ndelay(256 * NS_PER_PHY_CLK);
1112eeae05aaSDavid Daney
1113eeae05aaSDavid Daney /* Enable the interface */
1114eeae05aaSDavid Daney agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1115eeae05aaSDavid Daney agl_prtx_ctl.s.enable = 1;
1116eeae05aaSDavid Daney cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1117eeae05aaSDavid Daney
1118eeae05aaSDavid Daney /* Read the value back to force the previous write */
1119eeae05aaSDavid Daney agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
1120eeae05aaSDavid Daney
1121eeae05aaSDavid Daney /* Enable the compensation controller */
1122eeae05aaSDavid Daney agl_prtx_ctl.s.comp = 1;
1123eeae05aaSDavid Daney agl_prtx_ctl.s.drv_byp = 0;
1124eeae05aaSDavid Daney cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);
1125eeae05aaSDavid Daney /* Force write out before wait. */
1126eeae05aaSDavid Daney cvmx_read_csr(p->agl_prt_ctl);
1127eeae05aaSDavid Daney
1128eeae05aaSDavid Daney /* Wait for the compensation state to lock. */
1129eeae05aaSDavid Daney ndelay(1040 * NS_PER_PHY_CLK);
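/* 1040 PHY clocks at 8 ns each is a roughly 8.3 us wait. */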
1130eeae05aaSDavid Daney
1131906996d6SDavid Daney /* The default interframe gaps are too small. The
1132906996d6SDavid Daney * recommended workaround is:
1133906996d6SDavid Daney *
1134906996d6SDavid Daney * AGL_GMX_TX_IFG[IFG1]=14
1135906996d6SDavid Daney * AGL_GMX_TX_IFG[IFG2]=10
1136eeae05aaSDavid Daney */
1137906996d6SDavid Daney cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
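/* 0xae packs the two recommended values as adjacent 4-bit
 * fields: IFG2=0xa (10) in bits <7:4> and IFG1=0xe (14) in
 * bits <3:0>.
 */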
1138d6aa60a1SDavid Daney }
1139d6aa60a1SDavid Daney
1140d6aa60a1SDavid Daney octeon_mgmt_rx_fill_ring(netdev);
1141d6aa60a1SDavid Daney
1142d6aa60a1SDavid Daney /* Clear statistics. */
1143d6aa60a1SDavid Daney /* The counters are configured to clear on read. */
1144368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
1145368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
1146368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);
1147d6aa60a1SDavid Daney
1148368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
1149368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
1150368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);
1151d6aa60a1SDavid Daney
1152d6aa60a1SDavid Daney /* Clear any pending interrupts */
1153368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));
1154d6aa60a1SDavid Daney
1155d6aa60a1SDavid Daney if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
1156d6aa60a1SDavid Daney netdev)) {
1157d6aa60a1SDavid Daney dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
1158d6aa60a1SDavid Daney goto err_noirq;
1159d6aa60a1SDavid Daney }
1160d6aa60a1SDavid Daney
1161d6aa60a1SDavid Daney /* Interrupt every single RX packet */
1162d6aa60a1SDavid Daney mix_irhwm.u64 = 0;
1163d6aa60a1SDavid Daney mix_irhwm.s.irhwm = 0;
1164368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);
1165d6aa60a1SDavid Daney
1166b635e069SDavid Daney /* Interrupt when we have 1 or more packets to clean. */
1167d6aa60a1SDavid Daney mix_orhwm.u64 = 0;
1168eeae05aaSDavid Daney mix_orhwm.s.orhwm = 0;
1169368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);
1170d6aa60a1SDavid Daney
1171d6aa60a1SDavid Daney /* Enable receive and transmit interrupts */
1172d6aa60a1SDavid Daney mix_intena.u64 = 0;
1173d6aa60a1SDavid Daney mix_intena.s.ithena = 1;
1174d6aa60a1SDavid Daney mix_intena.s.othena = 1;
1175368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);
1176d6aa60a1SDavid Daney
1177d6aa60a1SDavid Daney /* Enable packet I/O. */
1178d6aa60a1SDavid Daney
1179d6aa60a1SDavid Daney rxx_frm_ctl.u64 = 0;
11803d305850SChad Reese rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
1181d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_align = 1;
1182a0ce9b1eSDavid Daney /* When set, disables the length check for non-minimum-sized
1183d6aa60a1SDavid Daney * packets with padding in the client data.
1184d6aa60a1SDavid Daney */
1185d6aa60a1SDavid Daney rxx_frm_ctl.s.pad_len = 1;
1186d6aa60a1SDavid Daney /* When set, disables the length check for VLAN pkts */
1187d6aa60a1SDavid Daney rxx_frm_ctl.s.vlan_len = 1;
1188d6aa60a1SDavid Daney /* When set, PREAMBLE checking is less strict */
1189d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_free = 1;
1190d6aa60a1SDavid Daney /* Control Pause Frames can match station SMAC */
1191d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_smac = 0;
1192d6aa60a1SDavid Daney /* Control Pause Frames can match globally assigned Multicast address */
1193d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_mcst = 1;
1194d6aa60a1SDavid Daney /* Forward pause information to TX block */
1195d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_bck = 1;
1196d6aa60a1SDavid Daney /* Drop Control Pause Frames */
1197d6aa60a1SDavid Daney rxx_frm_ctl.s.ctl_drp = 1;
1198d6aa60a1SDavid Daney /* Strip off the preamble */
1199d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_strp = 1;
1200a0ce9b1eSDavid Daney /* Every received frame is expected to begin with PREAMBLE+SFD.
1201d6aa60a1SDavid Daney * GMX checks that the PREAMBLE is received correctly.
1202d6aa60a1SDavid Daney */
1203d6aa60a1SDavid Daney rxx_frm_ctl.s.pre_chk = 1;
1204368bec0dSDavid Daney cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);
1205d6aa60a1SDavid Daney
1206eeae05aaSDavid Daney /* Configure the port duplex, speed and enables */
1207eeae05aaSDavid Daney octeon_mgmt_disable_link(p);
12089e8e6e88SPhilippe Reynes if (netdev->phydev)
1209eeae05aaSDavid Daney octeon_mgmt_update_link(p);
1210eeae05aaSDavid Daney octeon_mgmt_enable_link(p);
1211d6aa60a1SDavid Daney
1212d6aa60a1SDavid Daney p->last_link = 0;
1213eeae05aaSDavid Daney p->last_speed = 0;
1214eeae05aaSDavid Daney /* The PHY is not present in the simulator; there the carrier
1215eeae05aaSDavid Daney * was enabled while initializing the PHY, so leave it enabled.
1216eeae05aaSDavid Daney */
12179e8e6e88SPhilippe Reynes if (netdev->phydev) {
1218d6aa60a1SDavid Daney netif_carrier_off(netdev);
12194663ff60SIvan Khoronzhuk phy_start(netdev->phydev);
1220d6aa60a1SDavid Daney }
1221d6aa60a1SDavid Daney
1222d6aa60a1SDavid Daney netif_wake_queue(netdev);
1223d6aa60a1SDavid Daney napi_enable(&p->napi);
1224d6aa60a1SDavid Daney
1225d6aa60a1SDavid Daney return 0;
1226d6aa60a1SDavid Daney err_noirq:
1227d6aa60a1SDavid Daney octeon_mgmt_reset_hw(p);
1228d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->rx_ring_handle,
1229d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1230d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1231d6aa60a1SDavid Daney kfree(p->rx_ring);
1232d6aa60a1SDavid Daney err_nomem:
1233d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->tx_ring_handle,
1234d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1235d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1236d6aa60a1SDavid Daney kfree(p->tx_ring);
1237d6aa60a1SDavid Daney return -ENOMEM;
1238d6aa60a1SDavid Daney }
1239d6aa60a1SDavid Daney
1240d6aa60a1SDavid Daney static int octeon_mgmt_stop(struct net_device *netdev)
1241d6aa60a1SDavid Daney {
1242d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
1243d6aa60a1SDavid Daney
1244d6aa60a1SDavid Daney napi_disable(&p->napi);
1245d6aa60a1SDavid Daney netif_stop_queue(netdev);
1246d6aa60a1SDavid Daney
12474663ff60SIvan Khoronzhuk if (netdev->phydev) {
12484663ff60SIvan Khoronzhuk phy_stop(netdev->phydev);
12499e8e6e88SPhilippe Reynes phy_disconnect(netdev->phydev);
12504663ff60SIvan Khoronzhuk }
1251d6aa60a1SDavid Daney
1252d6aa60a1SDavid Daney netif_carrier_off(netdev);
1253d6aa60a1SDavid Daney
1254d6aa60a1SDavid Daney octeon_mgmt_reset_hw(p);
1255d6aa60a1SDavid Daney
1256d6aa60a1SDavid Daney free_irq(p->irq, netdev);
1257d6aa60a1SDavid Daney
1258d6aa60a1SDavid Daney /* dma_unmap is a nop on Octeon, so just free everything. */
1259d6aa60a1SDavid Daney skb_queue_purge(&p->tx_list);
1260d6aa60a1SDavid Daney skb_queue_purge(&p->rx_list);
1261d6aa60a1SDavid Daney
1262d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->rx_ring_handle,
1263d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
1264d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1265d6aa60a1SDavid Daney kfree(p->rx_ring);
1266d6aa60a1SDavid Daney
1267d6aa60a1SDavid Daney dma_unmap_single(p->dev, p->tx_ring_handle,
1268d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1269d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
1270d6aa60a1SDavid Daney kfree(p->tx_ring);
1271d6aa60a1SDavid Daney
1272d6aa60a1SDavid Daney return 0;
1273d6aa60a1SDavid Daney }
1274d6aa60a1SDavid Daney
1275ac1172deSYueHaibing static netdev_tx_t
1276ac1172deSYueHaibing octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
1277d6aa60a1SDavid Daney {
1278d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
1279d6aa60a1SDavid Daney union mgmt_port_ring_entry re;
1280d6aa60a1SDavid Daney unsigned long flags;
1281ac1172deSYueHaibing netdev_tx_t rv = NETDEV_TX_BUSY;
1282d6aa60a1SDavid Daney
1283d6aa60a1SDavid Daney re.d64 = 0;
12843d305850SChad Reese re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
1285d6aa60a1SDavid Daney re.s.len = skb->len;
1286d6aa60a1SDavid Daney re.s.addr = dma_map_single(p->dev, skb->data,
1287d6aa60a1SDavid Daney skb->len,
1288d6aa60a1SDavid Daney DMA_TO_DEVICE);
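/* As octeon_mgmt_stop() notes, dma_unmap is a nop on Octeon:
 * the mapping is a direct physical translation, which is why
 * no mapping-error check is made here.
 */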
1289d6aa60a1SDavid Daney
1290d6aa60a1SDavid Daney spin_lock_irqsave(&p->tx_list.lock, flags);
1291d6aa60a1SDavid Daney
12924e4a4f14SDavid Daney if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
12934e4a4f14SDavid Daney spin_unlock_irqrestore(&p->tx_list.lock, flags);
12944e4a4f14SDavid Daney netif_stop_queue(netdev);
12954e4a4f14SDavid Daney spin_lock_irqsave(&p->tx_list.lock, flags);
12964e4a4f14SDavid Daney }
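/* The lock was dropped to stop the queue, so the TX cleanup
 * tasklet may have freed entries and restarted the queue in
 * the meantime; re-check the fill level below before giving
 * up on this packet.
 */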
12974e4a4f14SDavid Daney
1298d6aa60a1SDavid Daney if (unlikely(p->tx_current_fill >=
1299d6aa60a1SDavid Daney ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
1300d6aa60a1SDavid Daney spin_unlock_irqrestore(&p->tx_list.lock, flags);
1301d6aa60a1SDavid Daney dma_unmap_single(p->dev, re.s.addr, re.s.len,
1302d6aa60a1SDavid Daney DMA_TO_DEVICE);
13034e4a4f14SDavid Daney goto out;
1304d6aa60a1SDavid Daney }
1305d6aa60a1SDavid Daney
1306d6aa60a1SDavid Daney __skb_queue_tail(&p->tx_list, skb);
1307d6aa60a1SDavid Daney
1308d6aa60a1SDavid Daney /* Put it in the ring. */
1309d6aa60a1SDavid Daney p->tx_ring[p->tx_next] = re.d64;
1310d6aa60a1SDavid Daney p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
1311d6aa60a1SDavid Daney p->tx_current_fill++;
1312d6aa60a1SDavid Daney
1313d6aa60a1SDavid Daney spin_unlock_irqrestore(&p->tx_list.lock, flags);
1314d6aa60a1SDavid Daney
1315d6aa60a1SDavid Daney dma_sync_single_for_device(p->dev, p->tx_ring_handle,
1316d6aa60a1SDavid Daney ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
1317d6aa60a1SDavid Daney DMA_BIDIRECTIONAL);
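/* The sync above makes the new ring entry visible to the
 * hardware before the doorbell write below.
 */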
1318d6aa60a1SDavid Daney
1319d6aa60a1SDavid Daney netdev->stats.tx_packets++;
1320d6aa60a1SDavid Daney netdev->stats.tx_bytes += skb->len;
1321d6aa60a1SDavid Daney
1322d6aa60a1SDavid Daney /* Ring the doorbell: tell the hardware one more TX descriptor is ready. */
1323368bec0dSDavid Daney cvmx_write_csr(p->mix + MIX_ORING2, 1);
1324d6aa60a1SDavid Daney
1325860e9538SFlorian Westphal netif_trans_update(netdev);
13264e4a4f14SDavid Daney rv = NETDEV_TX_OK;
13274e4a4f14SDavid Daney out:
1328d6aa60a1SDavid Daney octeon_mgmt_update_tx_stats(netdev);
13294e4a4f14SDavid Daney return rv;
1330d6aa60a1SDavid Daney }
1331d6aa60a1SDavid Daney
1332d6aa60a1SDavid Daney #ifdef CONFIG_NET_POLL_CONTROLLER
1333d6aa60a1SDavid Daney static void octeon_mgmt_poll_controller(struct net_device *netdev)
1334d6aa60a1SDavid Daney {
1335d6aa60a1SDavid Daney struct octeon_mgmt *p = netdev_priv(netdev);
1336d6aa60a1SDavid Daney
1337d6aa60a1SDavid Daney octeon_mgmt_receive_packets(p, OCTEON_MGMT_NAPI_WEIGHT);
1338d6aa60a1SDavid Daney octeon_mgmt_update_rx_stats(netdev);
1339d6aa60a1SDavid Daney }
1340d6aa60a1SDavid Daney #endif
1341d6aa60a1SDavid Daney
1342d6aa60a1SDavid Daney static void octeon_mgmt_get_drvinfo(struct net_device *netdev,
1343d6aa60a1SDavid Daney struct ethtool_drvinfo *info)
1344d6aa60a1SDavid Daney {
1345f029c781SWolfram Sang strscpy(info->driver, DRV_NAME, sizeof(info->driver));
1346d6aa60a1SDavid Daney }
1347d6aa60a1SDavid Daney
1348f21105dfSDavid Daney static int octeon_mgmt_nway_reset(struct net_device *dev)
1349f21105dfSDavid Daney {
1350f21105dfSDavid Daney if (!capable(CAP_NET_ADMIN))
1351f21105dfSDavid Daney return -EPERM;
1352f21105dfSDavid Daney
13539e8e6e88SPhilippe Reynes if (dev->phydev)
13549e8e6e88SPhilippe Reynes return phy_start_aneg(dev->phydev);
1355f21105dfSDavid Daney
1356f21105dfSDavid Daney return -EOPNOTSUPP;
1357d6aa60a1SDavid Daney }
1358d6aa60a1SDavid Daney
1359d6aa60a1SDavid Daney static const struct ethtool_ops octeon_mgmt_ethtool_ops = {
1360d6aa60a1SDavid Daney .get_drvinfo = octeon_mgmt_get_drvinfo,
1361f21105dfSDavid Daney .nway_reset = octeon_mgmt_nway_reset,
1362f21105dfSDavid Daney .get_link = ethtool_op_get_link,
1363f4400dedSPhilippe Reynes .get_link_ksettings = phy_ethtool_get_link_ksettings,
1364f4400dedSPhilippe Reynes .set_link_ksettings = phy_ethtool_set_link_ksettings,
1365d6aa60a1SDavid Daney };
1366d6aa60a1SDavid Daney
1367d6aa60a1SDavid Daney static const struct net_device_ops octeon_mgmt_ops = {
1368d6aa60a1SDavid Daney .ndo_open = octeon_mgmt_open,
1369d6aa60a1SDavid Daney .ndo_stop = octeon_mgmt_stop,
1370d6aa60a1SDavid Daney .ndo_start_xmit = octeon_mgmt_xmit,
1371d6aa60a1SDavid Daney .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering,
1372d6aa60a1SDavid Daney .ndo_set_mac_address = octeon_mgmt_set_mac_address,
1373a7605370SArnd Bergmann .ndo_eth_ioctl = octeon_mgmt_ioctl,
1374d6aa60a1SDavid Daney .ndo_change_mtu = octeon_mgmt_change_mtu,
1375d6aa60a1SDavid Daney #ifdef CONFIG_NET_POLL_CONTROLLER
1376d6aa60a1SDavid Daney .ndo_poll_controller = octeon_mgmt_poll_controller,
1377d6aa60a1SDavid Daney #endif
1378d6aa60a1SDavid Daney };
1379d6aa60a1SDavid Daney
13805bc7ec70SBill Pemberton static int octeon_mgmt_probe(struct platform_device *pdev)
1381d6aa60a1SDavid Daney {
1382d6aa60a1SDavid Daney struct net_device *netdev;
1383d6aa60a1SDavid Daney struct octeon_mgmt *p;
1384368bec0dSDavid Daney const __be32 *data;
1385368bec0dSDavid Daney struct resource *res_mix;
1386368bec0dSDavid Daney struct resource *res_agl;
1387eeae05aaSDavid Daney struct resource *res_agl_prt_ctl;
1388368bec0dSDavid Daney int len;
1389368bec0dSDavid Daney int result;
1390d6aa60a1SDavid Daney
1391d6aa60a1SDavid Daney netdev = alloc_etherdev(sizeof(struct octeon_mgmt));
1392d6aa60a1SDavid Daney if (netdev == NULL)
1393d6aa60a1SDavid Daney return -ENOMEM;
1394d6aa60a1SDavid Daney
1395052958e3SDavid Daney SET_NETDEV_DEV(netdev, &pdev->dev);
1396052958e3SDavid Daney
13978513fbd8SJingoo Han platform_set_drvdata(pdev, netdev);
1398d6aa60a1SDavid Daney p = netdev_priv(netdev);
1399899b8cd0SJakub Kicinski netif_napi_add_weight(netdev, &p->napi, octeon_mgmt_napi_poll,
1400d6aa60a1SDavid Daney OCTEON_MGMT_NAPI_WEIGHT);
1401d6aa60a1SDavid Daney
1402d6aa60a1SDavid Daney p->netdev = netdev;
1403d6aa60a1SDavid Daney p->dev = &pdev->dev;
14043d305850SChad Reese p->has_rx_tstamp = false;
1405d6aa60a1SDavid Daney
1406368bec0dSDavid Daney data = of_get_property(pdev->dev.of_node, "cell-index", &len);
1407368bec0dSDavid Daney if (data && len == sizeof(*data)) {
1408368bec0dSDavid Daney p->port = be32_to_cpup(data);
1409368bec0dSDavid Daney } else {
1410368bec0dSDavid Daney dev_err(&pdev->dev, "no 'cell-index' property\n");
1411368bec0dSDavid Daney result = -ENXIO;
1412368bec0dSDavid Daney goto err;
1413368bec0dSDavid Daney }
1414368bec0dSDavid Daney
1415d6aa60a1SDavid Daney snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port);
1416d6aa60a1SDavid Daney
1417368bec0dSDavid Daney result = platform_get_irq(pdev, 0);
1418368bec0dSDavid Daney if (result < 0)
1419d6aa60a1SDavid Daney goto err;
1420d6aa60a1SDavid Daney
1421368bec0dSDavid Daney p->irq = result;
1422368bec0dSDavid Daney
1423368bec0dSDavid Daney res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1424368bec0dSDavid Daney if (res_mix == NULL) {
1425368bec0dSDavid Daney dev_err(&pdev->dev, "no 'reg' resource\n");
1426368bec0dSDavid Daney result = -ENXIO;
1427368bec0dSDavid Daney goto err;
1428368bec0dSDavid Daney }
1429368bec0dSDavid Daney
1430368bec0dSDavid Daney res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1431368bec0dSDavid Daney if (res_agl == NULL) {
1432368bec0dSDavid Daney dev_err(&pdev->dev, "no 'reg' resource\n");
1433368bec0dSDavid Daney result = -ENXIO;
1434368bec0dSDavid Daney goto err;
1435368bec0dSDavid Daney }
1436368bec0dSDavid Daney
1437eeae05aaSDavid Daney res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1438eeae05aaSDavid Daney if (res_agl_prt_ctl == NULL) {
1439eeae05aaSDavid Daney dev_err(&pdev->dev, "no 'reg' resource\n");
1440eeae05aaSDavid Daney result = -ENXIO;
1441eeae05aaSDavid Daney goto err;
1442eeae05aaSDavid Daney }
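/* Note that memory resource 2 is not used by this driver;
 * the AGL_PRT_CTL block is the fourth 'reg' entry.
 */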
1443eeae05aaSDavid Daney
1444368bec0dSDavid Daney p->mix_phys = res_mix->start;
1445368bec0dSDavid Daney p->mix_size = resource_size(res_mix);
1446368bec0dSDavid Daney p->agl_phys = res_agl->start;
1447368bec0dSDavid Daney p->agl_size = resource_size(res_agl);
1448eeae05aaSDavid Daney p->agl_prt_ctl_phys = res_agl_prt_ctl->start;
1449eeae05aaSDavid Daney p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl);
1450368bec0dSDavid Daney
1452368bec0dSDavid Daney if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size,
1453368bec0dSDavid Daney res_mix->name)) {
1454368bec0dSDavid Daney dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1455368bec0dSDavid Daney res_mix->name);
1456368bec0dSDavid Daney result = -ENXIO;
1457368bec0dSDavid Daney goto err;
1458368bec0dSDavid Daney }
1459368bec0dSDavid Daney
1460368bec0dSDavid Daney if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size,
1461368bec0dSDavid Daney res_agl->name)) {
1462368bec0dSDavid Daney result = -ENXIO;
1463368bec0dSDavid Daney dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1464368bec0dSDavid Daney res_agl->name);
1465368bec0dSDavid Daney goto err;
1466368bec0dSDavid Daney }
1467368bec0dSDavid Daney
1468eeae05aaSDavid Daney if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys,
1469eeae05aaSDavid Daney p->agl_prt_ctl_size, res_agl_prt_ctl->name)) {
1470eeae05aaSDavid Daney result = -ENXIO;
1471eeae05aaSDavid Daney dev_err(&pdev->dev, "request_mem_region (%s) failed\n",
1472eeae05aaSDavid Daney res_agl_prt_ctl->name);
1473eeae05aaSDavid Daney goto err;
1474eeae05aaSDavid Daney }
1475368bec0dSDavid Daney
1476368bec0dSDavid Daney p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size);
1477368bec0dSDavid Daney p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size);
1478eeae05aaSDavid Daney p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys,
1479eeae05aaSDavid Daney p->agl_prt_ctl_size);
1480162809dfSArvind Yadav if (!p->mix || !p->agl || !p->agl_prt_ctl) {
1481162809dfSArvind Yadav dev_err(&pdev->dev, "failed to map I/O memory\n");
1482162809dfSArvind Yadav result = -ENOMEM;
1483162809dfSArvind Yadav goto err;
1484162809dfSArvind Yadav }
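/* The cvmx_read_csr()/cvmx_write_csr() accessors take plain
 * u64 addresses, hence the u64 casts on the ioremap results
 * above.
 */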
1485162809dfSArvind Yadav
1486d6aa60a1SDavid Daney spin_lock_init(&p->lock);
1487d6aa60a1SDavid Daney
1488d6aa60a1SDavid Daney skb_queue_head_init(&p->tx_list);
1489d6aa60a1SDavid Daney skb_queue_head_init(&p->rx_list);
1490dfe4e612SAllen Pais tasklet_setup(&p->tx_clean_tasklet,
1491dfe4e612SAllen Pais octeon_mgmt_clean_tx_tasklet);
1492d6aa60a1SDavid Daney
149301789349SJiri Pirko netdev->priv_flags |= IFF_UNICAST_FLT;
149401789349SJiri Pirko
1495d6aa60a1SDavid Daney netdev->netdev_ops = &octeon_mgmt_ops;
1496d6aa60a1SDavid Daney netdev->ethtool_ops = &octeon_mgmt_ethtool_ops;
1497d6aa60a1SDavid Daney
1498109cc165SJarod Wilson netdev->min_mtu = 64 - OCTEON_MGMT_RX_HEADROOM;
1499e4dd5608SAlexander Sverdlin netdev->max_mtu = 16383 - OCTEON_MGMT_RX_HEADROOM - VLAN_HLEN;
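/* 16383 bytes is the largest frame the hardware can describe
 * (the ring-entry length field is 14 bits wide); the maximum
 * MTU leaves room for the Ethernet header, FCS and two VLAN
 * tags.
 */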
1500109cc165SJarod Wilson
15019ca01b25SJakub Kicinski result = of_get_ethdev_address(pdev->dev.of_node, netdev);
150283216e39SMichael Walle if (result)
1503f321238bSDavid Daney eth_hw_addr_random(netdev);
1504d6aa60a1SDavid Daney
1505368bec0dSDavid Daney p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0);
1506368bec0dSDavid Daney
150726741a69SRussell King result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
150826741a69SRussell King if (result)
150926741a69SRussell King goto err;
1510368bec0dSDavid Daney
1511eeae05aaSDavid Daney netif_carrier_off(netdev);
1512368bec0dSDavid Daney result = register_netdev(netdev);
1513368bec0dSDavid Daney if (result)
1514d6aa60a1SDavid Daney goto err;
1515d6aa60a1SDavid Daney
1516d6aa60a1SDavid Daney return 0;
1517368bec0dSDavid Daney
1518d6aa60a1SDavid Daney err:
151946997066SPeter Chen of_node_put(p->phy_np);
1520d6aa60a1SDavid Daney free_netdev(netdev);
1521368bec0dSDavid Daney return result;
1522d6aa60a1SDavid Daney }
1523d6aa60a1SDavid Daney
1524de413f46SUwe Kleine-König static void octeon_mgmt_remove(struct platform_device *pdev)
1525d6aa60a1SDavid Daney {
15268513fbd8SJingoo Han struct net_device *netdev = platform_get_drvdata(pdev);
152746997066SPeter Chen struct octeon_mgmt *p = netdev_priv(netdev);
1528d6aa60a1SDavid Daney
1529d6aa60a1SDavid Daney unregister_netdev(netdev);
153046997066SPeter Chen of_node_put(p->phy_np);
1531d6aa60a1SDavid Daney free_netdev(netdev);
1532d6aa60a1SDavid Daney }
1533d6aa60a1SDavid Daney
1534437dab40SFabian Frederick static const struct of_device_id octeon_mgmt_match[] = {
1535368bec0dSDavid Daney {
1536368bec0dSDavid Daney .compatible = "cavium,octeon-5750-mix",
1537368bec0dSDavid Daney },
1538368bec0dSDavid Daney {},
1539368bec0dSDavid Daney };
1540368bec0dSDavid Daney MODULE_DEVICE_TABLE(of, octeon_mgmt_match);
1541368bec0dSDavid Daney
1542d6aa60a1SDavid Daney static struct platform_driver octeon_mgmt_driver = {
1543d6aa60a1SDavid Daney .driver = {
1544d6aa60a1SDavid Daney .name = "octeon_mgmt",
1545368bec0dSDavid Daney .of_match_table = octeon_mgmt_match,
1546d6aa60a1SDavid Daney },
1547d6aa60a1SDavid Daney .probe = octeon_mgmt_probe,
1548*e96321faSUwe Kleine-König .remove = octeon_mgmt_remove,
1549d6aa60a1SDavid Daney };
1550d6aa60a1SDavid Daney
1551afa4f675Sdingsenjie module_platform_driver(octeon_mgmt_driver);
1552d6aa60a1SDavid Daney
1553791e5f61SAndrew Lunn MODULE_SOFTDEP("pre: mdio-cavium");
1554d6aa60a1SDavid Daney MODULE_DESCRIPTION(DRV_DESCRIPTION);
1555d6aa60a1SDavid Daney MODULE_AUTHOR("David Daney");
1556d6aa60a1SDavid Daney MODULE_LICENSE("GPL");
1557