xref: /linux/drivers/net/phy/mscc/mscc_ptp.c (revision 37816488247ddddbc3de113c78c83572274b1e2e)
1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /*
3  * Driver for Microsemi VSC85xx PHYs - timestamping and PHC support
4  *
5  * Authors: Quentin Schulz & Antoine Tenart
6  * License: Dual MIT/GPL
7  * Copyright (c) 2020 Microsemi Corporation
8  */
9 
10 #include <linux/gpio/consumer.h>
11 #include <linux/ip.h>
12 #include <linux/net_tstamp.h>
13 #include <linux/mii.h>
14 #include <linux/phy.h>
15 #include <linux/ptp_classify.h>
16 #include <linux/ptp_clock_kernel.h>
17 #include <linux/udp.h>
18 #include <linux/unaligned.h>
19 
20 #include "../phylib.h"
21 #include "mscc.h"
22 #include "mscc_ptp.h"
23 
24 /* Two PHYs share the same 1588 processor and it has to be entirely configured
25  * through the base PHY of this processor.
26  */
27 /* phydev->bus->mdio_lock should be locked when using this function */
28 static int phy_ts_base_write(struct phy_device *phydev, u32 regnum, u16 val)
29 {
30 	struct vsc8531_private *priv = phydev->priv;
31 
32 	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
33 	return __mdiobus_write(phydev->mdio.bus, priv->ts_base_addr, regnum,
34 			       val);
35 }
36 
37 /* phydev->bus->mdio_lock should be locked when using this function */
38 static int phy_ts_base_read(struct phy_device *phydev, u32 regnum)
39 {
40 	struct vsc8531_private *priv = phydev->priv;
41 
42 	WARN_ON_ONCE(!mutex_is_locked(&phydev->mdio.bus->mdio_lock));
43 	return __mdiobus_read(phydev->mdio.bus, priv->ts_base_addr, regnum);
44 }
45 
46 enum ts_blk_hw {
47 	INGRESS_ENGINE_0,
48 	EGRESS_ENGINE_0,
49 	INGRESS_ENGINE_1,
50 	EGRESS_ENGINE_1,
51 	INGRESS_ENGINE_2,
52 	EGRESS_ENGINE_2,
53 	PROCESSOR_0,
54 	PROCESSOR_1,
55 };
56 
57 enum ts_blk {
58 	INGRESS,
59 	EGRESS,
60 	PROCESSOR,
61 };
62 
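/* Indirect access to the 1588 CSRs. They are not mapped on the regular MDIO
 * register space: accesses go through the Bus Interface Unit (BIU) of the
 * shared 1588 processor, on the base PHY of the package. The sequence used
 * below is, roughly: switch the base PHY to the 1588 extended page, program
 * MSCC_PHY_TS_BIU_ADDR_CNTL with the hardware block, the CSR address and a
 * read/write strobe, poll BIU_ADDR_EXE (bounded by BIU_ADDR_CNT_MAX), move
 * the 32-bit value through the 16-bit CSR_DATA_{MSB,LSB} registers, then
 * switch back to the standard page.
 */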
63 static u32 vsc85xx_ts_read_csr(struct phy_device *phydev, enum ts_blk blk,
64 			       u16 addr)
65 {
66 	struct vsc8531_private *priv = phydev->priv;
67 	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
68 	u32 val, cnt = 0;
69 	enum ts_blk_hw blk_hw;
70 
71 	switch (blk) {
72 	case INGRESS:
73 		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
74 		break;
75 	case EGRESS:
76 		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
77 		break;
78 	case PROCESSOR:
79 	default:
80 		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
81 		break;
82 	}
83 
84 	phy_lock_mdio_bus(phydev);
85 
86 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
87 
88 	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
89 			  BIU_ADDR_READ | BIU_BLK_ID(blk_hw) |
90 			  BIU_CSR_ADDR(addr));
91 
92 	do {
93 		val = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
94 	} while (!(val & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
95 
96 	val = phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_MSB);
97 	val <<= 16;
98 	val |= phy_ts_base_read(phydev, MSCC_PHY_TS_CSR_DATA_LSB);
99 
100 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
101 
102 	phy_unlock_mdio_bus(phydev);
103 
104 	return val;
105 }
106 
107 static void vsc85xx_ts_write_csr(struct phy_device *phydev, enum ts_blk blk,
108 				 u16 addr, u32 val)
109 {
110 	struct vsc8531_private *priv = phydev->priv;
111 	bool base_port = phydev->mdio.addr == priv->ts_base_addr;
112 	u32 reg, bypass, cnt = 0, lower = val & 0xffff, upper = val >> 16;
113 	bool cond = (addr == MSCC_PHY_PTP_LTC_CTRL ||
114 		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_MASK ||
115 		     addr == MSCC_PHY_1588_VSC85XX_INT_MASK ||
116 		     addr == MSCC_PHY_1588_INGR_VSC85XX_INT_STATUS ||
117 		     addr == MSCC_PHY_1588_VSC85XX_INT_STATUS) &&
118 		    blk == PROCESSOR;
119 	enum ts_blk_hw blk_hw;
120 
121 	switch (blk) {
122 	case INGRESS:
123 		blk_hw = base_port ? INGRESS_ENGINE_0 : INGRESS_ENGINE_1;
124 		break;
125 	case EGRESS:
126 		blk_hw = base_port ? EGRESS_ENGINE_0 : EGRESS_ENGINE_1;
127 		break;
128 	case PROCESSOR:
129 	default:
130 		blk_hw = base_port ? PROCESSOR_0 : PROCESSOR_1;
131 		break;
132 	}
133 
134 	phy_lock_mdio_bus(phydev);
135 
136 	bypass = phy_ts_base_read(phydev, MSCC_PHY_BYPASS_CONTROL);
137 
138 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_1588);
139 
140 	if (!cond || upper)
141 		phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_MSB, upper);
142 
143 	phy_ts_base_write(phydev, MSCC_PHY_TS_CSR_DATA_LSB, lower);
144 
145 	phy_ts_base_write(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL, BIU_ADDR_EXE |
146 			  BIU_ADDR_WRITE | BIU_BLK_ID(blk_hw) |
147 			  BIU_CSR_ADDR(addr));
148 
149 	do {
150 		reg = phy_ts_base_read(phydev, MSCC_PHY_TS_BIU_ADDR_CNTL);
151 	} while (!(reg & BIU_ADDR_EXE) && cnt++ < BIU_ADDR_CNT_MAX);
152 
153 	phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS, MSCC_PHY_PAGE_STANDARD);
154 
155 	if (cond && upper)
156 		phy_ts_base_write(phydev, MSCC_PHY_BYPASS_CONTROL, bypass);
157 
158 	phy_unlock_mdio_bus(phydev);
159 }
160 
161 /* Pick bytes from PTP header */
162 #define PTP_HEADER_TRNSP_MSG		26
163 #define PTP_HEADER_DOMAIN_NUM		25
164 #define PTP_HEADER_BYTE_8_31(x)		(31 - (x))
165 #define MAC_ADDRESS_BYTE(x)		((x) + (35 - ETH_ALEN + 1))
166 
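/* Program the egress Frame Signature Builder (FSB). The 16-byte signature
 * stored with each timestamp in the egress TS FIFO is picked, byte by byte,
 * from the frame itself; the layout used here is roughly:
 *   [0..1]   PTP sequenceId (header bytes 30-31)
 *   [2]      PTP domainNumber
 *   [3]      transportSpecific/messageType byte
 *   [4..9]   destination MAC address
 *   [10..15] padding (messageType byte repeated)
 * get_sig() must rebuild the same layout from an skb so that FIFO entries
 * can be matched back to their packets.
 */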
167 static int vsc85xx_ts_fsb_init(struct phy_device *phydev)
168 {
169 	u8 sig_sel[16] = {};
170 	signed char i, pos = 0;
171 
172 	/* Seq ID is 2B long and starts at 30th byte */
173 	for (i = 1; i >= 0; i--)
174 		sig_sel[pos++] = PTP_HEADER_BYTE_8_31(30 + i);
175 
176 	/* DomainNum */
177 	sig_sel[pos++] = PTP_HEADER_DOMAIN_NUM;
178 
179 	/* MsgType */
180 	sig_sel[pos++] = PTP_HEADER_TRNSP_MSG;
181 
182 	/* MAC address is 6B long */
183 	for (i = ETH_ALEN - 1; i >= 0; i--)
184 		sig_sel[pos++] = MAC_ADDRESS_BYTE(i);
185 
186 	/* Fill the last bytes of the signature to reach a 16B signature */
187 	for (; pos < ARRAY_SIZE(sig_sel); pos++)
188 		sig_sel[pos] = PTP_HEADER_TRNSP_MSG;
189 
190 	for (i = 0; i <= 2; i++) {
191 		u32 val = 0;
192 
193 		for (pos = i * 5 + 4; pos >= i * 5; pos--)
194 			val = (val << 6) | sig_sel[pos];
195 
196 		vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(i),
197 				     val);
198 	}
199 
200 	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_REG(3),
201 			     sig_sel[15]);
202 
203 	return 0;
204 }
205 
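/* Ingress/egress latency constants, in the hardware's local-latency units.
 * Indexes 0/1/2 are the 1000/100/10Mbps copper values (matching the speed
 * switch in vsc85xx_ts_set_latencies() below); the last two entries of each
 * table are the 1000/100Mbps fiber values, which that switch does not
 * currently select.
 */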
206 static const u32 vsc85xx_egr_latency[] = {
207 	/* Copper Egress */
208 	1272, /* 1000Mbps */
209 	12516, /* 100Mbps */
210 	125444, /* 10Mbps */
211 	/* Fiber Egress */
212 	1277, /* 1000Mbps */
213 	12537, /* 100Mbps */
214 };
215 
216 static const u32 vsc85xx_egr_latency_macsec[] = {
217 	/* Copper Egress ON */
218 	3496, /* 1000Mbps */
219 	34760, /* 100Mbps */
220 	347844, /* 10Mbps */
221 	/* Fiber Egress ON */
222 	3502, /* 1000Mbps */
223 	34780, /* 100Mbps */
224 };
225 
226 static const u32 vsc85xx_ingr_latency[] = {
227 	/* Copper Ingress */
228 	208, /* 1000Mbps */
229 	304, /* 100Mbps */
230 	2023, /* 10Mbps */
231 	/* Fiber Ingress */
232 	98, /* 1000Mbps */
233 	197, /* 100Mbps */
234 };
235 
236 static const u32 vsc85xx_ingr_latency_macsec[] = {
237 	/* Copper Ingress */
238 	2408, /* 1000Mbps */
239 	22300, /* 100Mbps */
240 	222009, /* 10Mbps */
241 	/* Fiber Ingress */
242 	2299, /* 1000Mbps */
243 	22192, /* 100Mbps */
244 };
245 
246 static void vsc85xx_ts_set_latencies(struct phy_device *phydev)
247 {
248 	u32 val, ingr_latency, egr_latency;
249 	u8 idx;
250 
251 	/* No need to set latencies of packets if the PHY is not connected */
252 	if (!phydev->link)
253 		return;
254 
255 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_STALL_LATENCY,
256 			     STALL_EGR_LATENCY(phydev->speed));
257 
258 	switch (phydev->speed) {
259 	case SPEED_100:
260 		idx = 1;
261 		break;
262 	case SPEED_1000:
263 		idx = 0;
264 		break;
265 	default:
266 		idx = 2;
267 		break;
268 	}
269 
270 	ingr_latency = IS_ENABLED(CONFIG_MACSEC) ?
271 		vsc85xx_ingr_latency_macsec[idx] : vsc85xx_ingr_latency[idx];
272 	egr_latency = IS_ENABLED(CONFIG_MACSEC) ?
273 		vsc85xx_egr_latency_macsec[idx] : vsc85xx_egr_latency[idx];
274 
275 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_LOCAL_LATENCY,
276 			     PTP_INGR_LOCAL_LATENCY(ingr_latency));
277 
278 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
279 				  MSCC_PHY_PTP_INGR_TSP_CTRL);
280 	val |= PHY_PTP_INGR_TSP_CTRL_LOAD_DELAYS;
281 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
282 			     val);
283 
284 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_LOCAL_LATENCY,
285 			     PTP_EGR_LOCAL_LATENCY(egr_latency));
286 
287 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
288 	val |= PHY_PTP_EGR_TSP_CTRL_LOAD_DELAYS;
289 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
290 }
291 
292 static int vsc85xx_ts_disable_flows(struct phy_device *phydev, enum ts_blk blk)
293 {
294 	u8 i;
295 
296 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP, 0);
297 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
298 			     IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2));
299 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_NXT_COMP, 0);
300 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_NXT_PROT_UDP_CHKSUM,
301 			     IP2_NXT_PROT_UDP_CHKSUM_WIDTH(2));
302 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_MPLS_COMP_NXT_COMP, 0);
303 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, 0);
304 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH2_NTX_PROT, 0);
305 
306 	for (i = 0; i < COMP_MAX_FLOWS; i++) {
307 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(i),
308 				     IP1_FLOW_VALID_CH0 | IP1_FLOW_VALID_CH1);
309 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP2_FLOW_ENA(i),
310 				     IP2_FLOW_VALID_CH0 | IP2_FLOW_VALID_CH1);
311 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(i),
312 				     ETH1_FLOW_VALID_CH0 | ETH1_FLOW_VALID_CH1);
313 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH2_FLOW_ENA(i),
314 				     ETH2_FLOW_VALID_CH0 | ETH2_FLOW_VALID_CH1);
315 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_MPLS_FLOW_CTRL(i),
316 				     MPLS_FLOW_VALID_CH0 | MPLS_FLOW_VALID_CH1);
317 
318 		if (i >= PTP_COMP_MAX_FLOWS)
319 			continue;
320 
321 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i), 0);
322 		vsc85xx_ts_write_csr(phydev, blk,
323 				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), 0);
324 		vsc85xx_ts_write_csr(phydev, blk,
325 				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i), 0);
326 		vsc85xx_ts_write_csr(phydev, blk,
327 				     MSCC_ANA_PTP_FLOW_MASK_LOWER(i), 0);
328 		vsc85xx_ts_write_csr(phydev, blk,
329 				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i), 0);
330 		vsc85xx_ts_write_csr(phydev, blk,
331 				     MSCC_ANA_PTP_FLOW_MATCH_LOWER(i), 0);
332 		vsc85xx_ts_write_csr(phydev, blk,
333 				     MSCC_ANA_PTP_FLOW_PTP_ACTION(i), 0);
334 		vsc85xx_ts_write_csr(phydev, blk,
335 				     MSCC_ANA_PTP_FLOW_PTP_ACTION2(i), 0);
336 		vsc85xx_ts_write_csr(phydev, blk,
337 				     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(i), 0);
338 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_OAM_PTP_FLOW_ENA(i),
339 				     0);
340 	}
341 
342 	return 0;
343 }
344 
345 static int vsc85xx_ts_eth_cmp1_sig(struct phy_device *phydev)
346 {
347 	u32 val;
348 
349 	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT);
350 	val &= ~ANA_ETH1_NTX_PROT_SIG_OFF_MASK;
351 	val |= ANA_ETH1_NTX_PROT_SIG_OFF(0);
352 	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
353 
354 	val = vsc85xx_ts_read_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG);
355 	val &= ~ANA_FSB_ADDR_FROM_BLOCK_SEL_MASK;
356 	val |= ANA_FSB_ADDR_FROM_ETH1;
357 	vsc85xx_ts_write_csr(phydev, EGRESS, MSCC_PHY_ANA_FSB_CFG, val);
358 
359 	return 0;
360 }
361 
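/* Helpers locating the PTP header in a frame. Two encapsulations are
 * handled, matching the supported rx_filters: PTP directly over Ethernet
 * (ETH_P_1588) and PTP over UDP over IPv4. On the RX path the MAC header
 * is assumed to have already been pulled, so parsing starts at skb->data.
 */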
362 static struct vsc85xx_ptphdr *get_ptp_header_l4(struct sk_buff *skb,
363 						struct iphdr *iphdr,
364 						struct udphdr *udphdr)
365 {
366 	if (iphdr->version != 4 || iphdr->protocol != IPPROTO_UDP)
367 		return NULL;
368 
369 	return (struct vsc85xx_ptphdr *)(((unsigned char *)udphdr) + UDP_HLEN);
370 }
371 
372 static struct vsc85xx_ptphdr *get_ptp_header_tx(struct sk_buff *skb)
373 {
374 	struct ethhdr *ethhdr = eth_hdr(skb);
375 	struct udphdr *udphdr;
376 	struct iphdr *iphdr;
377 
378 	if (ethhdr->h_proto == htons(ETH_P_1588))
379 		return (struct vsc85xx_ptphdr *)(((unsigned char *)ethhdr) +
380 						 skb_mac_header_len(skb));
381 
382 	if (ethhdr->h_proto != htons(ETH_P_IP))
383 		return NULL;
384 
385 	iphdr = ip_hdr(skb);
386 	udphdr = udp_hdr(skb);
387 
388 	return get_ptp_header_l4(skb, iphdr, udphdr);
389 }
390 
391 static struct vsc85xx_ptphdr *get_ptp_header_rx(struct sk_buff *skb,
392 						enum hwtstamp_rx_filters rx_filter)
393 {
394 	struct udphdr *udphdr;
395 	struct iphdr *iphdr;
396 
397 	if (rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT)
398 		return (struct vsc85xx_ptphdr *)skb->data;
399 
400 	iphdr = (struct iphdr *)skb->data;
401 	udphdr = (struct udphdr *)(skb->data + iphdr->ihl * 4);
402 
403 	return get_ptp_header_l4(skb, iphdr, udphdr);
404 }
405 
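/* Build the 16-byte signature of an skb with the same byte layout as the
 * one programmed into the Frame Signature Builder by vsc85xx_ts_fsb_init():
 * sequence id, domain, message type, destination MAC, then padding.
 */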
406 static int get_sig(struct sk_buff *skb, u8 *sig)
407 {
408 	struct vsc85xx_ptphdr *ptphdr = get_ptp_header_tx(skb);
409 	struct ethhdr *ethhdr = eth_hdr(skb);
410 	unsigned int i;
411 
412 	if (!ptphdr)
413 		return -EOPNOTSUPP;
414 
415 	sig[0] = (__force u16)ptphdr->seq_id >> 8;
416 	sig[1] = (__force u16)ptphdr->seq_id & GENMASK(7, 0);
417 	sig[2] = ptphdr->domain;
418 	sig[3] = ptphdr->tsmt & GENMASK(3, 0);
419 
420 	memcpy(&sig[4], ethhdr->h_dest, ETH_ALEN);
421 
422 	/* Fill the last bytes of the signature to reach a 16B signature */
423 	for (i = 10; i < 16; i++)
424 		sig[i] = ptphdr->tsmt & GENMASK(3, 0);
425 
426 	return 0;
427 }
428 
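/* Pop one entry from the egress timestamp FIFO and try to match it against
 * the queued TX skbs: the signature stored by the hardware is compared with
 * the signature rebuilt from each skb. A match completes the TX timestamp;
 * non-matching skbs are put back in the queue for later FIFO entries.
 */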
429 static void vsc85xx_dequeue_skb(struct vsc85xx_ptp *ptp)
430 {
431 	struct skb_shared_hwtstamps shhwtstamps;
432 	struct vsc85xx_ts_fifo fifo;
433 	struct sk_buff *skb;
434 	u8 skb_sig[16], *p;
435 	int i, len;
436 	u32 reg;
437 
438 	memset(&fifo, 0, sizeof(fifo));
439 	p = (u8 *)&fifo;
440 
441 	reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
442 				  MSCC_PHY_PTP_EGR_TS_FIFO(0));
443 	if (reg & PTP_EGR_TS_FIFO_EMPTY)
444 		return;
445 
446 	*p++ = reg & 0xff;
447 	*p++ = (reg >> 8) & 0xff;
448 
449 	/* Read the current FIFO item. Reading FIFO6 pops the next one. */
450 	for (i = 1; i < 7; i++) {
451 		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
452 					  MSCC_PHY_PTP_EGR_TS_FIFO(i));
453 		*p++ = reg & 0xff;
454 		*p++ = (reg >> 8) & 0xff;
455 		*p++ = (reg >> 16) & 0xff;
456 		*p++ = (reg >> 24) & 0xff;
457 	}
458 
459 	len = skb_queue_len(&ptp->tx_queue);
460 	if (len < 1)
461 		return;
462 
463 	while (len--) {
464 		skb = __skb_dequeue(&ptp->tx_queue);
465 		if (!skb)
466 			return;
467 
468 		/* We can't build the signature of this packet and never will
469 		 * be able to, so drop it.
470 		 */
471 		if (get_sig(skb, skb_sig) < 0) {
472 			kfree_skb(skb);
473 			continue;
474 		}
475 
476 		/* Check if we found the signature we were looking for. */
477 		if (!memcmp(skb_sig, fifo.sig, sizeof(fifo.sig))) {
478 			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
479 			shhwtstamps.hwtstamp = ktime_set(fifo.secs, fifo.ns);
480 			skb_complete_tx_timestamp(skb, &shhwtstamps);
481 
482 			return;
483 		}
484 
485 		/* Valid signature but does not match the one of the
486 		 * packet in the FIFO right now, reschedule it for later
487 		 * packets.
488 		 */
489 		__skb_queue_tail(&ptp->tx_queue, skb);
490 	}
491 }
492 
493 static void vsc85xx_get_tx_ts(struct vsc85xx_ptp *ptp)
494 {
495 	u32 reg;
496 
497 	do {
498 		vsc85xx_dequeue_skb(ptp);
499 
500 		/* If other timestamps are available in the FIFO, process them. */
501 		reg = vsc85xx_ts_read_csr(ptp->phydev, PROCESSOR,
502 					  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
503 	} while (PTP_EGR_FIFO_LEVEL_LAST_READ(reg) > 1);
504 }
505 
506 static int vsc85xx_ptp_cmp_init(struct phy_device *phydev, enum ts_blk blk)
507 {
508 	struct vsc8531_private *vsc8531 = phydev->priv;
509 	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
510 	static const u8 msgs[] = {
511 		PTP_MSGTYPE_SYNC,
512 		PTP_MSGTYPE_DELAY_REQ
513 	};
514 	u32 val;
515 	u8 i;
516 
517 	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
518 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
519 				     base ? PTP_FLOW_VALID_CH0 :
520 				     PTP_FLOW_VALID_CH1);
521 
522 		val = vsc85xx_ts_read_csr(phydev, blk,
523 					  MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i));
524 		val &= ~PTP_FLOW_DOMAIN_RANGE_ENA;
525 		vsc85xx_ts_write_csr(phydev, blk,
526 				     MSCC_ANA_PTP_FLOW_DOMAIN_RANGE(i), val);
527 
528 		vsc85xx_ts_write_csr(phydev, blk,
529 				     MSCC_ANA_PTP_FLOW_MATCH_UPPER(i),
530 				     msgs[i] << 24);
531 
532 		vsc85xx_ts_write_csr(phydev, blk,
533 				     MSCC_ANA_PTP_FLOW_MASK_UPPER(i),
534 				     PTP_FLOW_MSG_TYPE_MASK);
535 	}
536 
537 	return 0;
538 }
539 
540 static int vsc85xx_eth_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
541 {
542 	struct vsc8531_private *vsc8531 = phydev->priv;
543 	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
544 	u32 val;
545 
546 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NXT_PROT_TAG, 0);
547 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT_VLAN_TPID,
548 			     ANA_ETH1_NTX_PROT_VLAN_TPID(ETH_P_8021AD));
549 
550 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0),
551 			     base ? ETH1_FLOW_VALID_CH0 : ETH1_FLOW_VALID_CH1);
552 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
553 			     ANA_ETH1_FLOW_MATCH_VLAN_TAG2);
554 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
555 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), 0);
556 	vsc85xx_ts_write_csr(phydev, blk,
557 			     MSCC_ANA_ETH1_FLOW_VLAN_RANGE_I_TAG(0), 0);
558 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_VLAN_TAG1(0), 0);
559 	vsc85xx_ts_write_csr(phydev, blk,
560 			     MSCC_ANA_ETH1_FLOW_VLAN_TAG2_I_TAG(0), 0);
561 
562 	val = vsc85xx_ts_read_csr(phydev, blk,
563 				  MSCC_ANA_ETH1_FLOW_MATCH_MODE(0));
564 	val &= ~ANA_ETH1_FLOW_MATCH_VLAN_TAG_MASK;
565 	val |= ANA_ETH1_FLOW_MATCH_VLAN_VERIFY;
566 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_MATCH_MODE(0),
567 			     val);
568 
569 	return 0;
570 }
571 
572 static int vsc85xx_ip_cmp1_init(struct phy_device *phydev, enum ts_blk blk)
573 {
574 	struct vsc8531_private *vsc8531 = phydev->priv;
575 	bool base = phydev->mdio.addr == vsc8531->ts_base_addr;
576 	u32 val;
577 
578 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_UPPER,
579 			     PTP_EV_PORT);
580 	/* Match on dest port only, ignore src */
581 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_UPPER,
582 			     0xffff);
583 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MATCH2_LOWER,
584 			     0);
585 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_MASK2_LOWER, 0);
586 
587 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
588 	val &= ~IP1_FLOW_ENA_CHANNEL_MASK_MASK;
589 	val |= base ? IP1_FLOW_VALID_CH0 : IP1_FLOW_VALID_CH1;
590 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
591 
592 	/* Match all IPs */
593 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER(0), 0);
594 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER(0), 0);
595 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_UPPER_MID(0),
596 			     0);
597 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_UPPER_MID(0),
598 			     0);
599 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER_MID(0),
600 			     0);
601 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER_MID(0),
602 			     0);
603 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MATCH_LOWER(0), 0);
604 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_MASK_LOWER(0), 0);
605 
606 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_IP_CHKSUM_SEL, 0);
607 
608 	return 0;
609 }
610 
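/* Frequency adjustment relies on the LTC auto-adjust mechanism: judging by
 * the ADD_1NS/SUB_1NS flags, the counter adds or drops one nanosecond every
 * 'adj' nanoseconds, with adj = 65536 * 10^6 / |scaled_ppm| (scaled_ppm
 * being ppm * 2^16). Illustrative example: scaled_ppm = 65536, i.e. 1 ppm,
 * gives adj = 10^6, so one extra/missing nanosecond every millisecond.
 */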
611 static int vsc85xx_adjfine(struct ptp_clock_info *info, long scaled_ppm)
612 {
613 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
614 	struct phy_device *phydev = ptp->phydev;
615 	struct vsc8531_private *priv = phydev->priv;
616 	u64 adj = 0;
617 	u32 val;
618 
619 	if (abs(scaled_ppm) < 66 || abs(scaled_ppm) > 65536UL * 1000000UL)
620 		return 0;
621 
622 	adj = div64_u64(1000000ULL * 65536ULL, abs(scaled_ppm));
623 	if (adj > 1000000000L)
624 		adj = 1000000000L;
625 
626 	val = PTP_AUTO_ADJ_NS_ROLLOVER(adj);
627 	val |= scaled_ppm > 0 ? PTP_AUTO_ADJ_ADD_1NS : PTP_AUTO_ADJ_SUB_1NS;
628 
629 	mutex_lock(&priv->phc_lock);
630 
631 	/* Write the computed nanosecond rollover period to the auto adjust reg. */
632 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_AUTO_ADJ,
633 			     val);
634 
635 	/* The auto adjust update val is set back to 0 after the write operation. */
636 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
637 	val |= PTP_LTC_CTRL_AUTO_ADJ_UPDATE;
638 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
639 
640 	mutex_unlock(&priv->phc_lock);
641 
642 	return 0;
643 }
644 
645 static int __vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
646 {
647 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
648 	struct phy_device *phydev = ptp->phydev;
649 	struct vsc8531_private *priv = phydev->priv;
650 	struct vsc85xx_shared_private *shared;
651 	u32 val;
652 
653 	shared = phy_package_get_priv(phydev);
654 
655 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
656 	val |= PTP_LTC_CTRL_SAVE_ENA;
657 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
658 
659 	/* Local Time Counter (LTC) is put in SAVE* regs on rising edge of
660 	 * LOAD_SAVE pin.
661 	 */
662 	mutex_lock(&shared->gpio_lock);
663 	gpiod_set_value(priv->load_save, 1);
664 
665 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
666 				  MSCC_PHY_PTP_LTC_SAVED_SEC_MSB);
667 
668 	ts->tv_sec = ((time64_t)val) << 32;
669 
670 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
671 				  MSCC_PHY_PTP_LTC_SAVED_SEC_LSB);
672 	ts->tv_sec += val;
673 
674 	ts->tv_nsec = vsc85xx_ts_read_csr(phydev, PROCESSOR,
675 					  MSCC_PHY_PTP_LTC_SAVED_NS);
676 
677 	gpiod_set_value(priv->load_save, 0);
678 	mutex_unlock(&shared->gpio_lock);
679 
680 	return 0;
681 }
682 
683 static int vsc85xx_gettime(struct ptp_clock_info *info, struct timespec64 *ts)
684 {
685 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
686 	struct phy_device *phydev = ptp->phydev;
687 	struct vsc8531_private *priv = phydev->priv;
688 
689 	mutex_lock(&priv->phc_lock);
690 	__vsc85xx_gettime(info, ts);
691 	mutex_unlock(&priv->phc_lock);
692 
693 	return 0;
694 }
695 
696 static int __vsc85xx_settime(struct ptp_clock_info *info,
697 			     const struct timespec64 *ts)
698 {
699 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
700 	struct phy_device *phydev = ptp->phydev;
701 	struct vsc8531_private *priv = phydev->priv;
702 	struct vsc85xx_shared_private *shared;
703 	u32 val;
704 
705 	shared = phy_package_get_priv(phydev);
706 
707 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_MSB,
708 			     PTP_LTC_LOAD_SEC_MSB(ts->tv_sec));
709 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_SEC_LSB,
710 			     PTP_LTC_LOAD_SEC_LSB(ts->tv_sec));
711 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_LOAD_NS,
712 			     PTP_LTC_LOAD_NS(ts->tv_nsec));
713 
714 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
715 	val |= PTP_LTC_CTRL_LOAD_ENA;
716 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
717 
718 	/* Local Time Counter (LTC) is set from LOAD* regs on rising edge of
719 	 * LOAD_SAVE pin.
720 	 */
721 	mutex_lock(&shared->gpio_lock);
722 	gpiod_set_value(priv->load_save, 1);
723 
724 	val &= ~PTP_LTC_CTRL_LOAD_ENA;
725 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
726 
727 	gpiod_set_value(priv->load_save, 0);
728 	mutex_unlock(&shared->gpio_lock);
729 
730 	return 0;
731 }
732 
733 static int vsc85xx_settime(struct ptp_clock_info *info,
734 			   const struct timespec64 *ts)
735 {
736 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
737 	struct phy_device *phydev = ptp->phydev;
738 	struct vsc8531_private *priv = phydev->priv;
739 
740 	mutex_lock(&priv->phc_lock);
741 	__vsc85xx_settime(info, ts);
742 	mutex_unlock(&priv->phc_lock);
743 
744 	return 0;
745 }
746 
747 static int vsc85xx_adjtime(struct ptp_clock_info *info, s64 delta)
748 {
749 	struct vsc85xx_ptp *ptp = container_of(info, struct vsc85xx_ptp, caps);
750 	struct phy_device *phydev = ptp->phydev;
751 	struct vsc8531_private *priv = phydev->priv;
752 	u32 val;
753 
754 	/* Can't recover that big of an offset. Let's set the time directly. */
755 	if (abs(delta) >= NSEC_PER_SEC) {
756 		struct timespec64 ts;
757 		u64 now;
758 
759 		mutex_lock(&priv->phc_lock);
760 
761 		__vsc85xx_gettime(info, &ts);
762 		now = ktime_to_ns(timespec64_to_ktime(ts));
763 		ts = ns_to_timespec64(now + delta);
764 		__vsc85xx_settime(info, &ts);
765 
766 		mutex_unlock(&priv->phc_lock);
767 
768 		return 0;
769 	}
770 
771 	mutex_lock(&priv->phc_lock);
772 
773 	val = PTP_LTC_OFFSET_VAL(abs(delta)) | PTP_LTC_OFFSET_ADJ;
774 	if (delta > 0)
775 		val |= PTP_LTC_OFFSET_ADD;
776 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_OFFSET, val);
777 
778 	mutex_unlock(&priv->phc_lock);
779 
780 	return 0;
781 }
782 
783 static int vsc85xx_eth1_next_comp(struct phy_device *phydev, enum ts_blk blk,
784 				  u32 next_comp, u32 etype)
785 {
786 	u32 val;
787 
788 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT);
789 	val &= ~ANA_ETH1_NTX_PROT_COMPARATOR_MASK;
790 	val |= next_comp;
791 	vsc85xx_ts_write_csr(phydev, blk, MSCC_PHY_ANA_ETH1_NTX_PROT, val);
792 
793 	val = ANA_ETH1_NXT_PROT_ETYPE_MATCH(etype) |
794 		ANA_ETH1_NXT_PROT_ETYPE_MATCH_ENA;
795 	vsc85xx_ts_write_csr(phydev, blk,
796 			     MSCC_PHY_ANA_ETH1_NXT_PROT_ETYPE_MATCH, val);
797 
798 	return 0;
799 }
800 
801 static int vsc85xx_ip1_next_comp(struct phy_device *phydev, enum ts_blk blk,
802 				 u32 next_comp, u32 header)
803 {
804 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_NXT_COMP,
805 			     ANA_IP1_NXT_PROT_NXT_COMP_BYTES_HDR(header) |
806 			     next_comp);
807 
808 	return 0;
809 }
810 
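/* Configure what the analyzer does with a matched PTP event frame:
 * - PTP_WRITE_NS (ingress): write the RX nanoseconds into the 4-byte
 *   reserved field of the PTP header (offset 16) and flag the frame as
 *   modified, so that vsc85xx_rxtstamp() can recover the timestamp.
 * - PTP_WRITE_1588 (egress, one-step Sync): rewrite the timestamp carried
 *   by the frame itself (10 bytes at offset 34).
 * - PTP_SAVE_IN_TS_FIFO (egress, two-step): no frame rewrite, save the
 *   local time and the frame signature in the egress TS FIFO instead.
 */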
811 static int vsc85xx_ts_ptp_action_flow(struct phy_device *phydev, enum ts_blk blk, u8 flow, enum ptp_cmd cmd)
812 {
813 	u32 val;
814 
815 	/* Check non-zero reserved field */
816 	val = PTP_FLOW_PTP_0_FIELD_PTP_FRAME | PTP_FLOW_PTP_0_FIELD_RSVRD_CHECK;
817 	vsc85xx_ts_write_csr(phydev, blk,
818 			     MSCC_ANA_PTP_FLOW_PTP_0_FIELD(flow), val);
819 
820 	val = PTP_FLOW_PTP_ACTION_CORR_OFFSET(8) |
821 	      PTP_FLOW_PTP_ACTION_TIME_OFFSET(8) |
822 	      PTP_FLOW_PTP_ACTION_PTP_CMD(cmd == PTP_SAVE_IN_TS_FIFO ?
823 					  PTP_NOP : cmd);
824 	if (cmd == PTP_SAVE_IN_TS_FIFO)
825 		val |= PTP_FLOW_PTP_ACTION_SAVE_LOCAL_TIME;
826 	else if (cmd == PTP_WRITE_NS)
827 		val |= PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_UPDATE |
828 		       PTP_FLOW_PTP_ACTION_MOD_FRAME_STATUS_BYTE_OFFSET(6);
829 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_PTP_ACTION(flow),
830 			     val);
831 
832 	if (cmd == PTP_WRITE_1588)
833 		/* Rewrite timestamp directly in frame */
834 		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(34) |
835 		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(10);
836 	else if (cmd == PTP_SAVE_IN_TS_FIFO)
837 		/* no rewrite */
838 		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(0) |
839 		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(0);
840 	else
841 		/* Write in reserved field */
842 		val = PTP_FLOW_PTP_ACTION2_REWRITE_OFFSET(16) |
843 		      PTP_FLOW_PTP_ACTION2_REWRITE_BYTES(4);
844 	vsc85xx_ts_write_csr(phydev, blk,
845 			     MSCC_ANA_PTP_FLOW_PTP_ACTION2(flow), val);
846 
847 	return 0;
848 }
849 
850 static int vsc85xx_ptp_conf(struct phy_device *phydev, enum ts_blk blk,
851 			    bool one_step, bool enable)
852 {
853 	static const u8 msgs[] = {
854 		PTP_MSGTYPE_SYNC,
855 		PTP_MSGTYPE_DELAY_REQ
856 	};
857 	u32 val;
858 	u8 i;
859 
860 	for (i = 0; i < ARRAY_SIZE(msgs); i++) {
861 		if (blk == INGRESS)
862 			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
863 						   PTP_WRITE_NS);
864 		else if (msgs[i] == PTP_MSGTYPE_SYNC && one_step)
865 			/* no need to report the Sync TX timestamp when sending in one-step */
866 			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
867 						   PTP_WRITE_1588);
868 		else
869 			vsc85xx_ts_ptp_action_flow(phydev, blk, msgs[i],
870 						   PTP_SAVE_IN_TS_FIFO);
871 
872 		val = vsc85xx_ts_read_csr(phydev, blk,
873 					  MSCC_ANA_PTP_FLOW_ENA(i));
874 		val &= ~PTP_FLOW_ENA;
875 		if (enable)
876 			val |= PTP_FLOW_ENA;
877 		vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_PTP_FLOW_ENA(i),
878 				     val);
879 	}
880 
881 	return 0;
882 }
883 
884 static int vsc85xx_eth1_conf(struct phy_device *phydev, enum ts_blk blk,
885 			     bool enable)
886 {
887 	struct vsc8531_private *vsc8531 = phydev->priv;
888 	u32 val = ANA_ETH1_FLOW_ADDR_MATCH2_DEST;
889 
890 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
891 		/* PTP over Ethernet multicast address for SYNC and DELAY msg */
892 		u8 ptp_multicast[6] = {0x01, 0x1b, 0x19, 0x00, 0x00, 0x00};
893 
894 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_FULL_ADDR |
895 		       get_unaligned_be16(&ptp_multicast[4]);
896 		vsc85xx_ts_write_csr(phydev, blk,
897 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
898 		vsc85xx_ts_write_csr(phydev, blk,
899 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0),
900 				     get_unaligned_be32(ptp_multicast));
901 	} else {
902 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_MULTICAST;
903 		val |= ANA_ETH1_FLOW_ADDR_MATCH2_ANY_UNICAST;
904 		vsc85xx_ts_write_csr(phydev, blk,
905 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH2(0), val);
906 		vsc85xx_ts_write_csr(phydev, blk,
907 				     MSCC_ANA_ETH1_FLOW_ADDR_MATCH1(0), 0);
908 	}
909 
910 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0));
911 	val &= ~ETH1_FLOW_ENA;
912 	if (enable)
913 		val |= ETH1_FLOW_ENA;
914 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_ETH1_FLOW_ENA(0), val);
915 
916 	return 0;
917 }
918 
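/* Configure the IP1 comparator to match PTP event frames carried over
 * UDP/IPv4. The offsets used below assume an IPv4 header without options:
 * the Protocol field at byte 9, a 20-byte header (so UDP starts at offset
 * 20) and the UDP checksum at byte 26 from the start of the IP header.
 */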
919 static int vsc85xx_ip1_conf(struct phy_device *phydev, enum ts_blk blk,
920 			    bool enable)
921 {
922 	u32 val;
923 
924 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP1_MODE,
925 			     ANA_IP1_NXT_PROT_IPV4 |
926 			     ANA_IP1_NXT_PROT_FLOW_OFFSET_IPV4);
927 
928 	/* Matching UDP protocol number */
929 	val = ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MASK(0xff) |
930 	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_MATCH(IPPROTO_UDP) |
931 	      ANA_IP1_NXT_PROT_IP_MATCH1_PROT_OFF(9);
932 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_IP_MATCH1,
933 			     val);
934 
935 	/* End of IP protocol, start of next protocol (UDP) */
936 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_OFFSET2,
937 			     ANA_IP1_NXT_PROT_OFFSET2(20));
938 
939 	val = vsc85xx_ts_read_csr(phydev, blk,
940 				  MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM);
941 	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_OFF_MASK |
942 		 IP1_NXT_PROT_UDP_CHKSUM_WIDTH_MASK);
943 	val |= IP1_NXT_PROT_UDP_CHKSUM_WIDTH(2);
944 
945 	val &= ~(IP1_NXT_PROT_UDP_CHKSUM_UPDATE |
946 		 IP1_NXT_PROT_UDP_CHKSUM_CLEAR);
947 	/* UDP checksum offset in IPv4 packet
948 	 * according to: https://tools.ietf.org/html/rfc768
949 	 */
950 	val |= IP1_NXT_PROT_UDP_CHKSUM_OFF(26);
951 	if (enable)
952 		val |= IP1_NXT_PROT_UDP_CHKSUM_CLEAR;
953 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_NXT_PROT_UDP_CHKSUM,
954 			     val);
955 
956 	val = vsc85xx_ts_read_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0));
957 	val &= ~(IP1_FLOW_MATCH_ADDR_MASK | IP1_FLOW_ENA);
958 	val |= IP1_FLOW_MATCH_DEST_SRC_ADDR;
959 	if (enable)
960 		val |= IP1_FLOW_ENA;
961 	vsc85xx_ts_write_csr(phydev, blk, MSCC_ANA_IP1_FLOW_ENA(0), val);
962 
963 	return 0;
964 }
965 
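/* (Re)configure the analyzer engine owned by this PHY. Each PHY of the pair
 * sharing the 1588 processor uses its own ingress/egress engine: engine 0
 * for the base PHY, engine 1 for the other one.
 */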
966 static int vsc85xx_ts_engine_init(struct phy_device *phydev, bool one_step)
967 {
968 	struct vsc8531_private *vsc8531 = phydev->priv;
969 	bool ptp_l4, base = phydev->mdio.addr == vsc8531->ts_base_addr;
970 	u8 eng_id = base ? 0 : 1;
971 	u32 val;
972 
973 	ptp_l4 = vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L4_EVENT;
974 
975 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
976 				  MSCC_PHY_PTP_ANALYZER_MODE);
977 	/* Disable INGRESS and EGRESS so engine eng_id can be reconfigured */
978 	val &= ~(PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id)) |
979 		 PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id)));
980 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
981 			     val);
982 
983 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_PTP_V2_L2_EVENT) {
984 		vsc85xx_eth1_next_comp(phydev, INGRESS,
985 				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
986 		vsc85xx_eth1_next_comp(phydev, EGRESS,
987 				       ANA_ETH1_NTX_PROT_PTP_OAM, ETH_P_1588);
988 	} else {
989 		vsc85xx_eth1_next_comp(phydev, INGRESS,
990 				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
991 				       ETH_P_IP);
992 		vsc85xx_eth1_next_comp(phydev, EGRESS,
993 				       ANA_ETH1_NTX_PROT_IP_UDP_ACH_1,
994 				       ETH_P_IP);
995 		/* Header length of IPv[4/6] + UDP */
996 		vsc85xx_ip1_next_comp(phydev, INGRESS,
997 				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
998 		vsc85xx_ip1_next_comp(phydev, EGRESS,
999 				      ANA_ETH1_NTX_PROT_PTP_OAM, 28);
1000 	}
1001 
1002 	vsc85xx_eth1_conf(phydev, INGRESS,
1003 			  vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1004 	vsc85xx_ip1_conf(phydev, INGRESS,
1005 			 ptp_l4 && vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1006 	vsc85xx_ptp_conf(phydev, INGRESS, one_step,
1007 			 vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE);
1008 
1009 	vsc85xx_eth1_conf(phydev, EGRESS,
1010 			  vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1011 	vsc85xx_ip1_conf(phydev, EGRESS,
1012 			 ptp_l4 && vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1013 	vsc85xx_ptp_conf(phydev, EGRESS, one_step,
1014 			 vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF);
1015 
1016 	val &= ~PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
1017 	if (vsc8531->ptp->tx_type != HWTSTAMP_TX_OFF)
1018 		val |= PTP_ANALYZER_MODE_EGR_ENA(BIT(eng_id));
1019 
1020 	val &= ~PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
1021 	if (vsc8531->ptp->rx_filter != HWTSTAMP_FILTER_NONE)
1022 		val |= PTP_ANALYZER_MODE_INGR_ENA(BIT(eng_id));
1023 
1024 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
1025 			     val);
1026 
1027 	return 0;
1028 }
1029 
1030 void vsc85xx_link_change_notify(struct phy_device *phydev)
1031 {
1032 	struct vsc8531_private *priv = phydev->priv;
1033 
1034 	mutex_lock(&priv->ts_lock);
1035 	vsc85xx_ts_set_latencies(phydev);
1036 	mutex_unlock(&priv->ts_lock);
1037 }
1038 
1039 static void vsc85xx_ts_reset_fifo(struct phy_device *phydev)
1040 {
1041 	u32 val;
1042 
1043 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1044 				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
1045 	val |= PTP_EGR_TS_FIFO_RESET;
1046 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1047 			     val);
1048 
1049 	val &= ~PTP_EGR_TS_FIFO_RESET;
1050 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1051 			     val);
1052 }
1053 
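/* Apply a new timestamping configuration. The 1588 block is reconfigured
 * with the predictors disabled; a direction in which timestamping is off is
 * simply bypassed, and the egress TS FIFO is reset so that no stale
 * timestamp survives the reconfiguration.
 */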
1054 static int vsc85xx_hwtstamp(struct mii_timestamper *mii_ts,
1055 			    struct kernel_hwtstamp_config *cfg,
1056 			    struct netlink_ext_ack *extack)
1057 {
1058 	struct vsc8531_private *vsc8531 =
1059 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1060 	struct phy_device *phydev = vsc8531->ptp->phydev;
1061 	bool one_step = false;
1062 	u32 val;
1063 
1064 	switch (cfg->tx_type) {
1065 	case HWTSTAMP_TX_ONESTEP_SYNC:
1066 		one_step = true;
1067 		break;
1068 	case HWTSTAMP_TX_ON:
1069 		break;
1070 	case HWTSTAMP_TX_OFF:
1071 		break;
1072 	default:
1073 		return -ERANGE;
1074 	}
1075 
1076 	vsc8531->ptp->tx_type = cfg->tx_type;
1077 
1078 	switch (cfg->rx_filter) {
1079 	case HWTSTAMP_FILTER_NONE:
1080 		break;
1081 	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
1082 		/* ETH->IP->UDP->PTP */
1083 		break;
1084 	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
1085 		/* ETH->PTP */
1086 		break;
1087 	default:
1088 		return -ERANGE;
1089 	}
1090 
1091 	vsc8531->ptp->rx_filter = cfg->rx_filter;
1092 
1093 	mutex_lock(&vsc8531->ts_lock);
1094 
1095 	__skb_queue_purge(&vsc8531->ptp->tx_queue);
1096 	__skb_queue_head_init(&vsc8531->ptp->tx_queue);
1097 
1098 	/* Disable predictor while configuring the 1588 block */
1099 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1100 				  MSCC_PHY_PTP_INGR_PREDICTOR);
1101 	val &= ~PTP_INGR_PREDICTOR_EN;
1102 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1103 			     val);
1104 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1105 				  MSCC_PHY_PTP_EGR_PREDICTOR);
1106 	val &= ~PTP_EGR_PREDICTOR_EN;
1107 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1108 			     val);
1109 
1110 	/* Bypass egress or ingress blocks if timestamping isn't used */
1111 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
1112 	val &= ~(PTP_IFACE_CTRL_EGR_BYPASS | PTP_IFACE_CTRL_INGR_BYPASS);
1113 	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
1114 		val |= PTP_IFACE_CTRL_EGR_BYPASS;
1115 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE)
1116 		val |= PTP_IFACE_CTRL_INGR_BYPASS;
1117 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1118 
1119 	/* Resetting FIFO so that it's empty after reconfiguration */
1120 	vsc85xx_ts_reset_fifo(phydev);
1121 
1122 	vsc85xx_ts_engine_init(phydev, one_step);
1123 
1124 	/* Re-enable predictors now */
1125 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1126 				  MSCC_PHY_PTP_INGR_PREDICTOR);
1127 	val |= PTP_INGR_PREDICTOR_EN;
1128 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1129 			     val);
1130 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1131 				  MSCC_PHY_PTP_EGR_PREDICTOR);
1132 	val |= PTP_EGR_PREDICTOR_EN;
1133 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1134 			     val);
1135 
1136 	vsc8531->ptp->configured = 1;
1137 	mutex_unlock(&vsc8531->ts_lock);
1138 
1139 	return 0;
1140 }
1141 
1142 static int vsc85xx_ts_info(struct mii_timestamper *mii_ts,
1143 			   struct kernel_ethtool_ts_info *info)
1144 {
1145 	struct vsc8531_private *vsc8531 =
1146 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1147 
1148 	info->phc_index = ptp_clock_index(vsc8531->ptp->ptp_clock);
1149 	info->so_timestamping =
1150 		SOF_TIMESTAMPING_TX_HARDWARE |
1151 		SOF_TIMESTAMPING_RX_HARDWARE |
1152 		SOF_TIMESTAMPING_RAW_HARDWARE;
1153 	info->tx_types =
1154 		(1 << HWTSTAMP_TX_OFF) |
1155 		(1 << HWTSTAMP_TX_ON) |
1156 		(1 << HWTSTAMP_TX_ONESTEP_SYNC);
1157 	info->rx_filters =
1158 		(1 << HWTSTAMP_FILTER_NONE) |
1159 		(1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) |
1160 		(1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT);
1161 
1162 	return 0;
1163 }
1164 
1165 static void vsc85xx_txtstamp(struct mii_timestamper *mii_ts,
1166 			     struct sk_buff *skb, int type)
1167 {
1168 	struct vsc8531_private *vsc8531 =
1169 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1170 
1171 	if (!vsc8531->ptp->configured)
1172 		goto out;
1173 
1174 	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_OFF)
1175 		goto out;
1176 
1177 	if (vsc8531->ptp->tx_type == HWTSTAMP_TX_ONESTEP_SYNC)
1178 		if (ptp_msg_is_sync(skb, type))
1179 			goto out;
1180 
1181 	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
1182 
1183 	mutex_lock(&vsc8531->ts_lock);
1184 	__skb_queue_tail(&vsc8531->ptp->tx_queue, skb);
1185 	mutex_unlock(&vsc8531->ts_lock);
1186 	return;
1187 
1188 out:
1189 	kfree_skb(skb);
1190 }
1191 
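/* Attach the RX timestamp to the skb. In 30-bit mode only the nanosecond
 * part of the RX time is stored by the PHY, in the reserved field of the
 * PTP header; the seconds are taken from the PHC read at processing time,
 * decremented by one if the nanoseconds wrapped in between.
 */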
1192 static bool vsc85xx_rxtstamp(struct mii_timestamper *mii_ts,
1193 			     struct sk_buff *skb, int type)
1194 {
1195 	struct vsc8531_private *vsc8531 =
1196 		container_of(mii_ts, struct vsc8531_private, mii_ts);
1197 	struct skb_shared_hwtstamps *shhwtstamps = NULL;
1198 	struct vsc85xx_ptphdr *ptphdr;
1199 	struct timespec64 ts;
1200 	unsigned long ns;
1201 
1202 	if (!vsc8531->ptp->configured)
1203 		return false;
1204 
1205 	if (vsc8531->ptp->rx_filter == HWTSTAMP_FILTER_NONE ||
1206 	    type == PTP_CLASS_NONE)
1207 		return false;
1208 
1209 	vsc85xx_gettime(&vsc8531->ptp->caps, &ts);
1210 
1211 	ptphdr = get_ptp_header_rx(skb, vsc8531->ptp->rx_filter);
1212 	if (!ptphdr)
1213 		return false;
1214 
1215 	shhwtstamps = skb_hwtstamps(skb);
1216 	memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
1217 
1218 	ns = ntohl(ptphdr->rsrvd2);
1219 
1220 	/* nsec is in reserved field */
1221 	if (ts.tv_nsec < ns)
1222 		ts.tv_sec--;
1223 
1224 	shhwtstamps->hwtstamp = ktime_set(ts.tv_sec, ns);
1225 	netif_rx(skb);
1226 
1227 	return true;
1228 }
1229 
1230 static const struct ptp_clock_info vsc85xx_clk_caps = {
1231 	.owner		= THIS_MODULE,
1232 	.name		= "VSC85xx timer",
1233 	.max_adj	= S32_MAX,
1234 	.n_alarm	= 0,
1235 	.n_pins		= 0,
1236 	.n_ext_ts	= 0,
1237 	.n_per_out	= 0,
1238 	.pps		= 0,
1239 	.adjtime        = &vsc85xx_adjtime,
1240 	.adjfine	= &vsc85xx_adjfine,
1241 	.gettime64	= &vsc85xx_gettime,
1242 	.settime64	= &vsc85xx_settime,
1243 };
1244 
1245 static struct vsc8531_private *vsc8584_base_priv(struct phy_device *phydev)
1246 {
1247 	struct vsc8531_private *vsc8531 = phydev->priv;
1248 
1249 	if (vsc8531->ts_base_addr != phydev->mdio.addr) {
1250 		struct mdio_device *dev;
1251 
1252 		dev = phydev->mdio.bus->mdio_map[vsc8531->ts_base_addr];
1253 		phydev = container_of(dev, struct phy_device, mdio);
1254 
1255 		return phydev->priv;
1256 	}
1257 
1258 	return vsc8531;
1259 }
1260 
1261 static bool vsc8584_is_1588_input_clk_configured(struct phy_device *phydev)
1262 {
1263 	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
1264 
1265 	return vsc8531->input_clk_init;
1266 }
1267 
1268 static void vsc8584_set_input_clk_configured(struct phy_device *phydev)
1269 {
1270 	struct vsc8531_private *vsc8531 = vsc8584_base_priv(phydev);
1271 
1272 	vsc8531->input_clk_init = true;
1273 }
1274 
1275 static int __vsc8584_init_ptp(struct phy_device *phydev)
1276 {
1277 	struct vsc8531_private *vsc8531 = phydev->priv;
1278 	static const u32 ltc_seq_e[] = { 0, 400000, 0, 0, 0 };
1279 	static const u8  ltc_seq_a[] = { 8, 6, 5, 4, 2 };
1280 	u32 val;
1281 
1282 	if (!vsc8584_is_1588_input_clk_configured(phydev)) {
1283 		phy_lock_mdio_bus(phydev);
1284 
1285 		/* 1588_DIFF_INPUT_CLK configuration: Use an external clock for
1286 		 * the LTC, as per 3.13.29 in the VSC8584 datasheet.
1287 		 */
1288 		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
1289 				  MSCC_PHY_PAGE_1588);
1290 		phy_ts_base_write(phydev, 29, 0x7ae0);
1291 		phy_ts_base_write(phydev, 30, 0xb71c);
1292 		phy_ts_base_write(phydev, MSCC_EXT_PAGE_ACCESS,
1293 				  MSCC_PHY_PAGE_STANDARD);
1294 
1295 		phy_unlock_mdio_bus(phydev);
1296 
1297 		vsc8584_set_input_clk_configured(phydev);
1298 	}
1299 
1300 	/* Disable predictor before configuring the 1588 block */
1301 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1302 				  MSCC_PHY_PTP_INGR_PREDICTOR);
1303 	val &= ~PTP_INGR_PREDICTOR_EN;
1304 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_PREDICTOR,
1305 			     val);
1306 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1307 				  MSCC_PHY_PTP_EGR_PREDICTOR);
1308 	val &= ~PTP_EGR_PREDICTOR_EN;
1309 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_PREDICTOR,
1310 			     val);
1311 
1312 	/* By default, the fixed-rate 250MHz internal clock is used */
1313 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL);
1314 	val &= ~PTP_LTC_CTRL_CLK_SEL_MASK;
1315 	val |= PTP_LTC_CTRL_CLK_SEL_INTERNAL_250;
1316 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_CTRL, val);
1317 
1318 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE);
1319 	val &= ~PTP_LTC_SEQUENCE_A_MASK;
1320 	val |= PTP_LTC_SEQUENCE_A(ltc_seq_a[PHC_CLK_250MHZ]);
1321 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQUENCE, val);
1322 
1323 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ);
1324 	val &= ~(PTP_LTC_SEQ_ERR_MASK | PTP_LTC_SEQ_ADD_SUB);
1325 	if (ltc_seq_e[PHC_CLK_250MHZ])
1326 		val |= PTP_LTC_SEQ_ADD_SUB;
1327 	val |= PTP_LTC_SEQ_ERR(ltc_seq_e[PHC_CLK_250MHZ]);
1328 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_SEQ, val);
1329 
1330 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_LTC_1PPS_WIDTH_ADJ,
1331 			     PPS_WIDTH_ADJ);
1332 
1333 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_DELAY_FIFO,
1334 			     IS_ENABLED(CONFIG_MACSEC) ?
1335 			     PTP_INGR_DELAY_FIFO_DEPTH_MACSEC :
1336 			     PTP_INGR_DELAY_FIFO_DEPTH_DEFAULT);
1337 
1338 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_DELAY_FIFO,
1339 			     IS_ENABLED(CONFIG_MACSEC) ?
1340 			     PTP_EGR_DELAY_FIFO_DEPTH_MACSEC :
1341 			     PTP_EGR_DELAY_FIFO_DEPTH_DEFAULT);
1342 
1343 	/* Enable n-phase sampler for Viper Rev-B */
1344 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1345 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1346 	val &= ~(PTP_ACCUR_PPS_OUT_BYPASS | PTP_ACCUR_PPS_IN_BYPASS |
1347 		 PTP_ACCUR_EGR_SOF_BYPASS | PTP_ACCUR_INGR_SOF_BYPASS |
1348 		 PTP_ACCUR_LOAD_SAVE_BYPASS);
1349 	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
1350 	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
1351 	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
1352 	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
1353 	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
1354 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1355 			     val);
1356 
1357 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1358 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1359 	val |= PTP_ACCUR_CALIB_TRIGG;
1360 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1361 			     val);
1362 
1363 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1364 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1365 	val &= ~PTP_ACCUR_CALIB_TRIGG;
1366 	val |= PTP_ACCUR_PPS_OUT_CALIB_ERR | PTP_ACCUR_PPS_OUT_CALIB_DONE |
1367 	       PTP_ACCUR_PPS_IN_CALIB_ERR | PTP_ACCUR_PPS_IN_CALIB_DONE |
1368 	       PTP_ACCUR_EGR_SOF_CALIB_ERR | PTP_ACCUR_EGR_SOF_CALIB_DONE |
1369 	       PTP_ACCUR_INGR_SOF_CALIB_ERR | PTP_ACCUR_INGR_SOF_CALIB_DONE |
1370 	       PTP_ACCUR_LOAD_SAVE_CALIB_ERR | PTP_ACCUR_LOAD_SAVE_CALIB_DONE;
1371 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1372 			     val);
1373 
1374 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1375 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1376 	val |= PTP_ACCUR_CALIB_TRIGG;
1377 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1378 			     val);
1379 
1380 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1381 				  MSCC_PHY_PTP_ACCUR_CFG_STATUS);
1382 	val &= ~PTP_ACCUR_CALIB_TRIGG;
1383 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ACCUR_CFG_STATUS,
1384 			     val);
1385 
1386 	/* Do not access FIFO via SI */
1387 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1388 				  MSCC_PHY_PTP_TSTAMP_FIFO_SI);
1389 	val &= ~PTP_TSTAMP_FIFO_SI_EN;
1390 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_TSTAMP_FIFO_SI,
1391 			     val);
1392 
1393 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1394 				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
1395 	val &= ~PTP_INGR_REWRITER_REDUCE_PREAMBLE;
1396 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
1397 			     val);
1398 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1399 				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
1400 	val &= ~PTP_EGR_REWRITER_REDUCE_PREAMBLE;
1401 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
1402 			     val);
1403 
1404 	/* Put the flag that indicates the frame has been modified at bit 7 */
1405 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1406 				  MSCC_PHY_PTP_INGR_REWRITER_CTRL);
1407 	val |= PTP_INGR_REWRITER_FLAG_BIT_OFF(7) | PTP_INGR_REWRITER_FLAG_VAL;
1408 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_REWRITER_CTRL,
1409 			     val);
1410 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1411 				  MSCC_PHY_PTP_EGR_REWRITER_CTRL);
1412 	val |= PTP_EGR_REWRITER_FLAG_BIT_OFF(7);
1413 	val &= ~PTP_EGR_REWRITER_FLAG_VAL;
1414 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_REWRITER_CTRL,
1415 			     val);
1416 
1417 	/* 30bit mode for RX timestamp, only the nanoseconds are kept in
1418 	 * reserved field.
1419 	 */
1420 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1421 				  MSCC_PHY_PTP_INGR_TSP_CTRL);
1422 	val |= PHY_PTP_INGR_TSP_CTRL_FRACT_NS;
1423 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_INGR_TSP_CTRL,
1424 			     val);
1425 
1426 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL);
1427 	val |= PHY_PTP_EGR_TSP_CTRL_FRACT_NS;
1428 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TSP_CTRL, val);
1429 
1430 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1431 				  MSCC_PHY_PTP_SERIAL_TOD_IFACE);
1432 	val |= PTP_SERIAL_TOD_IFACE_LS_AUTO_CLR;
1433 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_SERIAL_TOD_IFACE,
1434 			     val);
1435 
1436 	vsc85xx_ts_fsb_init(phydev);
1437 
1438 	/* Set the Egress timestamp FIFO configuration and status register */
1439 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1440 				  MSCC_PHY_PTP_EGR_TS_FIFO_CTRL);
1441 	val &= ~(PTP_EGR_TS_FIFO_SIG_BYTES_MASK | PTP_EGR_TS_FIFO_THRESH_MASK);
1442 	/* 16 bytes for the signature, 10 for the timestamp in the TS FIFO */
1443 	val |= PTP_EGR_TS_FIFO_SIG_BYTES(16) | PTP_EGR_TS_FIFO_THRESH(7);
1444 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_EGR_TS_FIFO_CTRL,
1445 			     val);
1446 
1447 	vsc85xx_ts_reset_fifo(phydev);
1448 
1449 	val = PTP_IFACE_CTRL_CLK_ENA;
1450 	if (!IS_ENABLED(CONFIG_MACSEC))
1451 		val |= PTP_IFACE_CTRL_GMII_PROT;
1452 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1453 
1454 	vsc85xx_ts_set_latencies(phydev);
1455 
1456 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_VERSION_CODE);
1457 
1458 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL);
1459 	val |= PTP_IFACE_CTRL_EGR_BYPASS;
1460 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_IFACE_CTRL, val);
1461 
1462 	vsc85xx_ts_disable_flows(phydev, EGRESS);
1463 	vsc85xx_ts_disable_flows(phydev, INGRESS);
1464 
1465 	val = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1466 				  MSCC_PHY_PTP_ANALYZER_MODE);
1467 	/* Disable INGRESS and EGRESS so the engines can be reconfigured */
1468 	val &= ~(PTP_ANALYZER_MODE_EGR_ENA_MASK |
1469 		 PTP_ANALYZER_MODE_INGR_ENA_MASK |
1470 		 PTP_ANA_INGR_ENCAP_FLOW_MODE_MASK |
1471 		 PTP_ANA_EGR_ENCAP_FLOW_MODE_MASK);
1472 	/* Strict matching in flow (packets should match flows from the same
1473 	 * index in all enabled comparators (except PTP)).
1474 	 */
1475 	val |= PTP_ANA_SPLIT_ENCAP_FLOW | PTP_ANA_INGR_ENCAP_FLOW_MODE(0x7) |
1476 	       PTP_ANA_EGR_ENCAP_FLOW_MODE(0x7);
1477 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_PTP_ANALYZER_MODE,
1478 			     val);
1479 
1480 	/* Initialize, for the ingress and egress flows:
1481 	 * - The Ethernet comparator.
1482 	 * - The IP comparator.
1483 	 * - The PTP comparator.
1484 	 */
1485 	vsc85xx_eth_cmp1_init(phydev, INGRESS);
1486 	vsc85xx_ip_cmp1_init(phydev, INGRESS);
1487 	vsc85xx_ptp_cmp_init(phydev, INGRESS);
1488 	vsc85xx_eth_cmp1_init(phydev, EGRESS);
1489 	vsc85xx_ip_cmp1_init(phydev, EGRESS);
1490 	vsc85xx_ptp_cmp_init(phydev, EGRESS);
1491 
1492 	vsc85xx_ts_eth_cmp1_sig(phydev);
1493 
1494 	vsc8531->mii_ts.rxtstamp = vsc85xx_rxtstamp;
1495 	vsc8531->mii_ts.txtstamp = vsc85xx_txtstamp;
1496 	vsc8531->mii_ts.hwtstamp = vsc85xx_hwtstamp;
1497 	vsc8531->mii_ts.ts_info  = vsc85xx_ts_info;
1498 	phydev->mii_ts = &vsc8531->mii_ts;
1499 
1500 	memcpy(&vsc8531->ptp->caps, &vsc85xx_clk_caps, sizeof(vsc85xx_clk_caps));
1501 
1502 	vsc8531->ptp->ptp_clock = ptp_clock_register(&vsc8531->ptp->caps,
1503 						     &phydev->mdio.dev);
1504 	return PTR_ERR_OR_ZERO(vsc8531->ptp->ptp_clock);
1505 }
1506 
1507 void vsc8584_config_ts_intr(struct phy_device *phydev)
1508 {
1509 	struct vsc8531_private *priv = phydev->priv;
1510 
1511 	mutex_lock(&priv->ts_lock);
1512 	vsc85xx_ts_write_csr(phydev, PROCESSOR, MSCC_PHY_1588_VSC85XX_INT_MASK,
1513 			     VSC85XX_1588_INT_MASK_MASK);
1514 	mutex_unlock(&priv->ts_lock);
1515 }
1516 
1517 int vsc8584_ptp_init(struct phy_device *phydev)
1518 {
1519 	switch (phydev->phy_id & phydev->drv->phy_id_mask) {
1520 	case PHY_ID_VSC8572:
1521 	case PHY_ID_VSC8574:
1522 	case PHY_ID_VSC8575:
1523 	case PHY_ID_VSC8582:
1524 	case PHY_ID_VSC8584:
1525 		return __vsc8584_init_ptp(phydev);
1526 	}
1527 
1528 	return 0;
1529 }
1530 
1531 irqreturn_t vsc8584_handle_ts_interrupt(struct phy_device *phydev)
1532 {
1533 	struct vsc8531_private *priv = phydev->priv;
1534 	int rc;
1535 
1536 	mutex_lock(&priv->ts_lock);
1537 	rc = vsc85xx_ts_read_csr(phydev, PROCESSOR,
1538 				 MSCC_PHY_1588_VSC85XX_INT_STATUS);
1539 	/* Ack the PTP interrupt */
1540 	vsc85xx_ts_write_csr(phydev, PROCESSOR,
1541 			     MSCC_PHY_1588_VSC85XX_INT_STATUS, rc);
1542 
1543 	if (!(rc & VSC85XX_1588_INT_MASK_MASK)) {
1544 		mutex_unlock(&priv->ts_lock);
1545 		return IRQ_NONE;
1546 	}
1547 
1548 	if (rc & VSC85XX_1588_INT_FIFO_ADD) {
1549 		vsc85xx_get_tx_ts(priv->ptp);
1550 	} else if (rc & VSC85XX_1588_INT_FIFO_OVERFLOW) {
1551 		__skb_queue_purge(&priv->ptp->tx_queue);
1552 		vsc85xx_ts_reset_fifo(phydev);
1553 	}
1554 
1555 	mutex_unlock(&priv->ts_lock);
1556 	return IRQ_HANDLED;
1557 }
1558 
1559 int vsc8584_ptp_probe(struct phy_device *phydev)
1560 {
1561 	struct vsc8531_private *vsc8531 = phydev->priv;
1562 
1563 	vsc8531->ptp = devm_kzalloc(&phydev->mdio.dev, sizeof(*vsc8531->ptp),
1564 				    GFP_KERNEL);
1565 	if (!vsc8531->ptp)
1566 		return -ENOMEM;
1567 
1568 	mutex_init(&vsc8531->phc_lock);
1569 	mutex_init(&vsc8531->ts_lock);
1570 
1571 	/* Retrieve the shared load/save GPIO. Request it as non-exclusive since
1572 	 * the same GPIO can be requested by all the PHYs of the same package.
1573 	 * This GPIO must be used with the gpio_lock taken (the lock is shared
1574 	 * between all PHYs).
1575 	 */
1576 	vsc8531->load_save = devm_gpiod_get_optional(&phydev->mdio.dev, "load-save",
1577 						     GPIOD_FLAGS_BIT_NONEXCLUSIVE |
1578 						     GPIOD_OUT_LOW);
1579 	if (IS_ERR(vsc8531->load_save)) {
1580 		phydev_err(phydev, "Can't get load-save GPIO (%ld)\n",
1581 			   PTR_ERR(vsc8531->load_save));
1582 		return PTR_ERR(vsc8531->load_save);
1583 	}
1584 
1585 	/* Timestamp selected by default to keep legacy API */
1586 	phydev->default_timestamp = true;
1587 
1588 	vsc8531->ptp->phydev = phydev;
1589 
1590 	return 0;
1591 }
1592 
1593 int vsc8584_ptp_probe_once(struct phy_device *phydev)
1594 {
1595 	struct vsc85xx_shared_private *shared = phy_package_get_priv(phydev);
1596 
1597 	/* Initialize shared GPIO lock */
1598 	mutex_init(&shared->gpio_lock);
1599 
1600 	return 0;
1601 }
1602